Add interfaces

zhouqunjie 2023-09-26 14:37:42 +08:00
parent a52fa198dd
commit e3da4b76fe
27 changed files with 6190 additions and 4205 deletions

.gitignore vendored Normal file (1 line changed)

@@ -0,0 +1 @@
/.idea/*


@@ -1,13 +0,0 @@
NacosConfig:
DataId: pcm-th-rpc.yaml
Group: DEFAULT_GROUP
ServerConfigs:
- IpAddr: nacos.jcce.dev
Port: 8848
ClientConfig:
NamespaceId: test
TimeoutMs: 5000
NotLoadCacheAtStart: true
LogDir:
CacheDir:
LogLevel: debug
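
For reference, the deleted bootstrap YAML above implies a config shape roughly like the sketch below. It is inferred purely from the field names; the real BootstrapConfig type lives in gitlink.org.cn/jcce-pcm/utils/nacos and is not part of this diff, so every field and type here is an assumption.

package nacos

// Sketch of the bootstrap structure implied by the YAML above; the actual
// definition is in gitlink.org.cn/jcce-pcm/utils/nacos.
type BootstrapConfig struct {
	NacosConfig NacosConfig
}

type NacosConfig struct {
	DataId        string
	Group         string
	ServerConfigs []ServerConfig
	ClientConfig  ClientConfig
}

type ServerConfig struct {
	IpAddr string
	Port   uint64
}

type ClientConfig struct {
	NamespaceId         string
	TimeoutMs           uint64
	NotLoadCacheAtStart bool
	LogDir              string
	CacheDir            string
	LogLevel            string
}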

etc/slurm.yaml Normal file (7 lines changed)

@@ -0,0 +1,7 @@
Name: slurm.rpc
ListenOn: 0.0.0.0:2007
# core rpc
PcmCoreRpcConf:
Endpoints:
- 10.101.15.170:32456
NonBlock: true
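
This new config follows the standard go-zero layout: the top-level keys (Name, ListenOn) feed a zrpc server config, and PcmCoreRpcConf is a zrpc client config pointing at the coordinator. A minimal sketch of the matching struct, assuming the conventional definition in internal/config (which this diff does not show):

package config

import "github.com/zeromicro/go-zero/zrpc"

// Sketch: Name and ListenOn are provided by the embedded RpcServerConf;
// Endpoints and NonBlock map onto the RpcClientConf for pcm-core.
type Config struct {
	zrpc.RpcServerConf
	PcmCoreRpcConf zrpc.RpcClientConf
}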

go.mod (41 lines changed)

@@ -4,18 +4,25 @@ go 1.19
require (
github.com/jinzhu/copier v0.3.5
github.com/zeromicro/go-zero v1.5.3
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.5
gitlink.org.cn/jcce-pcm/utils v0.0.1
google.golang.org/grpc v1.56.2
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/common v0.42.0
github.com/robfig/cron/v3 v3.0.1
github.com/zeromicro/go-zero v1.5.4
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.7-0.20230828070533-7721b3b6ff0d
gitlink.org.cn/jcce-pcm/utils v0.0.2
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
k8s.io/apimachinery v0.27.3
k8s.io/client-go v0.26.3
)
require (
github.com/JCCE-nudt/zero-contrib/zrpc/registry/nacos v0.0.0-20230419021610-13bbc83fbc3c // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1704 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/bwmarrin/snowflake v0.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
@@ -31,6 +38,7 @@ require (
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/mock v1.6.0 // indirect
@@ -43,6 +51,8 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
@@ -52,13 +62,10 @@
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nacos-group/nacos-sdk-go/v2 v2.2.1 // indirect
github.com/openzipkin/zipkin-go v0.4.1 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.15.1 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
@@ -78,23 +85,23 @@ require (
go.uber.org/automaxprocs v1.5.2 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/oauth2 v0.7.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.26.3 // indirect
k8s.io/apimachinery v0.27.3 // indirect
k8s.io/client-go v0.26.3 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect

go.sum (71 lines changed)

@@ -389,9 +389,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/JCCE-nudt/zero-contrib/zrpc/registry/nacos v0.0.0-20230419021610-13bbc83fbc3c h1:qoPu5FeE6dsvZ7AmH/l3Y5RxrJPNVKvsQSrjQpWUCMQ=
github.com/JCCE-nudt/zero-contrib/zrpc/registry/nacos v0.0.0-20230419021610-13bbc83fbc3c/go.mod h1:GuCS4be9IH3bSBTvbzz34nszPQDO33PIOv5nviEaFMw=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -406,7 +409,7 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.30.1/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg=
github.com/alicebob/miniredis/v2 v2.30.3 h1:hrqDB4cHFSHQf4gO3xu6YKQg8PqJpNjLYsQAFYHstqw=
github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1704 h1:PpfENOj/vPfhhy9N2OFRjpue0hjM5XqAp2thFmkXXIk=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1704/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -421,6 +424,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0=
github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -529,6 +534,8 @@ github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -695,6 +702,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -724,6 +732,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -758,6 +770,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nacos-group/nacos-sdk-go/v2 v2.2.1 h1:f72CRRn1BQk0FpK0vAnXn56ddGQpmHcyp0QoHEajhow=
@@ -794,8 +807,8 @@ github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A=
github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM=
github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -818,8 +831,8 @@ github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrb
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -839,8 +852,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rabbitmq/amqp091-go v1.5.0/go.mod h1:JsV0ofX5f1nwOGafb8L5rBItt9GyhfQfcJj+oyz0dGg=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -882,8 +895,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/urfave/cli/v2 v2.11.0/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
@@ -901,12 +914,12 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE=
github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zeromicro/go-zero v1.5.1/go.mod h1:bGYm4XWsGN9GhDsO2O2BngpVoWjf3Eog2a5hUOMhlXs=
github.com/zeromicro/go-zero v1.5.3 h1:9poyd+raeL7gSMUu6P19N7bssTppieR2j7Oos2j1yFQ=
github.com/zeromicro/go-zero v1.5.3/go.mod h1:dmoBpgJTxt9KWmgrNGpv06XxZRPXMakrxUVgROFAR3g=
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.5 h1:DOVrgx37QCxT4Q2r7jVChi2nXTBW/gFptMX/22H8rzA=
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.5/go.mod h1:yDnVKS7sfhsTeueWD0ybg5GBWTakL8AF7bcKcPI7iLo=
gitlink.org.cn/jcce-pcm/utils v0.0.1 h1:3PH93Z/JFTH5JRO9MFf3dD1Gnd12aGiIIViWBlQGuhE=
gitlink.org.cn/jcce-pcm/utils v0.0.1/go.mod h1:5cwaaqM0+HK5GXVbYozGlWvgwoUby0KytdvhbwQW1ks=
github.com/zeromicro/go-zero v1.5.4 h1:kRvcYuxcHOkUZvg7887KQl77Qv4klGL7MqGkTBgkpS8=
github.com/zeromicro/go-zero v1.5.4/go.mod h1:x/aUyLmSwRECvOyjOf+lhwThBOilJIY+s3slmPAeboA=
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.7-0.20230828070533-7721b3b6ff0d h1:yd2gCjDm5fkUSXJq1AxZqHNf0iI+ySw50m3Wq/QFYZk=
gitlink.org.cn/jcce-pcm/pcm-coordinator v0.1.7-0.20230828070533-7721b3b6ff0d/go.mod h1:JATg6GnFef6YXt1krXVvg6HoQdKXwO3RXLaE02EB6MI=
gitlink.org.cn/jcce-pcm/utils v0.0.2 h1:Stif8W9C9TOCS2hw4g+OlOywDrsVYNrkiyKfBrWkT0w=
gitlink.org.cn/jcce-pcm/utils v0.0.2/go.mod h1:u8PTlBpzUyOlbQJgfSiutq91q/JtrJIQiPNDe4S/pGs=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
@@ -1091,8 +1104,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1137,8 +1150,9 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1235,8 +1249,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1247,8 +1261,8 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1264,8 +1278,9 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1519,8 +1534,12 @@ google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614G
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1561,8 +1580,8 @@ google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

File diff suppressed because it is too large


@@ -1,183 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.4
// source: hpcTH.proto
package hpcTH
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// HpcTHClient is the client API for HpcTH service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HpcTHClient interface {
// ListJob list all jobs
ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
// ListHistoryJob list all history jobs
ListHistoryJob(ctx context.Context, in *ListHistoryJobReq, opts ...grpc.CallOption) (*ListHistoryJobResp, error)
// Submit job
SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error)
}
type hpcTHClient struct {
cc grpc.ClientConnInterface
}
func NewHpcTHClient(cc grpc.ClientConnInterface) HpcTHClient {
return &hpcTHClient{cc}
}
func (c *hpcTHClient) ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
out := new(ListJobResp)
err := c.cc.Invoke(ctx, "/hpcTH.hpcTH/ListJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *hpcTHClient) ListHistoryJob(ctx context.Context, in *ListHistoryJobReq, opts ...grpc.CallOption) (*ListHistoryJobResp, error) {
out := new(ListHistoryJobResp)
err := c.cc.Invoke(ctx, "/hpcTH.hpcTH/ListHistoryJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *hpcTHClient) SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error) {
out := new(SubmitJobResp)
err := c.cc.Invoke(ctx, "/hpcTH.hpcTH/SubmitJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// HpcTHServer is the server API for HpcTH service.
// All implementations must embed UnimplementedHpcTHServer
// for forward compatibility
type HpcTHServer interface {
// ListJob list all jobs
ListJob(context.Context, *ListJobReq) (*ListJobResp, error)
// ListHistoryJob list all history jobs
ListHistoryJob(context.Context, *ListHistoryJobReq) (*ListHistoryJobResp, error)
// Submit job
SubmitJob(context.Context, *SubmitJobReq) (*SubmitJobResp, error)
mustEmbedUnimplementedHpcTHServer()
}
// UnimplementedHpcTHServer must be embedded to have forward compatible implementations.
type UnimplementedHpcTHServer struct {
}
func (UnimplementedHpcTHServer) ListJob(context.Context, *ListJobReq) (*ListJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListJob not implemented")
}
func (UnimplementedHpcTHServer) ListHistoryJob(context.Context, *ListHistoryJobReq) (*ListHistoryJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListHistoryJob not implemented")
}
func (UnimplementedHpcTHServer) SubmitJob(context.Context, *SubmitJobReq) (*SubmitJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method SubmitJob not implemented")
}
func (UnimplementedHpcTHServer) mustEmbedUnimplementedHpcTHServer() {}
// UnsafeHpcTHServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HpcTHServer will
// result in compilation errors.
type UnsafeHpcTHServer interface {
mustEmbedUnimplementedHpcTHServer()
}
func RegisterHpcTHServer(s grpc.ServiceRegistrar, srv HpcTHServer) {
s.RegisterService(&HpcTH_ServiceDesc, srv)
}
func _HpcTH_ListJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HpcTHServer).ListJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hpcTH.hpcTH/ListJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HpcTHServer).ListJob(ctx, req.(*ListJobReq))
}
return interceptor(ctx, in, info, handler)
}
func _HpcTH_ListHistoryJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListHistoryJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HpcTHServer).ListHistoryJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hpcTH.hpcTH/ListHistoryJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HpcTHServer).ListHistoryJob(ctx, req.(*ListHistoryJobReq))
}
return interceptor(ctx, in, info, handler)
}
func _HpcTH_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SubmitJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HpcTHServer).SubmitJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hpcTH.hpcTH/SubmitJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HpcTHServer).SubmitJob(ctx, req.(*SubmitJobReq))
}
return interceptor(ctx, in, info, handler)
}
// HpcTH_ServiceDesc is the grpc.ServiceDesc for HpcTH service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var HpcTH_ServiceDesc = grpc.ServiceDesc{
ServiceName: "hpcTH.hpcTH",
HandlerType: (*HpcTHServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListJob",
Handler: _HpcTH_ListJob_Handler,
},
{
MethodName: "ListHistoryJob",
Handler: _HpcTH_ListHistoryJob_Handler,
},
{
MethodName: "SubmitJob",
Handler: _HpcTH_SubmitJob_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "hpcTH.proto",
}
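
For context, the generated bindings above (removed by this commit) were consumed in the standard grpc-go way. A minimal sketch; the address is illustrative, and RecordCount comes from how the old ListJob logic populated the response:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
)

func main() {
	// Dial the participant service (address is illustrative).
	conn, err := grpc.Dial("127.0.0.1:2007", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Call one of the three unary RPCs through the generated client.
	resp, err := hpcTH.NewHpcTHClient(conn).ListJob(context.Background(), &hpcTH.ListJobReq{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("jobs: %d", resp.RecordCount)
}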


@@ -1,65 +0,0 @@
package main
import (
"flag"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/service"
"github.com/zeromicro/go-zero/zrpc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/config"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/logic"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/server"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/utils/interceptor/rpcserver"
commonConfig "gitlink.org.cn/jcce-pcm/utils/nacos"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
var configFile = flag.String("f", "etc/hpcth.yaml", "the config file")
func main() {
flag.Parse()
var bootstrapConfig commonConfig.BootstrapConfig
conf.MustLoad(*configFile, &bootstrapConfig)
// parse the business configuration
var c config.Config
nacosConfig := bootstrapConfig.NacosConfig
serviceConfigContent := nacosConfig.InitConfig(func(data string) {
err := conf.LoadFromYamlBytes([]byte(data), &c)
if err != nil {
panic(err)
}
})
err := conf.LoadFromYamlBytes([]byte(serviceConfigContent), &c)
if err != nil {
panic(err)
}
// start log component
logx.MustSetup(c.LogConf)
// register with nacos
nacosConfig.Discovery(&c.RpcServerConf)
ctx := svc.NewServiceContext(c)
s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
hpcTH.RegisterHpcTHServer(grpcServer, server.NewHpcTHServer(ctx))
if c.Mode == service.DevMode || c.Mode == service.TestMode {
reflection.Register(grpcServer)
}
})
//rpc log
s.AddUnaryInterceptors(rpcserver.LoggerInterceptor)
defer s.Stop()
logic.InitCron(ctx)
logx.Infof("Starting rpc server at %s...\n", c.ListenOn)
s.Start()
}


@@ -1,64 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// Source: hpcTH.proto
package hpcthclient
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
type (
Argv = hpcTH.Argv
Environment = hpcTH.Environment
HistoryJob = hpcTH.HistoryJob
Job = hpcTH.Job
ListHistoryJobReq = hpcTH.ListHistoryJobReq
ListHistoryJobResp = hpcTH.ListHistoryJobResp
ListJobReq = hpcTH.ListJobReq
ListJobResp = hpcTH.ListJobResp
SubmitJobReq = hpcTH.SubmitJobReq
SubmitJobResp = hpcTH.SubmitJobResp
SubmitResponseMsg = hpcTH.SubmitResponseMsg
HpcTH interface {
// ListJob list all jobs
ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
// ListHistoryJob list all history jobs
ListHistoryJob(ctx context.Context, in *ListHistoryJobReq, opts ...grpc.CallOption) (*ListHistoryJobResp, error)
// Submit job
SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error)
}
defaultHpcTH struct {
cli zrpc.Client
}
)
func NewHpcTH(cli zrpc.Client) HpcTH {
return &defaultHpcTH{
cli: cli,
}
}
// ListJob list all jobs
func (m *defaultHpcTH) ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
client := hpcTH.NewHpcTHClient(m.cli.Conn())
return client.ListJob(ctx, in, opts...)
}
// ListHistoryJob list all history jobs
func (m *defaultHpcTH) ListHistoryJob(ctx context.Context, in *ListHistoryJobReq, opts ...grpc.CallOption) (*ListHistoryJobResp, error) {
client := hpcTH.NewHpcTHClient(m.cli.Conn())
return client.ListHistoryJob(ctx, in, opts...)
}
// Submit job
func (m *defaultHpcTH) SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error) {
client := hpcTH.NewHpcTHClient(m.cli.Conn())
return client.SubmitJob(ctx, in, opts...)
}
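
The goctl wrapper above (also removed) sits on a zrpc.Client rather than a raw connection, which is how go-zero services would normally consume it. A usage sketch; the endpoint and the import path are assumptions based on the usual goctl layout:

package main

import (
	"context"
	"log"

	"github.com/zeromicro/go-zero/zrpc"
	"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH/hpcthclient"
)

func main() {
	// Build a zrpc client against the participant's listen address (illustrative).
	cli := hpcthclient.NewHpcTH(zrpc.MustNewClient(zrpc.RpcClientConf{
		Endpoints: []string{"127.0.0.1:2007"},
		NonBlock:  true,
	}))

	resp, err := cli.ListJob(context.Background(), &hpcthclient.ListJobReq{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("jobs: %d", resp.RecordCount)
}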


@@ -4,13 +4,10 @@ import (
"context"
"github.com/jinzhu/copier"
"github.com/zeromicro/go-zero/core/logx"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/pcmcoreclient"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/client/pcmcore"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/utils/enum"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"gitlink.org.cn/jcce-pcm/utils/tool"
"strconv"
"time"
)
func InitCron(svc *svc.ServiceContext) {
@@ -20,9 +17,8 @@ func InitCron(svc *svc.ServiceContext) {
svc.Cron.AddFunc("*/5 * * * * ?", func() {
// query the task list dispatched by the core side
infoReq := pcmcoreclient.InfoListReq{
Kind: "hpc",
ServiceName: "th",
infoReq := pcmcore.InfoListReq{
Kind: "hpc",
}
infoList, err := svc.PcmCoreRpc.InfoList(context.Background(), &infoReq)
if err != nil {
@@ -32,7 +28,7 @@ func InitCron(svc *svc.ServiceContext) {
// 提交任务
submitJob(infoList, submitJobLogic)
// query the running job list and sync its info
listReq := hpcTH.ListJobReq{}
listReq := slurm.ListJobReq{}
listJob, err := listLogic.ListJob(&listReq)
if err != nil {
logx.Error(err)
@@ -42,19 +38,18 @@ func InitCron(svc *svc.ServiceContext) {
for _, job := range listJob.Jobs {
if job.Name == infoList.HpcInfoList[index].Name {
copier.CopyWithOption(&infoList.HpcInfoList[index], job, copier.Option{Converters: tool.Converters})
infoList.HpcInfoList[index].JobId = strconv.Itoa(int(job.JobId))
infoList.HpcInfoList[index].StartTime = time.Unix(job.StartTime, 0).String()
infoList.HpcInfoList[index].RunningTime = int64(time.Now().Sub(time.Unix(job.StartTime, 0)).Seconds())
infoList.HpcInfoList[index].Status = enum.State(job.JobState).String()
infoList.HpcInfoList[index].JobId = job.JobId
infoList.HpcInfoList[index].StartTime = string(job.StartTime)
infoList.HpcInfoList[index].RunningTime = int64(job.EndTime - job.StartTime)
infoList.HpcInfoList[index].Status = job.JobState
infoList.HpcInfoList[index].Version = "slurm 2.6.9"
}
}
}
// sync the info back to the core side
if len(infoList.HpcInfoList) != 0 {
syncInfoReq := pcmcoreclient.SyncInfoReq{
syncInfoReq := pcmcore.SyncInfoReq{
Kind: "hpc",
ServiceName: "th",
HpcInfoList: infoList.HpcInfoList,
}
svc.PcmCoreRpc.SyncInfo(context.Background(), &syncInfoReq)
@@ -62,21 +57,18 @@ func InitCron(svc *svc.ServiceContext) {
})
}
func submitJob(infoList *pcmcoreclient.InfoListResp, submitJobLogic *SubmitJobLogic) {
func submitJob(infoList *pcmcore.InfoListResp, submitJobLogic *SubmitJobLogic) {
for index, _ := range infoList.HpcInfoList {
if infoList.HpcInfoList[index].Status == "Saved" {
submitReq := hpcTH.SubmitJobReq{
Account: infoList.HpcInfoList[index].Account,
Name: infoList.HpcInfoList[index].Name,
WorkDir: "/root",
Script: infoList.HpcInfoList[index].CmdScript,
UserId: 0,
MinNodes: 1,
submitReq := slurm.SubmitJobReq{
Script: "",
Job: nil,
Jobs: nil,
}
jobResult, _ := submitJobLogic.SubmitJob(&submitReq)
// the job was submitted successfully
infoList.HpcInfoList[index].Status = "Pending"
infoList.HpcInfoList[index].JobId = strconv.Itoa(int(jobResult.SubmitResponseMsg[0].JobId))
infoList.HpcInfoList[index].JobId = string(jobResult.JobId)
}
}
}
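
InitCron above registers the sync closure with the six-field spec "*/5 * * * * ?", which only fires every five seconds if svc.Cron was constructed with seconds-level parsing. A standalone sketch of that assumption using robfig/cron/v3, which this commit adds to go.mod:

package main

import (
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// WithSeconds enables six-field specs; "?" behaves like "*"
	// in the day-of-week position.
	c := cron.New(cron.WithSeconds())
	c.AddFunc("*/5 * * * * ?", func() {
		// the task-sync logic from InitCron would run here
	})
	c.Start()
	time.Sleep(time.Minute) // let the demo fire a few times
}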


@@ -0,0 +1,30 @@
package logic
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"github.com/zeromicro/go-zero/core/logx"
)
type GetJobLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewGetJobLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetJobLogic {
return &GetJobLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
func (l *GetJobLogic) GetJob(in *slurm.ListJobReq) (*slurm.ListJobResp, error) {
// todo: add your logic here and delete this line
return &slurm.ListJobResp{}, nil
}


@@ -1,154 +0,0 @@
package logic
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <slurm/slurm_errno.h>
#include <memory.h>
#include <malloc.h>
slurmdb_job_rec_t *get_all_slurmdb_job() {
slurmdb_job_cond_t *job_cond = NULL;
void *conn = slurmdb_connection_get();
List joblist = slurmdb_jobs_get(conn, job_cond);
uint16_t listsize = slurm_list_count(joblist);
//qosinfo.record_count = size;
slurmdb_job_rec_t *jobarray = malloc(listsize * sizeof(slurmdb_job_rec_t));
ListIterator itr = slurm_list_iterator_create(joblist);
slurmdb_job_rec_t *rec = NULL;
int i = 0;
while ((rec = slurm_list_next(itr))) {
jobarray[i] = *rec;
i++;
}
slurmdb_connection_close(&conn);
slurm_list_destroy(joblist);
//int arraysize = sizeof(jobarray);
//printf("%d\n", arraysize);
return jobarray;
}
slurmdb_job_rec_t *job_from_array(slurmdb_job_rec_t *job_rec_array, int i) {
return (slurmdb_job_rec_t *) &(job_rec_array[i]);
}
slurmdb_job_rec_t *job_from_array_by_id(slurmdb_job_rec_t *job_rec_array, int job_id) {
int i;
int arraysize = sizeof(job_rec_array);
for (i=0; i<arraysize; i++)
{
//printf("var: %d : %p : %d \n", var, &(job_rec_array[var]), (job_rec_array[var]).stats.cpu_min);
//printf("%d \n",(job_rec_array[i]).stats.cpu_min);
//printf("var: %d : %p : %d \n", var, &(job_rec_array[var]), (job_rec_array[var]).jobid);
if (job_id==(job_rec_array[i]).jobid)
{
break;
} else {
return NULL;
}
}
return (slurmdb_job_rec_t *) &(job_rec_array[i]);
}
int getLength(slurmdb_job_rec_t *job_rec_array) {
return sizeof(job_rec_array);
}
*/
import "C"
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"github.com/zeromicro/go-zero/core/logx"
)
type ListHistoryJobLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewListHistoryJobLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ListHistoryJobLogic {
return &ListHistoryJobLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
// ListHistoryJob list all jobs from slurmdb
func (l *ListHistoryJobLogic) ListHistoryJob(in *hpcTH.ListHistoryJobReq) (*hpcTH.ListHistoryJobResp, error) {
var dbJobResp hpcTH.ListHistoryJobResp
jobInfos := C.get_all_slurmdb_job()
size := uint32(C.getLength(jobInfos))
dbJobResp.RecordCount = size
for i := uint32(0); i < size; i++ {
cJob := C.job_from_array(jobInfos, C.int(i))
goJob := SlurmdbJobConvertCToGo(cJob)
dbJobResp.HistoryJobs = append(dbJobResp.HistoryJobs, &goJob)
}
dbJobResp.Code = 200
dbJobResp.Msg = "success"
return &dbJobResp, nil
}
func SlurmdbJobConvertCToGo(cStructJob *C.slurmdb_job_rec_t) hpcTH.HistoryJob {
var goStruct hpcTH.HistoryJob
goStruct.Account = C.GoString(cStructJob.account)
goStruct.Associd = uint32(cStructJob.associd)
goStruct.AllocCpu = uint32(cStructJob.alloc_cpus)
goStruct.AllocNodes = uint32(cStructJob.alloc_nodes)
goStruct.Blockid = C.GoString(cStructJob.blockid)
goStruct.Cluster = C.GoString(cStructJob.cluster)
goStruct.DerivedEs = C.GoString(cStructJob.derived_es)
goStruct.DerivedEc = uint32(cStructJob.derived_ec)
goStruct.End = int64(cStructJob.end)
goStruct.Elapsed = uint32(cStructJob.elapsed)
goStruct.Eligible = int64(cStructJob.eligible)
goStruct.Exitcode = uint32(cStructJob.exitcode)
goStruct.Gid = uint32(cStructJob.gid)
goStruct.Jobid = uint32(cStructJob.jobid)
goStruct.Jobname = C.GoString(cStructJob.jobname)
goStruct.Lft = uint32(cStructJob.lft)
goStruct.Nodes = C.GoString(cStructJob.nodes)
goStruct.Priority = uint32(cStructJob.priority)
goStruct.Partition = C.GoString(cStructJob.partition)
goStruct.Qosid = uint32(cStructJob.qosid)
goStruct.Resvid = uint32(cStructJob.resvid)
goStruct.Requid = uint32(cStructJob.requid)
goStruct.ReqCpus = uint32(cStructJob.req_cpus)
goStruct.ReqMem = uint32(cStructJob.req_mem)
goStruct.Start = int64(cStructJob.start)
goStruct.State = uint32(cStructJob.state)
goStruct.SysCpuUsec = uint32(cStructJob.sys_cpu_usec)
goStruct.SysCpuSec = uint32(cStructJob.sys_cpu_sec)
goStruct.Submit = int64(cStructJob.submit)
goStruct.Suspended = uint32(cStructJob.suspended)
goStruct.ShowFull = uint32(cStructJob.show_full)
//var stats_adrs []*pbslurm.SlurmdbStatsT
//var stats pbslurm.SlurmdbStatsT
//stat.ActCpufreq = float64(c_struct_job.stats.act_cpufreq)
//stats.CpuMin = uint32((c_struct_job.stats).cpu_min)
//stat.CpuAve = float64(c_struct_job.stats.cpu_ave)
//stats_adrs = append(stats_adrs, &stats)
//go_struct.Stats = stats
return goStruct
}


@@ -1,166 +1,13 @@
package logic
/*
#cgo LDFLAGS: -lslurm
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>
#include <signal.h>
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int64_t int64_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
struct job_info_msg *get_job_info(){
struct job_info_msg* job_buffer;
if(slurm_load_jobs ((time_t) NULL,
&job_buffer, SHOW_ALL)) {
return NULL;
}
return job_buffer;
}
struct job_info* job_from_list(struct job_info_msg *list, int i){
return &list->job_array[i];
}
struct job_info_msg *get_single_job_info(uint32_t id){
struct job_info_msg* job_buffer;
if( slurm_load_job (&job_buffer, id, SHOW_DETAIL)) {
return NULL;
}
return job_buffer;
}
*/
import "C"
import (
"context"
"github.com/zeromicro/go-zero/core/logx"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"gitlink.org.cn/jcce-pcm/utils/tool"
)
func JobCtoGo(cStruct *C.struct_job_info) hpcTH.Job {
var goStruct hpcTH.Job
goStruct.Account = C.GoString(cStruct.account)
goStruct.AllocNode = C.GoString(cStruct.alloc_node)
goStruct.AllocSid = uint32(cStruct.alloc_sid)
goStruct.ArrayJobId = uint32(cStruct.array_job_id)
goStruct.ArrayTaskId = uint32(cStruct.array_task_id)
goStruct.AssocId = uint32(cStruct.assoc_id)
goStruct.BatchFlag = uint32(cStruct.batch_flag)
goStruct.BatchHost = C.GoString(cStruct.batch_host)
goStruct.BoardsPerNode = uint32(cStruct.boards_per_node)
goStruct.BatchScript = C.GoString(cStruct.batch_script)
goStruct.Command = C.GoString(cStruct.command)
goStruct.Comment = C.GoString(cStruct.comment)
goStruct.Contiguous = uint32(cStruct.contiguous)
goStruct.CoresPerSocket = uint32(cStruct.cores_per_socket)
goStruct.CpusPerTask = uint32(cStruct.cpus_per_task)
goStruct.Dependency = C.GoString(cStruct.dependency)
goStruct.DerivedEc = uint32(cStruct.derived_ec)
goStruct.EligibleTime = int64(cStruct.eligible_time)
goStruct.EndTime = int64(cStruct.end_time)
goStruct.ExcNodes = C.GoString(cStruct.exc_nodes)
goStruct.ExcNodeInx = int32(C.int32_ptr(cStruct.exc_node_inx))
goStruct.ExitCode = uint32(cStruct.exit_code)
goStruct.Features = C.GoString(cStruct.features)
goStruct.GroupId = uint32(cStruct.group_id)
goStruct.Gres = C.GoString(cStruct.gres)
goStruct.JobId = uint32(cStruct.job_id)
goStruct.JobState = uint32(cStruct.job_state)
goStruct.Licenses = C.GoString(cStruct.licenses)
goStruct.MaxCpus = uint32(cStruct.max_cpus)
goStruct.MaxNodes = uint32(cStruct.max_nodes)
goStruct.Name = C.GoString(cStruct.name)
goStruct.Network = C.GoString(cStruct.network)
goStruct.Nodes = C.GoString(cStruct.nodes)
goStruct.Nice = uint32(cStruct.nice)
goStruct.NodeInx = int32(C.int32_ptr(cStruct.node_inx))
goStruct.NtasksPerCore = uint32(cStruct.ntasks_per_core)
goStruct.NtasksPerNode = uint32(cStruct.ntasks_per_node)
goStruct.NtasksPerSocket = uint32(cStruct.ntasks_per_socket)
goStruct.NtasksPerBoard = uint32(cStruct.ntasks_per_board)
goStruct.NumCpus = uint32(cStruct.num_cpus)
goStruct.NumNodes = uint32(cStruct.num_nodes)
goStruct.Partition = C.GoString(cStruct.partition)
goStruct.PnMinMemory = uint32(cStruct.pn_min_memory)
goStruct.PnMinCpus = uint32(cStruct.pn_min_cpus)
goStruct.PnMinTmpDisk = uint32(cStruct.pn_min_tmp_disk)
goStruct.PreemptTime = int64(cStruct.preempt_time)
goStruct.PreSusTime = int64(cStruct.pre_sus_time)
goStruct.Priority = uint32(cStruct.priority)
goStruct.Profile = uint32(cStruct.profile)
goStruct.Qos = C.GoString(cStruct.qos)
goStruct.ReqNodes = C.GoString(cStruct.req_nodes)
goStruct.ReqNodeInx = int32(C.int32_ptr(cStruct.req_node_inx))
goStruct.ReqSwitch = uint32(cStruct.req_switch)
goStruct.Requeue = uint32(cStruct.requeue)
goStruct.ResizeTime = int64(cStruct.resize_time)
goStruct.RestartCnt = uint32(cStruct.restart_cnt)
goStruct.ResvName = C.GoString(cStruct.resv_name)
goStruct.Shared = uint32(cStruct.shared)
goStruct.ShowFlags = uint32(cStruct.show_flags)
goStruct.SocketsPerBoard = uint32(cStruct.sockets_per_board)
goStruct.SocketsPerNode = uint32(cStruct.sockets_per_node)
goStruct.StartTime = int64(cStruct.start_time)
goStruct.StateDesc = C.GoString(cStruct.state_desc)
goStruct.StateReason = uint32(cStruct.state_reason)
goStruct.SubmitTime = int64(cStruct.submit_time)
goStruct.SuspendTime = int64(cStruct.suspend_time)
goStruct.TimeLimit = uint32(cStruct.time_limit)
goStruct.TimeMin = uint32(cStruct.time_min)
goStruct.ThreadsPerCore = uint32(cStruct.threads_per_core)
goStruct.UserId = uint32(cStruct.user_id)
goStruct.Wait4Switch = uint32(cStruct.wait4switch)
goStruct.Wckey = C.GoString(cStruct.wckey)
goStruct.WorkDir = C.GoString(cStruct.work_dir)
return goStruct
}
type ListJobLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
@@ -176,25 +23,13 @@ func NewListJobLogic(ctx context.Context, svcCtx *svc.ServiceContext) *ListJobLo
}
// ListJob list all jobs
func (l *ListJobLogic) ListJob(in *hpcTH.ListJobReq) (*hpcTH.ListJobResp, error) {
func (l *ListJobLogic) ListJob(in *slurm.ListJobReq) (*slurm.ListJobResp, error) {
var jobResp hpcTH.ListJobResp
cJobBuffer := C.get_job_info()
if cJobBuffer == nil {
jobResp.RecordCount = uint32(0)
jobResp.Jobs = nil
return &jobResp, nil
}
jobResp.RecordCount = uint32(cJobBuffer.record_count)
for i := uint32(0); i < jobResp.RecordCount; i++ {
job := C.job_from_list(cJobBuffer, C.int(i))
goJob := JobCtoGo(job)
jobResp.Jobs = append(jobResp.Jobs, &goJob)
}
jobResp.Code = 200
jobResp.Msg = "success"
C.slurm_free_job_info_msg(cJobBuffer)
return &jobResp, nil
var listJobResp slurm.ListJobResp
url := "http://127.0.0.1:4523/m1/1136481-0-default/slurm/v0.0.37/jobs"
slurmHttpRequest := tool.GetACHttpRequest()
slurmHttpRequest.SetHeader(tool.ContentType, tool.ApplicationJson).SetPathParams(map[string]string{
"update_time": "1",
}).SetResult(&listJobResp).Get(url)
return &listJobResp, nil
}
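
ListJob now delegates to a mocked slurmrestd-style endpoint instead of cgo. A standalone sketch of the same call using go-resty directly; the response field names and types are assumptions pieced together from how the cron logic above reads job.JobId, job.Name, job.JobState, job.StartTime, and job.EndTime (the real slurm.ListJobResp comes from the suppressed generated file):

package main

import (
	"fmt"
	"log"

	"github.com/go-resty/resty/v2"
)

// Assumed response shape for GET .../slurm/v0.0.37/jobs.
type listJobResp struct {
	Jobs []struct {
		JobId     string `json:"job_id"`
		Name      string `json:"name"`
		JobState  string `json:"job_state"`
		StartTime int64  `json:"start_time"`
		EndTime   int64  `json:"end_time"`
	} `json:"jobs"`
}

func main() {
	var out listJobResp
	_, err := resty.New().R().
		SetHeader("Content-Type", "application/json").
		SetResult(&out).
		Get("http://127.0.0.1:4523/m1/1136481-0-default/slurm/v0.0.37/jobs")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d jobs\n", len(out.Jobs))
}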


@@ -1,44 +1,12 @@
package logic
/*
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
struct submit_response_msg *submit_job(struct job_descriptor *desc)
{
struct submit_response_msg *resp_msg;
if (slurm_submit_batch_job(desc,
&resp_msg)) {
return NULL;
}
return resp_msg;
}
void free_submit_response_msg(struct submit_response_msg *msg)
{
slurm_free_submit_response_response_msg(msg);
}
int update_job (struct job_descriptor *msg) {
return slurm_update_job (msg);
}
*/
import "C"
import (
"context"
"fmt"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"unsafe"
"encoding/json"
"github.com/zeromicro/go-zero/core/logx"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"gitlink.org.cn/jcce-pcm/utils/tool"
)
type SubmitJobLogic struct {
@@ -55,348 +23,13 @@ func NewSubmitJobLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SubmitJ
}
}
// Submit job
func (l *SubmitJobLogic) SubmitJob(in *hpcTH.SubmitJobReq) (*hpcTH.SubmitJobResp, error) {
var cStruct C.struct_job_descriptor
func (l *SubmitJobLogic) SubmitJob(in *slurm.SubmitJobReq) (*slurm.SubmitJobResp, error) {
C.slurm_init_job_desc_msg(&cStruct)
if in.Account != "" {
accountS := C.CString(in.Account)
defer C.free(unsafe.Pointer(accountS))
cStruct.account = accountS
}
if in.AcctgFreq != "" {
acctgFreqS := C.CString(in.AcctgFreq)
defer C.free(unsafe.Pointer(acctgFreqS))
cStruct.acctg_freq = acctgFreqS
}
if in.AllocNode != "" {
allocNodeS := C.CString(in.AllocNode)
defer C.free(unsafe.Pointer(allocNodeS))
cStruct.alloc_node = allocNodeS
}
if in.AllocRespPort != 0 {
cStruct.alloc_resp_port = C.uint16_t(in.AllocRespPort)
}
if in.AllocSid != 0 {
cStruct.alloc_sid = C.uint32_t(in.AllocSid)
}
if len(in.Argv) > 0 {
cStruct.argc = C.uint32_t(len(in.Argv))
cArray := C.malloc(C.size_t(C.size_t(len(in.Argv)) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(in.Argv); i++ {
a[i] = C.CString(in.Argv[i].String())
}
cStruct.argv = (**C.char)(cArray)
fmt.Printf("test\n")
}
if in.ArrayInx != "" {
arrayInxS := C.CString(in.ArrayInx)
defer C.free(unsafe.Pointer(arrayInxS))
cStruct.array_inx = arrayInxS
}
if in.BeginTime != 0 {
cStruct.begin_time = C.int64_t(in.BeginTime)
}
if in.CkptInterval != 0 {
cStruct.ckpt_interval = C.uint16_t(in.CkptInterval)
}
if in.CkptDir != "" {
ckptDirS := C.CString(in.CkptDir)
defer C.free(unsafe.Pointer(ckptDirS))
cStruct.ckpt_dir = ckptDirS
}
if in.Comment != "" {
commentS := C.CString(in.Comment)
defer C.free(unsafe.Pointer(commentS))
cStruct.comment = commentS
}
if in.Contiguous != 0 {
cStruct.contiguous = C.uint16_t(in.Contiguous)
}
if in.CpuBind != "" {
cpuBindS := C.CString(in.CpuBind)
defer C.free(unsafe.Pointer(cpuBindS))
cStruct.cpu_bind = cpuBindS
}
if in.CpuBindType != 0 {
cStruct.cpu_bind_type = C.uint16_t(in.CpuBindType)
}
if in.Dependency != "" {
dependencyS := C.CString(in.Dependency)
defer C.free(unsafe.Pointer(dependencyS))
cStruct.dependency = dependencyS
}
if in.EndTime != 0 {
cStruct.end_time = C.int64_t(in.EndTime)
}
if len(in.Environment) > 0 {
cStruct.env_size = C.uint32_t(len(in.Environment))
cArray := C.malloc(C.size_t(C.size_t(len(in.Environment)) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(in.Environment); i++ {
a[i] = C.CString(in.Environment[i].String())
defer C.free(unsafe.Pointer(a[i]))
}
cStruct.environment = (**C.char)(cArray)
} else {
cStruct.env_size = 1
cArray := C.malloc(C.size_t(C.size_t(1) * C.size_t(unsafe.Sizeof(uintptr(0)))))
a := (*[1<<30 - 1]*C.char)(cArray)
a[0] = C.CString("SLURM_GO_JOB=TRUE")
defer C.free(unsafe.Pointer(a[0]))
cStruct.environment = (**C.char)(cArray)
}
if in.ExcNodes != "" {
excNodesS := C.CString(in.ExcNodes)
defer C.free(unsafe.Pointer(excNodesS))
cStruct.exc_nodes = excNodesS
}
if in.Features != "" {
featuresS := C.CString(in.Features)
defer C.free(unsafe.Pointer(featuresS))
cStruct.features = featuresS
}
if in.GroupId != 0 {
cStruct.group_id = C.uint32_t(in.GroupId)
}
if in.Immediate != 0 {
cStruct.immediate = C.uint16_t(in.Immediate)
}
if in.JobId != 0 {
cStruct.job_id = C.uint32_t(in.JobId)
}
if in.KillOnNodeFail != 0 {
cStruct.kill_on_node_fail = C.uint16_t(in.KillOnNodeFail)
}
if in.Licenses != "" {
licensesS := C.CString(in.Licenses)
defer C.free(unsafe.Pointer(licensesS))
cStruct.licenses = licensesS
}
if in.MailType != 0 {
cStruct.mail_type = C.uint16_t(in.MailType)
}
if in.MailUser != "" {
mailUserS := C.CString(in.MailUser)
defer C.free(unsafe.Pointer(mailUserS))
cStruct.mail_user = mailUserS
}
if in.MemBind != "" {
memBindS := C.CString(in.MemBind)
defer C.free(unsafe.Pointer(memBindS))
cStruct.mem_bind = memBindS
}
if in.MemBindType != 0 {
cStruct.mem_bind_type = C.uint16_t(in.MemBindType)
}
if in.Name != "" {
nameS := C.CString(in.Name)
defer C.free(unsafe.Pointer(nameS))
cStruct.name = nameS
}
if in.Network != "" {
networkS := C.CString(in.Network)
defer C.free(unsafe.Pointer(networkS))
cStruct.network = networkS
}
if in.Nice != 0 {
cStruct.nice = C.uint16_t(in.Nice)
}
if in.NumTasks != 0 {
cStruct.num_tasks = C.uint32_t(in.NumTasks)
}
if in.OpenMode != 0 {
cStruct.open_mode = C.uint8_t(in.OpenMode)
}
if in.OtherPort != 0 {
cStruct.other_port = C.uint16_t(in.OtherPort)
}
if in.Overcommit != 0 {
cStruct.overcommit = C.uint8_t(in.Overcommit)
}
if in.Partition != "" {
partitionS := C.CString(in.Partition)
defer C.free(unsafe.Pointer(partitionS))
cStruct.partition = partitionS
}
if in.PlaneSize != 0 {
cStruct.plane_size = C.uint16_t(in.PlaneSize)
}
if in.Priority != 0 {
cStruct.priority = C.uint32_t(in.Priority)
}
if in.Profile != 0 {
cStruct.profile = C.uint32_t(in.Profile)
}
if in.Qos != "" {
qosS := C.CString(in.Qos)
defer C.free(unsafe.Pointer(qosS))
cStruct.qos = qosS
}
if in.Reboot != 0 {
cStruct.reboot = C.uint16_t(in.Reboot)
}
if in.RespHost != "" {
respHostS := C.CString(in.RespHost)
defer C.free(unsafe.Pointer(respHostS))
cStruct.resp_host = respHostS
}
if in.ReqNodes != "" {
reqNodesS := C.CString(in.ReqNodes)
defer C.free(unsafe.Pointer(reqNodesS))
cStruct.req_nodes = reqNodesS
}
if in.Requeue != 0 {
cStruct.requeue = C.uint16_t(in.Requeue)
}
if in.Reservation != "" {
reservationS := C.CString(in.Reservation)
defer C.free(unsafe.Pointer(reservationS))
cStruct.reservation = reservationS
}
if in.Script != "" {
scriptS := C.CString(in.Script)
defer C.free(unsafe.Pointer(scriptS))
cStruct.script = scriptS
}
if in.Shared != 0 {
cStruct.shared = C.uint16_t(in.Shared)
}
if in.SpankJobEnvSize != 0 {
cStruct.spank_job_env_size = C.uint32_t(in.SpankJobEnvSize)
}
if in.TaskDist != 0 {
cStruct.task_dist = C.uint16_t(in.TaskDist)
}
if in.TimeLimit != 0 {
cStruct.time_limit = C.uint32_t(in.TimeLimit)
}
if in.TimeMin != 0 {
cStruct.time_min = C.uint32_t(in.TimeMin)
}
//if go_struct.User_id != 0 {
// c_struct.user_id = C.uint32_t(go_struct.User_id)
//}
cStruct.user_id = C.uint32_t(in.UserId)
if in.WaitAllNodes != 0 {
cStruct.wait_all_nodes = C.uint16_t(in.WaitAllNodes)
}
if in.WarnSignal != 0 {
cStruct.warn_signal = C.uint16_t(in.WarnSignal)
}
if in.WarnTime != 0 {
cStruct.warn_time = C.uint16_t(in.WarnTime)
}
if in.WorkDir != "" {
workDirS := C.CString(in.WorkDir)
defer C.free(unsafe.Pointer(workDirS))
cStruct.work_dir = workDirS
}
if in.CpusPerTask != 0 {
cStruct.cpus_per_task = C.uint16_t(in.CpusPerTask)
}
if in.MinCpus != 0 {
cStruct.min_cpus = C.uint32_t(in.MinCpus)
}
if in.MaxCpus != 0 {
cStruct.max_cpus = C.uint32_t(in.MaxCpus)
}
if in.MinNodes != 0 {
cStruct.min_nodes = C.uint32_t(in.MinNodes)
}
if in.MaxNodes != 0 {
cStruct.max_nodes = C.uint32_t(in.MaxNodes)
}
if in.BoardsPerNode != 0 {
cStruct.boards_per_node = C.uint16_t(in.BoardsPerNode)
}
if in.SocketsPerBoard != 0 {
cStruct.sockets_per_board = C.uint16_t(in.SocketsPerBoard)
}
if in.SocketsPerNode != 0 {
cStruct.sockets_per_node = C.uint16_t(in.SocketsPerNode)
}
if in.CoresPerSocket != 0 {
cStruct.cores_per_socket = C.uint16_t(in.CoresPerSocket)
}
if in.ThreadsPerCore != 0 {
cStruct.threads_per_core = C.uint16_t(in.ThreadsPerCore)
}
if in.NtasksPerNode != 0 {
cStruct.ntasks_per_node = C.uint16_t(in.NtasksPerNode)
}
if in.NtasksPerSocket != 0 {
cStruct.ntasks_per_socket = C.uint16_t(in.NtasksPerSocket)
}
if in.NtasksPerCore != 0 {
cStruct.ntasks_per_core = C.uint16_t(in.NtasksPerCore)
}
if in.NtasksPerBoard != 0 {
cStruct.ntasks_per_board = C.uint16_t(in.NtasksPerBoard)
}
if in.PnMinCpus != 0 {
cStruct.pn_min_cpus = C.uint16_t(in.PnMinCpus)
}
if in.PnMinMemory != 0 {
cStruct.pn_min_memory = C.uint32_t(in.PnMinMemory)
}
if in.PnMinTmpDisk != 0 {
cStruct.pn_min_tmp_disk = C.uint32_t(in.PnMinTmpDisk)
}
if in.ReqSwitch != 0 {
cStruct.req_switch = C.uint32_t(in.ReqSwitch)
}
if in.StdErr != "" {
stdErrS := C.CString(in.StdErr)
defer C.free(unsafe.Pointer(stdErrS))
cStruct.std_err = stdErrS
}
if in.StdIn != "" {
stdInS := C.CString(in.StdIn)
defer C.free(unsafe.Pointer(stdInS))
cStruct.std_in = stdInS
}
if in.StdOut != "" {
stdOutS := C.CString(in.StdOut)
defer C.free(unsafe.Pointer(stdOutS))
cStruct.std_out = stdOutS
}
if in.Wait4Switch != 0 {
cStruct.wait4switch = C.uint32_t(in.Wait4Switch)
}
if in.Wckey != "" {
wckeyS := C.CString(in.Wckey)
defer C.free(unsafe.Pointer(wckeyS))
cStruct.wckey = wckeyS
}
cMsg := C.submit_job(&cStruct)
if cMsg == nil {
// submit_job failed before a response was allocated: report the slurm
// errno with a sentinel job id instead of indexing an empty slice.
goMsg := hpcTH.SubmitJobResp{}
goMsg.SubmitResponseMsg = append(goMsg.SubmitResponseMsg, &hpcTH.SubmitResponseMsg{
JobId:     1<<31 - 1,
ErrorCode: uint32(C.slurm_get_errno()),
})
return &goMsg, nil
}
defer C.free_submit_response_msg(cMsg)
goMsg := submitResponseMsgConvertCToGo(cMsg)
return &goMsg, nil
}
func submitResponseMsgConvertCToGo(cStruct *C.struct_submit_response_msg) hpcTH.SubmitJobResp {
var goStruct hpcTH.SubmitJobResp
submitRespMsg := hpcTH.SubmitResponseMsg{}
submitRespMsg.JobId = uint32(cStruct.job_id)
submitRespMsg.StepId = uint32(cStruct.step_id)
submitRespMsg.ErrorCode = uint32(cStruct.error_code)
goStruct.SubmitResponseMsg = append(goStruct.SubmitResponseMsg, &submitRespMsg)
return goStruct
}

// NOTE: the HTTP-based submission below sat after the return above, so it was
// unreachable, and its return values do not match this function's signature.
// It is kept only as a disabled reference to an earlier slurmrestd-style
// prototype against a local mock server.
//var submitJobResp slurm.SubmitJobResp
//url := "http://127.0.0.1:4523/m1/1136481-0-default/slurm/v0.0.37/job/submit"
//acHttpRequest := tool.GetACHttpRequest()
//reqJson, _ := json.Marshal(in)
//resp, _ := acHttpRequest.SetHeader(tool.ContentType, tool.ApplicationJson).SetBody(reqJson).SetResult(&submitJobResp).Post(url)
//println(resp.Body())
//return &submitJobResp, nil
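For orientation, a hedged caller sketch for the cgo-backed submitter above; NewSubmitJobLogic and SubmitJob follow the hpcTH server code shown later in this diff, while the concrete field values are illustrative assumptions, not defaults from this commit.

// submitExample is a hypothetical caller (not part of this commit): it submits
// a one-task batch job through the logic layer that wraps the cgo converter.
func submitExample(ctx context.Context, svcCtx *svc.ServiceContext) (*hpcTH.SubmitJobResp, error) {
	l := logic.NewSubmitJobLogic(ctx, svcCtx)
	return l.SubmitJob(&hpcTH.SubmitJobReq{
		Name:     "hello-slurm",
		Script:   "#!/bin/bash\nsrun hostname\n",
		WorkDir:  "/tmp",
		UserId:   1000, // uid the job runs as; mapped to cStruct.user_id above
		MinNodes: 1,
		NumTasks: 1,
	})
}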

View File

@@ -0,0 +1,28 @@
package cron
import (
"context"
"github.com/zeromicro/go-zero/core/logx"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/client/participantservice"
"gitlink.org.cn/jcce-pcm/utils/tool"
)
func ReportHeartbeat(participantRpc participantservice.ParticipantService) {
participantId, err := tool.GetParticipantId("etc/kubernetes.yaml")
if err != nil {
logx.Errorf("获取participant id失败 err:", err)
return
}
resp, err := participantRpc.ReportHeartbeat(context.Background(), &participantservice.ParticipantHeartbeatReq{
ParticipantId: participantId,
Address: "10.101.15.3:6443",
})
if err != nil {
logx.Error(err)
return
}
if resp.Code != 200 {
logx.Error(resp.Msg)
return
}
logx.Info("heartbeat reported successfully")
}

22
internal/pkg/cron/cron.go Normal file
View File

@@ -0,0 +1,22 @@
package cron
import (
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
)
func AddCronGroup(svc *svc.ServiceContext) {
// Sync task info to the core side
svc.Cron.AddFunc("*/5 * * * * ?", func() {
SyncTask(svc)
})
// Report participant heartbeat
svc.Cron.AddFunc("*/5 * * * * ?", func() {
ReportHeartbeat(svc.ParticipantRpc)
})
// Push node dynamic info (currently disabled)
svc.Cron.AddFunc("0 0/2 * * * ?", func() {
//tracker.NodesDynamicInfo(svc.Config.PrometheusUrl, svc.ParticipantRpc, svc.ClientSet)
})
}
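The specs above are six-field expressions: the service context below constructs the scheduler with cron.New(cron.WithSeconds()), which enables the leading seconds field. A self-contained sketch of that behaviour (the printed message is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// WithSeconds adds a leading seconds field, so "*/5 * * * * ?" fires
	// every five seconds; without it the same spec would be rejected.
	c := cron.New(cron.WithSeconds())
	if _, err := c.AddFunc("*/5 * * * * ?", func() {
		fmt.Println("tick", time.Now().Format(time.RFC3339))
	}); err != nil {
		panic(err)
	}
	c.Start()
	defer c.Stop()
	time.Sleep(12 * time.Second) // long enough to observe two ticks
}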

View File

@@ -0,0 +1,36 @@
package cron
import (
"context"
"github.com/zeromicro/go-zero/core/logx"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/pcmCore"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/utils/tool"
)
func SyncTask(svc *svc.ServiceContext) {
participantId, err := tool.GetParticipantId("etc/kubernetes.yaml")
if err != nil {
logx.Errorf("failed to get participant id: %v", err)
return
}
// Query the task list dispatched from the core side
infoReq := pcmCore.InfoListReq{
Kind: "hpc",
ParticipantId: participantId,
}
infoList, err := svc.PcmCoreRpc.InfoList(context.Background(), &infoReq)
if err != nil {
logx.Error(err)
return
}
if len(infoList.CloudInfoList) != 0 {
// Sync info back to the core side
syncInfoReq := pcmCore.SyncInfoReq{
Kind:          "cloud",
ParticipantId: participantId,
CloudInfoList: infoList.CloudInfoList,
}
if _, err := svc.PcmCoreRpc.SyncInfo(context.Background(), &syncInfoReq); err != nil {
logx.Error(err)
}
}
}

View File

@@ -0,0 +1,204 @@
package tracker
import (
"context"
"encoding/json"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
proModel "github.com/prometheus/common/model"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/model"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/client/participantservice"
"gitlink.org.cn/jcce-pcm/utils/tool"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"time"
)
const (
NODE_DISK_AVAIL = "node_filesystem_avail_bytes{fstype=~\"ext4|xfs\"}"
NODE_DISK_TOTAL = "node_filesystem_size_bytes{fstype=~\"ext4|xfs\"}"
NODE_MEMORY_AVAIL = "node_memory_MemAvailable_bytes"
NODE_MEMORY_TOTAL = "node_memory_MemTotal_bytes"
NODE_CPU_TOTAL_COUNT = "node:node_num_cpu:sum"
NODE_CPU_USAGE = "sum by (instance)(increase(node_cpu_seconds_total{mode=\"idle\"}[5m])) / sum by (instance)(increase(node_cpu_seconds_total[5m]))"
)
var address string
func MetricsQuery(statement string) (*proModel.Vector, error) {
client, err := api.NewClient(api.Config{
Address: address,
})
if err != nil {
return nil, err
}
v1api := v1.NewAPI(client)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
metrics, _, err := v1api.Query(ctx, statement, time.Now(), v1.WithTimeout(5*time.Second))
if err != nil {
return nil, err
}
// Round-trip through JSON to turn the generic model.Value into a Vector.
marshal, err := json.Marshal(metrics)
if err != nil {
return nil, err
}
vector := proModel.Vector{}
if err := json.Unmarshal(marshal, &vector); err != nil {
return nil, err
}
return &vector, nil
}
// NodeDiskAvail queries the available disk space per node
func NodeDiskAvail(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_DISK_AVAIL)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["instance"]) == nodes[index].NodeName {
nodes[index].DiskAvail = tool.StringToInt64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
// NodeDiskTotal queries the total disk space per node
func NodeDiskTotal(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_DISK_TOTAL)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["instance"]) == nodes[index].NodeName {
nodes[index].DiskTotal = tool.StringToInt64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
// NodeMemoryTotal queries the total memory per node
func NodeMemoryTotal(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_MEMORY_TOTAL)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["instance"]) == nodes[index].NodeName {
nodes[index].MemTotal = tool.StringToInt64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
// NodeMemoryAvail queries the available memory per node
func NodeMemoryAvail(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_MEMORY_AVAIL)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["instance"]) == nodes[index].NodeName {
nodes[index].MemAvail = tool.StringToInt64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
// NodeCpuTotalCount queries the total CPU core count per node
func NodeCpuTotalCount(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_CPU_TOTAL_COUNT)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["node"]) == nodes[index].NodeName {
nodes[index].CpuTotal = tool.StringToInt64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
// NodeCpuUsage queries the CPU usage per node
func NodeCpuUsage(nodes []*model.ScNodeAvailInfo) ([]*model.ScNodeAvailInfo, error) {
vectors, err := MetricsQuery(NODE_CPU_USAGE)
if err != nil {
return nil, err
}
for index := range nodes {
for i := 0; i < vectors.Len(); i++ {
if string((*vectors)[i].Metric["instance"]) == nodes[index].NodeName {
nodes[index].CpuUsable = tool.StringToFloat64((*vectors)[i].Value.String())
}
}
}
return nodes, nil
}
func NodesDynamicInfo(metricsUrl string, participantRpc participantservice.ParticipantService, kubeClient *kubernetes.Clientset) ([]*model.ScNodeAvailInfo, error) {
participantId, err := tool.GetParticipantId("etc/kubernetes.yaml")
if err != nil {
return nil, err
}
address = metricsUrl
var nodes []*model.ScNodeAvailInfo
nodeList, err := kubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodeList.Items {
nodePhy := model.ScNodeAvailInfo{
NodeName: node.ObjectMeta.Name,
ParticipantId: participantId,
}
nodes = append(nodes, &nodePhy)
}
// Collector errors are non-fatal here: a node keeps zero values for any
// metric that could not be fetched.
NodeDiskAvail(nodes)
NodeDiskTotal(nodes)
NodeMemoryTotal(nodes)
NodeMemoryAvail(nodes)
NodeCpuUsage(nodes)
NodeCpuTotalCount(nodes)
nodeAvailInfo := []*participantservice.NodeAvailInfo{}
tool.Convert(nodes, &nodeAvailInfo)
req := participantservice.ParticipantAvailReq{
ParticipantId: participantId,
NodeAvailInfo: nodeAvailInfo,
}
if _, err := participantRpc.ReportAvailable(context.Background(), &req); err != nil {
return nil, err
}
return nodes, nil
}
// NodesStaticInfo collects static node information
func NodesStaticInfo(kubeClient *kubernetes.Clientset) ([]*model.ScNodePhyInfo, error) {
var result []*model.ScNodePhyInfo
nodes, err := kubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
nodePhy := model.ScNodePhyInfo{
NodeName: node.ObjectMeta.Name,
OsName: node.Status.NodeInfo.OSImage,
ArchType: node.Status.NodeInfo.Architecture,
ArchName: node.Status.NodeInfo.KernelVersion,
}
result = append(result, &nodePhy)
}
return result, nil
}
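For reference, a minimal standalone sketch of issuing one of the instant queries above with the same client library; the Prometheus address is an assumed placeholder (in the tracker it comes from metricsUrl).

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Assumed local Prometheus; replace with the real metrics endpoint.
	client, err := api.NewClient(api.Config{Address: "http://127.0.0.1:9090"})
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Instant vector query, as used by NodeMemoryAvail above.
	result, warnings, err := v1.NewAPI(client).Query(ctx, "node_memory_MemAvailable_bytes", time.Now())
	if err != nil {
		panic(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result) // one sample per instance, value in bytes
}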

View File

@@ -1,41 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// Source: hpcTH.proto
package server
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/hpcTH"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/logic"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
)
type HpcTHServer struct {
svcCtx *svc.ServiceContext
hpcTH.UnimplementedHpcTHServer
}
func NewHpcTHServer(svcCtx *svc.ServiceContext) *HpcTHServer {
return &HpcTHServer{
svcCtx: svcCtx,
}
}
// ListJob list all jobs
func (s *HpcTHServer) ListJob(ctx context.Context, in *hpcTH.ListJobReq) (*hpcTH.ListJobResp, error) {
l := logic.NewListJobLogic(ctx, s.svcCtx)
return l.ListJob(in)
}
// ListHistoryJob list all history jobs
func (s *HpcTHServer) ListHistoryJob(ctx context.Context, in *hpcTH.ListHistoryJobReq) (*hpcTH.ListHistoryJobResp, error) {
l := logic.NewListHistoryJobLogic(ctx, s.svcCtx)
return l.ListHistoryJob(in)
}
// Submit job
func (s *HpcTHServer) SubmitJob(ctx context.Context, in *hpcTH.SubmitJobReq) (*hpcTH.SubmitJobResp, error) {
l := logic.NewSubmitJobLogic(ctx, s.svcCtx)
return l.SubmitJob(in)
}

View File

@@ -0,0 +1,38 @@
// Code generated by goctl. DO NOT EDIT.
// Source: slurm.proto
package server
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/logic"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
)
type SlurmServer struct {
svcCtx *svc.ServiceContext
slurm.UnimplementedSlurmServer
}
func NewSlurmServer(svcCtx *svc.ServiceContext) *SlurmServer {
return &SlurmServer{
svcCtx: svcCtx,
}
}
func (s *SlurmServer) SubmitJob(ctx context.Context, in *slurm.SubmitJobReq) (*slurm.SubmitJobResp, error) {
l := logic.NewSubmitJobLogic(ctx, s.svcCtx)
return l.SubmitJob(in)
}
func (s *SlurmServer) ListJob(ctx context.Context, in *slurm.ListJobReq) (*slurm.ListJobResp, error) {
l := logic.NewListJobLogic(ctx, s.svcCtx)
return l.ListJob(in)
}
func (s *SlurmServer) GetJob(ctx context.Context, in *slurm.ListJobReq) (*slurm.ListJobResp, error) {
l := logic.NewGetJobLogic(ctx, s.svcCtx)
return l.GetJob(in)
}

View File

@@ -3,20 +3,22 @@ package svc
import (
"github.com/robfig/cron/v3"
"github.com/zeromicro/go-zero/zrpc"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/pcmcoreclient"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/client/participantservice"
"gitlink.org.cn/jcce-pcm/pcm-coordinator/rpc/client/pcmcore"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/config"
)
type ServiceContext struct {
Config config.Config
Cron *cron.Cron
PcmCoreRpc pcmcoreclient.PcmCore
Config config.Config
Cron *cron.Cron
PcmCoreRpc pcmcore.PcmCore
ParticipantRpc participantservice.ParticipantService
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Cron: cron.New(cron.WithSeconds()),
Config: c,
PcmCoreRpc: pcmcoreclient.NewPcmCore(zrpc.MustNewClient(c.PcmCoreRpcConf)),
PcmCoreRpc: pcmcore.NewPcmCore(zrpc.MustNewClient(c.PcmCoreRpcConf)),
}
}

View File

@@ -1,287 +0,0 @@
syntax = "proto3";
package hpcTH;
option go_package = "/hpcTH";
/******************Job Start*************************/
message job{
string account = 1; // @gotags: copier:"Account"
string alloc_node = 2; // @gotags: copier:"AllocNode"
uint32 alloc_sid = 3; // @gotags: copier:"AllocSid"
uint32 array_job_id = 4; // @gotags: copier:"ArrayJobId"
uint32 array_task_id = 5; // @gotags: copier:"ArrayTaskId"
uint32 assoc_id = 6; // @gotags: copier:"AssocId"
uint32 batch_flag = 7; // @gotags: copier:"BatchFlag"
string batch_host = 8; // @gotags: copier:"BatchHost"
string batch_script = 9; // @gotags: copier:"BatchScript"
string command = 10; // @gotags: copier:"Command"
string comment = 11; // @gotags: copier:"Comment"
uint32 contiguous = 12; // @gotags: copier:"Contiguous"
uint32 cpus_per_task = 13; // @gotags: copier:"CpusPerTask"
string dependency = 14; // @gotags: copier:"Dependency"
uint32 derived_ec = 15; // @gotags: copier:"DerivedEc"
int64 eligible_time = 16; // @gotags: copier:"EligibleTime"
int64 end_time = 17; // @gotags: copier:"EndTime"
string exc_nodes = 18; // @gotags: copier:"ExcNodes"
int32 exc_node_inx = 19; // @gotags: copier:"ExcNodeInx"
uint32 exit_code = 20; // @gotags: copier:"ExitCode"
string features = 21; // @gotags: copier:"Features"
string gres = 22; // @gotags: copier:"Gres"
uint32 group_id = 23; // @gotags: copier:"GroupId"
uint32 job_id = 24; // @gotags: copier:"JobId"
uint32 job_state = 25; // @gotags: copier:"JobState"
string licenses = 26; // @gotags: copier:"Licenses"
uint32 max_cpus = 27; // @gotags: copier:"MaxCpus"
uint32 max_nodes = 28; // @gotags: copier:"MaxNodes"
uint32 boards_per_node = 29; // @gotags: copier:"BoardsPerNode"
uint32 sockets_per_board = 30; // @gotags: copier:"SocketsPerBoard"
uint32 sockets_per_node = 31; // @gotags: copier:"SocketsPerNode"
uint32 cores_per_socket = 32; // @gotags: copier:"CoresPerSocket"
uint32 threads_per_core = 33; // @gotags: copier:"ThreadsPerCore"
string name = 34; // @gotags: copier:"Name"
string network = 35; // @gotags: copier:"Network"
string nodes = 36; // @gotags: copier:"Nodes"
uint32 nice = 37; // @gotags: copier:"Nice"
int32 node_inx = 38; // @gotags: copier:"NodeInx"
uint32 ntasks_per_core = 39; // @gotags: copier:"NtasksPerCore"
uint32 ntasks_per_node = 40; // @gotags: copier:"NtasksPerNode"
uint32 ntasks_per_socket = 41; // @gotags: copier:"NtasksPerSocket"
uint32 ntasks_per_board = 42; // @gotags: copier:"NtasksPerBoard"
uint32 num_nodes = 43; // @gotags: copier:"NumNodes"
uint32 num_cpus = 44; // @gotags: copier:"NumCpus"
string partition = 45; // @gotags: copier:"Partition"
uint32 pn_min_memory = 46; // @gotags: copier:"PnMinMemory"
uint32 pn_min_cpus = 47; // @gotags: copier:"PnMinCpus"
uint32 pn_min_tmp_disk = 48; // @gotags: copier:"PnMinTmpDisk"
int64 pre_sus_time = 49; // @gotags: copier:"PreSusTime"
uint32 priority = 50; // @gotags: copier:"Priority"
uint32 profile = 51; // @gotags: copier:"Profile"
string qos = 52; // @gotags: copier:"Qos"
string req_nodes = 53; // @gotags: copier:"ReqNodes"
int32 req_node_inx = 54; // @gotags: copier:"ReqNodeInx"
uint32 req_switch = 55; // @gotags: copier:"ReqSwitch"
uint32 requeue = 56; // @gotags: copier:"Requeue"
int64 resize_time = 57; // @gotags: copier:"ResizeTime"
uint32 restart_cnt = 58; // @gotags: copier:"RestartCnt"
string resv_name = 59; // @gotags: copier:"ResvName"
uint32 shared = 60; // @gotags: copier:"Shared"
uint32 show_flags = 61; // @gotags: copier:"ShowFlags"
int64 start_time = 62; // @gotags: copier:"StartTime"
string state_desc = 63; // @gotags: copier:"StateDesc"
uint32 state_reason = 64; // @gotags: copier:"StateReason"
int64 submit_time = 65; // @gotags: copier:"SubmitTime"
int64 suspend_time = 66; // @gotags: copier:"SuspendTime"
uint32 time_limit = 67; // @gotags: copier:"TimeLimit"
uint32 time_min = 68; // @gotags: copier:"TimeMin"
uint32 user_id = 69; // @gotags: copier:"UserId"
int64 preempt_time = 70; // @gotags: copier:"PreemptTime"
uint32 wait4switch = 71; // @gotags: copier:"Wait4Switch"
string wckey = 72; // @gotags: copier:"Wckey"
string work_dir = 73; // @gotags: copier:"WorkDir"
}
message ListJobReq{
}
message ListJobResp{
uint32 code = 1; // @gotags: copier:"Code"
string msg = 2; // @gotags: copier:"Msg"
uint32 record_count = 3; // @gotags: copier:"RecordCount"
repeated job jobs = 4; // @gotags: copier:"Jobs"
}
/******************Job End*************************/
/******************History Job Start*************************/
message historyJob{
uint32 alloc_cpu = 1; // @gotags: copier:"AllocCPU"
uint32 alloc_nodes = 2; // @gotags: copier:"AllocNodes"
string account = 3; // @gotags: copier:"Account"
uint32 associd = 4; // @gotags: copier:"AssocId"
string blockid = 5; // @gotags: copier:"BlockId"
string cluster = 6; // @gotags: copier:"Cluster"
uint32 derived_ec = 7; // @gotags: copier:"DerivedEc"
string derived_es = 8; // @gotags: copier:"DerivedEs"
uint32 elapsed = 9; // @gotags: copier:"Elapsed"
int64 eligible = 10;// @gotags: copier:"Eligible"
int64 end = 11;// @gotags: copier:"End"
uint32 exitcode = 12;// @gotags: copier:"ExitCode"
uint32 gid = 13;// @gotags: copier:"Gid"
uint32 jobid = 14;// @gotags: copier:"JobId"
string jobname = 15;// @gotags: copier:"JobName"
uint32 lft = 16;// @gotags: copier:"Lft"
string partition = 17;// @gotags: copier:"Partition"
string nodes = 18;// @gotags: copier:"Nodes"
uint32 priority = 19;// @gotags: copier:"Priority"
uint32 qosid = 20;// @gotags: copier:"Qosid"
uint32 req_cpus = 21;// @gotags: copier:"ReqCpus"
uint32 req_mem = 22;// @gotags: copier:"ReqMem"
uint32 requid = 23;// @gotags: copier:"Requid"
uint32 resvid = 24;// @gotags: copier:"Resvid"
uint32 show_full = 25;// @gotags: copier:"ShowFull"
int64 start = 26;// @gotags: copier:"Start"
uint32 state = 27;// @gotags: copier:"State"
int64 submit = 28;// @gotags: copier:"Submit"
uint32 suspended = 29;// @gotags: copier:"Suspended"
uint32 sys_cpu_sec = 30;// @gotags: copier:"SysCpuSec"
uint32 sys_cpu_usec = 31;// @gotags: copier:"SysCpuUsec"
uint32 timelimit = 32;// @gotags: copier:"Timelimit"
uint32 tot_cpu_sec = 33;// @gotags: copier:"TotCpuSec"
uint32 tot_cpu_usec = 34;// @gotags: copier:"TotCpuUsec"
uint32 track_steps = 35;// @gotags: copier:"TrackSteps"
uint32 uid = 36;// @gotags: copier:"Uid"
string user = 37;// @gotags: copier:"User"
uint32 user_cpu_sec = 38;// @gotags: copier:"UserCpuSec"
uint32 user_cpu_usec = 39;// @gotags: copier:"UserCpuUsec"
string wckey = 40;// @gotags: copier:"Wckey"
uint32 wckeyid = 41;// @gotags: copier:"Wckeyid"
}
message ListHistoryJobReq{
}
message ListHistoryJobResp{
uint32 code = 1; // @gotags: copier:"Code"
string msg = 2; // @gotags: copier:"Msg"
uint32 record_count = 3; // @gotags: copier:"RecordCount"
repeated historyJob history_jobs = 4; // @gotags: copier:"HistoryJobs"
}
/******************History Job End*************************/
/******************Job(Submit) Start*************************/
message SubmitJobReq{
string account =1; // @gotags: copier:"account" /* charge to specified account */
string acctg_freq =2; // @gotags: copier:"acctg_freq" /* accounting polling intervals (seconds) */
string alloc_node=3; // @gotags: copier:"alloc_node" /* node making resource allocation request * NOTE: Normally set by slurm_submit* or * slurm_allocate* function */
uint32 alloc_resp_port=4; // @gotags: copier:"alloc_resp_port" /* port to send allocation confirmation to */
uint32 alloc_sid =5; // @gotags: copier:"alloc_sid" /* local sid making resource allocation request * NOTE: Normally set by slurm_submit* or * slurm_allocate* function * NOTE: Also used for update flags, see * ALLOC_SID_* flags */
uint32 argc =6; // @gotags: copier:"argc" /* number of arguments to the script */
repeated Argv argv = 7; // @gotags: copier:"Argv" /* arguments to the script */
string array_inx =8; // @gotags: copier:"array_inx" /* job array index values */ //void *array_bitmap; /* NOTE: Set by slurmctld */
int64 begin_time = 9; // @gotags: copier:"begin_time" /* delay initiation until this time */
uint32 ckpt_interval=10; // @gotags: copier:"ckpt_interval" /* periodically checkpoint this job */
string ckpt_dir =11; // @gotags: copier:"ckpt_dir" /* directory to store checkpoint images */
string comment =12; // @gotags: copier:"comment" /* arbitrary comment (used by Moab scheduler) */
uint32 contiguous=13; // @gotags: copier:"contiguous" /* 1 if job requires contiguous nodes,* 0 otherwise,default=0 */
string cpu_bind=14; // @gotags: copier:"cpu_bind" /* binding map for map/mask_cpu */
uint32 cpu_bind_type=15; // @gotags: copier:"cpu_bind_type" /* see cpu_bind_type_t */
string dependency =16; // @gotags: copier:"dependency" /* synchronize job execution with other jobs */
int64 end_time=17; // @gotags: copier:"end_time" /* time by which job must complete, used for * job update only now, possible deadline * scheduling in the future */
repeated Environment environment=18; // @gotags: copier:"Environment" /* environment variables to set for job, * name=value pairs, one per line */
uint32 env_size =19; // @gotags: copier:"env_size" /* element count in environment */
string exc_nodes =20; // @gotags: copier:"exc_nodes" /* comma separated list of nodes excluded * from job's allocation, default NONE */
string features =21; // @gotags: copier:"features" /* comma separated list of required features, * default NONE */
string gres =22; // @gotags: copier:"gres" /* comma separated list of required generic * resources, default NONE */
uint32 group_id =23; // @gotags: copier:"group_id" /* group to assume, if run as root. */
uint32 immediate=24; // @gotags: copier:"immediate" /* 1 if allocate to run or fail immediately, * 0 if to be queued awaiting resources */
uint32 job_id =25; // @gotags: copier:"job_id" /* job ID, default set by SLURM */
uint32 kill_on_node_fail=26; // @gotags: copier:"kill_on_node_fail" /* 1 if node failure to kill job, * 0 otherwise,default=1 */
string licenses=27; // @gotags: copier:"licenses" /* licenses required by the job */
uint32 mail_type=28; // @gotags: copier:"mail_type" /* see MAIL_JOB_ definitions above */
string mail_user =29; // @gotags: copier:"mail_user" /* user to receive notification */
string mem_bind =30; // @gotags: copier:"mem_bind" /* binding map for map/mask_cpu */
uint32 mem_bind_type=31; // @gotags: copier:"mem_bind_type" /* see mem_bind_type_t */
string name =32; // @gotags: copier:"name" /* name of the job, default "" */
string network=33; // @gotags: copier:"network" /* network use spec */
uint32 nice =34; // @gotags: copier:"nice" /* requested priority change, * NICE_OFFSET == no change */
uint32 num_tasks=35; // @gotags: copier:"num_tasks" /* number of tasks to be started, * for batch only */
uint32 open_mode=36; // @gotags: copier:"open_mode" /* out/err open mode truncate or append, * see OPEN_MODE_* */
uint32 other_port=37; // @gotags: copier:"other_port" /* port to send various notification msg to */
uint32 overcommit =38; // @gotags: copier:"overcommit" /* over subscribe resources, for batch only */
string partition=39; // @gotags: copier:"partition" /* name of requested partition, * default in SLURM config */
uint32 plane_size =40; // @gotags: copier:"plane_size" /* plane size when task_dist = SLURM_DIST_PLANE */
uint32 priority =41; // @gotags: copier:"priority" /* relative priority of the job, * explicitly set only for user root, * 0 == held (don't initiate) */
uint32 profile =42; // @gotags: copier:"profile" /* Level of acct_gather_profile {all | none} */
string qos =43; // @gotags: copier:"qos" /* Quality of Service */
string resp_host=44; // @gotags: copier:"resp_host" /* NOTE: Set by slurmctld */
string req_nodes=45; // @gotags: copier:"req_nodes" /* comma separated list of required nodes * default NONE */
uint32 requeue=46; // @gotags: copier:"requeue" /* enable or disable job requeue option */
string reservation=47; // @gotags: copier:"reservation" /* name of reservation to use */
string script=48; // @gotags: copier:"script" /* the actual job script, default NONE */
uint32 shared =49; // @gotags: copier:"shared" /* 1 if job can share nodes with other jobs, * 0 if job needs exclusive access to the node, * or NO_VAL to accept the system default. * SHARED_FORCE to eliminate user control. */ //char **spank_job_env; environment variables for job prolog/epilog // * scripts as set by SPANK plugins
uint32 spank_job_env_size=50; // @gotags: copier:"spank_job_env_size" /* element count in spank_env */
uint32 task_dist =51; // @gotags: copier:"task_dist" /* see enum task_dist_state */
uint32 time_limit =52; // @gotags: copier:"time_limit" /* maximum run time in minutes, default is * partition limit */
uint32 time_min =53; // @gotags: copier:"time_min" /* minimum run time in minutes, default is * time_limit */
uint32 user_id=54; // @gotags: copier:"user_id" /* set only if different from current UID, * can only be explicitly set by user root */
uint32 wait_all_nodes=55; // @gotags: copier:"wait_all_nodes" /* 0 to start job immediately after allocation * 1 to start job after all nodes booted * or NO_VAL to use system default */
uint32 warn_signal=56; // @gotags: copier:"warn_signal" /* signal to send when approaching end time */
uint32 warn_time=57; // @gotags: copier:"warn_time" /* time before end to send signal (seconds) */
string work_dir =58; // @gotags: copier:"work_dir" /* pathname of working directory */ /* job constraints: */
uint32 cpus_per_task=59; // @gotags: copier:"cpus_per_task" /* number of processors required for * each task */
uint32 min_cpus =60; // @gotags: copier:"min_cpus" /* minimum number of processors required, * default=0 */
uint32 max_cpus=61; // @gotags: copier:"max_cpus" /* maximum number of processors required, * default=0 */
uint32 min_nodes=62; // @gotags: copier:"min_nodes" /* minimum number of nodes required by job, * default=0 */
uint32 max_nodes=63; // @gotags: copier:"max_nodes" /* maximum number of nodes usable by job, * default=0 */
uint32 boards_per_node =64; // @gotags: copier:"boards_per_node" /* boards per node required by job */
uint32 sockets_per_board=65; // @gotags: copier:"sockets_per_board" /* sockets per board required by job */
uint32 sockets_per_node =66; // @gotags: copier:"sockets_per_node" /* sockets per node required by job */
uint32 cores_per_socket=67; // @gotags: copier:"cores_per_socket" /* cores per socket required by job */
uint32 threads_per_core=68; // @gotags: copier:"threads_per_core" /* threads per core required by job */
uint32 ntasks_per_node =69; // @gotags: copier:"ntasks_per_node" /* number of tasks to invoke on each node */
uint32 ntasks_per_socket=70; // @gotags: copier:"ntasks_per_socket" /* number of tasks to invoke on * each socket */
uint32 ntasks_per_core =71; // @gotags: copier:"ntasks_per_core" /* number of tasks to invoke on each core */
uint32 ntasks_per_board=72; // @gotags: copier:"ntasks_per_board" /* number of tasks to invoke on each board */
uint32 pn_min_cpus =73; // @gotags: copier:"pn_min_cpus" /* minimum # CPUs per node, default=0 */
uint32 pn_min_memory=74; // @gotags: copier:"pn_min_memory" /* minimum real memory per node OR * real memory per CPU | MEM_PER_CPU, * default=0 (no limit) */
uint32 pn_min_tmp_disk =75; // @gotags: copier:"pn_min_tmp_disk" /* minimum tmp disk per node, * default=0 */
/*
* The following parameters are only meaningful on a Blue Gene
* system at present. Some may be of value on other systems. Don't remove these;
* they are needed for LCRM and others that can't talk to the opaque data type
* select_jobinfo.
*/
//uint16_t geometry[HIGHEST_DIMENSIONS]; node count in various // * dimensions, e.g. X, Y, and Z
//uint16_t conn_type[HIGHEST_DIMENSIONS]; see enum connection_type
uint32 reboot=76; // @gotags: copier:"reboot" /* force node reboot before startup */
uint32 rotate=77; // @gotags: copier:"rotate" /* permit geometry rotation if set */
//char *blrtsimage; /* BlrtsImage for block */
//char *linuximage; /* LinuxImage for block */
//char *mloaderimage; /* MloaderImage for block */
//char *ramdiskimage; /* RamDiskImage for block */
/* End of Blue Gene specific values */
uint32 req_switch =78; // @gotags: copier:"req_switch" /* Minimum number of switches */ //dynamic_plugin_data_t *select_jobinfo; /* opaque data type, // * SLURM internal use only */
string std_err=79; // @gotags: copier:"std_err" /* pathname of stderr */
string std_in =80; // @gotags: copier:"std_in" /* pathname of stdin */
string std_out=81; // @gotags: copier:"std_out" /* pathname of stdout */
uint32 wait4switch=82; // @gotags: copier:"wait4switch" /* Maximum time to wait for minimum switches */
string wckey =83; // @gotags: copier:"wckey" /* wckey for job */
}
message SubmitJobResp{
repeated SubmitResponseMsg submit_response_msg = 1;
}
message SubmitResponseMsg{
uint32 job_id = 1;
uint32 step_id =2;
uint32 error_code=3;
}
message Argv{
string argv =1; // @gotags: copier:"argv"
}
message Environment{
string environment =1; // @gotags: copier:"environment"
}
/******************Job(Submit) End*************************/
// hpc Services for Tianhe
service hpcTH {
//ListJob list all jobs
rpc ListJob(ListJobReq) returns (ListJobResp);
//ListHistoryJob list all history jobs
rpc ListHistoryJob(ListHistoryJobReq) returns (ListHistoryJobResp);
//Submit job
rpc SubmitJob(SubmitJobReq) returns (SubmitJobResp);
}

451
pb/slurm.proto Normal file
View File

@@ -0,0 +1,451 @@
syntax = "proto3";
package slurm;
option go_package = "/slurm";
message error{
string error = 1;
uint32 errno = 2;
}
message Environment{
}
message Cpus {
}
message Sockets {
}
message Cores {
}
message Allocated_nodes {
uint32 memory = 1;
Cpus cpus = 2;
Sockets sockets = 3;
Cores cores = 4;
}
message Job_resources {
string nodes = 1;
uint32 allocated_cpus = 2;
uint32 allocated_hosts = 3;
repeated Allocated_nodes allocated_nodes = 4;
}
//submit job request
message job_properties{
string account = 1;
string account_gather_frequency = 2;
repeated string argv = 3;
string array = 4;
string batch_features = 5;
uint32 begin_time = 6;
string burst_buffer = 7;
string cluster_constraint = 8;
string comment = 9;
string constraints = 10;
uint32 core_specification = 11;
uint32 cores_per_socket = 12;
string cpu_binding = 13;
string cpu_binding_hint = 14;
string cpu_frequency = 15;
string cpus_per_gpu = 16;
uint32 cpus_per_task = 17;
string current_working_directory = 18;
string deadline = 19;
uint32 delay_boot = 20;
string dependency = 21;
string distribution = 22;
Environment environment = 23;
string exclusive = 24;
bool get_user_environment = 25;
string gres = 26;
string gres_flags = 27;
string gpu_binding = 28;
string gpu_frequency = 29;
string gpus = 30;
string gpus_per_node = 31;
string gpus_per_socket = 32;
string gpus_per_task = 33;
bool hold = 34;
bool kill_on_invalid_dependency = 35;
string licenses = 36;
string mail_type = 37;
string mail_user = 38;
string mcs_label = 39;
string memory_binding = 40;
uint32 memory_per_cpu = 41;
uint32 memory_per_gpu = 42;
uint32 memory_per_node = 43;
uint32 minimum_cpus_per_node = 44;
bool minimum_nodes = 45;
string name = 46;
string nice = 47;
bool no_kill = 48;
repeated uint32 nodes = 49;
string open_mode = 50;
string partition = 51;
string priority = 52;
string qos = 53;
bool requeue = 54;
string reservation = 55;
string signal = 56;
uint32 sockets_per_node = 57;
bool spread_job = 58;
string standard_error = 59;
string standard_input = 60;
string standard_output = 61;
uint32 tasks = 62;
uint32 tasks_per_core = 63;
uint32 tasks_per_node = 64;
uint32 tasks_per_socket = 65;
uint32 thread_specification = 66;
uint32 threads_per_core = 67;
uint32 time_limit = 68;
uint32 time_minimum = 69;
bool wait_all_nodes = 70;
string wckey = 71;
}
//list job response
message job_response_properties{
string account = 1;
uint32 accrue_time = 2;
string admin_comment = 3;
string array_job_id = 4;
string array_task_id = 5;
string array_max_tasks = 6;
string array_task_string = 7;
string association_id = 8;
string batch_features = 9;
bool batch_flag = 10;
string batch_host = 11;
repeated string flags = 12;
string burst_buffer = 13;
string burst_buffer_state = 14;
string cluster = 15;
string cluster_features = 16;
string command = 17;
string comment = 18;
bool contiguous = 19;
string core_spec = 20;
string thread_spec = 21;
string cores_per_socket = 22;
string billable_tres = 23;
string cpus_per_task = 24;
string cpu_frequency_minimum = 25;
string cpu_frequency_maximum = 26;
string cpu_frequency_governor = 27;
string cpus_per_tres = 28;
string deadline = 29;
string delay_boot = 30;
string dependency = 31;
string derived_exit_code = 32;
uint32 eligible_time = 33;
uint32 end_time = 34;
string excluded_nodes = 35;
uint32 exit_code = 36;
string features = 37;
string federation_origin = 38;
string federation_siblings_active = 39;
string federation_siblings_viable = 40;
repeated string gres_detail = 41;
string group_id = 42;
string job_id = 43;
Job_resources job_resources = 44;
string job_state = 45;
string last_sched_evaluation = 46;
string licenses = 47;
string max_cpus = 48;
string max_nodes = 49;
string mcs_label = 50;
string memory_per_tres = 51;
string name = 52;
string nodes = 53;
string nice = 54;
string tasks_per_core = 55;
string tasks_per_socket = 56;
string tasks_per_board = 57;
string cpus = 58;
string node_count = 59;
string tasks = 60;
string het_job_id = 61;
string het_job_id_set = 62;
string het_job_offset = 63;
string partition = 64;
string memory_per_node = 65;
string memory_per_cpu = 66;
string minimum_cpus_per_node = 67;
string minimum_tmp_disk_per_node = 68;
uint32 preempt_time = 69;
uint32 pre_sus_time = 70;
string priority = 71;
repeated string profile = 72;
string qos = 73;
bool reboot = 74;
string required_nodes = 75;
bool requeue = 76;
uint32 resize_time = 77;
string restart_cnt = 78;
string resv_name = 79;
string shared = 80;
repeated string show_flags = 81;
string sockets_per_board = 82;
string sockets_per_node = 83;
uint32 start_time = 84;
string state_description = 85;
string state_reason = 86;
string standard_error = 87;
string standard_input = 88;
string standard_output = 89;
uint32 submit_time = 90;
uint32 suspend_time = 91;
string system_comment = 92;
string time_limit = 93;
string time_minimum = 94;
string threads_per_core = 95;
string tres_bind = 96;
string tres_freq = 97;
string tres_per_job = 98;
string tres_per_node = 99;
string tres_per_socket = 100;
string tres_per_task = 101;
string tres_req_str = 102;
string tres_alloc_str = 103;
string user_id = 104;
string user_name = 105;
string wckey = 106;
string current_working_directory = 107;
}
//get job response
message Jobs {
string account = 1;
uint32 accrue_time = 2;
string admin_comment = 3;
string array_job_id = 4;
string array_task_id = 5;
string array_max_tasks = 6;
string array_task_string = 7;
string association_id = 8;
string batch_features = 9;
bool batch_flag = 10;
string batch_host = 11;
repeated string flags = 12;
string burst_buffer = 13;
string burst_buffer_state = 14;
string cluster = 15;
string cluster_features = 16;
string command = 17;
string comment = 18;
bool contiguous = 19;
string core_spec = 20;
string thread_spec = 21;
string cores_per_socket = 22;
string billable_tres = 23;
string cpus_per_task = 24;
string cpu_frequency_minimum = 25;
string cpu_frequency_maximum = 26;
string cpu_frequency_governor = 27;
string cpus_per_tres = 28;
string deadline = 29;
string delay_boot = 30;
string dependency = 31;
string derived_exit_code = 32;
uint32 eligible_time = 33;
uint32 end_time = 34;
string excluded_nodes = 35;
uint32 exit_code = 36;
string features = 37;
string federation_origin = 38;
string federation_siblings_active = 39;
string federation_siblings_viable = 40;
repeated string gres_detail = 41;
string group_id = 42;
string job_id = 43;
Job_resources job_resources = 44;
string job_state = 45;
string last_sched_evaluation = 46;
string licenses = 47;
string max_cpus = 48;
string max_nodes = 49;
string mcs_label = 50;
string memory_per_tres = 51;
string name = 52;
string nodes = 53;
string nice = 54;
string tasks_per_core = 55;
string tasks_per_socket = 56;
string tasks_per_board = 57;
string cpus = 58;
string node_count = 59;
string tasks = 60;
string het_job_id = 61;
string het_job_id_set = 62;
string het_job_offset = 63;
string partition = 64;
string memory_per_node = 65;
string memory_per_cpu = 66;
string minimum_cpus_per_node = 67;
string minimum_tmp_disk_per_node = 68;
uint32 preempt_time = 69;
uint32 pre_sus_time = 70;
string priority = 71;
repeated string profile = 72;
string qos = 73;
bool reboot = 74;
string required_nodes = 75;
bool requeue = 76;
uint32 resize_time = 77;
string restart_cnt = 78;
string resv_name = 79;
string shared = 80;
repeated string show_flags = 81;
string sockets_per_board = 82;
string sockets_per_node = 83;
uint32 start_time = 84;
string state_description = 85;
string state_reason = 86;
string standard_error = 87;
string standard_input = 88;
string standard_output = 89;
uint32 submit_time = 90;
uint32 suspend_time = 91;
string system_comment = 92;
string time_limit = 93;
string time_minimum = 94;
string threads_per_core = 95;
string tres_bind = 96;
string tres_freq = 97;
string tres_per_job = 98;
string tres_per_node = 99;
string tres_per_socket = 100;
string tres_per_task = 101;
string tres_req_str = 102;
string tres_alloc_str = 103;
string user_id = 104;
string user_name = 105;
string wckey = 106;
string current_working_directory = 107;
}
message job_update {
string account = 1;
string account_gather_frequency = 2;
repeated string argv = 3;
string array = 4;
string batch_features = 5;
uint32 begin_time = 6;
string burst_buffer = 7;
string cluster_constraint = 8;
string comment = 9;
string constraints = 10;
uint32 core_specification = 11;
uint32 cores_per_socket = 12;
string cpu_binding = 13;
string cpu_binding_hint = 14;
string cpu_frequency = 15;
string cpus_per_gpu = 16;
uint32 cpus_per_task = 17;
string current_working_directory = 18;
string deadline = 19;
uint32 delay_boot = 20;
string dependency = 21;
string distribution = 22;
Environment environment = 23;
string exclusive = 24;
bool get_user_environment = 25;
string gres = 26;
string gres_flags = 27;
string gpu_binding = 28;
string gpu_frequency = 29;
string gpus = 30;
string gpus_per_node = 31;
string gpus_per_socket = 32;
string gpus_per_task = 33;
bool hold = 34;
bool kill_on_invalid_dependency = 35;
string licenses = 36;
string mail_type = 37;
string mail_user = 38;
string mcs_label = 39;
string memory_binding = 40;
uint32 memory_per_cpu = 41;
uint32 memory_per_gpu = 42;
uint32 memory_per_node = 43;
uint32 minimum_cpus_per_node = 44;
bool minimum_nodes = 45;
string name = 46;
string nice = 47;
bool no_kill = 48;
repeated uint32 nodes = 49;
string open_mode = 50;
string partition = 51;
string priority = 52;
string qos = 53;
bool requeue = 54;
string reservation = 55;
string signal = 56;
uint32 sockets_per_node = 57;
bool spread_job = 58;
string standard_error = 59;
string standard_input = 60;
string standard_output = 61;
uint32 tasks = 62;
uint32 tasks_per_core = 63;
uint32 tasks_per_node = 64;
uint32 tasks_per_socket = 65;
uint32 thread_specification = 66;
uint32 threads_per_core = 67;
uint32 time_limit = 68;
uint32 time_minimum = 69;
bool wait_all_nodes = 70;
string wckey = 71;
}
message submitJobReq{
string script = 1;
job_properties job = 2;
repeated job_properties jobs = 3;
}
message submitJobResp{
repeated error errors = 1;
uint32 job_id = 2;
string step_id = 3;
string job_submit_user_msg = 4;
}
message listJobReq{
uint32 update_time = 1;
}
message listJobResp{
repeated error errors = 1;
repeated job_response_properties jobs = 2;
}
message updateJobReq{
job_update job_update = 1;
}
message updateJobResp{
}
message getJobReq{
string job_id = 1;
}
message getJobResp{
Jobs jobs = 1;
}
service slurm {
rpc submitJob(submitJobReq) returns (submitJobResp) {};
rpc listJob(listJobReq) returns (listJobResp) {};
rpc getJob(listJobReq) returns (listJobResp) {};
}
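Before the generated code below, a hedged sketch of constructing a submission request for this service in Go; the CamelCase field names assume protoc-gen-go's standard snake_case mapping (job_properties becomes JobProperties, and so on).

// buildSubmitReq is a hypothetical helper (not part of this commit) showing
// the minimal shape of a submitJob request: a batch script plus job properties.
func buildSubmitReq() *slurm.SubmitJobReq {
	return &slurm.SubmitJobReq{
		Script: "#!/bin/bash\nsrun hostname\n",
		Job: &slurm.JobProperties{
			Name:                    "hello",
			CurrentWorkingDirectory: "/tmp",
			Tasks:                   1,
		},
	}
}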

43
slurm.go Normal file
View File

@@ -0,0 +1,43 @@
package main
import (
"flag"
"fmt"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/core/service"
"github.com/zeromicro/go-zero/zrpc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/config"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/server"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/internal/svc"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
var configFile = flag.String("f", "etc/slurm.yaml", "the config file")
func main() {
flag.Parse()
// Parse service configuration
var c config.Config
conf.MustLoad(*configFile, &c)
ctx := svc.NewServiceContext(c)
s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
slurm.RegisterSlurmServer(grpcServer, server.NewSlurmServer(ctx))
if c.Mode == service.DevMode || c.Mode == service.TestMode {
reflection.Register(grpcServer)
}
})
defer s.Stop()
// Start and register cron jobs (currently disabled)
//ctx.Cron.Start()
//cron.AddCronGroup(ctx)
// Push participant static info (currently disabled)
//PushParticipantInfo(ctx.Config, ctx.ParticipantRpc, ctx.ClientSet)
fmt.Printf("Starting rpc server at %s...\n", c.ListenOn)
s.Start()
}

4973
slurm/slurm.pb.go Normal file

File diff suppressed because it is too large

177
slurm/slurm_grpc.pb.go Normal file
View File

@@ -0,0 +1,177 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.4
// source: slurm.proto
package slurm
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SlurmClient is the client API for Slurm service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SlurmClient interface {
SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error)
ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
GetJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
}
type slurmClient struct {
cc grpc.ClientConnInterface
}
func NewSlurmClient(cc grpc.ClientConnInterface) SlurmClient {
return &slurmClient{cc}
}
func (c *slurmClient) SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error) {
out := new(SubmitJobResp)
err := c.cc.Invoke(ctx, "/slurm.slurm/submitJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *slurmClient) ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
out := new(ListJobResp)
err := c.cc.Invoke(ctx, "/slurm.slurm/listJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *slurmClient) GetJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
out := new(ListJobResp)
err := c.cc.Invoke(ctx, "/slurm.slurm/getJob", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SlurmServer is the server API for Slurm service.
// All implementations must embed UnimplementedSlurmServer
// for forward compatibility
type SlurmServer interface {
SubmitJob(context.Context, *SubmitJobReq) (*SubmitJobResp, error)
ListJob(context.Context, *ListJobReq) (*ListJobResp, error)
GetJob(context.Context, *ListJobReq) (*ListJobResp, error)
mustEmbedUnimplementedSlurmServer()
}
// UnimplementedSlurmServer must be embedded to have forward compatible implementations.
type UnimplementedSlurmServer struct {
}
func (UnimplementedSlurmServer) SubmitJob(context.Context, *SubmitJobReq) (*SubmitJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method SubmitJob not implemented")
}
func (UnimplementedSlurmServer) ListJob(context.Context, *ListJobReq) (*ListJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListJob not implemented")
}
func (UnimplementedSlurmServer) GetJob(context.Context, *ListJobReq) (*ListJobResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetJob not implemented")
}
func (UnimplementedSlurmServer) mustEmbedUnimplementedSlurmServer() {}
// UnsafeSlurmServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SlurmServer will
// result in compilation errors.
type UnsafeSlurmServer interface {
mustEmbedUnimplementedSlurmServer()
}
func RegisterSlurmServer(s grpc.ServiceRegistrar, srv SlurmServer) {
s.RegisterService(&Slurm_ServiceDesc, srv)
}
func _Slurm_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SubmitJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SlurmServer).SubmitJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/slurm.slurm/submitJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SlurmServer).SubmitJob(ctx, req.(*SubmitJobReq))
}
return interceptor(ctx, in, info, handler)
}
func _Slurm_ListJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SlurmServer).ListJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/slurm.slurm/listJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SlurmServer).ListJob(ctx, req.(*ListJobReq))
}
return interceptor(ctx, in, info, handler)
}
func _Slurm_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListJobReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SlurmServer).GetJob(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/slurm.slurm/getJob",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SlurmServer).GetJob(ctx, req.(*ListJobReq))
}
return interceptor(ctx, in, info, handler)
}
// Slurm_ServiceDesc is the grpc.ServiceDesc for Slurm service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Slurm_ServiceDesc = grpc.ServiceDesc{
ServiceName: "slurm.slurm",
HandlerType: (*SlurmServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "submitJob",
Handler: _Slurm_SubmitJob_Handler,
},
{
MethodName: "listJob",
Handler: _Slurm_ListJob_Handler,
},
{
MethodName: "getJob",
Handler: _Slurm_GetJob_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "slurm.proto",
}

66
slurmclient/slurm.go Normal file
View File

@@ -0,0 +1,66 @@
// Code generated by goctl. DO NOT EDIT.
// Source: slurm.proto
package slurmclient
import (
"context"
"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurm"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
type (
AllocatedNodes = slurm.AllocatedNodes
Cores = slurm.Cores
Cpus = slurm.Cpus
Environment = slurm.Environment
Error = slurm.Error
GetJobReq = slurm.GetJobReq
GetJobResp = slurm.GetJobResp
JobProperties = slurm.JobProperties
JobResources = slurm.JobResources
JobResponseProperties = slurm.JobResponseProperties
JobUpdate = slurm.JobUpdate
Jobs = slurm.Jobs
ListJobReq = slurm.ListJobReq
ListJobResp = slurm.ListJobResp
Sockets = slurm.Sockets
SubmitJobReq = slurm.SubmitJobReq
SubmitJobResp = slurm.SubmitJobResp
UpdateJobReq = slurm.UpdateJobReq
UpdateJobResp = slurm.UpdateJobResp
Slurm interface {
SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error)
ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
GetJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error)
}
defaultSlurm struct {
cli zrpc.Client
}
)
func NewSlurm(cli zrpc.Client) Slurm {
return &defaultSlurm{
cli: cli,
}
}
func (m *defaultSlurm) SubmitJob(ctx context.Context, in *SubmitJobReq, opts ...grpc.CallOption) (*SubmitJobResp, error) {
client := slurm.NewSlurmClient(m.cli.Conn())
return client.SubmitJob(ctx, in, opts...)
}
func (m *defaultSlurm) ListJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
client := slurm.NewSlurmClient(m.cli.Conn())
return client.ListJob(ctx, in, opts...)
}
func (m *defaultSlurm) GetJob(ctx context.Context, in *ListJobReq, opts ...grpc.CallOption) (*ListJobResp, error) {
client := slurm.NewSlurmClient(m.cli.Conn())
return client.GetJob(ctx, in, opts...)
}
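
Finally, a hedged sketch of consuming this client from another service. The direct endpoint mirrors the port this server listens on; in a real deployment the Endpoints field would typically be replaced by service discovery, and errors would be handled more gracefully than panic.

package main

import (
	"context"
	"fmt"

	"github.com/zeromicro/go-zero/zrpc"
	"gitlink.org.cn/jcce-pcm/pcm-participant-slurm/slurmclient"
)

func main() {
	// Direct-endpoint client configuration; assumed values for illustration.
	cli := slurmclient.NewSlurm(zrpc.MustNewClient(zrpc.RpcClientConf{
		Endpoints: []string{"127.0.0.1:2007"},
		NonBlock:  true,
	}))
	resp, err := cli.ListJob(context.Background(), &slurmclient.ListJobReq{})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}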