package ac

import (
	"fmt"
	"log"
	"math"
	"strconv"
	"strings"
	"sync"

	"github.com/go-resty/resty/v2"

	coreClient "gitlink.org.cn/JointCloud/pcm-coordinator/client"
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/utils/httputils"
	"gitlink.org.cn/JointCloud/pcm-hpc/global"
)

// resource wraps an authenticated AC client and exposes resource-statistics
// queries against the Sugon AC (ShuguangAi) OpenAPI.
type resource struct {
	sync.RWMutex
	client *client
	log    log.Logger
}

// newResource builds a resource handle bound to the given AC client.
func newResource(client *client, options *ClientOptions) (*resource, error) {
	resource := &resource{
		RWMutex: sync.RWMutex{},
		client:  client,
		log:     log.Logger{},
	}
	return resource, nil
}
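
// Minimal usage sketch (hypothetical wiring; the literal field values below
// are placeholders, not part of this package's API):
//
//	c := &client{token: "<token>", user: "<user>", baseEndpoint: "https://ac.sugon.com"}
//	res, _ := newResource(c, nil)
//	stats := res.GetResourceStats(ClientOptions{})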

// GetResourceStats aggregates account balance, compute and storage quotas,
// memory and queue information from the AC OpenAPI into one response.
// Requests share one resty client but each call uses a fresh request, so
// query and path parameters set for one call do not leak into the next.
func (r *resource) GetResourceStats(options ClientOptions) (getResourceStatsResp GetResourceStatsResp) {
	httpClient := resty.New()

	// balance
	userInfo := &GetUserInfoResp{}
	httpClient.R().SetHeader("token", r.client.token).SetResult(userInfo).Get("https://ac.sugon.com/ac/openapi/v2/user")
	balance, _ := strconv.ParseFloat(userInfo.Data.AccountBalance, 64)

	// quotaUrl: pick the enabled HPC endpoint advertised by the center API
	centerUrl := "https://ac.sugon.com/ac/openapi/v2/center"
	var centerResp CenterResp
	var quotaUrl string
	httpClient.R().SetHeader("token", r.client.token).SetResult(&centerResp).Get(centerUrl)
	if len(centerResp.Data.HpcUrls) == 0 {
		// guard against an empty endpoint list before indexing into it
		getResourceStatsResp.Code = "500"
		getResourceStatsResp.Msg = "center API returned no HPC endpoints"
		return
	}
	for _, url := range centerResp.Data.HpcUrls {
		if url.Enable == "true" {
			quotaUrl = url.Url + "/hpc/openapi/v2/userquotas/userlimit"
		}
	}
	hpcPrefixUrl := centerResp.Data.HpcUrls[0].Url

	// jobManagerId: take the last cluster id reported for this account
	jobManagerUrl := hpcPrefixUrl + "/hpc/openapi/v2/cluster"
	var clusterResp ClusterResp
	var jobManagerId int
	httpClient.R().SetHeader("token", r.client.token).SetResult(&clusterResp).Get(jobManagerUrl)
	for _, datum := range clusterResp.Data {
		jobManagerId = datum.Id
	}

	// resource limit
	var limitResp UserQuotasLimitResp
	httpClient.R().SetHeader("token", r.client.token).
		SetQueryParam("strJobManagerID", strconv.Itoa(jobManagerId)).
		SetResult(&limitResp).
		Get(quotaUrl)
	totalCpu := limitResp.Data.AccountMaxCpu
	totalDcu := limitResp.Data.AccountMaxDcu

	// disk: quota values appear to be reported in GiB, so convert both the
	// threshold and the usage to bytes before subtracting
	var parastorQuotaResp ParastorQuota
	parastorQuotaUrl := hpcPrefixUrl + "/hpc/openapi/v2/parastor/quota/usernames/{username}"
	parastorQuotaUrl = strings.Replace(parastorQuotaUrl, "{username}", r.client.user, -1)
	httpClient.R().SetHeader(httputils.ContentType, httputils.ApplicationJson).
		SetHeader("token", r.client.token).
		SetResult(&parastorQuotaResp).
		Get(parastorQuotaUrl)

	var totalDisk, availDisk float64
	if len(parastorQuotaResp.Data) != 0 {
		totalDisk = RoundFloat(float64(parastorQuotaResp.Data[0].Threshold)*1024*1024*1024, 3)
		availDisk = RoundFloat((float64(parastorQuotaResp.Data[0].Threshold)-float64(parastorQuotaResp.Data[0].Usage))*1024*1024*1024, 3)
	}
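
	// e.g. (assumed numbers) Threshold = 100 and Usage = 40, both in GiB,
	// give totalDisk = 107374182400 bytes and availDisk = 64424509440 bytes.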

	// queueId: take the last queue visible to this user
	var queueId string
	var queueNamesResp QueueNamesResp
	var nodeResourceResp GetNodeResourcesResp
	queueUrl := hpcPrefixUrl + "/hpc/openapi/v2/queuenames/users/{username}"
	nodeUrl := hpcPrefixUrl + "/ai/openapi/v2/instance-service/resources"
	httpClient.R().SetHeader("token", r.client.token).
		SetPathParam("username", r.client.user).
		SetQueryParam("strJobManagerID", strconv.Itoa(jobManagerId)).
		SetResult(&queueNamesResp).
		Get(queueUrl)
	for _, datum := range queueNamesResp.Data {
		queueId = datum.Id
	}

	// memory: MemorySize looks to be MiB, converted here to bytes
	httpClient.R().SetHeader("token", r.client.token).
		SetQueryParam("acceleratorType", "dcu").
		SetQueryParam("resourceGroup", queueId).
		SetResult(&nodeResourceResp).
		Get(nodeUrl)
	memSize := RoundFloat(float64(nodeResourceResp.Data.MemorySize)*1024*1024, 3)
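
	// e.g. MemorySize = 256 (assumed to be MiB) gives memSize = 268435456 bytes.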

	// resources being occupied by the group's running jobs
	var memberResp GetMemberJobsResp
	httpClient.R().SetHeader("token", r.client.token).
		SetPathParam("clusterId", GetClusterId()).
		SetPathParam("groupId", GetGroupId()).
		SetPathParam("clusterUserName", r.client.user).
		SetResult(&memberResp).
		Get(r.client.baseEndpoint + "/ac/openapi/v2/groupmembers")

	var CpuCoreAvail int64
	var MemAvail float64
	if len(memberResp.Data) == 0 {
		// no member jobs running: the full quota is available
		CpuCoreAvail = totalCpu
		MemAvail = memSize
	} else {
		var cpuCoreUsed int64
		var memUsed float64
		for _, datum := range memberResp.Data {
			cpuCoreUsed += datum.CpuCore
		}
		memUsed = float64(cpuCoreUsed * 2 * 1024 * 1024 * 1024) // estimate 2 GiB per cpu core
		if cpuCoreUsed > totalCpu {
			CpuCoreAvail = 0
		} else {
			CpuCoreAvail = totalCpu - cpuCoreUsed
		}
		if memUsed > memSize {
			MemAvail = 0
		} else {
			MemAvail = memSize - memUsed
		}
	}
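
	// Worked example (assumed numbers): with totalCpu = 64 and two member
	// jobs using 8 and 16 cores, cpuCoreUsed = 24 and CpuCoreAvail = 40;
	// memUsed is then 24 * 2 GiB = 51539607552 bytes, subtracted from memSize.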

	// usable hours, derived from the balance at fixed unit prices
	// (assumed here: 2.0 per DCU card-hour and 0.09 per cpu-core-hour)
	var cards []*coreClient.Card
	cardHours := RoundFloat(balance/2.0, 3)
	cpuHours := RoundFloat(balance/0.09, 3)

	dcu := coreClient.Card{
		Platform:   "ShuguangAi",
		Type:       "computeCard",
		Name:       "dcu",
		TOpsAtFp16: 24.5,
		CardHours:  cardHours,
		CardNum:    int32(totalDcu),
	}
	cards = append(cards, &dcu)
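
	// e.g. a balance of 100.0 yields cardHours = 50.0 and cpuHours = 1111.111
	// at the assumed unit prices above.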

	resourceStats := coreClient.ResourceStats{
		Name:         global.PCM_CONFIG.System.CoreServerUrl,
		Balance:      balance,
		CpuCoreTotal: totalCpu,
		CpuCoreAvail: CpuCoreAvail,
		DiskTotal:    totalDisk,
		DiskAvail:    availDisk,
		MemTotal:     memSize,
		MemAvail:     MemAvail,
		CpuCoreHours: cpuHours,
		CardsAvail:   cards,
	}
	getResourceStatsResp = GetResourceStatsResp{
		Code: "200",
		Msg:  "success",
		Data: resourceStats,
	}
	return getResourceStatsResp
}
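
// getJSON sketches an error-checked variant of the request pattern used in
// GetResourceStats (an illustrative helper, not part of the original API):
// it surfaces transport errors and non-2xx statuses instead of ignoring them.
func getJSON(c *resty.Client, token, url string, out interface{}) error {
	resp, err := c.R().SetHeader("token", token).SetResult(out).Get(url)
	if err != nil {
		return err
	}
	if resp.IsError() {
		return fmt.Errorf("GET %s: %s", url, resp.Status())
	}
	return nil
}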

// RoundFloat rounds val to the given number of decimal places.
func RoundFloat(val float64, precision uint) float64 {
	ratio := math.Pow(10, float64(precision))
	return math.Round(val*ratio) / ratio
}
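
// e.g. RoundFloat(1234.56789, 3) returns 1234.568.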