cluster_helpers.go 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933
  1. package costmodel
  2. import (
  3. "fmt"
  4. "strconv"
  5. "time"
  6. "github.com/opencost/opencost/pkg/cloud/models"
  7. "github.com/opencost/opencost/pkg/cloud/provider"
  8. "github.com/opencost/opencost/core/pkg/log"
  9. "github.com/opencost/opencost/core/pkg/opencost"
  10. "github.com/opencost/opencost/core/pkg/source"
  11. "github.com/opencost/opencost/core/pkg/util"
  12. "github.com/opencost/opencost/pkg/env"
  13. )
  14. // mergeTypeMaps takes two maps of (cluster name, node name) -> node type
  15. // and combines them into a single map, preferring the k/v pairs in
  16. // the first map.
  17. func mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2 map[nodeIdentifierNoProviderID]string) map[nodeIdentifierNoProviderID]string {
  18. merged := map[nodeIdentifierNoProviderID]string{}
  19. for k, v := range clusterAndNameToType2 {
  20. merged[k] = v
  21. }
  22. // This ordering ensures the mappings in the first arg are preferred.
  23. for k, v := range clusterAndNameToType1 {
  24. merged[k] = v
  25. }
  26. return merged
  27. }
  28. func buildCPUCostMap(
  29. resNodeCPUCost []*source.NodeCPUPricePerHrResult,
  30. cp models.Provider,
  31. preemptible map[NodeIdentifier]bool,
  32. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  33. cpuCostMap := make(map[NodeIdentifier]float64)
  34. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  35. customPricingEnabled := provider.CustomPricesEnabled(cp)
  36. customPricingConfig, err := cp.GetConfig()
  37. if err != nil {
  38. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  39. }
  40. for _, result := range resNodeCPUCost {
  41. cluster := result.Cluster
  42. if cluster == "" {
  43. cluster = env.GetClusterID()
  44. }
  45. name := result.Node
  46. if name == "" {
  47. log.Warnf("ClusterNodes: CPU cost data missing node")
  48. continue
  49. }
  50. nodeType := result.InstanceType
  51. providerID := result.ProviderID
  52. key := NodeIdentifier{
  53. Cluster: cluster,
  54. Name: name,
  55. ProviderID: provider.ParseID(providerID),
  56. }
  57. keyNon := nodeIdentifierNoProviderID{
  58. Cluster: cluster,
  59. Name: name,
  60. }
  61. var cpuCost float64
  62. if customPricingEnabled && customPricingConfig != nil {
  63. var customCPUStr string
  64. if spot, ok := preemptible[key]; ok && spot {
  65. customCPUStr = customPricingConfig.SpotCPU
  66. } else {
  67. customCPUStr = customPricingConfig.CPU
  68. }
  69. customCPUCost, err := strconv.ParseFloat(customCPUStr, 64)
  70. if err != nil {
  71. log.Warnf("ClusterNodes: error parsing custom CPU price: %s", customCPUStr)
  72. }
  73. cpuCost = customCPUCost
  74. } else {
  75. cpuCost = result.Data[0].Value
  76. }
  77. clusterAndNameToType[keyNon] = nodeType
  78. cpuCostMap[key] = cpuCost
  79. }
  80. return cpuCostMap, clusterAndNameToType
  81. }
  82. func buildRAMCostMap(
  83. resNodeRAMCost []*source.NodeRAMPricePerGiBHrResult,
  84. cp models.Provider,
  85. preemptible map[NodeIdentifier]bool,
  86. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  87. ramCostMap := make(map[NodeIdentifier]float64)
  88. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  89. customPricingEnabled := provider.CustomPricesEnabled(cp)
  90. customPricingConfig, err := cp.GetConfig()
  91. if err != nil {
  92. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  93. }
  94. for _, result := range resNodeRAMCost {
  95. cluster := result.Cluster
  96. if cluster == "" {
  97. cluster = env.GetClusterID()
  98. }
  99. name := result.Node
  100. if name == "" {
  101. log.Warnf("ClusterNodes: RAM cost data missing node")
  102. continue
  103. }
  104. nodeType := result.InstanceType
  105. providerID := result.ProviderID
  106. key := NodeIdentifier{
  107. Cluster: cluster,
  108. Name: name,
  109. ProviderID: provider.ParseID(providerID),
  110. }
  111. keyNon := nodeIdentifierNoProviderID{
  112. Cluster: cluster,
  113. Name: name,
  114. }
  115. var ramCost float64
  116. if customPricingEnabled && customPricingConfig != nil {
  117. var customRAMStr string
  118. if spot, ok := preemptible[key]; ok && spot {
  119. customRAMStr = customPricingConfig.SpotRAM
  120. } else {
  121. customRAMStr = customPricingConfig.RAM
  122. }
  123. customRAMCost, err := strconv.ParseFloat(customRAMStr, 64)
  124. if err != nil {
  125. log.Warnf("ClusterNodes: error parsing custom RAM price: %s", customRAMStr)
  126. }
  127. ramCost = customRAMCost
  128. } else {
  129. ramCost = result.Data[0].Value
  130. }
  131. clusterAndNameToType[keyNon] = nodeType
  132. // covert to price per byte/hr
  133. ramCostMap[key] = ramCost / 1024.0 / 1024.0 / 1024.0
  134. }
  135. return ramCostMap, clusterAndNameToType
  136. }
  137. func buildGPUCostMap(
  138. resNodeGPUCost []*source.NodeGPUPricePerHrResult,
  139. gpuCountMap map[NodeIdentifier]float64,
  140. cp models.Provider,
  141. preemptible map[NodeIdentifier]bool,
  142. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  143. gpuCostMap := make(map[NodeIdentifier]float64)
  144. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  145. customPricingEnabled := provider.CustomPricesEnabled(cp)
  146. customPricingConfig, err := cp.GetConfig()
  147. if err != nil {
  148. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  149. }
  150. for _, result := range resNodeGPUCost {
  151. cluster := result.Cluster
  152. if cluster == "" {
  153. cluster = env.GetClusterID()
  154. }
  155. name := result.Node
  156. if name == "" {
  157. log.Warnf("ClusterNodes: GPU cost data missing node")
  158. continue
  159. }
  160. nodeType := result.InstanceType
  161. providerID := result.ProviderID
  162. key := NodeIdentifier{
  163. Cluster: cluster,
  164. Name: name,
  165. ProviderID: provider.ParseID(providerID),
  166. }
  167. keyNon := nodeIdentifierNoProviderID{
  168. Cluster: cluster,
  169. Name: name,
  170. }
  171. var gpuCost float64
  172. if customPricingEnabled && customPricingConfig != nil {
  173. var customGPUStr string
  174. if spot, ok := preemptible[key]; ok && spot {
  175. customGPUStr = customPricingConfig.SpotGPU
  176. } else {
  177. customGPUStr = customPricingConfig.GPU
  178. }
  179. customGPUCost, err := strconv.ParseFloat(customGPUStr, 64)
  180. if err != nil {
  181. log.Warnf("ClusterNodes: error parsing custom GPU price: %s", customGPUStr)
  182. }
  183. gpuCost = customGPUCost
  184. } else {
  185. gpuCost = result.Data[0].Value
  186. }
  187. clusterAndNameToType[keyNon] = nodeType
  188. // If gpu count is available use it to multiply gpu cost
  189. if value, ok := gpuCountMap[key]; ok {
  190. gpuCostMap[key] = gpuCost * value
  191. } else {
  192. gpuCostMap[key] = 0
  193. }
  194. }
  195. return gpuCostMap, clusterAndNameToType
  196. }
  197. func buildGPUCountMap(resNodeGPUCount []*source.NodeGPUCountResult) map[NodeIdentifier]float64 {
  198. gpuCountMap := make(map[NodeIdentifier]float64)
  199. for _, result := range resNodeGPUCount {
  200. cluster := result.Cluster
  201. if cluster == "" {
  202. cluster = env.GetClusterID()
  203. }
  204. name := result.Node
  205. if name == "" {
  206. log.Warnf("ClusterNodes: GPU count data missing node")
  207. continue
  208. }
  209. gpuCount := result.Data[0].Value
  210. providerID := result.ProviderID
  211. key := NodeIdentifier{
  212. Cluster: cluster,
  213. Name: name,
  214. ProviderID: provider.ParseID(providerID),
  215. }
  216. gpuCountMap[key] = gpuCount
  217. }
  218. return gpuCountMap
  219. }
  220. func buildCPUCoresMap(resNodeCPUCores []*source.NodeCPUCoresCapacityResult) map[nodeIdentifierNoProviderID]float64 {
  221. m := make(map[nodeIdentifierNoProviderID]float64)
  222. for _, result := range resNodeCPUCores {
  223. cluster := result.Cluster
  224. if cluster == "" {
  225. cluster = env.GetClusterID()
  226. }
  227. name := result.Node
  228. if name == "" {
  229. log.Warnf("ClusterNodes: CPU cores data missing node")
  230. continue
  231. }
  232. cpuCores := result.Data[0].Value
  233. key := nodeIdentifierNoProviderID{
  234. Cluster: cluster,
  235. Name: name,
  236. }
  237. m[key] = cpuCores
  238. }
  239. return m
  240. }
  241. func buildRAMBytesMap(resNodeRAMBytes []*source.NodeRAMBytesCapacityResult) map[nodeIdentifierNoProviderID]float64 {
  242. m := make(map[nodeIdentifierNoProviderID]float64)
  243. for _, result := range resNodeRAMBytes {
  244. cluster := result.Cluster
  245. if cluster == "" {
  246. cluster = env.GetClusterID()
  247. }
  248. name := result.Node
  249. if name == "" {
  250. log.Warnf("ClusterNodes: RAM bytes data missing node")
  251. continue
  252. }
  253. ramBytes := result.Data[0].Value
  254. key := nodeIdentifierNoProviderID{
  255. Cluster: cluster,
  256. Name: name,
  257. }
  258. m[key] = ramBytes
  259. }
  260. return m
  261. }
  262. // Mapping of cluster/node=cpu for computing resource efficiency
  263. func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.NodeCPUModeTotalResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
  264. cpuBreakdownMap := make(map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown)
  265. // Mapping of cluster/node=cpu for computing resource efficiency
  266. clusterNodeCPUTotal := map[nodeIdentifierNoProviderID]float64{}
  267. // Mapping of cluster/node:mode=cpu for computing resource efficiency
  268. clusterNodeModeCPUTotal := map[nodeIdentifierNoProviderID]map[string]float64{}
  269. // Build intermediate structures for CPU usage by (cluster, node) and by
  270. // (cluster, node, mode) for computing resouce efficiency
  271. for _, result := range resNodeCPUModeTotal {
  272. cluster := result.Cluster
  273. if cluster == "" {
  274. cluster = env.GetClusterID()
  275. }
  276. node := result.Node
  277. if node == "" {
  278. log.DedupedWarningf(5, "ClusterNodes: CPU mode data missing node")
  279. continue
  280. }
  281. mode := result.Mode
  282. if mode == "" {
  283. log.Warnf("ClusterNodes: unable to read CPU mode data.")
  284. mode = "other"
  285. }
  286. key := nodeIdentifierNoProviderID{
  287. Cluster: cluster,
  288. Name: node,
  289. }
  290. total := result.Data[0].Value
  291. // Increment total
  292. clusterNodeCPUTotal[key] += total
  293. // Increment mode
  294. if _, ok := clusterNodeModeCPUTotal[key]; !ok {
  295. clusterNodeModeCPUTotal[key] = map[string]float64{}
  296. }
  297. clusterNodeModeCPUTotal[key][mode] += total
  298. }
  299. // Compute resource efficiency from intermediate structures
  300. for key, total := range clusterNodeCPUTotal {
  301. if modeTotals, ok := clusterNodeModeCPUTotal[key]; ok {
  302. for mode, subtotal := range modeTotals {
  303. // Compute percentage for the current cluster, node, mode
  304. pct := 0.0
  305. if total > 0 {
  306. pct = subtotal / total
  307. }
  308. if _, ok := cpuBreakdownMap[key]; !ok {
  309. cpuBreakdownMap[key] = &ClusterCostsBreakdown{}
  310. }
  311. switch mode {
  312. case "idle":
  313. cpuBreakdownMap[key].Idle += pct
  314. case "system":
  315. cpuBreakdownMap[key].System += pct
  316. case "user":
  317. cpuBreakdownMap[key].User += pct
  318. default:
  319. cpuBreakdownMap[key].Other += pct
  320. }
  321. }
  322. }
  323. }
  324. return cpuBreakdownMap
  325. }
  326. func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoProviderID]float64) map[nodeIdentifierNoProviderID]*NodeOverhead {
  327. m := make(map[nodeIdentifierNoProviderID]*NodeOverhead, len(capRam))
  328. for identifier, ramCapacity := range capRam {
  329. allocatableRam, ok := allocRam[identifier]
  330. if !ok {
  331. log.Warnf("Could not find allocatable ram for node %s", identifier.Name)
  332. continue
  333. }
  334. overheadBytes := ramCapacity - allocatableRam
  335. m[identifier] = &NodeOverhead{
  336. RamOverheadFraction: overheadBytes / ramCapacity,
  337. }
  338. }
  339. for identifier, cpuCapacity := range capCPU {
  340. allocatableCPU, ok := allocCPU[identifier]
  341. if !ok {
  342. log.Warnf("Could not find allocatable cpu for node %s", identifier.Name)
  343. continue
  344. }
  345. overhead := cpuCapacity - allocatableCPU
  346. if _, found := m[identifier]; found {
  347. m[identifier].CpuOverheadFraction = overhead / cpuCapacity
  348. } else {
  349. m[identifier] = &NodeOverhead{
  350. CpuOverheadFraction: overhead / cpuCapacity,
  351. }
  352. }
  353. }
  354. return m
  355. }
  356. func buildRAMUserPctMap(resNodeRAMUserPct []*source.NodeRAMUserPercentResult) map[nodeIdentifierNoProviderID]float64 {
  357. m := make(map[nodeIdentifierNoProviderID]float64)
  358. for _, result := range resNodeRAMUserPct {
  359. cluster := result.Cluster
  360. if cluster == "" {
  361. cluster = env.GetClusterID()
  362. }
  363. name := result.Instance
  364. if name == "" {
  365. log.Warnf("ClusterNodes: RAM user percent missing node")
  366. continue
  367. }
  368. pct := result.Data[0].Value
  369. key := nodeIdentifierNoProviderID{
  370. Cluster: cluster,
  371. Name: name,
  372. }
  373. m[key] = pct
  374. }
  375. return m
  376. }
  377. func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.NodeRAMSystemPercentResult) map[nodeIdentifierNoProviderID]float64 {
  378. m := make(map[nodeIdentifierNoProviderID]float64)
  379. for _, result := range resNodeRAMSystemPct {
  380. cluster := result.Cluster
  381. if cluster == "" {
  382. cluster = env.GetClusterID()
  383. }
  384. name := result.Instance
  385. if name == "" {
  386. log.Warnf("ClusterNodes: RAM system percent missing node")
  387. continue
  388. }
  389. pct := result.Data[0].Value
  390. key := nodeIdentifierNoProviderID{
  391. Cluster: cluster,
  392. Name: name,
  393. }
  394. m[key] = pct
  395. }
  396. return m
  397. }
// activeData describes the observed active lifetime of an asset within a
// query window.
type activeData struct {
	start   time.Time // beginning of the observed active period
	end     time.Time // end of the observed active period
	minutes float64   // duration between start and end, in minutes
}
  403. // cluster management key gen
  404. func clusterManagementKeyGen(result *source.ClusterManagementDurationResult) (ClusterManagementIdentifier, bool) {
  405. cluster := result.Cluster
  406. if cluster == "" {
  407. cluster = env.GetClusterID()
  408. }
  409. provisionerName := result.Provisioner
  410. return ClusterManagementIdentifier{
  411. Cluster: cluster,
  412. Provisioner: provisionerName,
  413. }, true
  414. }
  415. func clusterManagementValues(result *source.ClusterManagementDurationResult) []*util.Vector {
  416. return result.Data
  417. }
  418. // node key gen
  419. func nodeKeyGen(result *source.NodeActiveMinutesResult) (NodeIdentifier, bool) {
  420. cluster := result.Cluster
  421. if cluster == "" {
  422. cluster = env.GetClusterID()
  423. }
  424. name := result.Node
  425. if name == "" {
  426. log.Warnf("ClusterNodes: active mins missing node")
  427. return NodeIdentifier{}, false
  428. }
  429. providerID := result.ProviderID
  430. return NodeIdentifier{
  431. Cluster: cluster,
  432. Name: name,
  433. ProviderID: provider.ParseID(providerID),
  434. }, true
  435. }
  436. func nodeValues(result *source.NodeActiveMinutesResult) []*util.Vector {
  437. return result.Data
  438. }
  439. func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdentifier, bool) {
  440. cluster := result.Cluster
  441. if cluster == "" {
  442. cluster = env.GetClusterID()
  443. }
  444. namespace := result.Namespace
  445. if namespace == "" {
  446. log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
  447. return LoadBalancerIdentifier{}, false
  448. }
  449. name := result.Service
  450. if name == "" {
  451. log.Warnf("ClusterLoadBalancers: LB cost data missing service_name")
  452. return LoadBalancerIdentifier{}, false
  453. }
  454. ingressIp := result.IngressIP
  455. if ingressIp == "" {
  456. log.DedupedWarningf(5, "ClusterLoadBalancers: LB cost data missing ingress_ip")
  457. // only update asset cost when an actual IP was returned
  458. return LoadBalancerIdentifier{}, false
  459. }
  460. return LoadBalancerIdentifier{
  461. Cluster: cluster,
  462. Namespace: namespace,
  463. Name: fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good,
  464. IngressIP: ingressIp,
  465. }, true
  466. }
  467. func lbValues(result *source.LBActiveMinutesResult) []*util.Vector {
  468. return result.Data
  469. }
  470. func buildActiveDataMap[T comparable, U any](
  471. results []*U,
  472. keyGen func(*U) (T, bool),
  473. valuesFunc func(*U) []*util.Vector,
  474. resolution time.Duration,
  475. window opencost.Window,
  476. ) map[T]activeData {
  477. m := make(map[T]activeData)
  478. for _, result := range results {
  479. key, ok := keyGen(result)
  480. values := valuesFunc(result)
  481. if !ok || len(values) == 0 {
  482. continue
  483. }
  484. s, e := calculateStartAndEnd(values, resolution, window)
  485. mins := e.Sub(s).Minutes()
  486. m[key] = activeData{
  487. start: s,
  488. end: e,
  489. minutes: mins,
  490. }
  491. }
  492. return m
  493. }
  494. // Determine preemptibility with node labels
  495. // node id -> is preemptible?
  496. func buildPreemptibleMap(
  497. resIsSpot []*source.NodeIsSpotResult,
  498. ) map[NodeIdentifier]bool {
  499. m := make(map[NodeIdentifier]bool)
  500. for _, result := range resIsSpot {
  501. cluster := result.Cluster
  502. if cluster == "" {
  503. cluster = env.GetClusterID()
  504. }
  505. name := result.Node
  506. if name == "" {
  507. log.Warnf("ClusterNodes: active mins missing node")
  508. continue
  509. }
  510. providerID := result.ProviderID
  511. key := NodeIdentifier{
  512. Cluster: cluster,
  513. Name: name,
  514. ProviderID: provider.ParseID(providerID),
  515. }
  516. // GCP preemptible label
  517. pre := result.Data[0].Value
  518. // TODO(michaelmdresser): check this condition at merge time?
  519. // if node, ok := nodeMap[key]; pre > 0.0 && ok {
  520. // node.Preemptible = true
  521. // }
  522. m[key] = pre > 0.0
  523. // TODO AWS preemptible
  524. // TODO Azure preemptible
  525. }
  526. return m
  527. }
  528. func buildAssetsPVCMap(resPVCInfo []*source.PVCInfoResult) map[DiskIdentifier]*Disk {
  529. diskMap := map[DiskIdentifier]*Disk{}
  530. for _, result := range resPVCInfo {
  531. cluster := result.Cluster
  532. if cluster == "" {
  533. cluster = env.GetClusterID()
  534. }
  535. volumeName := result.VolumeName
  536. if volumeName == "" {
  537. log.Debugf("ClusterDisks: pv claim data missing volumename")
  538. continue
  539. }
  540. claimName := result.PersistentVolumeClaim
  541. if claimName == "" {
  542. log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
  543. continue
  544. }
  545. claimNamespace := result.Namespace
  546. if claimNamespace == "" {
  547. log.Debugf("ClusterDisks: pv claim data missing namespace")
  548. continue
  549. }
  550. key := DiskIdentifier{
  551. Cluster: cluster,
  552. Name: volumeName,
  553. }
  554. if _, ok := diskMap[key]; !ok {
  555. diskMap[key] = &Disk{
  556. Cluster: cluster,
  557. Name: volumeName,
  558. Breakdown: &ClusterCostsBreakdown{},
  559. }
  560. }
  561. diskMap[key].VolumeName = volumeName
  562. diskMap[key].ClaimName = claimName
  563. diskMap[key].ClaimNamespace = claimNamespace
  564. }
  565. return diskMap
  566. }
  567. func buildLabelsMap(
  568. resLabels []*source.NodeLabelsResult,
  569. ) map[nodeIdentifierNoProviderID]map[string]string {
  570. m := make(map[nodeIdentifierNoProviderID]map[string]string)
  571. // Copy labels into node
  572. for _, result := range resLabels {
  573. cluster := result.Cluster
  574. if cluster == "" {
  575. cluster = env.GetClusterID()
  576. }
  577. node := result.Node
  578. if node == "" {
  579. log.DedupedWarningf(5, "ClusterNodes: label data missing node")
  580. continue
  581. }
  582. key := nodeIdentifierNoProviderID{
  583. Cluster: cluster,
  584. Name: node,
  585. }
  586. // The QueryResult.GetLabels function needs to be called to sanitize the
  587. // ingested label data. This removes the label_ prefix that prometheus
  588. // adds to emitted labels. It also keeps from ingesting prometheus labels
  589. // that aren't a part of the asset.
  590. if _, ok := m[key]; !ok {
  591. m[key] = map[string]string{}
  592. }
  593. for k, l := range result.Labels {
  594. m[key][k] = l
  595. }
  596. }
  597. return m
  598. }
  599. // checkForKeyAndInitIfMissing inits a key in the provided nodemap if
  600. // it does not exist. Intended to be called ONLY by buildNodeMap
  601. func checkForKeyAndInitIfMissing(
  602. nodeMap map[NodeIdentifier]*Node,
  603. key NodeIdentifier,
  604. clusterAndNameToType map[nodeIdentifierNoProviderID]string,
  605. ) {
  606. if _, ok := nodeMap[key]; !ok {
  607. // default nodeType in case we don't have the mapping
  608. var nodeType string
  609. if t, ok := clusterAndNameToType[nodeIdentifierNoProviderID{
  610. Cluster: key.Cluster,
  611. Name: key.Name,
  612. }]; ok {
  613. nodeType = t
  614. } else {
  615. log.Warnf("ClusterNodes: Type does not exist for node identifier %s", key)
  616. }
  617. nodeMap[key] = &Node{
  618. Cluster: key.Cluster,
  619. Name: key.Name,
  620. NodeType: nodeType,
  621. ProviderID: key.ProviderID,
  622. CPUBreakdown: &ClusterCostsBreakdown{},
  623. RAMBreakdown: &ClusterCostsBreakdown{},
  624. }
  625. }
  626. }
  627. // buildNodeMap creates the main set of node data for ClusterNodes from
  628. // the data maps built from Prometheus queries. Some of the Prometheus
  629. // data has access to the provider_id field and some does not. To get
  630. // around this problem, we use the data that includes provider_id
  631. // to build up the definitive set of nodes and then use the data
  632. // with less-specific identifiers (i.e. without provider_id) to fill
  633. // in the remaining fields.
  634. //
  635. // For example, let's say we have nodes identified like so:
  636. // cluster name/node name/provider_id. For the sake of the example,
  637. // we will also limit data to CPU cost, CPU cores, and preemptibility.
  638. //
  639. // We have CPU cost data that looks like this:
  640. // cluster1/node1/prov_node1_A: $10
  641. // cluster1/node1/prov_node1_B: $8
  642. // cluster1/node2/prov_node2: $15
  643. //
  644. // We have Preemptible data that looks like this:
  645. // cluster1/node1/prov_node1_A: true
  646. // cluster1/node1/prov_node1_B: false
  647. // cluster1/node2/prov_node2_B: false
  648. //
  649. // We have CPU cores data that looks like this:
  650. // cluster1/node1: 4
  651. // cluster1/node2: 6
  652. //
  653. // This function first combines the data that is fully identified,
  654. // creating the following:
  655. // cluster1/node1/prov_node1_A: CPUCost($10), Preemptible(true)
  656. // cluster1/node1/prov_node1_B: CPUCost($8), Preemptible(false)
  657. // cluster1/node2/prov_node2: CPUCost($15), Preemptible(false)
  658. //
  659. // It then uses the less-specific data to extend the specific data,
  660. // making the following:
  661. // cluster1/node1/prov_node1_A: CPUCost($10), Preemptible(true), Cores(4)
  662. // cluster1/node1/prov_node1_B: CPUCost($8), Preemptible(false), Cores(4)
  663. // cluster1/node2/prov_node2: CPUCost($15), Preemptible(false), Cores(6)
  664. //
  665. // In the situation where provider_id doesn't exist for any metrics,
  666. // that is the same as all provider_ids being empty strings. If
  667. // provider_id doesn't exist at all, then we (without having to do
  668. // extra work) easily fall back on identifying nodes only by cluster name
  669. // and node name because the provider_id part of the key will always
  670. // be the empty string.
  671. //
  672. // It is worth nothing that, in this approach, if a node is not present
  673. // in the more specific data but is present in the less-specific data,
  674. // that data is never processed into the final node map. For example,
  675. // let's say the CPU cores map has the following entry:
  676. // cluster1/node8: 6
  677. // But none of the maps with provider_id (CPU cost, RAM cost, etc.)
  678. // have an identifier for cluster1/node8 (regardless of provider_id).
  679. // In this situation, the final node map will not have a cluster1/node8
  680. // entry. This could be fixed by iterating over all of the less specific
  681. // identifiers and, inside that iteration, all of the identifiers in
  682. // the node map, but this would introduce a roughly quadratic time
  683. // complexity.
  684. func buildNodeMap(
  685. cpuCostMap, ramCostMap, gpuCostMap, gpuCountMap map[NodeIdentifier]float64,
  686. cpuCoresMap, ramBytesMap, ramUserPctMap,
  687. ramSystemPctMap map[nodeIdentifierNoProviderID]float64,
  688. cpuBreakdownMap map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown,
  689. activeDataMap map[NodeIdentifier]activeData,
  690. preemptibleMap map[NodeIdentifier]bool,
  691. labelsMap map[nodeIdentifierNoProviderID]map[string]string,
  692. clusterAndNameToType map[nodeIdentifierNoProviderID]string,
  693. overheadMap map[nodeIdentifierNoProviderID]*NodeOverhead,
  694. ) map[NodeIdentifier]*Node {
  695. nodeMap := make(map[NodeIdentifier]*Node)
  696. // Initialize the map with the most-specific data:
  697. for id, cost := range cpuCostMap {
  698. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  699. nodeMap[id].CPUCost = cost
  700. }
  701. for id, cost := range ramCostMap {
  702. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  703. nodeMap[id].RAMCost = cost
  704. }
  705. for id, cost := range gpuCostMap {
  706. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  707. nodeMap[id].GPUCost = cost
  708. }
  709. for id, count := range gpuCountMap {
  710. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  711. nodeMap[id].GPUCount = count
  712. }
  713. for id, preemptible := range preemptibleMap {
  714. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  715. nodeMap[id].Preemptible = preemptible
  716. }
  717. for id, activeData := range activeDataMap {
  718. checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
  719. nodeMap[id].Start = activeData.start
  720. nodeMap[id].End = activeData.end
  721. nodeMap[id].Minutes = nodeMap[id].End.Sub(nodeMap[id].Start).Minutes()
  722. }
  723. // We now merge in data that doesn't have a provider id by looping over
  724. // all keys already added and inserting data according to their
  725. // cluster name/node name combos.
  726. for id, nodePtr := range nodeMap {
  727. clusterAndNameID := nodeIdentifierNoProviderID{
  728. Cluster: id.Cluster,
  729. Name: id.Name,
  730. }
  731. if cores, ok := cpuCoresMap[clusterAndNameID]; ok {
  732. nodePtr.CPUCores = cores
  733. if v, ok := partialCPUMap[nodePtr.NodeType]; ok {
  734. if cores > 0 {
  735. nodePtr.CPUCores = v
  736. adjustmentFactor := v / cores
  737. nodePtr.CPUCost = nodePtr.CPUCost * adjustmentFactor
  738. }
  739. }
  740. }
  741. if ramBytes, ok := ramBytesMap[clusterAndNameID]; ok {
  742. nodePtr.RAMBytes = ramBytes
  743. }
  744. if ramUserPct, ok := ramUserPctMap[clusterAndNameID]; ok {
  745. nodePtr.RAMBreakdown.User = ramUserPct
  746. }
  747. if ramSystemPct, ok := ramSystemPctMap[clusterAndNameID]; ok {
  748. nodePtr.RAMBreakdown.System = ramSystemPct
  749. }
  750. if cpuBreakdown, ok := cpuBreakdownMap[clusterAndNameID]; ok {
  751. nodePtr.CPUBreakdown = cpuBreakdown
  752. }
  753. if labels, ok := labelsMap[clusterAndNameID]; ok {
  754. nodePtr.Labels = labels
  755. }
  756. if overhead, ok := overheadMap[clusterAndNameID]; ok {
  757. nodePtr.Overhead = overhead
  758. } else {
  759. // we were unable to compute overhead for this node
  760. // assume default case of no overhead
  761. nodePtr.Overhead = &NodeOverhead{}
  762. log.Warnf("unable to compute overhead for node %s - defaulting to no overhead", clusterAndNameID.Name)
  763. }
  764. }
  765. return nodeMap
  766. }