2
0

cluster_helpers.go 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965
  1. package costmodel
  2. import (
  3. "fmt"
  4. "math"
  5. "strconv"
  6. "time"
  7. coreenv "github.com/opencost/opencost/core/pkg/env"
  8. "github.com/opencost/opencost/pkg/cloud/models"
  9. "github.com/opencost/opencost/pkg/cloud/provider"
  10. "github.com/opencost/opencost/core/pkg/log"
  11. "github.com/opencost/opencost/core/pkg/opencost"
  12. "github.com/opencost/opencost/core/pkg/source"
  13. "github.com/opencost/opencost/core/pkg/util"
  14. )
  15. // mergeTypeMaps takes two maps of (cluster name, node name) -> node type
  16. // and combines them into a single map, preferring the k/v pairs in
  17. // the first map.
  18. func mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2 map[nodeIdentifierNoProviderID]string) map[nodeIdentifierNoProviderID]string {
  19. merged := map[nodeIdentifierNoProviderID]string{}
  20. for k, v := range clusterAndNameToType2 {
  21. merged[k] = v
  22. }
  23. // This ordering ensures the mappings in the first arg are preferred.
  24. for k, v := range clusterAndNameToType1 {
  25. merged[k] = v
  26. }
  27. return merged
  28. }
  29. func buildCPUCostMap(
  30. resNodeCPUCost []*source.NodeCPUPricePerHrResult,
  31. cp models.Provider,
  32. preemptible map[NodeIdentifier]bool,
  33. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  34. cpuCostMap := make(map[NodeIdentifier]float64)
  35. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  36. customPricingEnabled := provider.CustomPricesEnabled(cp)
  37. customPricingConfig, err := cp.GetConfig()
  38. if err != nil {
  39. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  40. }
  41. for _, result := range resNodeCPUCost {
  42. cluster := result.Cluster
  43. if cluster == "" {
  44. cluster = coreenv.GetClusterID()
  45. }
  46. name := result.Node
  47. if name == "" {
  48. log.Warnf("ClusterNodes: CPU cost data missing node")
  49. continue
  50. }
  51. nodeType := result.InstanceType
  52. providerID := result.ProviderID
  53. key := NodeIdentifier{
  54. Cluster: cluster,
  55. Name: name,
  56. ProviderID: provider.ParseID(providerID),
  57. }
  58. keyNon := nodeIdentifierNoProviderID{
  59. Cluster: cluster,
  60. Name: name,
  61. }
  62. var cpuCost float64
  63. // Start with the value from the data source (e.g., collector or Prometheus)
  64. cpuCost = result.Data[0].Value
  65. // If custom pricing is enabled or the data source value is invalid, use custom pricing
  66. if (customPricingEnabled && customPricingConfig != nil) || cpuCost == 0 || math.IsNaN(cpuCost) {
  67. if customPricingConfig != nil {
  68. var customCPUStr string
  69. if spot, ok := preemptible[key]; ok && spot {
  70. customCPUStr = customPricingConfig.SpotCPU
  71. } else {
  72. customCPUStr = customPricingConfig.CPU
  73. }
  74. customCPUCost, err := strconv.ParseFloat(customCPUStr, 64)
  75. if err != nil {
  76. log.Warnf("ClusterNodes: error parsing custom CPU price: %s", customCPUStr)
  77. } else {
  78. // Log the reason for using custom pricing
  79. if cpuCost == 0 {
  80. log.DedupedInfof(10, "ClusterNodes: node %s has invalid CPU cost (0) from data source; falling back to custom pricing: %f", name, customCPUCost)
  81. } else if math.IsNaN(cpuCost) {
  82. log.DedupedInfof(10, "ClusterNodes: node %s has invalid CPU cost (NaN) from data source; falling back to custom pricing: %f", name, customCPUCost)
  83. } else {
  84. log.DedupedInfof(10, "ClusterNodes: node %s using custom pricing: %f", name, customCPUCost)
  85. }
  86. cpuCost = customCPUCost
  87. }
  88. } else {
  89. // custom pricing config is nil, but we needed it because cpuCost was invalid
  90. if cpuCost == 0 || math.IsNaN(cpuCost) {
  91. log.Warnf("ClusterNodes: node %s has invalid CPU cost (0 or NaN), but was unable to fall back to custom pricing because it was nil", name)
  92. }
  93. }
  94. }
  95. clusterAndNameToType[keyNon] = nodeType
  96. cpuCostMap[key] = cpuCost
  97. }
  98. return cpuCostMap, clusterAndNameToType
  99. }
  100. func buildRAMCostMap(
  101. resNodeRAMCost []*source.NodeRAMPricePerGiBHrResult,
  102. cp models.Provider,
  103. preemptible map[NodeIdentifier]bool,
  104. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  105. ramCostMap := make(map[NodeIdentifier]float64)
  106. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  107. customPricingEnabled := provider.CustomPricesEnabled(cp)
  108. customPricingConfig, err := cp.GetConfig()
  109. if err != nil {
  110. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  111. }
  112. for _, result := range resNodeRAMCost {
  113. cluster := result.Cluster
  114. if cluster == "" {
  115. cluster = coreenv.GetClusterID()
  116. }
  117. name := result.Node
  118. if name == "" {
  119. log.Warnf("ClusterNodes: RAM cost data missing node")
  120. continue
  121. }
  122. nodeType := result.InstanceType
  123. providerID := result.ProviderID
  124. key := NodeIdentifier{
  125. Cluster: cluster,
  126. Name: name,
  127. ProviderID: provider.ParseID(providerID),
  128. }
  129. keyNon := nodeIdentifierNoProviderID{
  130. Cluster: cluster,
  131. Name: name,
  132. }
  133. var ramCost float64
  134. // Start with the value from the data source (e.g., collector or Prometheus)
  135. ramCost = result.Data[0].Value
  136. // If custom pricing is enabled or the data source value is invalid, use custom pricing
  137. if (customPricingEnabled && customPricingConfig != nil) || ramCost == 0 || math.IsNaN(ramCost) {
  138. if customPricingConfig != nil {
  139. var customRAMStr string
  140. if spot, ok := preemptible[key]; ok && spot {
  141. customRAMStr = customPricingConfig.SpotRAM
  142. } else {
  143. customRAMStr = customPricingConfig.RAM
  144. }
  145. customRAMCost, err := strconv.ParseFloat(customRAMStr, 64)
  146. if err != nil {
  147. log.Warnf("ClusterNodes: error parsing custom RAM price: %s", customRAMStr)
  148. } else {
  149. // Log the reason for using custom pricing
  150. if ramCost == 0 {
  151. log.DedupedInfof(10, "ClusterNodes: node %s has invalid RAM cost (0) from data source; falling back to custom pricing: %f", name, customRAMCost)
  152. } else if math.IsNaN(ramCost) {
  153. log.DedupedInfof(10, "ClusterNodes: node %s has invalid RAM cost (NaN) from data source; falling back to custom pricing: %f", name, customRAMCost)
  154. } else {
  155. log.DedupedInfof(10, "ClusterNodes: node %s using custom pricing: %f", name, customRAMCost)
  156. }
  157. ramCost = customRAMCost
  158. }
  159. } else {
  160. if ramCost == 0 || math.IsNaN(ramCost) {
  161. log.Warnf("ClusterNodes: node %s has invalid RAM cost (0 or NaN), but was unable to fall back to custom pricing because it was nil", name)
  162. }
  163. }
  164. }
  165. clusterAndNameToType[keyNon] = nodeType
  166. // covert to price per byte/hr
  167. ramCostMap[key] = ramCost / 1024.0 / 1024.0 / 1024.0
  168. }
  169. return ramCostMap, clusterAndNameToType
  170. }
  171. func buildGPUCostMap(
  172. resNodeGPUCost []*source.NodeGPUPricePerHrResult,
  173. gpuCountMap map[NodeIdentifier]float64,
  174. cp models.Provider,
  175. preemptible map[NodeIdentifier]bool,
  176. ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
  177. gpuCostMap := make(map[NodeIdentifier]float64)
  178. clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
  179. customPricingEnabled := provider.CustomPricesEnabled(cp)
  180. customPricingConfig, err := cp.GetConfig()
  181. if err != nil {
  182. log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
  183. }
  184. for _, result := range resNodeGPUCost {
  185. cluster := result.Cluster
  186. if cluster == "" {
  187. cluster = coreenv.GetClusterID()
  188. }
  189. name := result.Node
  190. if name == "" {
  191. log.Warnf("ClusterNodes: GPU cost data missing node")
  192. continue
  193. }
  194. nodeType := result.InstanceType
  195. providerID := result.ProviderID
  196. key := NodeIdentifier{
  197. Cluster: cluster,
  198. Name: name,
  199. ProviderID: provider.ParseID(providerID),
  200. }
  201. keyNon := nodeIdentifierNoProviderID{
  202. Cluster: cluster,
  203. Name: name,
  204. }
  205. var gpuCost float64
  206. if customPricingEnabled && customPricingConfig != nil {
  207. var customGPUStr string
  208. if spot, ok := preemptible[key]; ok && spot {
  209. customGPUStr = customPricingConfig.SpotGPU
  210. } else {
  211. customGPUStr = customPricingConfig.GPU
  212. }
  213. customGPUCost, err := strconv.ParseFloat(customGPUStr, 64)
  214. if err != nil {
  215. log.Warnf("ClusterNodes: error parsing custom GPU price: %s", customGPUStr)
  216. }
  217. gpuCost = customGPUCost
  218. } else {
  219. gpuCost = result.Data[0].Value
  220. }
  221. clusterAndNameToType[keyNon] = nodeType
  222. // If gpu count is available use it to multiply gpu cost
  223. if value, ok := gpuCountMap[key]; ok {
  224. gpuCostMap[key] = gpuCost * value
  225. } else {
  226. gpuCostMap[key] = 0
  227. }
  228. }
  229. return gpuCostMap, clusterAndNameToType
  230. }
  231. func buildGPUCountMap(resNodeGPUCount []*source.NodeGPUCountResult) map[NodeIdentifier]float64 {
  232. gpuCountMap := make(map[NodeIdentifier]float64)
  233. for _, result := range resNodeGPUCount {
  234. cluster := result.Cluster
  235. if cluster == "" {
  236. cluster = coreenv.GetClusterID()
  237. }
  238. name := result.Node
  239. if name == "" {
  240. log.Warnf("ClusterNodes: GPU count data missing node")
  241. continue
  242. }
  243. gpuCount := result.Data[0].Value
  244. providerID := result.ProviderID
  245. key := NodeIdentifier{
  246. Cluster: cluster,
  247. Name: name,
  248. ProviderID: provider.ParseID(providerID),
  249. }
  250. gpuCountMap[key] = gpuCount
  251. }
  252. return gpuCountMap
  253. }
  254. func buildCPUCoresMap(resNodeCPUCores []*source.NodeCPUCoresCapacityResult) map[nodeIdentifierNoProviderID]float64 {
  255. m := make(map[nodeIdentifierNoProviderID]float64)
  256. for _, result := range resNodeCPUCores {
  257. cluster := result.Cluster
  258. if cluster == "" {
  259. cluster = coreenv.GetClusterID()
  260. }
  261. name := result.Node
  262. if name == "" {
  263. log.Warnf("ClusterNodes: CPU cores data missing node")
  264. continue
  265. }
  266. cpuCores := result.Data[0].Value
  267. key := nodeIdentifierNoProviderID{
  268. Cluster: cluster,
  269. Name: name,
  270. }
  271. m[key] = cpuCores
  272. }
  273. return m
  274. }
  275. func buildRAMBytesMap(resNodeRAMBytes []*source.NodeRAMBytesCapacityResult) map[nodeIdentifierNoProviderID]float64 {
  276. m := make(map[nodeIdentifierNoProviderID]float64)
  277. for _, result := range resNodeRAMBytes {
  278. cluster := result.Cluster
  279. if cluster == "" {
  280. cluster = coreenv.GetClusterID()
  281. }
  282. name := result.Node
  283. if name == "" {
  284. log.Warnf("ClusterNodes: RAM bytes data missing node")
  285. continue
  286. }
  287. ramBytes := result.Data[0].Value
  288. key := nodeIdentifierNoProviderID{
  289. Cluster: cluster,
  290. Name: name,
  291. }
  292. m[key] = ramBytes
  293. }
  294. return m
  295. }
  296. // Mapping of cluster/node=cpu for computing resource efficiency
  297. func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.NodeCPUModeTotalResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
  298. cpuBreakdownMap := make(map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown)
  299. // Mapping of cluster/node=cpu for computing resource efficiency
  300. clusterNodeCPUTotal := map[nodeIdentifierNoProviderID]float64{}
  301. // Mapping of cluster/node:mode=cpu for computing resource efficiency
  302. clusterNodeModeCPUTotal := map[nodeIdentifierNoProviderID]map[string]float64{}
  303. // Build intermediate structures for CPU usage by (cluster, node) and by
  304. // (cluster, node, mode) for computing resouce efficiency
  305. for _, result := range resNodeCPUModeTotal {
  306. cluster := result.Cluster
  307. if cluster == "" {
  308. cluster = coreenv.GetClusterID()
  309. }
  310. node := result.Node
  311. if node == "" {
  312. log.DedupedWarningf(5, "ClusterNodes: CPU mode data missing node")
  313. continue
  314. }
  315. mode := result.Mode
  316. if mode == "" {
  317. log.DedupedWarningf(10, "ClusterNodes: unable to read CPU mode data for node %s.", node)
  318. mode = "other"
  319. }
  320. key := nodeIdentifierNoProviderID{
  321. Cluster: cluster,
  322. Name: node,
  323. }
  324. total := result.Data[0].Value
  325. // Increment total
  326. clusterNodeCPUTotal[key] += total
  327. // Increment mode
  328. if _, ok := clusterNodeModeCPUTotal[key]; !ok {
  329. clusterNodeModeCPUTotal[key] = map[string]float64{}
  330. }
  331. clusterNodeModeCPUTotal[key][mode] += total
  332. }
  333. // Compute resource efficiency from intermediate structures
  334. for key, total := range clusterNodeCPUTotal {
  335. if modeTotals, ok := clusterNodeModeCPUTotal[key]; ok {
  336. for mode, subtotal := range modeTotals {
  337. // Compute percentage for the current cluster, node, mode
  338. pct := 0.0
  339. if total > 0 {
  340. pct = subtotal / total
  341. }
  342. if _, ok := cpuBreakdownMap[key]; !ok {
  343. cpuBreakdownMap[key] = &ClusterCostsBreakdown{}
  344. }
  345. switch mode {
  346. case "idle":
  347. cpuBreakdownMap[key].Idle += pct
  348. case "system":
  349. cpuBreakdownMap[key].System += pct
  350. case "user":
  351. cpuBreakdownMap[key].User += pct
  352. default:
  353. cpuBreakdownMap[key].Other += pct
  354. }
  355. }
  356. }
  357. }
  358. return cpuBreakdownMap
  359. }
  360. func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoProviderID]float64) map[nodeIdentifierNoProviderID]*NodeOverhead {
  361. m := make(map[nodeIdentifierNoProviderID]*NodeOverhead, len(capRam))
  362. for identifier, ramCapacity := range capRam {
  363. allocatableRam, ok := allocRam[identifier]
  364. if !ok {
  365. log.Warnf("Could not find allocatable ram for node %s", identifier.Name)
  366. continue
  367. }
  368. overheadBytes := ramCapacity - allocatableRam
  369. m[identifier] = &NodeOverhead{
  370. RamOverheadFraction: overheadBytes / ramCapacity,
  371. }
  372. }
  373. for identifier, cpuCapacity := range capCPU {
  374. allocatableCPU, ok := allocCPU[identifier]
  375. if !ok {
  376. log.Warnf("Could not find allocatable cpu for node %s", identifier.Name)
  377. continue
  378. }
  379. overhead := cpuCapacity - allocatableCPU
  380. if _, found := m[identifier]; found {
  381. m[identifier].CpuOverheadFraction = overhead / cpuCapacity
  382. } else {
  383. m[identifier] = &NodeOverhead{
  384. CpuOverheadFraction: overhead / cpuCapacity,
  385. }
  386. }
  387. }
  388. return m
  389. }
  390. func buildRAMUserPctMap(resNodeRAMUserPct []*source.NodeRAMUserPercentResult) map[nodeIdentifierNoProviderID]float64 {
  391. m := make(map[nodeIdentifierNoProviderID]float64)
  392. for _, result := range resNodeRAMUserPct {
  393. cluster := result.Cluster
  394. if cluster == "" {
  395. cluster = coreenv.GetClusterID()
  396. }
  397. name := result.Instance
  398. if name == "" {
  399. log.Warnf("ClusterNodes: RAM user percent missing node")
  400. continue
  401. }
  402. pct := result.Data[0].Value
  403. key := nodeIdentifierNoProviderID{
  404. Cluster: cluster,
  405. Name: name,
  406. }
  407. m[key] = pct
  408. }
  409. return m
  410. }
  411. func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.NodeRAMSystemPercentResult) map[nodeIdentifierNoProviderID]float64 {
  412. m := make(map[nodeIdentifierNoProviderID]float64)
  413. for _, result := range resNodeRAMSystemPct {
  414. cluster := result.Cluster
  415. if cluster == "" {
  416. cluster = coreenv.GetClusterID()
  417. }
  418. name := result.Instance
  419. if name == "" {
  420. log.Warnf("ClusterNodes: RAM system percent missing node")
  421. continue
  422. }
  423. pct := result.Data[0].Value
  424. key := nodeIdentifierNoProviderID{
  425. Cluster: cluster,
  426. Name: name,
  427. }
  428. m[key] = pct
  429. }
  430. return m
  431. }
// activeData records the observed active window for an asset.
type activeData struct {
	start   time.Time // first observed timestamp of activity
	end     time.Time // last observed timestamp of activity
	minutes float64   // duration of the window (end - start) in minutes
}
  437. // cluster management key gen
  438. func clusterManagementKeyGen(result *source.ClusterManagementDurationResult) (ClusterManagementIdentifier, bool) {
  439. cluster := result.Cluster
  440. if cluster == "" {
  441. cluster = coreenv.GetClusterID()
  442. }
  443. provisionerName := result.Provisioner
  444. return ClusterManagementIdentifier{
  445. Cluster: cluster,
  446. Provisioner: provisionerName,
  447. }, true
  448. }
// clusterManagementValues returns the time-series data points from a
// cluster management duration query result.
func clusterManagementValues(result *source.ClusterManagementDurationResult) []*util.Vector {
	return result.Data
}
  452. // node key gen
  453. func nodeKeyGen(result *source.NodeActiveMinutesResult) (NodeIdentifier, bool) {
  454. cluster := result.Cluster
  455. if cluster == "" {
  456. cluster = coreenv.GetClusterID()
  457. }
  458. name := result.Node
  459. if name == "" {
  460. log.Warnf("ClusterNodes: active mins missing node")
  461. return NodeIdentifier{}, false
  462. }
  463. providerID := result.ProviderID
  464. return NodeIdentifier{
  465. Cluster: cluster,
  466. Name: name,
  467. ProviderID: provider.ParseID(providerID),
  468. }, true
  469. }
// nodeValues returns the time-series data points from a node
// active-minutes query result.
func nodeValues(result *source.NodeActiveMinutesResult) []*util.Vector {
	return result.Data
}
  473. func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdentifier, bool) {
  474. cluster := result.Cluster
  475. if cluster == "" {
  476. cluster = coreenv.GetClusterID()
  477. }
  478. namespace := result.Namespace
  479. if namespace == "" {
  480. log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
  481. return LoadBalancerIdentifier{}, false
  482. }
  483. name := result.Service
  484. if name == "" {
  485. log.Warnf("ClusterLoadBalancers: LB cost data missing service_name")
  486. return LoadBalancerIdentifier{}, false
  487. }
  488. ingressIp := result.IngressIP
  489. if ingressIp == "" {
  490. log.DedupedWarningf(5, "ClusterLoadBalancers: LB cost data missing ingress_ip")
  491. // only update asset cost when an actual IP was returned
  492. return LoadBalancerIdentifier{}, false
  493. }
  494. return LoadBalancerIdentifier{
  495. Cluster: cluster,
  496. Namespace: namespace,
  497. Name: fmt.Sprintf("%s/%s", namespace, name), // TODO: this is kept for backwards-compatibility, but not good,
  498. IngressIP: ingressIp,
  499. }, true
  500. }
// lbValues returns the time-series data points from a load balancer
// active-minutes query result.
func lbValues(result *source.LBActiveMinutesResult) []*util.Vector {
	return result.Data
}
  504. func buildActiveDataMap[T comparable, U any](
  505. results []*U,
  506. keyGen func(*U) (T, bool),
  507. valuesFunc func(*U) []*util.Vector,
  508. resolution time.Duration,
  509. window opencost.Window,
  510. ) map[T]activeData {
  511. m := make(map[T]activeData)
  512. for _, result := range results {
  513. key, ok := keyGen(result)
  514. values := valuesFunc(result)
  515. if !ok || len(values) == 0 {
  516. continue
  517. }
  518. s, e := calculateStartAndEnd(values, resolution, window)
  519. mins := e.Sub(s).Minutes()
  520. m[key] = activeData{
  521. start: s,
  522. end: e,
  523. minutes: mins,
  524. }
  525. }
  526. return m
  527. }
  528. // Determine preemptibility with node labels
  529. // node id -> is preemptible?
  530. func buildPreemptibleMap(
  531. resIsSpot []*source.NodeIsSpotResult,
  532. ) map[NodeIdentifier]bool {
  533. m := make(map[NodeIdentifier]bool)
  534. for _, result := range resIsSpot {
  535. cluster := result.Cluster
  536. if cluster == "" {
  537. cluster = coreenv.GetClusterID()
  538. }
  539. name := result.Node
  540. if name == "" {
  541. log.Warnf("ClusterNodes: active mins missing node")
  542. continue
  543. }
  544. providerID := result.ProviderID
  545. key := NodeIdentifier{
  546. Cluster: cluster,
  547. Name: name,
  548. ProviderID: provider.ParseID(providerID),
  549. }
  550. // GCP preemptible label
  551. pre := result.Data[0].Value
  552. // TODO(michaelmdresser): check this condition at merge time?
  553. // if node, ok := nodeMap[key]; pre > 0.0 && ok {
  554. // node.Preemptible = true
  555. // }
  556. m[key] = pre > 0.0
  557. // TODO AWS preemptible
  558. // TODO Azure preemptible
  559. }
  560. return m
  561. }
  562. func buildAssetsPVCMap(resPVCInfo []*source.PVCInfoResult) map[DiskIdentifier]*Disk {
  563. diskMap := map[DiskIdentifier]*Disk{}
  564. for _, result := range resPVCInfo {
  565. cluster := result.Cluster
  566. if cluster == "" {
  567. cluster = coreenv.GetClusterID()
  568. }
  569. volumeName := result.VolumeName
  570. if volumeName == "" {
  571. log.Debugf("ClusterDisks: pv claim data missing volumename")
  572. continue
  573. }
  574. claimName := result.PersistentVolumeClaim
  575. if claimName == "" {
  576. log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
  577. continue
  578. }
  579. claimNamespace := result.Namespace
  580. if claimNamespace == "" {
  581. log.Debugf("ClusterDisks: pv claim data missing namespace")
  582. continue
  583. }
  584. key := DiskIdentifier{
  585. Cluster: cluster,
  586. Name: volumeName,
  587. }
  588. if _, ok := diskMap[key]; !ok {
  589. diskMap[key] = &Disk{
  590. Cluster: cluster,
  591. Name: volumeName,
  592. Breakdown: &ClusterCostsBreakdown{},
  593. }
  594. }
  595. diskMap[key].VolumeName = volumeName
  596. diskMap[key].ClaimName = claimName
  597. diskMap[key].ClaimNamespace = claimNamespace
  598. }
  599. return diskMap
  600. }
  601. func buildLabelsMap(
  602. resLabels []*source.NodeLabelsResult,
  603. ) map[nodeIdentifierNoProviderID]map[string]string {
  604. m := make(map[nodeIdentifierNoProviderID]map[string]string)
  605. // Copy labels into node
  606. for _, result := range resLabels {
  607. cluster := result.Cluster
  608. if cluster == "" {
  609. cluster = coreenv.GetClusterID()
  610. }
  611. node := result.Node
  612. if node == "" {
  613. log.DedupedWarningf(5, "ClusterNodes: label data missing node")
  614. continue
  615. }
  616. key := nodeIdentifierNoProviderID{
  617. Cluster: cluster,
  618. Name: node,
  619. }
  620. // The QueryResult.GetLabels function needs to be called to sanitize the
  621. // ingested label data. This removes the label_ prefix that prometheus
  622. // adds to emitted labels. It also keeps from ingesting prometheus labels
  623. // that aren't a part of the asset.
  624. if _, ok := m[key]; !ok {
  625. m[key] = map[string]string{}
  626. }
  627. for k, l := range result.Labels {
  628. m[key][k] = l
  629. }
  630. }
  631. return m
  632. }
  633. // checkForKeyAndInitIfMissing inits a key in the provided nodemap if
  634. // it does not exist. Intended to be called ONLY by buildNodeMap
  635. func checkForKeyAndInitIfMissing(
  636. nodeMap map[NodeIdentifier]*Node,
  637. key NodeIdentifier,
  638. clusterAndNameToType map[nodeIdentifierNoProviderID]string,
  639. ) {
  640. if _, ok := nodeMap[key]; !ok {
  641. // default nodeType in case we don't have the mapping
  642. var nodeType string
  643. if t, ok := clusterAndNameToType[nodeIdentifierNoProviderID{
  644. Cluster: key.Cluster,
  645. Name: key.Name,
  646. }]; ok {
  647. nodeType = t
  648. } else {
  649. log.Warnf("ClusterNodes: Type does not exist for node identifier %s", key)
  650. }
  651. nodeMap[key] = &Node{
  652. Cluster: key.Cluster,
  653. Name: key.Name,
  654. NodeType: nodeType,
  655. ProviderID: key.ProviderID,
  656. CPUBreakdown: &ClusterCostsBreakdown{},
  657. RAMBreakdown: &ClusterCostsBreakdown{},
  658. }
  659. }
  660. }
// buildNodeMap creates the main set of node data for ClusterNodes from
// the data maps built from Prometheus queries. Some of the Prometheus
// data has access to the provider_id field and some does not. To get
// around this problem, we use the data that includes provider_id
// to build up the definitive set of nodes and then use the data
// with less-specific identifiers (i.e. without provider_id) to fill
// in the remaining fields.
//
// For example, let's say we have nodes identified like so:
// cluster name/node name/provider_id. For the sake of the example,
// we will also limit data to CPU cost, CPU cores, and preemptibility.
//
// We have CPU cost data that looks like this:
// cluster1/node1/prov_node1_A: $10
// cluster1/node1/prov_node1_B: $8
// cluster1/node2/prov_node2: $15
//
// We have Preemptible data that looks like this:
// cluster1/node1/prov_node1_A: true
// cluster1/node1/prov_node1_B: false
// cluster1/node2/prov_node2_B: false
//
// We have CPU cores data that looks like this:
// cluster1/node1: 4
// cluster1/node2: 6
//
// This function first combines the data that is fully identified,
// creating the following:
// cluster1/node1/prov_node1_A: CPUCost($10), Preemptible(true)
// cluster1/node1/prov_node1_B: CPUCost($8), Preemptible(false)
// cluster1/node2/prov_node2: CPUCost($15), Preemptible(false)
//
// It then uses the less-specific data to extend the specific data,
// making the following:
// cluster1/node1/prov_node1_A: CPUCost($10), Preemptible(true), Cores(4)
// cluster1/node1/prov_node1_B: CPUCost($8), Preemptible(false), Cores(4)
// cluster1/node2/prov_node2: CPUCost($15), Preemptible(false), Cores(6)
//
// In the situation where provider_id doesn't exist for any metrics,
// that is the same as all provider_ids being empty strings. If
// provider_id doesn't exist at all, then we (without having to do
// extra work) easily fall back on identifying nodes only by cluster name
// and node name because the provider_id part of the key will always
// be the empty string.
//
// It is worth noting that, in this approach, if a node is not present
// in the more specific data but is present in the less-specific data,
// that data is never processed into the final node map. For example,
// let's say the CPU cores map has the following entry:
// cluster1/node8: 6
// But none of the maps with provider_id (CPU cost, RAM cost, etc.)
// have an identifier for cluster1/node8 (regardless of provider_id).
// In this situation, the final node map will not have a cluster1/node8
// entry. This could be fixed by iterating over all of the less specific
// identifiers and, inside that iteration, all of the identifiers in
// the node map, but this would introduce a roughly quadratic time
// complexity.
func buildNodeMap(
	cpuCostMap, ramCostMap, gpuCostMap, gpuCountMap map[NodeIdentifier]float64,
	cpuCoresMap, ramBytesMap, ramUserPctMap,
	ramSystemPctMap map[nodeIdentifierNoProviderID]float64,
	cpuBreakdownMap map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown,
	activeDataMap map[NodeIdentifier]activeData,
	preemptibleMap map[NodeIdentifier]bool,
	labelsMap map[nodeIdentifierNoProviderID]map[string]string,
	clusterAndNameToType map[nodeIdentifierNoProviderID]string,
	overheadMap map[nodeIdentifierNoProviderID]*NodeOverhead,
) map[NodeIdentifier]*Node {
	nodeMap := make(map[NodeIdentifier]*Node)
	// Initialize the map with the most-specific data:
	for id, cost := range cpuCostMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].CPUCost = cost
	}
	for id, cost := range ramCostMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].RAMCost = cost
	}
	for id, cost := range gpuCostMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].GPUCost = cost
	}
	for id, count := range gpuCountMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].GPUCount = count
	}
	for id, preemptible := range preemptibleMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].Preemptible = preemptible
	}
	for id, activeData := range activeDataMap {
		checkForKeyAndInitIfMissing(nodeMap, id, clusterAndNameToType)
		nodeMap[id].Start = activeData.start
		nodeMap[id].End = activeData.end
		// Minutes is recomputed from Start/End rather than taken from
		// activeData.minutes (the two should agree by construction).
		nodeMap[id].Minutes = nodeMap[id].End.Sub(nodeMap[id].Start).Minutes()
	}
	// We now merge in data that doesn't have a provider id by looping over
	// all keys already added and inserting data according to their
	// cluster name/node name combos.
	for id, nodePtr := range nodeMap {
		clusterAndNameID := nodeIdentifierNoProviderID{
			Cluster: id.Cluster,
			Name:    id.Name,
		}
		if cores, ok := cpuCoresMap[clusterAndNameID]; ok {
			nodePtr.CPUCores = cores
			// partialCPUMap presumably maps instance types with
			// fractional/shared vCPUs to their effective core count —
			// TODO confirm; when present, both the core count and the
			// CPU cost are scaled accordingly.
			if v, ok := partialCPUMap[nodePtr.NodeType]; ok {
				if cores > 0 {
					nodePtr.CPUCores = v
					adjustmentFactor := v / cores
					nodePtr.CPUCost = nodePtr.CPUCost * adjustmentFactor
				}
			}
		}
		if ramBytes, ok := ramBytesMap[clusterAndNameID]; ok {
			nodePtr.RAMBytes = ramBytes
		}
		if ramUserPct, ok := ramUserPctMap[clusterAndNameID]; ok {
			nodePtr.RAMBreakdown.User = ramUserPct
		}
		if ramSystemPct, ok := ramSystemPctMap[clusterAndNameID]; ok {
			nodePtr.RAMBreakdown.System = ramSystemPct
		}
		if cpuBreakdown, ok := cpuBreakdownMap[clusterAndNameID]; ok {
			nodePtr.CPUBreakdown = cpuBreakdown
		}
		if labels, ok := labelsMap[clusterAndNameID]; ok {
			nodePtr.Labels = labels
		}
		if overhead, ok := overheadMap[clusterAndNameID]; ok {
			nodePtr.Overhead = overhead
		} else {
			// we were unable to compute overhead for this node
			// assume default case of no overhead
			nodePtr.Overhead = &NodeOverhead{}
			log.Warnf("unable to compute overhead for node %s - defaulting to no overhead", clusterAndNameID.Name)
		}
	}
	return nodeMap
}