costmodel.go 93 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698
  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "math"
  6. "regexp"
  7. "strconv"
  8. "strings"
  9. "time"
  10. "github.com/opencost/opencost/core/pkg/clusters"
  11. "github.com/opencost/opencost/core/pkg/log"
  12. "github.com/opencost/opencost/core/pkg/opencost"
  13. "github.com/opencost/opencost/core/pkg/util"
  14. "github.com/opencost/opencost/core/pkg/util/promutil"
  15. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  16. "github.com/opencost/opencost/pkg/clustercache"
  17. "github.com/opencost/opencost/pkg/env"
  18. "github.com/opencost/opencost/pkg/prom"
  19. prometheus "github.com/prometheus/client_golang/api"
  20. prometheusClient "github.com/prometheus/client_golang/api"
  21. v1 "k8s.io/api/core/v1"
  22. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  23. "k8s.io/apimachinery/pkg/labels"
  24. "golang.org/x/sync/singleflight"
  25. )
// HTTP/profiling constants and the Prometheus HTTP API endpoint paths
// used when talking directly to the Prometheus server.
const (
	// statusAPIError is the HTTP status (422 Unprocessable Entity) that the
	// Prometheus API uses for well-formed but unprocessable requests.
	statusAPIError = 422

	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

	// unmountedPVsContainer is the synthetic container name under which
	// unmounted persistent volumes are reported.
	unmountedPVsContainer = "unmounted-pvs"

	// apiPrefix is the common prefix of all Prometheus v1 API endpoints below.
	apiPrefix = "/api/v1"

	epAlertManagers   = apiPrefix + "/alertmanagers"
	epLabelValues     = apiPrefix + "/label/:name/values"
	epSeries          = apiPrefix + "/series"
	epTargets         = apiPrefix + "/targets"
	epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
	epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
	epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
	epConfig          = apiPrefix + "/status/config"
	epFlags           = apiPrefix + "/status/flags"
)
// isCron matches a CronJob-created Job name and captures the non-timestamp
// portion of the name in the first submatch.
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel bundles the dependencies used to compute cost data: the
// Kubernetes cluster cache, a cluster-ID-to-name map, a Prometheus client,
// and a cloud pricing Provider.
type CostModel struct {
	Cache      clustercache.ClusterCache
	ClusterMap clusters.ClusterMap
	// MaxPrometheusQueryDuration bounds the duration of a single Prometheus
	// query (populated from env.GetETLMaxPrometheusQueryDuration in NewCostModel).
	MaxPrometheusQueryDuration time.Duration
	// RequestGroup coalesces identical concurrent requests (singleflight) to
	// prevent over-requesting the same data prior to caching.
	RequestGroup     *singleflight.Group
	ScrapeInterval   time.Duration
	PrometheusClient prometheus.Client
	Provider         costAnalyzerCloud.Provider
	// pricingMetadata records pricing-match details; internal use only.
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  57. func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
  58. // request grouping to prevent over-requesting the same data prior to caching
  59. requestGroup := new(singleflight.Group)
  60. return &CostModel{
  61. Cache: cache,
  62. ClusterMap: clusterMap,
  63. MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
  64. PrometheusClient: client,
  65. Provider: provider,
  66. RequestGroup: requestGroup,
  67. ScrapeInterval: scrapeInterval,
  68. }
  69. }
// CostData holds the cost-relevant data for one container: identity (name,
// pod, node, namespace), controller memberships, resource request/usage
// vectors, attached PVC and network data, and labels/annotations. ClusterID
// and ClusterName identify the source cluster.
type CostData struct {
	Name            string                       `json:"name,omitempty"`
	PodName         string                       `json:"podName,omitempty"`
	NodeName        string                       `json:"nodeName,omitempty"`
	NodeData        *costAnalyzerCloud.Node      `json:"node,omitempty"`
	Namespace       string                       `json:"namespace,omitempty"`
	Deployments     []string                     `json:"deployments,omitempty"`
	Services        []string                     `json:"services,omitempty"`
	Daemonsets      []string                     `json:"daemonsets,omitempty"`
	Statefulsets    []string                     `json:"statefulsets,omitempty"`
	Jobs            []string                     `json:"jobs,omitempty"`
	RAMReq          []*util.Vector               `json:"ramreq,omitempty"`
	RAMUsed         []*util.Vector               `json:"ramused,omitempty"`
	RAMAllocation   []*util.Vector               `json:"ramallocated,omitempty"`
	CPUReq          []*util.Vector               `json:"cpureq,omitempty"`
	CPUUsed         []*util.Vector               `json:"cpuused,omitempty"`
	CPUAllocation   []*util.Vector               `json:"cpuallocated,omitempty"`
	GPUReq          []*util.Vector               `json:"gpureq,omitempty"`
	PVCData         []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
	NetworkData     []*util.Vector               `json:"network,omitempty"`
	Annotations     map[string]string            `json:"annotations,omitempty"`
	Labels          map[string]string            `json:"labels,omitempty"`
	NamespaceLabels map[string]string            `json:"namespaceLabels,omitempty"`
	ClusterID       string                       `json:"clusterId"`
	ClusterName     string                       `json:"clusterName"`
}
  96. func (cd *CostData) String() string {
  97. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  98. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  99. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  100. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  101. }
  102. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  103. hasController = false
  104. if len(cd.Deployments) > 0 {
  105. name = cd.Deployments[0]
  106. kind = "deployment"
  107. hasController = true
  108. } else if len(cd.Statefulsets) > 0 {
  109. name = cd.Statefulsets[0]
  110. kind = "statefulset"
  111. hasController = true
  112. } else if len(cd.Daemonsets) > 0 {
  113. name = cd.Daemonsets[0]
  114. kind = "daemonset"
  115. hasController = true
  116. } else if len(cd.Jobs) > 0 {
  117. name = cd.Jobs[0]
  118. kind = "job"
  119. hasController = true
  120. match := isCron.FindStringSubmatch(name)
  121. if match != nil {
  122. name = match[1]
  123. }
  124. }
  125. return name, kind, hasController
  126. }
// PromQL query templates. The fmt verbs are filled by the callers (see
// ComputeCostData): typically a cluster filter (env.GetPromClusterFilter()),
// a window duration, an optional offset clause (may be empty), and the
// cluster label (env.GetPromClusterLabel()) — in that order unless noted.
const (
	// queryRAMRequestsStr: summed memory requests per container over the
	// window, averaged, keyed by namespace/container/pod/node/cluster.
	queryRAMRequestsStr = `avg(
label_replace(
label_replace(
sum_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!="", %s}[%s] %s)
, "container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryRAMUsageStr: working-set bytes per container over the window;
	// "instance" is relabeled to "node".
	queryRAMUsageStr = `avg(
label_replace(
label_replace(
label_replace(
sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
), "container_name", "$1", "container", "(.+)"
), "pod_name", "$1", "pod", "(.+)"
)
) by (namespace, container_name, pod_name, node, %s)`
	// queryCPURequestsStr: summed CPU (core) requests per container over the window.
	queryCPURequestsStr = `avg(
label_replace(
label_replace(
sum_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!="", %s}[%s] %s)
, "container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryCPUUsageStr: rate of CPU seconds consumed (i.e. cores) per container.
	queryCPUUsageStr = `avg(
label_replace(
label_replace(
label_replace(
rate(
container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
), "node", "$1", "instance", "(.+)"
), "container_name", "$1", "container", "(.+)"
), "pod_name", "$1", "pod", "(.+)"
)
) by (namespace, container_name, pod_name, node, %s)`
	// queryGPURequestsStr: summed nvidia.com/gpu requests per container.
	queryGPURequestsStr = `avg(
label_replace(
label_replace(
sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s),
"container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryPVRequestsStr: joins PVC info (storageclass/volume) onto requested
	// storage bytes per claim.
	queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
*
on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`
	// queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
	// window, aggregated by container.
	// [line 3] sum_over_time(each byte) = [byte*scrape] by metric
	// [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
	// [lines 2,4] sum(") by unique container key and multiply [byte*scrape] * [hours/scrape] for byte*hours
	// [lines 1,5] relabeling
	queryRAMAllocationByteHours = `
label_replace(label_replace(
sum(
sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!="", %s}[%s])
) by (namespace,container,pod,node,%s) * %f / 60 / 60
, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
	// queryCPUAllocationVCPUHours yields the total VCPU-hour CPU allocation over the given
	// window, aggregated by container.
	// [line 3] sum_over_time(each VCPU*mins in window) = [VCPU*scrape] by metric
	// [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
	// [lines 2,4] sum(") by unique container key and multiply [VCPU*scrape] * [hours/scrape] for VCPU*hours
	// [lines 1,5] relabeling
	queryCPUAllocationVCPUHours = `
label_replace(label_replace(
sum(
sum_over_time(container_cpu_allocation{container!="",container!="POD", node!="", %s}[%s])
) by (namespace,container,pod,node,%s) * %f / 60 / 60
, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
	// queryPVCAllocationFmt yields the total byte-hour PVC allocation over the given window.
	// sum_over_time(each byte) = [byte*scrape] by metric *(scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by pod
	queryPVCAllocationFmt = `sum(sum_over_time(pod_pvc_allocation{%s}[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
	queryPVHourlyCostFmt  = `avg_over_time(pv_hourly_cost{%s}[%s])`
	// Label/annotation/ownership lookups, averaged over the window.
	queryNSLabels          = `avg_over_time(kube_namespace_labels{%s}[%s])`
	queryPodLabels         = `avg_over_time(kube_pod_labels{%s}[%s])`
	queryNSAnnotations     = `avg_over_time(kube_namespace_annotations{%s}[%s])`
	queryPodAnnotations    = `avg_over_time(kube_pod_annotations{%s}[%s])`
	queryDeploymentLabels  = `avg_over_time(deployment_match_labels{%s}[%s])`
	queryStatefulsetLabels = `avg_over_time(statefulSet_match_labels{%s}[%s])`
	queryPodDaemonsets     = `sum(kube_pod_owner{owner_kind="DaemonSet", %s}) by (namespace,pod,owner_name,%s)`
	queryPodJobs           = `sum(kube_pod_owner{owner_kind="Job", %s}) by (namespace,pod,owner_name,%s)`
	queryServiceLabels     = `avg_over_time(service_selector_labels{%s}[%s])`
	// Network egress, converted from bytes to GiB, split by destination:
	// same-region cross-zone, cross-region, and internet.
	queryZoneNetworkUsage     = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	queryRegionNetworkUsage   = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	queryInternetNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	// normalizationStr counts scrapes of a request metric in the window; used
	// to normalize sum_over_time results (args: filter, window, offset).
	normalizationStr = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
)
  218. func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
  219. queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  220. queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  221. queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  222. queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  223. queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  224. queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)
  225. // Cluster ID is specific to the source cluster
  226. clusterID := env.GetClusterID()
  227. // Submit all Prometheus queries asynchronously
  228. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  229. resChRAMUsage := ctx.Query(queryRAMUsage)
  230. resChCPUUsage := ctx.Query(queryCPUUsage)
  231. resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
  232. resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
  233. resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
  234. resChNormalization := ctx.Query(queryNormalization)
  235. // Pull pod information from k8s API
  236. podlist := cm.Cache.GetAllPods()
  237. podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
  238. if err != nil {
  239. return nil, err
  240. }
  241. podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
  242. if err != nil {
  243. return nil, err
  244. }
  245. namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
  246. if err != nil {
  247. return nil, err
  248. }
  249. namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
  250. if err != nil {
  251. return nil, err
  252. }
  253. // Process Prometheus query results. Handle errors using ctx.Errors.
  254. resRAMUsage, _ := resChRAMUsage.Await()
  255. resCPUUsage, _ := resChCPUUsage.Await()
  256. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  257. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  258. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  259. resNormalization, _ := resChNormalization.Await()
  260. // NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
  261. // NOTE: will not propagate unless coupled with errors.
  262. if ctx.HasErrors() {
  263. // To keep the context of where the errors are occurring, we log the errors here and pass them the error
  264. // back to the caller. The caller should handle the specific case where error is an ErrorCollection
  265. for _, promErr := range ctx.Errors() {
  266. if promErr.Error != nil {
  267. log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
  268. }
  269. if promErr.ParseError != nil {
  270. log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
  271. }
  272. }
  273. // ErrorCollection is an collection of errors wrapped in a single error implementation
  274. // We opt to not return an error for the sake of running as a pure exporter.
  275. log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
  276. }
  277. defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
  278. normalizationValue, err := getNormalization(resNormalization)
  279. if err != nil {
  280. // We opt to not return an error for the sake of running as a pure exporter.
  281. log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
  282. }
  283. nodes, err := cm.GetNodeCost(cp)
  284. if err != nil {
  285. log.Warnf("GetNodeCost: no node cost model available: " + err.Error())
  286. return nil, err
  287. }
  288. // Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
  289. unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
  290. pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
  291. if err != nil {
  292. log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
  293. }
  294. if pvClaimMapping != nil {
  295. err = addPVData(cm.Cache, pvClaimMapping, cp)
  296. if err != nil {
  297. return nil, err
  298. }
  299. // copy claim mappings into zombies, then remove as they're discovered
  300. for k, v := range pvClaimMapping {
  301. unmountedPVs[k] = []*PersistentVolumeClaimData{v}
  302. }
  303. }
  304. networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
  305. if err != nil {
  306. log.Warnf("Unable to get Network Cost Data: %s", err.Error())
  307. networkUsageMap = make(map[string]*NetworkUsageData)
  308. }
  309. containerNameCost := make(map[string]*CostData)
  310. containers := make(map[string]bool)
  311. RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, true, normalizationValue, clusterID)
  312. if err != nil {
  313. return nil, err
  314. }
  315. for key := range RAMUsedMap {
  316. containers[key] = true
  317. }
  318. CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, false, 0, clusterID) // No need to normalize here, as this comes from a counter
  319. if err != nil {
  320. return nil, err
  321. }
  322. for key := range CPUUsedMap {
  323. containers[key] = true
  324. }
  325. currentContainers := make(map[string]clustercache.Pod)
  326. for _, pod := range podlist {
  327. if pod.Status.Phase != v1.PodRunning {
  328. continue
  329. }
  330. cs, err := NewContainerMetricsFromPod(pod, clusterID)
  331. if err != nil {
  332. return nil, err
  333. }
  334. for _, c := range cs {
  335. containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
  336. currentContainers[c.Key()] = *pod
  337. }
  338. }
  339. missingNodes := make(map[string]*costAnalyzerCloud.Node)
  340. missingContainers := make(map[string]*CostData)
  341. for key := range containers {
  342. if _, ok := containerNameCost[key]; ok {
  343. continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
  344. }
  345. // The _else_ case for this statement is the case in which the container has been
  346. // deleted so we have usage information but not request information. In that case,
  347. // we return partial data for CPU and RAM: only usage and not requests.
  348. if pod, ok := currentContainers[key]; ok {
  349. podName := pod.Name
  350. ns := pod.Namespace
  351. nsLabels := namespaceLabelsMapping[ns+","+clusterID]
  352. podLabels := pod.Labels
  353. if podLabels == nil {
  354. podLabels = make(map[string]string)
  355. }
  356. for k, v := range nsLabels {
  357. if _, ok := podLabels[k]; !ok {
  358. podLabels[k] = v
  359. }
  360. }
  361. nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  362. podAnnotations := pod.Annotations
  363. if podAnnotations == nil {
  364. podAnnotations = make(map[string]string)
  365. }
  366. for k, v := range nsAnnotations {
  367. if _, ok := podAnnotations[k]; !ok {
  368. podAnnotations[k] = v
  369. }
  370. }
  371. nodeName := pod.Spec.NodeName
  372. var nodeData *costAnalyzerCloud.Node
  373. if _, ok := nodes[nodeName]; ok {
  374. nodeData = nodes[nodeName]
  375. }
  376. nsKey := ns + "," + clusterID
  377. var podDeployments []string
  378. if _, ok := podDeploymentsMapping[nsKey]; ok {
  379. if ds, ok := podDeploymentsMapping[nsKey][pod.Name]; ok {
  380. podDeployments = ds
  381. } else {
  382. podDeployments = []string{}
  383. }
  384. }
  385. var podPVs []*PersistentVolumeClaimData
  386. podClaims := pod.Spec.Volumes
  387. for _, vol := range podClaims {
  388. if vol.PersistentVolumeClaim != nil {
  389. name := vol.PersistentVolumeClaim.ClaimName
  390. key := ns + "," + name + "," + clusterID
  391. if pvClaim, ok := pvClaimMapping[key]; ok {
  392. pvClaim.TimesClaimed++
  393. podPVs = append(podPVs, pvClaim)
  394. // Remove entry from potential unmounted pvs
  395. delete(unmountedPVs, key)
  396. }
  397. }
  398. }
  399. var podNetCosts []*util.Vector
  400. if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
  401. netCosts, err := GetNetworkCost(usage, cp)
  402. if err != nil {
  403. log.Debugf("Error pulling network costs: %s", err.Error())
  404. } else {
  405. podNetCosts = netCosts
  406. }
  407. }
  408. var podServices []string
  409. if _, ok := podServicesMapping[nsKey]; ok {
  410. if svcs, ok := podServicesMapping[nsKey][pod.Name]; ok {
  411. podServices = svcs
  412. } else {
  413. podServices = []string{}
  414. }
  415. }
  416. for i, container := range pod.Spec.Containers {
  417. containerName := container.Name
  418. // recreate the key and look up data for this container
  419. newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
  420. // k8s.io/apimachinery/pkg/api/resource/amount.go and
  421. // k8s.io/apimachinery/pkg/api/resource/quantity.go for
  422. // details on the "amount" API. See
  423. // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
  424. // for the units of memory and CPU.
  425. ramRequestBytes := container.Resources.Requests.Memory().Value()
  426. // Because information on container RAM & CPU requests isn't
  427. // coming from Prometheus, it won't have a timestamp associated
  428. // with it. We need to provide a timestamp.
  429. RAMReqV := []*util.Vector{
  430. {
  431. Value: float64(ramRequestBytes),
  432. Timestamp: float64(time.Now().UTC().Unix()),
  433. },
  434. }
  435. // use millicores so we can convert to cores in a float64 format
  436. cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
  437. CPUReqV := []*util.Vector{
  438. {
  439. Value: float64(cpuRequestMilliCores) / 1000,
  440. Timestamp: float64(time.Now().UTC().Unix()),
  441. },
  442. }
  443. gpuReqCount := 0.0
  444. if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
  445. gpuReqCount = g.AsApproximateFloat64()
  446. } else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
  447. gpuReqCount = g.AsApproximateFloat64()
  448. } else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
  449. gpuReqCount = g.AsApproximateFloat64()
  450. } else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
  451. gpuReqCount = g.AsApproximateFloat64()
  452. }
  453. GPUReqV := []*util.Vector{
  454. {
  455. Value: float64(gpuReqCount),
  456. Timestamp: float64(time.Now().UTC().Unix()),
  457. },
  458. }
  459. RAMUsedV, ok := RAMUsedMap[newKey]
  460. if !ok {
  461. log.Debug("no RAM usage for " + newKey)
  462. RAMUsedV = []*util.Vector{{}}
  463. }
  464. CPUUsedV, ok := CPUUsedMap[newKey]
  465. if !ok {
  466. log.Debug("no CPU usage for " + newKey)
  467. CPUUsedV = []*util.Vector{{}}
  468. }
  469. var pvReq []*PersistentVolumeClaimData
  470. var netReq []*util.Vector
  471. if i == 0 { // avoid duplicating by just assigning all claims to the first container.
  472. pvReq = podPVs
  473. netReq = podNetCosts
  474. }
  475. costs := &CostData{
  476. Name: containerName,
  477. PodName: podName,
  478. NodeName: nodeName,
  479. Namespace: ns,
  480. Deployments: podDeployments,
  481. Services: podServices,
  482. Daemonsets: getDaemonsetsOfPod(pod),
  483. Jobs: getJobsOfPod(pod),
  484. Statefulsets: getStatefulSetsOfPod(pod),
  485. NodeData: nodeData,
  486. RAMReq: RAMReqV,
  487. RAMUsed: RAMUsedV,
  488. CPUReq: CPUReqV,
  489. CPUUsed: CPUUsedV,
  490. GPUReq: GPUReqV,
  491. PVCData: pvReq,
  492. NetworkData: netReq,
  493. Annotations: podAnnotations,
  494. Labels: podLabels,
  495. NamespaceLabels: nsLabels,
  496. ClusterID: clusterID,
  497. ClusterName: cm.ClusterMap.NameFor(clusterID),
  498. }
  499. var cpuReq, cpuUse *util.Vector
  500. if len(costs.CPUReq) > 0 {
  501. cpuReq = costs.CPUReq[0]
  502. }
  503. if len(costs.CPUUsed) > 0 {
  504. cpuUse = costs.CPUUsed[0]
  505. }
  506. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  507. var ramReq, ramUse *util.Vector
  508. if len(costs.RAMReq) > 0 {
  509. ramReq = costs.RAMReq[0]
  510. }
  511. if len(costs.RAMUsed) > 0 {
  512. ramUse = costs.RAMUsed[0]
  513. }
  514. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  515. if filterNamespace == "" {
  516. containerNameCost[newKey] = costs
  517. } else if costs.Namespace == filterNamespace {
  518. containerNameCost[newKey] = costs
  519. }
  520. }
  521. } else {
  522. // The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
  523. log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
  524. c, err := NewContainerMetricFromKey(key)
  525. if err != nil {
  526. return nil, err
  527. }
  528. // CPU and RAM requests are obtained from the Kubernetes API.
  529. // If this case has been reached, the Kubernetes API will not
  530. // have information about the pod because it no longer exists.
  531. //
  532. // The case where this matters is minimal, mainly in environments
  533. // with very short-lived pods that over-request resources.
  534. RAMReqV := []*util.Vector{{}}
  535. CPUReqV := []*util.Vector{{}}
  536. GPUReqV := []*util.Vector{{}}
  537. RAMUsedV, ok := RAMUsedMap[key]
  538. if !ok {
  539. log.Debug("no RAM usage for " + key)
  540. RAMUsedV = []*util.Vector{{}}
  541. }
  542. CPUUsedV, ok := CPUUsedMap[key]
  543. if !ok {
  544. log.Debug("no CPU usage for " + key)
  545. CPUUsedV = []*util.Vector{{}}
  546. }
  547. node, ok := nodes[c.NodeName]
  548. if !ok {
  549. log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
  550. if n, ok := missingNodes[c.NodeName]; ok {
  551. node = n
  552. } else {
  553. node = &costAnalyzerCloud.Node{}
  554. missingNodes[c.NodeName] = node
  555. }
  556. }
  557. namespacelabels, _ := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
  558. namespaceAnnotations, _ := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
  559. costs := &CostData{
  560. Name: c.ContainerName,
  561. PodName: c.PodName,
  562. NodeName: c.NodeName,
  563. NodeData: node,
  564. Namespace: c.Namespace,
  565. RAMReq: RAMReqV,
  566. RAMUsed: RAMUsedV,
  567. CPUReq: CPUReqV,
  568. CPUUsed: CPUUsedV,
  569. GPUReq: GPUReqV,
  570. Annotations: namespaceAnnotations,
  571. NamespaceLabels: namespacelabels,
  572. ClusterID: c.ClusterID,
  573. ClusterName: cm.ClusterMap.NameFor(c.ClusterID),
  574. }
  575. var cpuReq, cpuUse *util.Vector
  576. if len(costs.CPUReq) > 0 {
  577. cpuReq = costs.CPUReq[0]
  578. }
  579. if len(costs.CPUUsed) > 0 {
  580. cpuUse = costs.CPUUsed[0]
  581. }
  582. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  583. var ramReq, ramUse *util.Vector
  584. if len(costs.RAMReq) > 0 {
  585. ramReq = costs.RAMReq[0]
  586. }
  587. if len(costs.RAMUsed) > 0 {
  588. ramUse = costs.RAMUsed[0]
  589. }
  590. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  591. if filterNamespace == "" {
  592. containerNameCost[key] = costs
  593. missingContainers[key] = costs
  594. } else if costs.Namespace == filterNamespace {
  595. containerNameCost[key] = costs
  596. missingContainers[key] = costs
  597. }
  598. }
  599. }
  600. // Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
  601. // to pass along the cost data
  602. unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
  603. for k, costs := range unmounted {
  604. log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
  605. if filterNamespace == "" {
  606. containerNameCost[k] = costs
  607. } else if costs.Namespace == filterNamespace {
  608. containerNameCost[k] = costs
  609. }
  610. }
  611. err = findDeletedNodeInfo(cli, missingNodes, window, "")
  612. if err != nil {
  613. log.Errorf("Error fetching historical node data: %s", err.Error())
  614. }
  615. err = findDeletedPodInfo(cli, missingContainers, window)
  616. if err != nil {
  617. log.Errorf("Error fetching historical pod data: %s", err.Error())
  618. }
  619. return containerNameCost, err
  620. }
  621. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  622. costs := make(map[string]*CostData)
  623. if len(unmountedPVs) == 0 {
  624. return costs
  625. }
  626. for k, pv := range unmountedPVs {
  627. keyParts := strings.Split(k, ",")
  628. if len(keyParts) != 3 {
  629. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  630. continue
  631. }
  632. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  633. namespacelabels, _ := namespaceLabelsMapping[ns+","+clusterID]
  634. namespaceAnnotations, _ := namespaceAnnotationsMapping[ns+","+clusterID]
  635. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  636. key := metric.Key()
  637. if costData, ok := costs[key]; !ok {
  638. costs[key] = &CostData{
  639. Name: unmountedPVsContainer,
  640. PodName: unmountedPVsContainer,
  641. NodeName: "",
  642. Annotations: namespaceAnnotations,
  643. Namespace: ns,
  644. NamespaceLabels: namespacelabels,
  645. Labels: namespacelabels,
  646. ClusterID: clusterID,
  647. ClusterName: clusterMap.NameFor(clusterID),
  648. PVCData: pv,
  649. }
  650. } else {
  651. costData.PVCData = append(costData.PVCData, pv...)
  652. }
  653. }
  654. return costs
  655. }
  656. func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
  657. if len(missingContainers) > 0 {
  658. queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{%s}[%s]`, env.GetPromClusterFilter(), window)
  659. podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
  660. if err != nil {
  661. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  662. }
  663. podLabels := make(map[string]map[string]string)
  664. if podLabelsResult != nil {
  665. podLabels, err = parsePodLabels(podLabelsResult)
  666. if err != nil {
  667. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  668. }
  669. }
  670. for key, costData := range missingContainers {
  671. cm, _ := NewContainerMetricFromKey(key)
  672. labels, ok := podLabels[cm.PodName]
  673. if !ok {
  674. labels = make(map[string]string)
  675. }
  676. for k, v := range costData.NamespaceLabels {
  677. labels[k] = v
  678. }
  679. costData.Labels = labels
  680. }
  681. }
  682. return nil
  683. }
// findDeletedNodeInfo fills in hourly CPU, RAM, and GPU costs for nodes that
// have been removed from the Kubernetes API by querying historical kubecost
// node cost metrics from Prometheus over the given window (optionally shifted
// back by offset). missingNodes is keyed by node name and mutated in place.
func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
	if len(missingNodes) > 0 {
		defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")

		offsetStr := ""
		if offset != "" {
			offsetStr = fmt.Sprintf("offset %s", offset)
		}

		// Average each node's hourly cost metric over the window, grouped by
		// node identity and the configured cluster label.
		queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
		queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
		queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())

		ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)

		// Issue the three queries concurrently, then await all of them.
		cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
		ramCostResCh := ctx.Query(queryHistoricalRAMCost)
		gpuCostResCh := ctx.Query(queryHistoricalGPUCost)

		cpuCostRes, _ := cpuCostResCh.Await()
		ramCostRes, _ := ramCostResCh.Await()
		gpuCostRes, _ := gpuCostResCh.Await()
		// Per-query errors are collected on the context; surface them here
		// rather than per Await.
		if ctx.HasErrors() {
			return ctx.ErrorCollection()
		}

		cpuCosts, err := getCost(cpuCostRes)
		if err != nil {
			return err
		}
		ramCosts, err := getCost(ramCostRes)
		if err != nil {
			return err
		}
		gpuCosts, err := getCost(gpuCostRes)
		if err != nil {
			return err
		}

		if len(cpuCosts) == 0 {
			log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
		}

		// Only nodes we were asked about get updated; unexpected nodes from
		// Prometheus are logged (deduplicated) on the CPU pass only.
		for node, costv := range cpuCosts {
			if _, ok := missingNodes[node]; ok {
				missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
			} else {
				log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
			}
		}
		for node, costv := range ramCosts {
			if _, ok := missingNodes[node]; ok {
				missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
			}
		}
		for node, costv := range gpuCosts {
			if _, ok := missingNodes[node]; ok {
				missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
			}
		}
	}
	return nil
}
  739. // getContainerAllocation takes the max between request and usage. This function
  740. // returns a slice containing a single element describing the container's
  741. // allocation.
  742. //
  743. // Additionally, the timestamp of the allocation will be the highest value
  744. // timestamp between the two vectors. This mitigates situations where
  745. // Timestamp=0. This should have no effect on the metrics emitted by the
  746. // CostModelMetricsEmitter
  747. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  748. var result []*util.Vector
  749. if req != nil && used != nil {
  750. x1 := req.Value
  751. if math.IsNaN(x1) {
  752. log.Debugf("NaN value found during %s allocation calculation for requests.", allocationType)
  753. x1 = 0.0
  754. }
  755. y1 := used.Value
  756. if math.IsNaN(y1) {
  757. log.Debugf("NaN value found during %s allocation calculation for used.", allocationType)
  758. y1 = 0.0
  759. }
  760. result = []*util.Vector{
  761. {
  762. Value: math.Max(x1, y1),
  763. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  764. },
  765. }
  766. if result[0].Value == 0 && result[0].Timestamp == 0 {
  767. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  768. }
  769. } else if req != nil {
  770. result = []*util.Vector{
  771. {
  772. Value: req.Value,
  773. Timestamp: req.Timestamp,
  774. },
  775. }
  776. } else if used != nil {
  777. result = []*util.Vector{
  778. {
  779. Value: used.Value,
  780. Timestamp: used.Timestamp,
  781. },
  782. }
  783. } else {
  784. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  785. result = []*util.Vector{
  786. {
  787. Value: 0,
  788. Timestamp: float64(time.Now().UTC().Unix()),
  789. },
  790. }
  791. }
  792. return result
  793. }
  794. func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
  795. cfg, err := cloud.GetConfig()
  796. if err != nil {
  797. return err
  798. }
  799. // Pull a region from the first node
  800. var defaultRegion string
  801. nodeList := cache.GetAllNodes()
  802. if len(nodeList) > 0 {
  803. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  804. }
  805. storageClasses := cache.GetAllStorageClasses()
  806. storageClassMap := make(map[string]map[string]string)
  807. for _, storageClass := range storageClasses {
  808. params := storageClass.Parameters
  809. storageClassMap[storageClass.Name] = params
  810. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  811. storageClassMap["default"] = params
  812. storageClassMap[""] = params
  813. }
  814. }
  815. pvs := cache.GetAllPersistentVolumes()
  816. pvMap := make(map[string]*costAnalyzerCloud.PV)
  817. for _, pv := range pvs {
  818. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  819. if !ok {
  820. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  821. }
  822. var region string
  823. if r, ok := util.GetRegion(pv.Labels); ok {
  824. region = r
  825. } else {
  826. region = defaultRegion
  827. }
  828. cacPv := &costAnalyzerCloud.PV{
  829. Class: pv.Spec.StorageClassName,
  830. Region: region,
  831. Parameters: parameters,
  832. }
  833. err := GetPVCost(cacPv, pv, cloud, region)
  834. if err != nil {
  835. return err
  836. }
  837. pvMap[pv.Name] = cacPv
  838. }
  839. for _, pvc := range pvClaimMapping {
  840. if vol, ok := pvMap[pvc.VolumeName]; ok {
  841. pvc.Volume = vol
  842. } else {
  843. log.Debugf("PV not found, using default")
  844. pvc.Volume = &costAnalyzerCloud.PV{
  845. Cost: cfg.Storage,
  846. }
  847. }
  848. }
  849. return nil
  850. }
  851. func GetPVCost(pv *costAnalyzerCloud.PV, kpv *clustercache.PersistentVolume, cp costAnalyzerCloud.Provider, defaultRegion string) error {
  852. cfg, err := cp.GetConfig()
  853. if err != nil {
  854. return err
  855. }
  856. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  857. pv.ProviderID = key.ID()
  858. pvWithCost, err := cp.PVPricing(key)
  859. if err != nil {
  860. pv.Cost = cfg.Storage
  861. return err
  862. }
  863. if pvWithCost == nil || pvWithCost.Cost == "" {
  864. pv.Cost = cfg.Storage
  865. return nil // set default cost
  866. }
  867. pv.Cost = pvWithCost.Cost
  868. return nil
  869. }
  870. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  871. if cm.pricingMetadata != nil {
  872. return cm.pricingMetadata, nil
  873. } else {
  874. return nil, fmt.Errorf("Node costs not yet calculated")
  875. }
  876. }
// GetNodeCost computes a cost breakdown (CPU, RAM, GPU, total) for every node
// in the cluster cache. Pricing comes from the cloud provider's pricing API
// where available, with fallbacks to the configured default prices. It also
// records pricing-match metadata (surfaced via GetPricingSourceCounts) and
// applies reserved instance pricing before returning the map keyed by node
// name.
func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.Node, error) {
	cfg, err := cp.GetConfig()
	if err != nil {
		return nil, err
	}
	nodeList := cm.Cache.GetAllNodes()
	nodes := make(map[string]*costAnalyzerCloud.Node)
	pmd := &costAnalyzerCloud.PricingMatchMetadata{
		TotalNodes:        0,
		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
	}
	for _, n := range nodeList {
		name := n.Name
		nodeLabels := n.Labels
		// Expose the provider ID as a label so the pricing key lookup can use it.
		nodeLabels["providerID"] = n.SpecProviderID
		pmd.TotalNodes++
		cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
		if err != nil {
			log.Infof("Error getting node pricing. Error: %s", err.Error())
			if cnode != nil {
				// Partial pricing data came back despite the error; keep it as-is.
				nodes[name] = cnode
				continue
			} else {
				// No pricing data at all: start from the configured defaults.
				cnode = &costAnalyzerCloud.Node{
					VCPUCost: cfg.CPU,
					RAMCost:  cfg.RAM,
				}
			}
		}
		// Tally which pricing type matched this node.
		if _, ok := pmd.PricingTypeCounts[cnode.PricingType]; ok {
			pmd.PricingTypeCounts[cnode.PricingType]++
		} else {
			pmd.PricingTypeCounts[cnode.PricingType] = 1
		}
		// newCnode builds upon cnode but populates/overrides certain fields.
		// cnode was populated leveraging cloud provider public pricing APIs.
		newCnode := *cnode
		if newCnode.InstanceType == "" {
			it, _ := util.GetInstanceType(n.Labels)
			newCnode.InstanceType = it
		}
		if newCnode.Region == "" {
			region, _ := util.GetRegion(n.Labels)
			newCnode.Region = region
		}
		if newCnode.ArchType == "" {
			arch, _ := util.GetArchType(n.Labels)
			newCnode.ArchType = arch
		}
		newCnode.ProviderID = n.SpecProviderID
		// CPU core count: prefer the pricing API's value; otherwise fall back
		// to the node capacity reported by the Kubernetes API.
		var cpu float64
		if newCnode.VCPU == "" {
			cpu = float64(n.Status.Capacity.Cpu().Value())
			newCnode.VCPU = n.Status.Capacity.Cpu().String()
		} else {
			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
			if err != nil {
				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
			}
		}
		if math.IsNaN(cpu) {
			log.Warnf("cpu parsed as NaN. Setting to 0.")
			cpu = 0
		}
		// RAM in bytes always comes from the node's reported capacity.
		var ram float64
		if newCnode.RAM == "" {
			newCnode.RAM = n.Status.Capacity.Memory().String()
		}
		ram = float64(n.Status.Capacity.Memory().Value())
		if math.IsNaN(ram) {
			log.Warnf("ram parsed as NaN. Setting to 0.")
			ram = 0
		}
		newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		// GPU count from the pricing data; a parse failure means "no GPUs".
		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
		if err != nil {
			gpuc = 0.0
		}
		// The k8s API will often report more accurate results for GPU count
		// than cloud provider public pricing APIs. If found, override the
		// original value.
		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
		if err != nil {
			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
		}
		if gpuOverride > 0 {
			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
			gpuc = gpuOverride
		}
		if vgpuOverride > 0 {
			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
		}
		// Special case for SUSE rancher, since it won't behave with normal
		// calculations, courtesy of the instance type not being "real" (a
		// recognizable AWS instance type.)
		if newCnode.InstanceType == "rke2" {
			log.Infof(
				"Found a SUSE Rancher node %s, defaulting and skipping math",
				cp.GetKey(nodeLabels, n).Features(),
			)
			defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPUCorePrice = 0
			}
			if math.IsNaN(defaultCPUCorePrice) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPUCorePrice = 0
			}
			defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAMPrice = 0
			}
			if math.IsNaN(defaultRAMPrice) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAMPrice = 0
			}
			defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPUPrice = 0
			}
			if math.IsNaN(defaultGPUPrice) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPUPrice = 0
			}
			// Just say no to doing the ratios! Price each resource directly
			// from the configured defaults rather than distributing a total.
			cpuCost := defaultCPUCorePrice * cpu
			gpuCost := defaultGPUPrice * gpuc
			ramCost := defaultRAMPrice * ram
			nodeCost := cpuCost + gpuCost + ramCost
			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
			newCnode.GPUCost = fmt.Sprintf("%f", defaultGPUPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
			// NOTE(review): this ratio math previously ran for SUSE Rancher
			// nodes too and skewed their default pricing; rke2 is now
			// special-cased above.
			// We reach this when a GPU is detected on a node, but no cost for
			// the GPU is defined in the OnDemand pricing. Calculate ratios of
			// CPU to RAM and GPU to RAM costs, then distribute the total node
			// cost among the CPU, RAM, and GPU.
			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPU = 0
			}
			if math.IsNaN(defaultGPU) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPU = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			gpuToRAMRatio := defaultGPU / defaultRAM
			if math.IsNaN(gpuToRAMRatio) {
				log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
				gpuToRAMRatio = 100
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			// Express the whole node as an equivalent number of "RAM GB units"
			// so the node price can be split proportionally below.
			ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Errorf("Could not parse total node price")
					return nil, err
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Errorf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			gpuPrice := ramPrice * gpuToRAMRatio
			newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
		} else if newCnode.RAMCost == "" {
			// We reach this when no RAM cost is defined in the OnDemand
			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
			// distrubte the total node cost among CPU and RAM costs.
			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}
  1193. // TODO: drop some logs
  1194. func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1195. // for fetching prices from cloud provider
  1196. // cfg, err := cp.GetConfig()
  1197. // if err != nil {
  1198. // return nil, err
  1199. // }
  1200. servicesList := cm.Cache.GetAllServices()
  1201. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1202. for _, service := range servicesList {
  1203. namespace := service.Namespace
  1204. name := service.Name
  1205. key := serviceKey{
  1206. Cluster: env.GetClusterID(),
  1207. Namespace: namespace,
  1208. Service: name,
  1209. }
  1210. if service.Type == "LoadBalancer" {
  1211. loadBalancer, err := cp.LoadBalancerPricing()
  1212. if err != nil {
  1213. return nil, err
  1214. }
  1215. newLoadBalancer := *loadBalancer
  1216. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1217. address := loadBalancerIngress.IP
  1218. // Some cloud providers use hostname rather than IP
  1219. if address == "" {
  1220. address = loadBalancerIngress.Hostname
  1221. }
  1222. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1223. }
  1224. loadBalancerMap[key] = &newLoadBalancer
  1225. }
  1226. }
  1227. return loadBalancerMap, nil
  1228. }
  1229. func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1230. servicesList := cache.GetAllServices()
  1231. podServicesMapping := make(map[string]map[string][]string)
  1232. for _, service := range servicesList {
  1233. namespace := service.Namespace
  1234. name := service.Name
  1235. key := namespace + "," + clusterID
  1236. if _, ok := podServicesMapping[key]; !ok {
  1237. podServicesMapping[key] = make(map[string][]string)
  1238. }
  1239. s := labels.Nothing()
  1240. if service.SpecSelector != nil && len(service.SpecSelector) > 0 {
  1241. s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
  1242. }
  1243. for _, pod := range podList {
  1244. labelSet := labels.Set(pod.Labels)
  1245. if s.Matches(labelSet) && pod.Namespace == namespace {
  1246. services, ok := podServicesMapping[key][pod.Name]
  1247. if ok {
  1248. podServicesMapping[key][pod.Name] = append(services, name)
  1249. } else {
  1250. podServicesMapping[key][pod.Name] = []string{name}
  1251. }
  1252. }
  1253. }
  1254. }
  1255. return podServicesMapping, nil
  1256. }
  1257. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1258. ssList := cache.GetAllStatefulSets()
  1259. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1260. for _, ss := range ssList {
  1261. namespace := ss.Namespace
  1262. name := ss.Name
  1263. key := namespace + "," + clusterID
  1264. if _, ok := podSSMapping[key]; !ok {
  1265. podSSMapping[key] = make(map[string][]string)
  1266. }
  1267. s, err := metav1.LabelSelectorAsSelector(ss.SpecSelector)
  1268. if err != nil {
  1269. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1270. }
  1271. for _, pod := range podList {
  1272. labelSet := labels.Set(pod.Labels)
  1273. if s.Matches(labelSet) && pod.Namespace == namespace {
  1274. sss, ok := podSSMapping[key][pod.Name]
  1275. if ok {
  1276. podSSMapping[key][pod.Name] = append(sss, name)
  1277. } else {
  1278. podSSMapping[key][pod.Name] = []string{name}
  1279. }
  1280. }
  1281. }
  1282. }
  1283. return podSSMapping, nil
  1284. }
  1285. func getPodDeployments(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1286. deploymentsList := cache.GetAllDeployments()
  1287. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1288. for _, deployment := range deploymentsList {
  1289. namespace := deployment.Namespace
  1290. name := deployment.Name
  1291. key := namespace + "," + clusterID
  1292. if _, ok := podDeploymentsMapping[key]; !ok {
  1293. podDeploymentsMapping[key] = make(map[string][]string)
  1294. }
  1295. s, err := metav1.LabelSelectorAsSelector(deployment.SpecSelector)
  1296. if err != nil {
  1297. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1298. }
  1299. for _, pod := range podList {
  1300. labelSet := labels.Set(pod.Labels)
  1301. if s.Matches(labelSet) && pod.Namespace == namespace {
  1302. deployments, ok := podDeploymentsMapping[key][pod.Name]
  1303. if ok {
  1304. podDeploymentsMapping[key][pod.Name] = append(deployments, name)
  1305. } else {
  1306. podDeploymentsMapping[key][pod.Name] = []string{name}
  1307. }
  1308. }
  1309. }
  1310. }
  1311. return podDeploymentsMapping, nil
  1312. }
  1313. func getPodDeploymentsWithMetrics(deploymentLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1314. podDeploymentsMapping := make(map[string]map[string][]string)
  1315. for depKey, depLabels := range deploymentLabels {
  1316. kt, err := NewKeyTuple(depKey)
  1317. if err != nil {
  1318. continue
  1319. }
  1320. namespace := kt.Namespace()
  1321. name := kt.Key()
  1322. clusterID := kt.ClusterID()
  1323. key := namespace + "," + clusterID
  1324. if _, ok := podDeploymentsMapping[key]; !ok {
  1325. podDeploymentsMapping[key] = make(map[string][]string)
  1326. }
  1327. s := labels.Set(depLabels).AsSelectorPreValidated()
  1328. for podKey, pLabels := range podLabels {
  1329. pkey, err := NewKeyTuple(podKey)
  1330. if err != nil {
  1331. continue
  1332. }
  1333. podNamespace := pkey.Namespace()
  1334. podName := pkey.Key()
  1335. podClusterID := pkey.ClusterID()
  1336. labelSet := labels.Set(pLabels)
  1337. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1338. deployments, ok := podDeploymentsMapping[key][podName]
  1339. if ok {
  1340. podDeploymentsMapping[key][podName] = append(deployments, name)
  1341. } else {
  1342. podDeploymentsMapping[key][podName] = []string{name}
  1343. }
  1344. }
  1345. }
  1346. }
  1347. // Remove any duplicate data created by metric names
  1348. pruneDuplicateData(podDeploymentsMapping)
  1349. return podDeploymentsMapping, nil
  1350. }
  1351. func getPodServicesWithMetrics(serviceLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1352. podServicesMapping := make(map[string]map[string][]string)
  1353. for servKey, servLabels := range serviceLabels {
  1354. kt, err := NewKeyTuple(servKey)
  1355. if err != nil {
  1356. continue
  1357. }
  1358. namespace := kt.Namespace()
  1359. name := kt.Key()
  1360. clusterID := kt.ClusterID()
  1361. key := namespace + "," + clusterID
  1362. if _, ok := podServicesMapping[key]; !ok {
  1363. podServicesMapping[key] = make(map[string][]string)
  1364. }
  1365. s := labels.Nothing()
  1366. if servLabels != nil && len(servLabels) > 0 {
  1367. s = labels.Set(servLabels).AsSelectorPreValidated()
  1368. }
  1369. for podKey, pLabels := range podLabels {
  1370. pkey, err := NewKeyTuple(podKey)
  1371. if err != nil {
  1372. continue
  1373. }
  1374. podNamespace := pkey.Namespace()
  1375. podName := pkey.Key()
  1376. podClusterID := pkey.ClusterID()
  1377. labelSet := labels.Set(pLabels)
  1378. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1379. services, ok := podServicesMapping[key][podName]
  1380. if ok {
  1381. podServicesMapping[key][podName] = append(services, name)
  1382. } else {
  1383. podServicesMapping[key][podName] = []string{name}
  1384. }
  1385. }
  1386. }
  1387. }
  1388. // Remove any duplicate data created by metric names
  1389. pruneDuplicateData(podServicesMapping)
  1390. return podServicesMapping, nil
  1391. }
  1392. // This method alleviates an issue with metrics that used a '_' to replace '-' in deployment
  1393. // and service names. To avoid counting these as multiple deployments/services, we'll remove
  1394. // the '_' version. Not optimal, but takes care of the issue
  1395. func pruneDuplicateData(data map[string]map[string][]string) {
  1396. for _, podMap := range data {
  1397. for podName, values := range podMap {
  1398. podMap[podName] = pruneDuplicates(values)
  1399. }
  1400. }
  1401. }
  1402. // Determine if there is an underscore in the value of a slice. If so, replace _ with -, and then
  1403. // check to see if the result exists in the slice. If both are true, then we DO NOT include that
  1404. // original value in the new slice.
  1405. func pruneDuplicates(s []string) []string {
  1406. m := sliceToSet(s)
  1407. for _, v := range s {
  1408. if strings.Contains(v, "_") {
  1409. name := strings.Replace(v, "_", "-", -1)
  1410. if !m[name] {
  1411. m[name] = true
  1412. }
  1413. delete(m, v)
  1414. }
  1415. }
  1416. return setToSlice(m)
  1417. }
  1418. // Creates a map[string]bool containing the slice values as keys
  1419. func sliceToSet(s []string) map[string]bool {
  1420. m := make(map[string]bool)
  1421. for _, v := range s {
  1422. m[v] = true
  1423. }
  1424. return m
  1425. }
  1426. func setToSlice(m map[string]bool) []string {
  1427. var result []string
  1428. for k := range m {
  1429. result = append(result, k)
  1430. }
  1431. return result
  1432. }
  1433. func costDataPassesFilters(cm clusters.ClusterMap, costs *CostData, namespace string, cluster string) bool {
  1434. passesNamespace := namespace == "" || costs.Namespace == namespace
  1435. passesCluster := cluster == "" || costs.ClusterID == cluster || costs.ClusterName == cluster
  1436. return passesNamespace && passesCluster
  1437. }
  1438. // Finds the a closest multiple less than value
  1439. func floorMultiple(value int64, multiple int64) int64 {
  1440. return (value / multiple) * multiple
  1441. }
  1442. // Attempt to create a key for the request. Reduce the times to minutes in order to more easily group requests based on
  1443. // real time ranges. If for any reason, the key generation fails, return a uuid to ensure uniqueness.
  1444. func requestKeyFor(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
  1445. keyLayout := "2006-01-02T15:04Z"
  1446. // We "snap" start time and duration to their closest 5 min multiple less than itself, by
  1447. // applying a snapped duration to a snapped start time.
  1448. durMins := int64(window.Minutes())
  1449. durMins = floorMultiple(durMins, 5)
  1450. sMins := int64(window.Start().Minute())
  1451. sOffset := sMins - floorMultiple(sMins, 5)
  1452. sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
  1453. eTime := window.Start().Add(time.Duration(durMins) * time.Minute)
  1454. startKey := sTime.Format(keyLayout)
  1455. endKey := eTime.Format(keyLayout)
  1456. return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
  1457. }
  1458. // ComputeCostDataRange executes a range query for cost data.
  1459. // Note that "offset" represents the time between the function call and "endString", and is also passed for convenience
  1460. func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
  1461. // Create a request key for request grouping. This key will be used to represent the cost-model result
  1462. // for the specific inputs to prevent multiple queries for identical data.
  1463. key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1464. log.Debugf("ComputeCostDataRange with Key: %s", key)
  1465. // If there is already a request out that uses the same data, wait for it to return to share the results.
  1466. // Otherwise, start executing.
  1467. result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
  1468. return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1469. })
  1470. data, ok := result.(map[string]*CostData)
  1471. if !ok {
  1472. return nil, fmt.Errorf("Failed to cast result as map[string]*CostData")
  1473. }
  1474. return data, err
  1475. }
// costDataRange computes per-container cost data for the given closed window
// at the given resolution. It submits a batch of concurrent Prometheus range
// queries, merges the results with pod/controller/service detail from the
// Kubernetes cluster cache (and metric-derived fallbacks), and assembles one
// CostData entry per container plus entries for unmounted PVs. When
// remoteEnabled is true, the computation is delegated to the remote SQL
// store instead. filterNamespace/filterCluster restrict the returned map
// ("" matches everything).
func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	clusterID := env.GetClusterID()
	// durHrs := end.Sub(start).Hours() + 1
	if window.IsOpen() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	start := *window.Start()
	end := *window.End()
	// Snap resolution to the nearest minute
	resMins := int64(math.Trunc(resolution.Minutes()))
	if resMins == 0 {
		return nil, fmt.Errorf("resolution must be greater than 0.0")
	}
	resolution = time.Duration(resMins) * time.Minute
	// Warn if resolution does not evenly divide window
	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
	}
	// Convert to Prometheus-style duration string in terms of m or h
	resStr := fmt.Sprintf("%dm", resMins)
	if resMins%60 == 0 {
		resStr = fmt.Sprintf("%dh", resMins/60)
	}
	// Remote mode: hand the whole computation to the SQL-backed store.
	if remoteEnabled {
		remoteLayout := "2006-01-02T15:04:05Z"
		remoteStartStr := window.Start().Format(remoteLayout)
		remoteEndStr := window.End().Format(remoteLayout)
		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
	}
	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)
	// Render every PromQL query template with the cluster filter/label and
	// the snapped resolution string.
	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
	// Submit all queries for concurrent evaluation
	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
	// Pull k8s pod, controller, service, and namespace details
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podStatefulsetsMapping, err := getPodStatefulsets(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	// Process query results. Handle errors afterwards using ctx.Errors.
	resRAMRequests, _ := resChRAMRequests.Await()
	resRAMUsage, _ := resChRAMUsage.Await()
	resRAMAlloc, _ := resChRAMAlloc.Await()
	resCPURequests, _ := resChCPURequests.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resCPUAlloc, _ := resChCPUAlloc.Await()
	resGPURequests, _ := resChGPURequests.Await()
	resPVRequests, _ := resChPVRequests.Await()
	resPVCAlloc, _ := resChPVCAlloc.Await()
	resPVHourlyCost, _ := resChPVHourlyCost.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()
	resNSLabels, _ := resChNSLabels.Await()
	resPodLabels, _ := resChPodLabels.Await()
	resNSAnnotations, _ := resChNSAnnotations.Await()
	resPodAnnotations, _ := resChPodAnnotations.Await()
	resServiceLabels, _ := resChServiceLabels.Await()
	resDeploymentLabels, _ := resChDeploymentLabels.Await()
	resStatefulsetLabels, _ := resChStatefulsetLabels.Await()
	resDaemonsets, _ := resChDaemonsets.Await()
	resJobs, _ := resChJobs.Await()
	resNormalization, _ := resChNormalization.Await()
	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if ctx.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass them the error
		// back to the caller. The caller should handle the specific case where error is an ErrorCollection
		for _, promErr := range ctx.Errors() {
			if promErr.Error != nil {
				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
			}
			if promErr.ParseError != nil {
				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
			}
		}
		// ErrorCollection is an collection of errors wrapped in a single error implementation
		return nil, ctx.ErrorCollection()
	}
	// Normalization scales request/usage vectors to per-resolution values;
	// it is required, so failure here aborts the computation.
	normalizationValue, err := getNormalizations(resNormalization)
	if err != nil {
		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
		return nil, prom.WrapError(err, msg)
	}
	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
	if err != nil {
		// Just log for compatibility with KSM less than 1.6
		log.Infof("Unable to get PV Data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = addPVData(cm.Cache, pvClaimMapping, cp)
		if err != nil {
			return nil, fmt.Errorf("pvClaimMapping: %s", err)
		}
	}
	pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
	}
	// unmountedPVs starts with every PVC allocation; entries are deleted as
	// pods claim them below, so what remains is unmounted.
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
	}
	if pvAllocationMapping != nil {
		addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
		for k, v := range pvAllocationMapping {
			unmountedPVs[k] = v
		}
	}
	// Merge metric-derived labels/annotations into the cache-derived mappings.
	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
	}
	if nsLabels != nil {
		mergeStringMap(namespaceLabelsMapping, nsLabels)
	}
	podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
	}
	nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
	}
	if nsAnnotations != nil {
		mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
	}
	podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
	}
	serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
	}
	deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}
	statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}
	// Augment the cache-derived pod->controller/service mappings with the
	// metric-derived equivalents (covers pods no longer in the cache).
	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)
	podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)
	podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
	}
	podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
	}
	podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podServicesMapping, podServicesMetricsMapping)
	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Errorf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}
	// containerNameCost is the result; containers is the union of every
	// container key seen in any metric map.
	containerNameCost := make(map[string]*CostData)
	containers := make(map[string]bool)
	otherClusterPVRecorded := make(map[string]bool)
	RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
	}
	for key := range RAMReqMap {
		containers[key] = true
	}
	RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
	}
	for key := range CPUReqMap {
		containers[key] = true
	}
	// No need to normalize here, as this comes from a counter, namely:
	// rate(container_cpu_usage_seconds_total) which properly accounts for normalized rates
	CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}
	RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
	}
	for key := range RAMAllocMap {
		containers[key] = true
	}
	CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
	}
	for key := range CPUAllocMap {
		containers[key] = true
	}
	GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(GPURequests)")
	}
	for key := range GPUReqMap {
		containers[key] = true
	}
	// Request metrics can show up after pod eviction and completion.
	// This method synchronizes requests to allocations such that when
	// allocation is 0, so are requests
	applyAllocationToRequests(RAMAllocMap, RAMReqMap)
	applyAllocationToRequests(CPUAllocMap, CPUReqMap)
	// missingNodes collects placeholder node records to be backfilled from
	// historical data after the loop; missingContainers mirrors the results.
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		c, _ := NewContainerMetricFromKey(key)
		// Look up each metric vector for this container; missing series
		// simply become empty vectors.
		RAMReqV, ok := RAMReqMap[key]
		if !ok {
			log.Debug("no RAM requests for " + key)
			RAMReqV = []*util.Vector{}
		}
		RAMUsedV, ok := RAMUsedMap[key]
		if !ok {
			log.Debug("no RAM usage for " + key)
			RAMUsedV = []*util.Vector{}
		}
		CPUReqV, ok := CPUReqMap[key]
		if !ok {
			log.Debug("no CPU requests for " + key)
			CPUReqV = []*util.Vector{}
		}
		CPUUsedV, ok := CPUUsedMap[key]
		if !ok {
			log.Debug("no CPU usage for " + key)
			CPUUsedV = []*util.Vector{}
		}
		RAMAllocsV, ok := RAMAllocMap[key]
		if !ok {
			log.Debug("no RAM allocation for " + key)
			RAMAllocsV = []*util.Vector{}
		}
		CPUAllocsV, ok := CPUAllocMap[key]
		if !ok {
			log.Debug("no CPU allocation for " + key)
			CPUAllocsV = []*util.Vector{}
		}
		GPUReqV, ok := GPUReqMap[key]
		if !ok {
			log.Debug("no GPU requests for " + key)
			GPUReqV = []*util.Vector{}
		}
		// Share one placeholder Node per node name across containers.
		var node *costAnalyzerCloud.Node
		if n, ok := missingNodes[c.NodeName]; ok {
			node = n
		} else {
			node = &costAnalyzerCloud.Node{}
			missingNodes[c.NodeName] = node
		}
		nsKey := c.Namespace + "," + c.ClusterID
		podKey := c.Namespace + "," + c.PodName + "," + c.ClusterID
		// Pod labels/annotations inherit namespace-level values without
		// overriding pod-level ones.
		namespaceLabels, _ := namespaceLabelsMapping[nsKey]
		pLabels := podLabels[podKey]
		if pLabels == nil {
			pLabels = make(map[string]string)
		}
		for k, v := range namespaceLabels {
			if _, ok := pLabels[k]; !ok {
				pLabels[k] = v
			}
		}
		namespaceAnnotations, _ := namespaceAnnotationsMapping[nsKey]
		pAnnotations := podAnnotations[podKey]
		if pAnnotations == nil {
			pAnnotations = make(map[string]string)
		}
		for k, v := range namespaceAnnotations {
			if _, ok := pAnnotations[k]; !ok {
				pAnnotations[k] = v
			}
		}
		var podDeployments []string
		if _, ok := podDeploymentsMapping[nsKey]; ok {
			if ds, ok := podDeploymentsMapping[nsKey][c.PodName]; ok {
				podDeployments = ds
			} else {
				podDeployments = []string{}
			}
		}
		var podStatefulSets []string
		if _, ok := podStatefulsetsMapping[nsKey]; ok {
			if ss, ok := podStatefulsetsMapping[nsKey][c.PodName]; ok {
				podStatefulSets = ss
			} else {
				podStatefulSets = []string{}
			}
		}
		var podServices []string
		if _, ok := podServicesMapping[nsKey]; ok {
			if svcs, ok := podServicesMapping[nsKey][c.PodName]; ok {
				podServices = svcs
			} else {
				podServices = []string{}
			}
		}
		var podPVs []*PersistentVolumeClaimData
		var podNetCosts []*util.Vector
		// For PVC data, we'll need to find the claim mapping and cost data. Will need to append
		// cost data since that was populated by cluster data previously. We do this with
		// the pod_pvc_allocation metric
		podPVData, ok := pvAllocationMapping[podKey]
		if !ok {
			log.Debugf("Failed to locate pv allocation mapping for missing pod.")
		}
		// Delete the current pod key from potentially unmounted pvs
		delete(unmountedPVs, podKey)
		// For network costs, we'll use existing map since it should still contain the
		// correct data.
		var podNetworkCosts []*util.Vector
		if usage, ok := networkUsageMap[podKey]; ok {
			netCosts, err := GetNetworkCost(usage, cp)
			if err != nil {
				log.Errorf("Error pulling network costs: %s", err.Error())
			} else {
				podNetworkCosts = netCosts
			}
		}
		// Check to see if any other data has been recorded for this namespace, pod, clusterId
		// Follow the pattern of only allowing claims data per pod
		if !otherClusterPVRecorded[podKey] {
			otherClusterPVRecorded[podKey] = true
			podPVs = podPVData
			podNetCosts = podNetworkCosts
		}
		pds := []string{}
		if ds, ok := podDaemonsets[podKey]; ok {
			pds = []string{ds}
		}
		jobs := []string{}
		if job, ok := podJobs[podKey]; ok {
			jobs = []string{job}
		}
		costs := &CostData{
			Name:            c.ContainerName,
			PodName:         c.PodName,
			NodeName:        c.NodeName,
			NodeData:        node,
			Namespace:       c.Namespace,
			Services:        podServices,
			Deployments:     podDeployments,
			Daemonsets:      pds,
			Statefulsets:    podStatefulSets,
			Jobs:            jobs,
			RAMReq:          RAMReqV,
			RAMUsed:         RAMUsedV,
			CPUReq:          CPUReqV,
			CPUUsed:         CPUUsedV,
			RAMAllocation:   RAMAllocsV,
			CPUAllocation:   CPUAllocsV,
			GPUReq:          GPUReqV,
			Annotations:     pAnnotations,
			Labels:          pLabels,
			NamespaceLabels: namespaceLabels,
			PVCData:         podPVs,
			NetworkData:     podNetCosts,
			ClusterID:       c.ClusterID,
			ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
		}
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}
	// Whatever remains in unmountedPVs was never claimed by a pod above;
	// surface it as synthetic unmounted-PV cost entries.
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[k] = costs
		}
	}
	// Backfill node data for nodes no longer present, using historical data.
	if window.Minutes() > 0 {
		dur, off := window.DurationOffsetStrings()
		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
		if err != nil {
			log.Errorf("Error fetching historical node data: %s", err.Error())
		}
	}
	return containerNameCost, nil
}
  1939. func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestMap map[string][]*util.Vector) {
  1940. // The result of the normalize operation will be a new []*util.Vector to replace the requests
  1941. normalizeOp := func(r *util.Vector, x *float64, y *float64) bool {
  1942. // Omit data (return false) if both x and y inputs don't exist
  1943. if x == nil || y == nil {
  1944. return false
  1945. }
  1946. // If the allocation value is 0, 0 out request value
  1947. if *x == 0 {
  1948. r.Value = 0
  1949. } else {
  1950. r.Value = *y
  1951. }
  1952. return true
  1953. }
  1954. // Run normalization on all request vectors in the mapping
  1955. for k, requests := range requestMap {
  1956. // Only run normalization where there are valid allocations
  1957. allocations, ok := allocationMap[k]
  1958. if !ok {
  1959. delete(requestMap, k)
  1960. continue
  1961. }
  1962. // Replace request map with normalized
  1963. requestMap[k] = util.ApplyVectorOp(allocations, requests, normalizeOp)
  1964. }
  1965. }
  1966. func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
  1967. cfg, err := cp.GetConfig()
  1968. if err != nil {
  1969. log.Errorf("Failed to get provider config while adding pv metrics data.")
  1970. return
  1971. }
  1972. for _, pvcDataArray := range pvAllocationMap {
  1973. for _, pvcData := range pvcDataArray {
  1974. costKey := fmt.Sprintf("%s,%s", pvcData.VolumeName, pvcData.ClusterID)
  1975. pvCost, ok := pvCostMap[costKey]
  1976. if !ok {
  1977. pvcData.Volume = &costAnalyzerCloud.PV{
  1978. Cost: cfg.Storage,
  1979. }
  1980. continue
  1981. }
  1982. pvcData.Volume = pvCost
  1983. }
  1984. }
  1985. }
  1986. // Add values that don't already exist in origMap from mergeMap into origMap
  1987. func mergeStringMap(origMap map[string]map[string]string, mergeMap map[string]map[string]string) {
  1988. for k, v := range mergeMap {
  1989. if _, ok := origMap[k]; !ok {
  1990. origMap[k] = v
  1991. }
  1992. }
  1993. }
  1994. func appendLabelsList(mainLabels map[string]map[string][]string, labels map[string]map[string][]string) {
  1995. for k, v := range labels {
  1996. mainLabels[k] = v
  1997. }
  1998. }
  1999. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  2000. nsToLabels := make(map[string]map[string]string)
  2001. nss := cache.GetAllNamespaces()
  2002. for _, ns := range nss {
  2003. labels := make(map[string]string)
  2004. for k, v := range ns.Labels {
  2005. labels[promutil.SanitizeLabelName(k)] = v
  2006. }
  2007. nsToLabels[ns.Name+","+clusterID] = labels
  2008. }
  2009. return nsToLabels, nil
  2010. }
  2011. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  2012. nsToAnnotations := make(map[string]map[string]string)
  2013. nss := cache.GetAllNamespaces()
  2014. for _, ns := range nss {
  2015. annotations := make(map[string]string)
  2016. for k, v := range ns.Annotations {
  2017. annotations[promutil.SanitizeLabelName(k)] = v
  2018. }
  2019. nsToAnnotations[ns.Name+","+clusterID] = annotations
  2020. }
  2021. return nsToAnnotations, nil
  2022. }
  2023. func getDaemonsetsOfPod(pod clustercache.Pod) []string {
  2024. for _, ownerReference := range pod.OwnerReferences {
  2025. if ownerReference.Kind == "DaemonSet" {
  2026. return []string{ownerReference.Name}
  2027. }
  2028. }
  2029. return []string{}
  2030. }
  2031. func getJobsOfPod(pod clustercache.Pod) []string {
  2032. for _, ownerReference := range pod.OwnerReferences {
  2033. if ownerReference.Kind == "Job" {
  2034. return []string{ownerReference.Name}
  2035. }
  2036. }
  2037. return []string{}
  2038. }
  2039. func getStatefulSetsOfPod(pod clustercache.Pod) []string {
  2040. for _, ownerReference := range pod.OwnerReferences {
  2041. if ownerReference.Kind == "StatefulSet" {
  2042. return []string{ownerReference.Name}
  2043. }
  2044. }
  2045. return []string{}
  2046. }
// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
// the number of GPUs and vGPUs are equipped on the node. If unable to identify
// a GPU count, it will return -1.
//
// Returns (physical GPU count, virtual GPU count, error). The two counts are
// equal unless some form of GPU sharing (NVIDIA time-slicing or AWS vGPU) is
// detected.
func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
	// Physical GPU capacity advertised by the NVIDIA device plugin, plus the
	// GFD "replicas" label whose presence indicates time-slicing is in play.
	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
	// Case 1: Standard NVIDIA GPU
	// No sharing configured: physical and virtual counts are identical.
	if hasGpu && g.Value() != 0 && !hasReplicas {
		return float64(g.Value()), float64(g.Value()), nil
	}
	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
	if hasReplicas {
		resultGPU := 0.0
		resultVGPU := 0.0
		// Physical GPU count comes from the GFD label (not node capacity,
		// which reports replicated/shared units under time-slicing).
		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
			var err error
			resultGPU, err = strconv.ParseFloat(c, 64)
			if err != nil {
				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
			}
		}
		// The virtual count's capacity key depends on GFD's renameByDefault
		// setting; fall back to the physical count if neither key exists.
		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
			resultVGPU = float64(s.Value())
		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
			resultVGPU = float64(g.Value())
		} else {
			resultVGPU = resultGPU
		}
		return resultGPU, resultVGPU, nil
	}
	// Case 3: AWS vGPU
	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
		// vGPUs-per-GPU ratio comes from the device plugin daemonset's
		// "--vgpu=" argument; default to 10 when it cannot be determined.
		vgpuCount, err := getAllocatableVGPUs(cache)
		if err != nil {
			return -1, -1, err
		}
		vgpuCoeff := 10.0
		if vgpuCount > 0.0 {
			vgpuCoeff = vgpuCount
		}
		if vgpu.Value() != 0 {
			// Physical count = virtual capacity divided by the ratio.
			resultGPU := float64(vgpu.Value()) / vgpuCoeff
			resultVGPU := float64(vgpu.Value())
			return resultGPU, resultVGPU, nil
		}
	}
	// No GPU found
	return -1, -1, nil
}
  2099. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  2100. daemonsets := cache.GetAllDaemonSets()
  2101. vgpuCount := 0.0
  2102. for _, ds := range daemonsets {
  2103. dsContainerList := &ds.SpecContainers
  2104. for _, ctnr := range *dsContainerList {
  2105. if ctnr.Args != nil {
  2106. for _, arg := range ctnr.Args {
  2107. if strings.Contains(arg, "--vgpu=") {
  2108. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  2109. if err != nil {
  2110. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  2111. continue
  2112. }
  2113. vgpuCount = vgpus
  2114. return vgpuCount, nil
  2115. }
  2116. }
  2117. }
  2118. }
  2119. }
  2120. return vgpuCount, nil
  2121. }
// PersistentVolumeClaimData couples a PVC with its backing persistent volume
// and the time-series cost values attributed to it.
type PersistentVolumeClaimData struct {
	Class        string `json:"class"`     // storage class of the claim
	Claim        string `json:"claim"`     // PVC name
	Namespace    string `json:"namespace"` // namespace the claim lives in
	ClusterID    string `json:"clusterId"`
	TimesClaimed int    `json:"timesClaimed"` // number of pods sharing this claim
	VolumeName   string `json:"volumeName"`   // name of the bound PV
	// Volume holds the priced PV; may be a default-cost placeholder when the
	// real PV's pricing could not be resolved.
	Volume *costAnalyzerCloud.PV `json:"persistentVolume"`
	Values []*util.Vector        `json:"values"` // cost/usage vectors over time
}
  2132. func measureTime(start time.Time, threshold time.Duration, name string) {
  2133. elapsed := time.Since(start)
  2134. if elapsed > threshold {
  2135. log.Infof("[Profiler] %s: %s", elapsed, name)
  2136. }
  2137. }
  2138. func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch chan string) {
  2139. elapsed := time.Since(start)
  2140. if elapsed > threshold {
  2141. ch <- fmt.Sprintf("%s took %s", name, time.Since(start))
  2142. }
  2143. }
// QueryAllocation computes an AllocationSetRange over the given window by
// stepping through it in increments of step, optionally inserting idle
// allocations derived from asset costs, then aggregating by the requested
// properties and (optionally) accumulating the result.
//
// Parameters of note:
//   - resolution: query resolution passed through to ComputeAllocation.
//   - includeIdle / idleByNode: whether idle allocations are inserted, and
//     whether idle is computed per node rather than per cluster.
//   - includeProportionalAssetResourceCosts (PARCs): requires includeIdle;
//     asset totals are tracked in an in-memory totals store so each PARC can
//     later be expressed against its asset's total cost.
//   - sharedLoadBalancer: on AKS, load balancer totals are summed across the
//     cluster's (at most one public and one private) load balancers.
//   - accumulateBy: optional accumulation of the stepped range.
//   - shareIdle: whether idle cost is shared (weighted) during aggregation.
func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool) (*opencost.AllocationSetRange, error) {
	// Validate window is legal
	if window.IsOpen() || window.IsNegative() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	var totalsStore opencost.TotalsStore
	// Idle is required for proportional asset costs
	if includeProportionalAssetResourceCosts {
		if !includeIdle {
			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
		}
		totalsStore = opencost.NewMemoryTotalsStore()
	}
	// Begin with empty response
	asr := opencost.NewAllocationSetRange()
	// Query for AllocationSets in increments of the given step duration,
	// appending each to the response.
	stepStart := *window.Start()
	stepEnd := stepStart.Add(step)
	var isAKS bool
	for window.End().After(stepStart) {
		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
		if err != nil {
			return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
		}
		if includeIdle {
			// Assets are needed per step to derive idle (and PARC totals).
			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}
			if includeProportionalAssetResourceCosts {
				// AKS is a special case - there can be a maximum of 2
				// load balancers (1 public and 1 private) in an AKS cluster
				// therefore, when calculating PARCs for load balancers,
				// we must know if this is an AKS cluster
				for _, node := range assetSet.Nodes {
					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
						isAKS = true
						break
					}
				}
				_, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
				if err != nil {
					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
				}
			}
			// Idle is always computed by node here (third arg true); the
			// idleByNode flag is applied later during aggregation.
			idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
			if err != nil {
				return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}
			for _, idleAlloc := range idleSet.Allocations {
				allocSet.Insert(idleAlloc)
			}
		}
		asr.Append(allocSet)
		stepStart = stepEnd
		stepEnd = stepStart.Add(step)
	}
	// Set aggregation options and aggregate
	var shareIdleOpt string
	if shareIdle {
		shareIdleOpt = opencost.ShareWeighted
	} else {
		shareIdleOpt = opencost.ShareNone
	}
	opts := &opencost.AllocationAggregationOptions{
		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
		IdleByNode:                            idleByNode,
		IncludeAggregatedMetadata:             includeAggregatedMetadata,
		ShareIdle:                             shareIdleOpt,
	}
	// Aggregate
	err := asr.AggregateBy(aggregate, opts)
	if err != nil {
		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
	}
	// Accumulate, if requested
	if accumulateBy != opencost.AccumulateOptionNone {
		asr, err = asr.Accumulate(accumulateBy)
		if err != nil {
			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
		}
		// when accumulating and returning PARCs, we need the totals for the
		// accumulated windows to accurately compute a fraction
		if includeProportionalAssetResourceCosts {
			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
			_, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
			if err != nil {
				log.Errorf("ETL: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
		}
	}
	// Final PARC pass: resolve each PARC's totals from the totals store and
	// convert the raw costs into percentages of those totals.
	if includeProportionalAssetResourceCosts {
		for _, as := range asr.Allocations {
			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
			}
			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
			}
			var totalPublicLbCost, totalPrivateLbCost float64
			if isAKS && sharedLoadBalancer {
				// loop through all assetTotals, adding all load balancer costs by public and private
				for _, tot := range totalStoreByNode {
					if tot.PrivateLoadBalancer {
						totalPrivateLbCost += tot.LoadBalancerCost
					} else {
						totalPublicLbCost += tot.LoadBalancerCost
					}
				}
			}
			// loop through each allocation set, using total cost from totals store
			for _, alloc := range as.Allocations {
				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
					// Normalize "a,b,c" keys to "a/b/c" to match store keys.
					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
					// for each parc , check the totals store for each
					// on a totals hit, set the corresponding total and calculate percentage
					// Node-level totals take precedence over cluster-level
					// totals when both match.
					var totals *opencost.AssetTotals
					if totalsLoc, found := totalStoreByCluster[key]; found {
						totals = totalsLoc
					}
					if totalsLoc, found := totalStoreByNode[key]; found {
						totals = totalsLoc
					}
					if totals == nil {
						log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
						continue
					}
					parc.CPUTotalCost = totals.CPUCost
					parc.GPUTotalCost = totals.GPUCost
					parc.RAMTotalCost = totals.RAMCost
					parc.PVTotalCost = totals.PersistentVolumeCost
					if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
						// Azure is a special case - use computed totals above
						// use the lbAllocations in the object to determine if
						// this PARC is a public or private load balancer
						// then set the total accordingly
						// AKS only has 1 public and 1 private load balancer
						lbAlloc, found := alloc.LoadBalancers[key]
						if found {
							if lbAlloc.Private {
								parc.LoadBalancerTotalCost = totalPrivateLbCost
							} else {
								parc.LoadBalancerTotalCost = totalPublicLbCost
							}
						}
					} else {
						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
					}
					opencost.ComputePercentages(&parc)
					// parc is a copy of the map value; write it back.
					alloc.ProportionalAssetResourceCosts[rawKey] = parc
				}
			}
		}
	}
	return asr, nil
}
  2309. func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
  2310. if !allocSet.Window.Equal(assetSet.Window) {
  2311. return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
  2312. }
  2313. var allocTotals map[string]*opencost.AllocationTotals
  2314. var assetTotals map[string]*opencost.AssetTotals
  2315. if idleByNode {
  2316. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
  2317. assetTotals = opencost.ComputeAssetTotals(assetSet, true)
  2318. } else {
  2319. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
  2320. assetTotals = opencost.ComputeAssetTotals(assetSet, false)
  2321. }
  2322. start, end := *allocSet.Window.Start(), *allocSet.Window.End()
  2323. idleSet := opencost.NewAllocationSet(start, end)
  2324. for key, assetTotal := range assetTotals {
  2325. allocTotal, ok := allocTotals[key]
  2326. if !ok {
  2327. log.Warnf("ETL: did not find allocations for asset key: %s", key)
  2328. // Use a zero-value set of totals. This indicates either (1) an
  2329. // error computing totals, or (2) that no allocations ran on the
  2330. // given node for the given window.
  2331. allocTotal = &opencost.AllocationTotals{
  2332. Cluster: assetTotal.Cluster,
  2333. Node: assetTotal.Node,
  2334. Start: assetTotal.Start,
  2335. End: assetTotal.End,
  2336. }
  2337. }
  2338. // Insert one idle allocation for each key (whether by node or
  2339. // by cluster), defined as the difference between the total
  2340. // asset cost and the allocated cost per-resource.
  2341. name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
  2342. err := idleSet.Insert(&opencost.Allocation{
  2343. Name: name,
  2344. Window: idleSet.Window.Clone(),
  2345. Properties: &opencost.AllocationProperties{
  2346. Cluster: assetTotal.Cluster,
  2347. Node: assetTotal.Node,
  2348. ProviderID: assetTotal.Node,
  2349. },
  2350. Start: assetTotal.Start,
  2351. End: assetTotal.End,
  2352. CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
  2353. GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
  2354. RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
  2355. })
  2356. if err != nil {
  2357. return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
  2358. }
  2359. }
  2360. return idleSet, nil
  2361. }