costmodel.go

  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "math"
  6. "regexp"
  7. "strconv"
  8. "strings"
  9. "time"
  10. "github.com/opencost/opencost/core/pkg/clusters"
  11. "github.com/opencost/opencost/core/pkg/log"
  12. "github.com/opencost/opencost/core/pkg/opencost"
  13. "github.com/opencost/opencost/core/pkg/util"
  14. "github.com/opencost/opencost/core/pkg/util/promutil"
  15. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  16. "github.com/opencost/opencost/pkg/clustercache"
  17. "github.com/opencost/opencost/pkg/env"
  18. "github.com/opencost/opencost/pkg/prom"
  19. prometheus "github.com/prometheus/client_golang/api"
  20. prometheusClient "github.com/prometheus/client_golang/api"
  21. v1 "k8s.io/api/core/v1"
  22. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  23. "k8s.io/apimachinery/pkg/labels"
  24. "golang.org/x/sync/singleflight"
  25. )
  26. const (
  27. statusAPIError = 422
  28. profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)
  29. unmountedPVsContainer = "unmounted-pvs"
  30. apiPrefix = "/api/v1"
  31. epAlertManagers = apiPrefix + "/alertmanagers"
  32. epLabelValues = apiPrefix + "/label/:name/values"
  33. epSeries = apiPrefix + "/series"
  34. epTargets = apiPrefix + "/targets"
  35. epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
  36. epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
  37. epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
  38. epConfig = apiPrefix + "/status/config"
  39. epFlags = apiPrefix + "/status/flags"
  40. )
  41. // isCron matches the name of a Job created by a CronJob and captures the
  42. // non-timestamp (CronJob) portion of the name.
  43. //
  44. // We support either a 10-character or an 8-character timestamp suffix because
  45. // batch/v1beta1 CronJobs create Jobs with 10-character timestamps and batch/v1 CronJobs create Jobs with 8-character timestamps.
  46. var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
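// Illustrative example (hypothetical names): a batch/v1 CronJob "nightly-backup"
// creates Jobs such as "nightly-backup-27923340"; isCron captures "nightly-backup":
//
//	if m := isCron.FindStringSubmatch("nightly-backup-27923340"); m != nil {
//	    name = m[1] // "nightly-backup"
//	}
//
// so costs roll up to the CronJob rather than to each individual Job run.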
  47. type CostModel struct {
  48. Cache clustercache.ClusterCache
  49. ClusterMap clusters.ClusterMap
  50. MaxPrometheusQueryDuration time.Duration
  51. RequestGroup *singleflight.Group
  52. ScrapeInterval time.Duration
  53. PrometheusClient prometheus.Client
  54. Provider costAnalyzerCloud.Provider
  55. pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
  56. }
  57. func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
  58. // request grouping to prevent over-requesting the same data prior to caching
  59. requestGroup := new(singleflight.Group)
  60. return &CostModel{
  61. Cache: cache,
  62. ClusterMap: clusterMap,
  63. MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
  64. PrometheusClient: client,
  65. Provider: provider,
  66. RequestGroup: requestGroup,
  67. ScrapeInterval: scrapeInterval,
  68. }
  69. }
  70. type CostData struct {
  71. Name string `json:"name,omitempty"`
  72. PodName string `json:"podName,omitempty"`
  73. NodeName string `json:"nodeName,omitempty"`
  74. NodeData *costAnalyzerCloud.Node `json:"node,omitempty"`
  75. Namespace string `json:"namespace,omitempty"`
  76. Deployments []string `json:"deployments,omitempty"`
  77. Services []string `json:"services,omitempty"`
  78. Daemonsets []string `json:"daemonsets,omitempty"`
  79. Statefulsets []string `json:"statefulsets,omitempty"`
  80. Jobs []string `json:"jobs,omitempty"`
  81. RAMReq []*util.Vector `json:"ramreq,omitempty"`
  82. RAMUsed []*util.Vector `json:"ramused,omitempty"`
  83. RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
  84. CPUReq []*util.Vector `json:"cpureq,omitempty"`
  85. CPUUsed []*util.Vector `json:"cpuused,omitempty"`
  86. CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
  87. GPUReq []*util.Vector `json:"gpureq,omitempty"`
  88. PVCData []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
  89. NetworkData []*util.Vector `json:"network,omitempty"`
  90. Annotations map[string]string `json:"annotations,omitempty"`
  91. Labels map[string]string `json:"labels,omitempty"`
  92. NamespaceLabels map[string]string `json:"namespaceLabels,omitempty"`
  93. ClusterID string `json:"clusterId"`
  94. ClusterName string `json:"clusterName"`
  95. }
  96. func (cd *CostData) String() string {
  97. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  98. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  99. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  100. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  101. }
  102. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  103. hasController = false
  104. if len(cd.Deployments) > 0 {
  105. name = cd.Deployments[0]
  106. kind = "deployment"
  107. hasController = true
  108. } else if len(cd.Statefulsets) > 0 {
  109. name = cd.Statefulsets[0]
  110. kind = "statefulset"
  111. hasController = true
  112. } else if len(cd.Daemonsets) > 0 {
  113. name = cd.Daemonsets[0]
  114. kind = "daemonset"
  115. hasController = true
  116. } else if len(cd.Jobs) > 0 {
  117. name = cd.Jobs[0]
  118. kind = "job"
  119. hasController = true
  120. match := isCron.FindStringSubmatch(name)
  121. if match != nil {
  122. name = match[1]
  123. }
  124. }
  125. return name, kind, hasController
  126. }
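// Illustrative example (hypothetical data): for a CostData with
// Deployments = []string{"web"} and Jobs = []string{"web-12345678"},
// GetController returns ("web", "deployment", true), since deployments take
// precedence over statefulsets, daemonsets, and jobs in the checks above.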
  127. const (
  128. queryRAMRequestsStr = `avg(
  129. label_replace(
  130. label_replace(
  131. avg(
  132. sum_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!="", %s}[%s] %s)
  133. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  134. ), "pod_name","$1","pod","(.+)"
  135. )
  136. ) by (namespace,container_name,pod_name,node,%s)`
  137. queryRAMUsageStr = `avg(
  138. label_replace(
  139. label_replace(
  140. label_replace(
  141. sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
  142. ), "container_name", "$1", "container", "(.+)"
  143. ), "pod_name", "$1", "pod", "(.+)"
  144. )
  145. ) by (namespace, container_name, pod_name, node, %s)`
  146. queryCPURequestsStr = `avg(
  147. label_replace(
  148. label_replace(
  149. avg(
  150. sum_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!="", %s}[%s] %s)
  151. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  152. ), "pod_name","$1","pod","(.+)"
  153. )
  154. ) by (namespace,container_name,pod_name,node,%s)`
  155. queryCPUUsageStr = `avg(
  156. label_replace(
  157. label_replace(
  158. label_replace(
  159. rate(
  160. container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
  161. ), "node", "$1", "instance", "(.+)"
  162. ), "container_name", "$1", "container", "(.+)"
  163. ), "pod_name", "$1", "pod", "(.+)"
  164. )
  165. ) by (namespace, container_name, pod_name, node, %s)`
  166. queryGPURequestsStr = `avg(
  167. label_replace(
  168. label_replace(
  169. avg(
  170. sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s)
  171. * %f
  172. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  173. ), "pod_name","$1","pod","(.+)"
  174. )
  175. ) by (namespace,container_name,pod_name,node,%s)
  176. * on (pod_name, namespace, %s) group_left(container) label_replace(avg(avg_over_time(kube_pod_status_phase{phase="Running", %s}[%s] %s)) by (pod,namespace,%s), "pod_name","$1","pod","(.+)")`
  177. queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
  178. *
  179. on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
  180. sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`
  181. // queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
  182. // window, aggregated by container.
  183. // [line 3] sum_over_time(bytes) = [byte*scrapes] per metric
  184. // [line 4] the %f multiplier is the scrape interval in seconds (e.g. avg(prometheus_target_interval_length_seconds)) / 60 / 60 = [hours/scrape]
  185. // [lines 2,4] sum by unique container key and multiply [byte*scrapes] * [hours/scrape] for byte*hours
  186. // [lines 1,5] relabeling
  187. queryRAMAllocationByteHours = `
  188. label_replace(label_replace(
  189. sum(
  190. sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!="", %s}[%s])
  191. ) by (namespace,container,pod,node,%s) * %f / 60 / 60
  192. , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
  193. // queryCPUAllocationVCPUHours yields the total VCPU-hour CPU allocation over the given
  194. // window, aggregated by container.
  195. // [line 3] sum_over_time(VCPUs in window) = [VCPU*scrapes] per metric
  196. // [line 4] the %f multiplier is the scrape interval in seconds (e.g. avg(prometheus_target_interval_length_seconds)) / 60 / 60 = [hours/scrape]
  197. // [lines 2,4] sum by unique container key and multiply [VCPU*scrapes] * [hours/scrape] for VCPU*hours
  198. // [lines 1,5] relabeling
  199. queryCPUAllocationVCPUHours = `
  200. label_replace(label_replace(
  201. sum(
  202. sum_over_time(container_cpu_allocation{container!="",container!="POD", node!="", %s}[%s])
  203. ) by (namespace,container,pod,node,%s) * %f / 60 / 60
  204. , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
  205. // queryPVCAllocationFmt yields the total byte-hour PVC allocation over the given window.
  206. // sum_over_time(bytes) = [byte*scrapes] per metric, multiplied by the scrape interval in seconds / 60 / 60 = [hours/scrape], giving byte*hours by pod
  207. queryPVCAllocationFmt = `sum(sum_over_time(pod_pvc_allocation{%s}[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
  208. queryPVHourlyCostFmt = `avg_over_time(pv_hourly_cost{%s}[%s])`
  209. queryNSLabels = `avg_over_time(kube_namespace_labels{%s}[%s])`
  210. queryPodLabels = `avg_over_time(kube_pod_labels{%s}[%s])`
  211. queryNSAnnotations = `avg_over_time(kube_namespace_annotations{%s}[%s])`
  212. queryPodAnnotations = `avg_over_time(kube_pod_annotations{%s}[%s])`
  213. queryDeploymentLabels = `avg_over_time(deployment_match_labels{%s}[%s])`
  214. queryStatefulsetLabels = `avg_over_time(statefulSet_match_labels{%s}[%s])`
  215. queryPodDaemonsets = `sum(kube_pod_owner{owner_kind="DaemonSet", %s}) by (namespace,pod,owner_name,%s)`
  216. queryPodJobs = `sum(kube_pod_owner{owner_kind="Job", %s}) by (namespace,pod,owner_name,%s)`
  217. queryServiceLabels = `avg_over_time(service_selector_labels{%s}[%s])`
  218. queryZoneNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  219. queryRegionNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  220. queryInternetNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  221. normalizationStr = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
  222. )
  223. func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
  224. queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  225. queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  226. queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  227. queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  228. queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  229. queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)
  230. // Cluster ID is specific to the source cluster
  231. clusterID := env.GetClusterID()
  232. // Submit all Prometheus queries asynchronously
  233. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  234. resChRAMUsage := ctx.Query(queryRAMUsage)
  235. resChCPUUsage := ctx.Query(queryCPUUsage)
  236. resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
  237. resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
  238. resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
  239. resChNormalization := ctx.Query(queryNormalization)
  240. // Pull pod information from k8s API
  241. podlist := cm.Cache.GetAllPods()
  242. podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
  243. if err != nil {
  244. return nil, err
  245. }
  246. podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
  247. if err != nil {
  248. return nil, err
  249. }
  250. namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
  251. if err != nil {
  252. return nil, err
  253. }
  254. namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
  255. if err != nil {
  256. return nil, err
  257. }
  258. // Process Prometheus query results. Handle errors using ctx.Errors.
  259. resRAMUsage, _ := resChRAMUsage.Await()
  260. resCPUUsage, _ := resChCPUUsage.Await()
  261. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  262. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  263. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  264. resNormalization, _ := resChNormalization.Await()
  265. // NOTE: The way we currently handle errors and warnings only early-returns if there is an error. Warnings
  266. // NOTE: will not propagate unless coupled with errors.
  267. if ctx.HasErrors() {
  268. // To keep the context of where the errors occur, we log them here and pass the error
  269. // back to the caller. The caller should handle the specific case where the error is an ErrorCollection.
  270. for _, promErr := range ctx.Errors() {
  271. if promErr.Error != nil {
  272. log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
  273. }
  274. if promErr.ParseError != nil {
  275. log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
  276. }
  277. }
  278. // ErrorCollection is a collection of errors wrapped in a single error implementation.
  279. // We opt to not return an error for the sake of running as a pure exporter.
  280. log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
  281. }
  282. defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
  283. normalizationValue, err := getNormalization(resNormalization)
  284. if err != nil {
  285. // We opt to not return an error for the sake of running as a pure exporter.
  286. log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
  287. }
  288. // Determine whether there are vGPUs configured and, if so, get the total allocatable number.
  289. // If there are no vGPUs, the coefficient defaults to 10.0.
  290. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  291. if err != nil {
  292. log.Warnf("getAllocatableVGPUs error: %s", err.Error())
  293. }
  294. vgpuCoeff := 10.0
  295. if vgpuCount > 0.0 {
  296. vgpuCoeff = vgpuCount
  297. }
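// Illustrative example (hypothetical values): if the cache reports 20 allocatable
// "k8s.amazonaws.com/vgpu" resources, vgpuCoeff becomes 20, and a container
// requesting 5 vGPUs below is counted as 5/20 = 0.25 of a physical GPU.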
  298. nodes, err := cm.GetNodeCost(cp)
  299. if err != nil {
  300. log.Warnf("GetNodeCost: no node cost model available: %s", err.Error())
  301. return nil, err
  302. }
  303. // Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
  304. unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
  305. pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
  306. if err != nil {
  307. log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
  308. }
  309. if pvClaimMapping != nil {
  310. err = addPVData(cm.Cache, pvClaimMapping, cp)
  311. if err != nil {
  312. return nil, err
  313. }
  314. // copy claim mappings into the unmounted set, then remove entries as their pods are discovered
  315. for k, v := range pvClaimMapping {
  316. unmountedPVs[k] = []*PersistentVolumeClaimData{v}
  317. }
  318. }
  319. networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
  320. if err != nil {
  321. log.Warnf("Unable to get Network Cost Data: %s", err.Error())
  322. networkUsageMap = make(map[string]*NetworkUsageData)
  323. }
  324. containerNameCost := make(map[string]*CostData)
  325. containers := make(map[string]bool)
  326. RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, true, normalizationValue, clusterID)
  327. if err != nil {
  328. return nil, err
  329. }
  330. for key := range RAMUsedMap {
  331. containers[key] = true
  332. }
  333. CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, false, 0, clusterID) // No need to normalize here, as this comes from a counter
  334. if err != nil {
  335. return nil, err
  336. }
  337. for key := range CPUUsedMap {
  338. containers[key] = true
  339. }
  340. currentContainers := make(map[string]v1.Pod)
  341. for _, pod := range podlist {
  342. if pod.Status.Phase != v1.PodRunning {
  343. continue
  344. }
  345. cs, err := NewContainerMetricsFromPod(pod, clusterID)
  346. if err != nil {
  347. return nil, err
  348. }
  349. for _, c := range cs {
  350. containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
  351. currentContainers[c.Key()] = *pod
  352. }
  353. }
  354. missingNodes := make(map[string]*costAnalyzerCloud.Node)
  355. missingContainers := make(map[string]*CostData)
  356. for key := range containers {
  357. if _, ok := containerNameCost[key]; ok {
  358. continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
  359. }
  360. // The _else_ case for this statement is the case in which the container has been
  361. // deleted so we have usage information but not request information. In that case,
  362. // we return partial data for CPU and RAM: only usage and not requests.
  363. if pod, ok := currentContainers[key]; ok {
  364. podName := pod.GetObjectMeta().GetName()
  365. ns := pod.GetObjectMeta().GetNamespace()
  366. nsLabels := namespaceLabelsMapping[ns+","+clusterID]
  367. podLabels := pod.GetObjectMeta().GetLabels()
  368. if podLabels == nil {
  369. podLabels = make(map[string]string)
  370. }
  371. for k, v := range nsLabels {
  372. if _, ok := podLabels[k]; !ok {
  373. podLabels[k] = v
  374. }
  375. }
  376. nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  377. podAnnotations := pod.GetObjectMeta().GetAnnotations()
  378. if podAnnotations == nil {
  379. podAnnotations = make(map[string]string)
  380. }
  381. for k, v := range nsAnnotations {
  382. if _, ok := podAnnotations[k]; !ok {
  383. podAnnotations[k] = v
  384. }
  385. }
  386. nodeName := pod.Spec.NodeName
  387. var nodeData *costAnalyzerCloud.Node
  388. if _, ok := nodes[nodeName]; ok {
  389. nodeData = nodes[nodeName]
  390. }
  391. nsKey := ns + "," + clusterID
  392. var podDeployments []string
  393. if _, ok := podDeploymentsMapping[nsKey]; ok {
  394. if ds, ok := podDeploymentsMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  395. podDeployments = ds
  396. } else {
  397. podDeployments = []string{}
  398. }
  399. }
  400. var podPVs []*PersistentVolumeClaimData
  401. podClaims := pod.Spec.Volumes
  402. for _, vol := range podClaims {
  403. if vol.PersistentVolumeClaim != nil {
  404. name := vol.PersistentVolumeClaim.ClaimName
  405. key := ns + "," + name + "," + clusterID
  406. if pvClaim, ok := pvClaimMapping[key]; ok {
  407. pvClaim.TimesClaimed++
  408. podPVs = append(podPVs, pvClaim)
  409. // Remove entry from potential unmounted pvs
  410. delete(unmountedPVs, key)
  411. }
  412. }
  413. }
  414. var podNetCosts []*util.Vector
  415. if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
  416. netCosts, err := GetNetworkCost(usage, cp)
  417. if err != nil {
  418. log.Debugf("Error pulling network costs: %s", err.Error())
  419. } else {
  420. podNetCosts = netCosts
  421. }
  422. }
  423. var podServices []string
  424. if _, ok := podServicesMapping[nsKey]; ok {
  425. if svcs, ok := podServicesMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  426. podServices = svcs
  427. } else {
  428. podServices = []string{}
  429. }
  430. }
  431. for i, container := range pod.Spec.Containers {
  432. containerName := container.Name
  433. // recreate the key and look up data for this container
  434. newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
  435. // See k8s.io/apimachinery/pkg/api/resource/amount.go and
  436. // k8s.io/apimachinery/pkg/api/resource/quantity.go for
  437. // details on the "amount" API, and
  438. // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
  439. // for the units of memory and CPU.
  440. ramRequestBytes := container.Resources.Requests.Memory().Value()
  441. // Because information on container RAM & CPU requests isn't
  442. // coming from Prometheus, it won't have a timestamp associated
  443. // with it. We need to provide a timestamp.
  444. RAMReqV := []*util.Vector{
  445. {
  446. Value: float64(ramRequestBytes),
  447. Timestamp: float64(time.Now().UTC().Unix()),
  448. },
  449. }
  450. // use millicores so we can convert to cores in a float64 format
  451. cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
  452. CPUReqV := []*util.Vector{
  453. {
  454. Value: float64(cpuRequestMilliCores) / 1000,
  455. Timestamp: float64(time.Now().UTC().Unix()),
  456. },
  457. }
  458. gpuReqCount := 0.0
  459. if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
  460. gpuReqCount = g.AsApproximateFloat64()
  461. } else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
  462. gpuReqCount = g.AsApproximateFloat64()
  463. } else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
  464. // divide vgpu request/limits by total vgpus to get the portion of physical gpus requested
  465. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  466. } else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
  467. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  468. }
  469. GPUReqV := []*util.Vector{
  470. {
  471. Value: float64(gpuReqCount),
  472. Timestamp: float64(time.Now().UTC().Unix()),
  473. },
  474. }
  475. RAMUsedV, ok := RAMUsedMap[newKey]
  476. if !ok {
  477. log.Debug("no RAM usage for " + newKey)
  478. RAMUsedV = []*util.Vector{{}}
  479. }
  480. CPUUsedV, ok := CPUUsedMap[newKey]
  481. if !ok {
  482. log.Debug("no CPU usage for " + newKey)
  483. CPUUsedV = []*util.Vector{{}}
  484. }
  485. var pvReq []*PersistentVolumeClaimData
  486. var netReq []*util.Vector
  487. if i == 0 { // avoid duplicating by just assigning all claims to the first container.
  488. pvReq = podPVs
  489. netReq = podNetCosts
  490. }
  491. costs := &CostData{
  492. Name: containerName,
  493. PodName: podName,
  494. NodeName: nodeName,
  495. Namespace: ns,
  496. Deployments: podDeployments,
  497. Services: podServices,
  498. Daemonsets: getDaemonsetsOfPod(pod),
  499. Jobs: getJobsOfPod(pod),
  500. Statefulsets: getStatefulSetsOfPod(pod),
  501. NodeData: nodeData,
  502. RAMReq: RAMReqV,
  503. RAMUsed: RAMUsedV,
  504. CPUReq: CPUReqV,
  505. CPUUsed: CPUUsedV,
  506. GPUReq: GPUReqV,
  507. PVCData: pvReq,
  508. NetworkData: netReq,
  509. Annotations: podAnnotations,
  510. Labels: podLabels,
  511. NamespaceLabels: nsLabels,
  512. ClusterID: clusterID,
  513. ClusterName: cm.ClusterMap.NameFor(clusterID),
  514. }
  515. var cpuReq, cpuUse *util.Vector
  516. if len(costs.CPUReq) > 0 {
  517. cpuReq = costs.CPUReq[0]
  518. }
  519. if len(costs.CPUUsed) > 0 {
  520. cpuUse = costs.CPUUsed[0]
  521. }
  522. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  523. var ramReq, ramUse *util.Vector
  524. if len(costs.RAMReq) > 0 {
  525. ramReq = costs.RAMReq[0]
  526. }
  527. if len(costs.RAMUsed) > 0 {
  528. ramUse = costs.RAMUsed[0]
  529. }
  530. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  531. if filterNamespace == "" {
  532. containerNameCost[newKey] = costs
  533. } else if costs.Namespace == filterNamespace {
  534. containerNameCost[newKey] = costs
  535. }
  536. }
  537. } else {
  538. // The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
  539. log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
  540. c, err := NewContainerMetricFromKey(key)
  541. if err != nil {
  542. return nil, err
  543. }
  544. // CPU and RAM requests are obtained from the Kubernetes API.
  545. // If this case has been reached, the Kubernetes API will not
  546. // have information about the pod because it no longer exists.
  547. //
  548. // The case where this matters is minimal, mainly in environments
  549. // with very short-lived pods that over-request resources.
  550. RAMReqV := []*util.Vector{{}}
  551. CPUReqV := []*util.Vector{{}}
  552. GPUReqV := []*util.Vector{{}}
  553. RAMUsedV, ok := RAMUsedMap[key]
  554. if !ok {
  555. log.Debug("no RAM usage for " + key)
  556. RAMUsedV = []*util.Vector{{}}
  557. }
  558. CPUUsedV, ok := CPUUsedMap[key]
  559. if !ok {
  560. log.Debug("no CPU usage for " + key)
  561. CPUUsedV = []*util.Vector{{}}
  562. }
  563. node, ok := nodes[c.NodeName]
  564. if !ok {
  565. log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
  566. if n, ok := missingNodes[c.NodeName]; ok {
  567. node = n
  568. } else {
  569. node = &costAnalyzerCloud.Node{}
  570. missingNodes[c.NodeName] = node
  571. }
  572. }
  573. namespacelabels, _ := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
  574. namespaceAnnotations, _ := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
  575. costs := &CostData{
  576. Name: c.ContainerName,
  577. PodName: c.PodName,
  578. NodeName: c.NodeName,
  579. NodeData: node,
  580. Namespace: c.Namespace,
  581. RAMReq: RAMReqV,
  582. RAMUsed: RAMUsedV,
  583. CPUReq: CPUReqV,
  584. CPUUsed: CPUUsedV,
  585. GPUReq: GPUReqV,
  586. Annotations: namespaceAnnotations,
  587. NamespaceLabels: namespacelabels,
  588. ClusterID: c.ClusterID,
  589. ClusterName: cm.ClusterMap.NameFor(c.ClusterID),
  590. }
  591. var cpuReq, cpuUse *util.Vector
  592. if len(costs.CPUReq) > 0 {
  593. cpuReq = costs.CPUReq[0]
  594. }
  595. if len(costs.CPUUsed) > 0 {
  596. cpuUse = costs.CPUUsed[0]
  597. }
  598. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  599. var ramReq, ramUse *util.Vector
  600. if len(costs.RAMReq) > 0 {
  601. ramReq = costs.RAMReq[0]
  602. }
  603. if len(costs.RAMUsed) > 0 {
  604. ramUse = costs.RAMUsed[0]
  605. }
  606. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  607. if filterNamespace == "" {
  608. containerNameCost[key] = costs
  609. missingContainers[key] = costs
  610. } else if costs.Namespace == filterNamespace {
  611. containerNameCost[key] = costs
  612. missingContainers[key] = costs
  613. }
  614. }
  615. }
  616. // Use unmounted PVs to create per-namespace "unmounted-pvs" containers
  617. // that carry the cost data for volumes no pod claims
  618. unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
  619. for k, costs := range unmounted {
  620. log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
  621. if filterNamespace == "" {
  622. containerNameCost[k] = costs
  623. } else if costs.Namespace == filterNamespace {
  624. containerNameCost[k] = costs
  625. }
  626. }
  627. err = findDeletedNodeInfo(cli, missingNodes, window, "")
  628. if err != nil {
  629. log.Errorf("Error fetching historical node data: %s", err.Error())
  630. }
  631. err = findDeletedPodInfo(cli, missingContainers, window)
  632. if err != nil {
  633. log.Errorf("Error fetching historical pod data: %s", err.Error())
  634. }
  635. return containerNameCost, err
  636. }
  637. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  638. costs := make(map[string]*CostData)
  639. if len(unmountedPVs) == 0 {
  640. return costs
  641. }
  642. for k, pv := range unmountedPVs {
  643. keyParts := strings.Split(k, ",")
  644. if len(keyParts) != 3 {
  645. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  646. continue
  647. }
  648. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  649. namespacelabels, _ := namespaceLabelsMapping[ns+","+clusterID]
  650. namespaceAnnotations, _ := namespaceAnnotationsMapping[ns+","+clusterID]
  651. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  652. key := metric.Key()
  653. if costData, ok := costs[key]; !ok {
  654. costs[key] = &CostData{
  655. Name: unmountedPVsContainer,
  656. PodName: unmountedPVsContainer,
  657. NodeName: "",
  658. Annotations: namespaceAnnotations,
  659. Namespace: ns,
  660. NamespaceLabels: namespacelabels,
  661. Labels: namespacelabels,
  662. ClusterID: clusterID,
  663. ClusterName: clusterMap.NameFor(clusterID),
  664. PVCData: pv,
  665. }
  666. } else {
  667. costData.PVCData = append(costData.PVCData, pv...)
  668. }
  669. }
  670. return costs
  671. }
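// Illustrative example (hypothetical key): an unmounted claim keyed
// "monitoring,prometheus-data,cluster-one" is split into namespace "monitoring"
// and cluster "cluster-one", and its PVCData is attached to a synthetic
// "unmounted-pvs" container in that namespace so the volume's cost still
// appears in the namespace totals.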
  672. func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
  673. if len(missingContainers) > 0 {
  674. queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{%s}[%s]`, env.GetPromClusterFilter(), window)
  675. podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
  676. if err != nil {
  677. log.Errorf("failed to query historical pod labels: %s", err.Error())
  678. }
  679. podLabels := make(map[string]map[string]string)
  680. if podLabelsResult != nil {
  681. podLabels, err = parsePodLabels(podLabelsResult)
  682. if err != nil {
  683. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  684. }
  685. }
  686. for key, costData := range missingContainers {
  687. cm, _ := NewContainerMetricFromKey(key)
  688. labels, ok := podLabels[cm.PodName]
  689. if !ok {
  690. labels = make(map[string]string)
  691. }
  692. for k, v := range costData.NamespaceLabels {
  693. labels[k] = v
  694. }
  695. costData.Labels = labels
  696. }
  697. }
  698. return nil
  699. }
  700. func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
  701. if len(missingNodes) > 0 {
  702. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  703. offsetStr := ""
  704. if offset != "" {
  705. offsetStr = fmt.Sprintf("offset %s", offset)
  706. }
  707. queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  708. queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  709. queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  710. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  711. cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
  712. ramCostResCh := ctx.Query(queryHistoricalRAMCost)
  713. gpuCostResCh := ctx.Query(queryHistoricalGPUCost)
  714. cpuCostRes, _ := cpuCostResCh.Await()
  715. ramCostRes, _ := ramCostResCh.Await()
  716. gpuCostRes, _ := gpuCostResCh.Await()
  717. if ctx.HasErrors() {
  718. return ctx.ErrorCollection()
  719. }
  720. cpuCosts, err := getCost(cpuCostRes)
  721. if err != nil {
  722. return err
  723. }
  724. ramCosts, err := getCost(ramCostRes)
  725. if err != nil {
  726. return err
  727. }
  728. gpuCosts, err := getCost(gpuCostRes)
  729. if err != nil {
  730. return err
  731. }
  732. if len(cpuCosts) == 0 {
  733. log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  734. }
  735. for node, costv := range cpuCosts {
  736. if _, ok := missingNodes[node]; ok {
  737. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  738. } else {
  739. log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
  740. }
  741. }
  742. for node, costv := range ramCosts {
  743. if _, ok := missingNodes[node]; ok {
  744. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  745. }
  746. }
  747. for node, costv := range gpuCosts {
  748. if _, ok := missingNodes[node]; ok {
  749. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  750. }
  751. }
  752. }
  753. return nil
  754. }
  755. // getContainerAllocation takes the max between request and usage. This function
  756. // returns a slice containing a single element describing the container's
  757. // allocation.
  758. //
  759. // Additionally, the timestamp of the allocation will be the highest value
  760. // timestamp between the two vectors. This mitigates situations where
  761. // Timestamp=0. This should have no effect on the metrics emitted by the
  762. // CostModelMetricsEmitter
  763. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  764. var result []*util.Vector
  765. if req != nil && used != nil {
  766. x1 := req.Value
  767. if math.IsNaN(x1) {
  768. log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
  769. x1 = 0.0
  770. }
  771. y1 := used.Value
  772. if math.IsNaN(y1) {
  773. log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
  774. y1 = 0.0
  775. }
  776. result = []*util.Vector{
  777. {
  778. Value: math.Max(x1, y1),
  779. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  780. },
  781. }
  782. if result[0].Value == 0 && result[0].Timestamp == 0 {
  783. log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  784. }
  785. } else if req != nil {
  786. result = []*util.Vector{
  787. {
  788. Value: req.Value,
  789. Timestamp: req.Timestamp,
  790. },
  791. }
  792. } else if used != nil {
  793. result = []*util.Vector{
  794. {
  795. Value: used.Value,
  796. Timestamp: used.Timestamp,
  797. },
  798. }
  799. } else {
  800. log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  801. result = []*util.Vector{
  802. {
  803. Value: 0,
  804. Timestamp: float64(time.Now().UTC().Unix()),
  805. },
  806. }
  807. }
  808. return result
  809. }
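// Illustrative example (hypothetical values): given a request vector
// {Value: 0.5, Timestamp: 1000} and a usage vector {Value: 0.75, Timestamp: 0},
// getContainerAllocation returns []*util.Vector{{Value: 0.75, Timestamp: 1000}}:
// the max of the two values, stamped with the max of the two timestamps.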
  810. func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
  811. cfg, err := cloud.GetConfig()
  812. if err != nil {
  813. return err
  814. }
  815. // Pull a region from the first node
  816. var defaultRegion string
  817. nodeList := cache.GetAllNodes()
  818. if len(nodeList) > 0 {
  819. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  820. }
  821. storageClasses := cache.GetAllStorageClasses()
  822. storageClassMap := make(map[string]map[string]string)
  823. for _, storageClass := range storageClasses {
  824. params := storageClass.Parameters
  825. storageClassMap[storageClass.ObjectMeta.Name] = params
  826. if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  827. storageClassMap["default"] = params
  828. storageClassMap[""] = params
  829. }
  830. }
  831. pvs := cache.GetAllPersistentVolumes()
  832. pvMap := make(map[string]*costAnalyzerCloud.PV)
  833. for _, pv := range pvs {
  834. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  835. if !ok {
  836. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  837. }
  838. var region string
  839. if r, ok := util.GetRegion(pv.Labels); ok {
  840. region = r
  841. } else {
  842. region = defaultRegion
  843. }
  844. cacPv := &costAnalyzerCloud.PV{
  845. Class: pv.Spec.StorageClassName,
  846. Region: region,
  847. Parameters: parameters,
  848. }
  849. err := GetPVCost(cacPv, pv, cloud, region)
  850. if err != nil {
  851. return err
  852. }
  853. pvMap[pv.Name] = cacPv
  854. }
  855. for _, pvc := range pvClaimMapping {
  856. if vol, ok := pvMap[pvc.VolumeName]; ok {
  857. pvc.Volume = vol
  858. } else {
  859. log.Debugf("PV not found, using default")
  860. pvc.Volume = &costAnalyzerCloud.PV{
  861. Cost: cfg.Storage,
  862. }
  863. }
  864. }
  865. return nil
  866. }
  867. func GetPVCost(pv *costAnalyzerCloud.PV, kpv *v1.PersistentVolume, cp costAnalyzerCloud.Provider, defaultRegion string) error {
  868. cfg, err := cp.GetConfig()
  869. if err != nil {
  870. return err
  871. }
  872. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  873. pv.ProviderID = key.ID()
  874. pvWithCost, err := cp.PVPricing(key)
  875. if err != nil {
  876. pv.Cost = cfg.Storage
  877. return err
  878. }
  879. if pvWithCost == nil || pvWithCost.Cost == "" {
  880. pv.Cost = cfg.Storage
  881. return nil // set default cost
  882. }
  883. pv.Cost = pvWithCost.Cost
  884. return nil
  885. }
  886. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  887. if cm.pricingMetadata != nil {
  888. return cm.pricingMetadata, nil
  889. } else {
  890. return nil, fmt.Errorf("Node costs not yet calculated")
  891. }
  892. }
  893. func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.Node, error) {
  894. cfg, err := cp.GetConfig()
  895. if err != nil {
  896. return nil, err
  897. }
  898. nodeList := cm.Cache.GetAllNodes()
  899. nodes := make(map[string]*costAnalyzerCloud.Node)
  900. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  901. if err != nil {
  902. return nil, err
  903. }
  904. vgpuCoeff := 10.0
  905. if vgpuCount > 0.0 {
  906. vgpuCoeff = vgpuCount
  907. }
  908. pmd := &costAnalyzerCloud.PricingMatchMetadata{
  909. TotalNodes: 0,
  910. PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
  911. }
  912. for _, n := range nodeList {
  913. name := n.GetObjectMeta().GetName()
  914. nodeLabels := n.GetObjectMeta().GetLabels()
  915. nodeLabels["providerID"] = n.Spec.ProviderID
  916. pmd.TotalNodes++
  917. cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
  918. if err != nil {
  919. log.Infof("Error getting node pricing. Error: %s", err.Error())
  920. if cnode != nil {
  921. nodes[name] = cnode
  922. continue
  923. } else {
  924. cnode = &costAnalyzerCloud.Node{
  925. VCPUCost: cfg.CPU,
  926. RAMCost: cfg.RAM,
  927. }
  928. }
  929. }
  930. if _, ok := pmd.PricingTypeCounts[cnode.PricingType]; ok {
  931. pmd.PricingTypeCounts[cnode.PricingType]++
  932. } else {
  933. pmd.PricingTypeCounts[cnode.PricingType] = 1
  934. }
  935. newCnode := *cnode
  936. if newCnode.InstanceType == "" {
  937. it, _ := util.GetInstanceType(n.Labels)
  938. newCnode.InstanceType = it
  939. }
  940. if newCnode.Region == "" {
  941. region, _ := util.GetRegion(n.Labels)
  942. newCnode.Region = region
  943. }
  944. if newCnode.ArchType == "" {
  945. arch, _ := util.GetArchType(n.Labels)
  946. newCnode.ArchType = arch
  947. }
  948. newCnode.ProviderID = n.Spec.ProviderID
  949. var cpu float64
  950. if newCnode.VCPU == "" {
  951. cpu = float64(n.Status.Capacity.Cpu().Value())
  952. newCnode.VCPU = n.Status.Capacity.Cpu().String()
  953. } else {
  954. cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
  955. if err != nil {
  956. log.Warnf("could not parse VCPU value \"%s\" as float64", newCnode.VCPU)
  957. }
  958. }
  959. if math.IsNaN(cpu) {
  960. log.Warnf("cpu parsed as NaN. Setting to 0.")
  961. cpu = 0
  962. }
  963. var ram float64
  964. if newCnode.RAM == "" {
  965. newCnode.RAM = n.Status.Capacity.Memory().String()
  966. }
  967. ram = float64(n.Status.Capacity.Memory().Value())
  968. if math.IsNaN(ram) {
  969. log.Warnf("ram parsed as NaN. Setting to 0.")
  970. ram = 0
  971. }
  972. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  973. // Azure does not seem to provide a GPU count in its pricing API, and GKE supports attaching multiple GPUs,
  974. // so the k8s API will often report a more accurate GPU count under status > capacity > nvidia.com/gpu than the cloud provider's billing data.
  975. // Not all providers are guaranteed to set this, so don't overwrite the Provider's assignment if we can't find anything under that capacity.
  976. gpuc := 0.0
  977. q, ok := n.Status.Capacity["nvidia.com/gpu"]
  978. if ok {
  979. gpuCount := q.Value()
  980. if gpuCount != 0 {
  981. newCnode.GPU = fmt.Sprintf("%d", gpuCount)
  982. gpuc = float64(gpuCount)
  983. }
  984. } else if g, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
  985. gpuCount := g.Value()
  986. if gpuCount != 0 {
  987. newCnode.GPU = fmt.Sprintf("%d", int(float64(gpuCount)/vgpuCoeff))
  988. gpuc = float64(gpuCount) / vgpuCoeff
  989. }
  990. } else {
  991. gpuc, err = strconv.ParseFloat(newCnode.GPU, 64)
  992. if err != nil {
  993. gpuc = 0.0
  994. }
  995. }
  996. if math.IsNaN(gpuc) {
  997. log.Warnf("gpu count parsed as NaN. Setting to 0.")
  998. gpuc = 0.0
  999. }
  1000. // Special case for SUSE Rancher, since it won't behave with the normal
  1001. // calculations because its instance type ("rke2") is not a real,
  1002. // recognizable AWS instance type.
  1003. if newCnode.InstanceType == "rke2" {
  1004. log.Infof(
  1005. "Found a SUSE Rancher node %s, defaulting and skipping math",
  1006. cp.GetKey(nodeLabels, n).Features(),
  1007. )
  1008. defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
  1009. if err != nil {
  1010. log.Errorf("Could not parse default cpu price")
  1011. defaultCPUCorePrice = 0
  1012. }
  1013. if math.IsNaN(defaultCPUCorePrice) {
  1014. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1015. defaultCPUCorePrice = 0
  1016. }
  1017. defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
  1018. if err != nil {
  1019. log.Errorf("Could not parse default ram price")
  1020. defaultRAMPrice = 0
  1021. }
  1022. if math.IsNaN(defaultRAMPrice) {
  1023. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1024. defaultRAMPrice = 0
  1025. }
  1026. defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
  1027. if err != nil {
  1028. log.Errorf("Could not parse default gpu price")
  1029. defaultGPUPrice = 0
  1030. }
  1031. if math.IsNaN(defaultGPUPrice) {
  1032. log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
  1033. defaultGPUPrice = 0
  1034. }
  1035. // Just say no to doing the ratios!
  1036. cpuCost := defaultCPUCorePrice * cpu
  1037. gpuCost := defaultGPUPrice * gpuc
  1038. ramCost := defaultRAMPrice * ram
  1039. nodeCost := cpuCost + gpuCost + ramCost
  1040. newCnode.Cost = fmt.Sprintf("%f", nodeCost)
  1041. newCnode.VCPUCost = fmt.Sprintf("%f", cpuCost)
  1042. newCnode.GPUCost = fmt.Sprintf("%f", gpuCost)
  1043. newCnode.RAMCost = fmt.Sprintf("%f", ramCost)
  1044. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1045. } else if newCnode.GPU != "" && newCnode.GPUCost == "" {
  1046. // (The ratio math below was interfering with SUSE Rancher's default pricing,
  1047. // which is why that case is handled separately above.)
  1048. // We couldn't find a GPU cost, so fix the CPU and RAM prices and allocate the GPU cost accordingly.
  1049. log.Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1050. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1051. if err != nil {
  1052. log.Errorf("Could not parse default cpu price")
  1053. defaultCPU = 0
  1054. }
  1055. if math.IsNaN(defaultCPU) {
  1056. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1057. defaultCPU = 0
  1058. }
  1059. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1060. if err != nil {
  1061. log.Errorf("Could not parse default ram price")
  1062. defaultRAM = 0
  1063. }
  1064. if math.IsNaN(defaultRAM) {
  1065. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1066. defaultRAM = 0
  1067. }
  1068. defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
  1069. if err != nil {
  1070. log.Errorf("Could not parse default gpu price")
  1071. defaultGPU = 0
  1072. }
  1073. if math.IsNaN(defaultGPU) {
  1074. log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
  1075. defaultGPU = 0
  1076. }
  1077. cpuToRAMRatio := defaultCPU / defaultRAM
  1078. if math.IsNaN(cpuToRAMRatio) {
  1079. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
  1080. cpuToRAMRatio = 10
  1081. }
  1082. gpuToRAMRatio := defaultGPU / defaultRAM
  1083. if math.IsNaN(gpuToRAMRatio) {
  1084. log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
  1085. gpuToRAMRatio = 100
  1086. }
  1087. ramGB := ram / 1024 / 1024 / 1024
  1088. if math.IsNaN(ramGB) {
  1089. log.Warnf("ramGB is NaN. Setting to 0.")
  1090. ramGB = 0
  1091. }
  1092. ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
  1093. if math.IsNaN(ramMultiple) {
  1094. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1095. ramMultiple = 0
  1096. }
  1097. var nodePrice float64
  1098. if newCnode.Cost != "" {
  1099. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1100. if err != nil {
  1101. log.Errorf("Could not parse total node price")
  1102. return nil, err
  1103. }
  1104. } else if newCnode.VCPUCost != "" {
  1105. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1106. if err != nil {
  1107. log.Errorf("Could not parse node vcpu price")
  1108. return nil, err
  1109. }
  1110. } else { // add case to use default pricing model when API data fails.
  1111. log.Debugf("No node price or CPUprice found, falling back to default")
  1112. nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
  1113. }
  1114. if math.IsNaN(nodePrice) {
  1115. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1116. nodePrice = 0
  1117. }
  1118. ramPrice := (nodePrice / ramMultiple)
  1119. if math.IsNaN(ramPrice) {
  1120. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1121. ramPrice = 0
  1122. }
  1123. cpuPrice := ramPrice * cpuToRAMRatio
  1124. gpuPrice := ramPrice * gpuToRAMRatio
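// Worked example with hypothetical numbers: defaultCPU=0.03, defaultRAM=0.004, defaultGPU=0.80
// give cpuToRAMRatio=7.5 and gpuToRAMRatio=200. For a node with 8 vCPU, 32 GiB RAM, 1 GPU and
// nodePrice=1.46, ramMultiple = 1*200 + 8*7.5 + 32 = 292, so ramPrice = 1.46/292 = 0.005,
// cpuPrice = 0.0375 and gpuPrice = 1.00; re-assembling, 1*1.00 + 8*0.0375 + 32*0.005 = 1.46.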
  1125. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1126. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1127. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1128. newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
  1129. } else if newCnode.RAMCost == "" {
  1130. // We couldn't find a ramcost, so fix cpu and allocate ram accordingly
  1131. log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1132. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1133. if err != nil {
  1134. log.Warnf("Could not parse default cpu price")
  1135. defaultCPU = 0
  1136. }
  1137. if math.IsNaN(defaultCPU) {
  1138. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1139. defaultCPU = 0
  1140. }
  1141. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1142. if err != nil {
  1143. log.Warnf("Could not parse default ram price")
  1144. defaultRAM = 0
  1145. }
  1146. if math.IsNaN(defaultRAM) {
  1147. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1148. defaultRAM = 0
  1149. }
  1150. cpuToRAMRatio := defaultCPU / defaultRAM
  1151. if math.IsNaN(cpuToRAMRatio) {
  1152. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
  1153. cpuToRAMRatio = 10
  1154. }
  1155. ramGB := ram / 1024 / 1024 / 1024
  1156. if math.IsNaN(ramGB) {
  1157. log.Warnf("ramGB is NaN. Setting to 0.")
  1158. ramGB = 0
  1159. }
  1160. ramMultiple := cpu*cpuToRAMRatio + ramGB
  1161. if math.IsNaN(ramMultiple) {
  1162. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1163. ramMultiple = 0
  1164. }
  1165. var nodePrice float64
  1166. if newCnode.Cost != "" {
  1167. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1168. if err != nil {
  1169. log.Warnf("Could not parse total node price")
  1170. return nil, err
  1171. }
  1172. if newCnode.GPUCost != "" {
  1173. gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
  1174. if err != nil {
  1175. log.Warnf("Could not parse node gpu price")
  1176. return nil, err
  1177. }
  1178. nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
  1179. }
  1180. } else if newCnode.VCPUCost != "" {
  1181. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1182. if err != nil {
  1183. log.Warnf("Could not parse node vcpu price")
  1184. return nil, err
  1185. }
  1186. } else { // add case to use default pricing model when API data fails.
  1187. log.Debugf("No node price or CPUprice found, falling back to default")
  1188. nodePrice = defaultCPU*cpu + defaultRAM*ramGB
  1189. }
  1190. if math.IsNaN(nodePrice) {
  1191. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1192. nodePrice = 0
  1193. }
  1194. ramPrice := (nodePrice / ramMultiple)
  1195. if math.IsNaN(ramPrice) {
  1196. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1197. ramPrice = 0
  1198. }
  1199. cpuPrice := ramPrice * cpuToRAMRatio
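// Worked example with hypothetical numbers: defaultCPU=0.03 and defaultRAM=0.004 give
// cpuToRAMRatio=7.5. For a node with 4 vCPU, 16 GiB RAM and nodePrice=0.23 (GPU cost already
// removed above), ramMultiple = 4*7.5 + 16 = 46, so ramPrice = 0.23/46 = 0.005 and
// cpuPrice = 0.0375; re-assembling, 4*0.0375 + 16*0.005 = 0.23.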
  1200. if defaultRAM != 0 {
  1201. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1202. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1203. } else { // just assign the full price to CPU
  1204. if cpu != 0 {
  1205. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
  1206. } else {
  1207. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
  1208. }
  1209. }
  1210. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1211. log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
  1212. }
  1213. nodes[name] = &newCnode
  1214. }
  1215. cm.pricingMetadata = pmd
  1216. cp.ApplyReservedInstancePricing(nodes)
  1217. return nodes, nil
  1218. }
  1219. // TODO: drop some logs
  1220. func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1221. // for fetching prices from cloud provider
  1222. // cfg, err := cp.GetConfig()
  1223. // if err != nil {
  1224. // return nil, err
  1225. // }
  1226. servicesList := cm.Cache.GetAllServices()
  1227. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1228. for _, service := range servicesList {
  1229. namespace := service.GetObjectMeta().GetNamespace()
  1230. name := service.GetObjectMeta().GetName()
  1231. key := serviceKey{
  1232. Cluster: env.GetClusterID(),
  1233. Namespace: namespace,
  1234. Service: name,
  1235. }
  1236. if service.Spec.Type == "LoadBalancer" {
  1237. loadBalancer, err := cp.LoadBalancerPricing()
  1238. if err != nil {
  1239. return nil, err
  1240. }
  1241. newLoadBalancer := *loadBalancer
  1242. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1243. address := loadBalancerIngress.IP
  1244. // Some cloud providers use hostname rather than IP
  1245. if address == "" {
  1246. address = loadBalancerIngress.Hostname
  1247. }
  1248. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1249. }
  1250. loadBalancerMap[key] = &newLoadBalancer
  1251. }
  1252. }
  1253. return loadBalancerMap, nil
  1254. }
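// getPodServices returns a mapping of "namespace,clusterID" -> pod name -> the names of the
// services whose selectors match that pod's labels. Services with no selector match nothing.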
  1255. func getPodServices(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1256. servicesList := cache.GetAllServices()
  1257. podServicesMapping := make(map[string]map[string][]string)
  1258. for _, service := range servicesList {
  1259. namespace := service.GetObjectMeta().GetNamespace()
  1260. name := service.GetObjectMeta().GetName()
  1261. key := namespace + "," + clusterID
  1262. if _, ok := podServicesMapping[key]; !ok {
  1263. podServicesMapping[key] = make(map[string][]string)
  1264. }
  1265. s := labels.Nothing()
  1266. if service.Spec.Selector != nil && len(service.Spec.Selector) > 0 {
  1267. s = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
  1268. }
  1269. for _, pod := range podList {
  1270. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1271. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1272. services, ok := podServicesMapping[key][pod.GetObjectMeta().GetName()]
  1273. if ok {
  1274. podServicesMapping[key][pod.GetObjectMeta().GetName()] = append(services, name)
  1275. } else {
  1276. podServicesMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1277. }
  1278. }
  1279. }
  1280. }
  1281. return podServicesMapping, nil
  1282. }
  1283. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1284. ssList := cache.GetAllStatefulSets()
  1285. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1286. for _, ss := range ssList {
  1287. namespace := ss.GetObjectMeta().GetNamespace()
  1288. name := ss.GetObjectMeta().GetName()
  1289. key := namespace + "," + clusterID
  1290. if _, ok := podSSMapping[key]; !ok {
  1291. podSSMapping[key] = make(map[string][]string)
  1292. }
  1293. s, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
  1294. if err != nil {
  1295. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1296. }
  1297. for _, pod := range podList {
  1298. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1299. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1300. sss, ok := podSSMapping[key][pod.GetObjectMeta().GetName()]
  1301. if ok {
  1302. podSSMapping[key][pod.GetObjectMeta().GetName()] = append(sss, name)
  1303. } else {
  1304. podSSMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1305. }
  1306. }
  1307. }
  1308. }
  1309. return podSSMapping, nil
  1310. }
  1311. func getPodDeployments(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1312. deploymentsList := cache.GetAllDeployments()
  1313. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1314. for _, deployment := range deploymentsList {
  1315. namespace := deployment.GetObjectMeta().GetNamespace()
  1316. name := deployment.GetObjectMeta().GetName()
  1317. key := namespace + "," + clusterID
  1318. if _, ok := podDeploymentsMapping[key]; !ok {
  1319. podDeploymentsMapping[key] = make(map[string][]string)
  1320. }
  1321. s, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
  1322. if err != nil {
  1323. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1324. }
  1325. for _, pod := range podList {
  1326. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1327. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1328. deployments, ok := podDeploymentsMapping[key][pod.GetObjectMeta().GetName()]
  1329. if ok {
  1330. podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = append(deployments, name)
  1331. } else {
  1332. podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1333. }
  1334. }
  1335. }
  1336. }
  1337. return podDeploymentsMapping, nil
  1338. }
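// getPodDeploymentsWithMetrics builds the same "namespace,clusterID" -> pod name -> deployment
// names mapping as getPodDeployments, but from metric-derived label maps whose keys are parsed
// via NewKeyTuple into namespace, name, and cluster ID.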
  1339. func getPodDeploymentsWithMetrics(deploymentLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1340. podDeploymentsMapping := make(map[string]map[string][]string)
  1341. for depKey, depLabels := range deploymentLabels {
  1342. kt, err := NewKeyTuple(depKey)
  1343. if err != nil {
  1344. continue
  1345. }
  1346. namespace := kt.Namespace()
  1347. name := kt.Key()
  1348. clusterID := kt.ClusterID()
  1349. key := namespace + "," + clusterID
  1350. if _, ok := podDeploymentsMapping[key]; !ok {
  1351. podDeploymentsMapping[key] = make(map[string][]string)
  1352. }
  1353. s := labels.Set(depLabels).AsSelectorPreValidated()
  1354. for podKey, pLabels := range podLabels {
  1355. pkey, err := NewKeyTuple(podKey)
  1356. if err != nil {
  1357. continue
  1358. }
  1359. podNamespace := pkey.Namespace()
  1360. podName := pkey.Key()
  1361. podClusterID := pkey.ClusterID()
  1362. labelSet := labels.Set(pLabels)
  1363. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1364. deployments, ok := podDeploymentsMapping[key][podName]
  1365. if ok {
  1366. podDeploymentsMapping[key][podName] = append(deployments, name)
  1367. } else {
  1368. podDeploymentsMapping[key][podName] = []string{name}
  1369. }
  1370. }
  1371. }
  1372. }
  1373. // Remove any duplicate data created by metric names
  1374. pruneDuplicateData(podDeploymentsMapping)
  1375. return podDeploymentsMapping, nil
  1376. }
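// getPodServicesWithMetrics is the metrics-based analogue of getPodServices: it matches pods to
// services using selector labels exported as metrics, with keys parsed via NewKeyTuple. Services
// whose exported selector is empty match nothing.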
  1377. func getPodServicesWithMetrics(serviceLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1378. podServicesMapping := make(map[string]map[string][]string)
  1379. for servKey, servLabels := range serviceLabels {
  1380. kt, err := NewKeyTuple(servKey)
  1381. if err != nil {
  1382. continue
  1383. }
  1384. namespace := kt.Namespace()
  1385. name := kt.Key()
  1386. clusterID := kt.ClusterID()
  1387. key := namespace + "," + clusterID
  1388. if _, ok := podServicesMapping[key]; !ok {
  1389. podServicesMapping[key] = make(map[string][]string)
  1390. }
  1391. s := labels.Nothing()
  1392. if servLabels != nil && len(servLabels) > 0 {
  1393. s = labels.Set(servLabels).AsSelectorPreValidated()
  1394. }
  1395. for podKey, pLabels := range podLabels {
  1396. pkey, err := NewKeyTuple(podKey)
  1397. if err != nil {
  1398. continue
  1399. }
  1400. podNamespace := pkey.Namespace()
  1401. podName := pkey.Key()
  1402. podClusterID := pkey.ClusterID()
  1403. labelSet := labels.Set(pLabels)
  1404. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1405. services, ok := podServicesMapping[key][podName]
  1406. if ok {
  1407. podServicesMapping[key][podName] = append(services, name)
  1408. } else {
  1409. podServicesMapping[key][podName] = []string{name}
  1410. }
  1411. }
  1412. }
  1413. }
  1414. // Remove any duplicate data created by metric names
  1415. pruneDuplicateData(podServicesMapping)
  1416. return podServicesMapping, nil
  1417. }
  1418. // This method alleviates an issue with metrics that used a '_' to replace '-' in deployment
  1419. // and service names. To avoid counting these as multiple deployments/services, we'll remove
  1420. // the '_' version. Not optimal, but takes care of the issue
  1421. func pruneDuplicateData(data map[string]map[string][]string) {
  1422. for _, podMap := range data {
  1423. for podName, values := range podMap {
  1424. podMap[podName] = pruneDuplicates(values)
  1425. }
  1426. }
  1427. }
1428. // For each value that contains an underscore, replace '_' with '-', make sure the '-' form is
1429. // present in the set, and drop the original '_' form so the same deployment/service is not
1430. // counted twice.
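// For example, pruneDuplicates([]string{"kube_dns", "kube-dns"}) returns []string{"kube-dns"}
// (result order is not guaranteed, since the slice is rebuilt from a set).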
  1431. func pruneDuplicates(s []string) []string {
  1432. m := sliceToSet(s)
  1433. for _, v := range s {
  1434. if strings.Contains(v, "_") {
  1435. name := strings.Replace(v, "_", "-", -1)
  1436. if !m[name] {
  1437. m[name] = true
  1438. }
  1439. delete(m, v)
  1440. }
  1441. }
  1442. return setToSlice(m)
  1443. }
  1444. // Creates a map[string]bool containing the slice values as keys
  1445. func sliceToSet(s []string) map[string]bool {
  1446. m := make(map[string]bool)
  1447. for _, v := range s {
  1448. m[v] = true
  1449. }
  1450. return m
  1451. }
  1452. func setToSlice(m map[string]bool) []string {
  1453. var result []string
  1454. for k := range m {
  1455. result = append(result, k)
  1456. }
  1457. return result
  1458. }
  1459. func costDataPassesFilters(cm clusters.ClusterMap, costs *CostData, namespace string, cluster string) bool {
  1460. passesNamespace := namespace == "" || costs.Namespace == namespace
  1461. passesCluster := cluster == "" || costs.ClusterID == cluster || costs.ClusterName == cluster
  1462. return passesNamespace && passesCluster
  1463. }
1464. // Finds the closest multiple of "multiple" that is less than or equal to value
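// For example, floorMultiple(17, 5) == 15 and floorMultiple(20, 5) == 20.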
  1465. func floorMultiple(value int64, multiple int64) int64 {
  1466. return (value / multiple) * multiple
  1467. }
1468. // Attempt to create a key for the request. Times are reduced to minutes in order to more easily
1469. // group requests that cover effectively the same time range. If, for any reason, key generation fails, a uuid is returned to ensure uniqueness.
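// For illustration (hypothetical inputs): a window from 2024-01-02T15:05Z to 2024-01-02T16:05Z at
// 5m resolution with no filters and remote disabled produces the key
// "2024-01-02T15:05Z,2024-01-02T16:05Z,5m0s,,,false".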
  1470. func requestKeyFor(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
  1471. keyLayout := "2006-01-02T15:04Z"
  1472. // We "snap" start time and duration to their closest 5 min multiple less than itself, by
  1473. // applying a snapped duration to a snapped start time.
  1474. durMins := int64(window.Minutes())
  1475. durMins = floorMultiple(durMins, 5)
  1476. sMins := int64(window.Start().Minute())
  1477. sOffset := sMins - floorMultiple(sMins, 5)
  1478. sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
  1479. eTime := window.Start().Add(time.Duration(durMins) * time.Minute)
  1480. startKey := sTime.Format(keyLayout)
  1481. endKey := eTime.Format(keyLayout)
  1482. return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
  1483. }
1484. // ComputeCostDataRange executes a range query for cost data over the given window at the given
1485. // resolution, optionally filtering by namespace and cluster, and optionally using a remote store.
  1486. func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
  1487. // Create a request key for request grouping. This key will be used to represent the cost-model result
  1488. // for the specific inputs to prevent multiple queries for identical data.
  1489. key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1490. log.Debugf("ComputeCostDataRange with Key: %s", key)
  1491. // If there is already a request out that uses the same data, wait for it to return to share the results.
  1492. // Otherwise, start executing.
  1493. result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
  1494. return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1495. })
  1496. data, ok := result.(map[string]*CostData)
  1497. if !ok {
  1498. return nil, fmt.Errorf("Failed to cast result as map[string]*CostData")
  1499. }
  1500. return data, err
  1501. }
  1502. func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
  1503. clusterID := env.GetClusterID()
  1504. // durHrs := end.Sub(start).Hours() + 1
  1505. if window.IsOpen() {
  1506. return nil, fmt.Errorf("illegal window: %s", window)
  1507. }
  1508. start := *window.Start()
  1509. end := *window.End()
  1510. // Snap resolution to the nearest minute
  1511. resMins := int64(math.Trunc(resolution.Minutes()))
  1512. if resMins == 0 {
  1513. return nil, fmt.Errorf("resolution must be greater than 0.0")
  1514. }
  1515. resolution = time.Duration(resMins) * time.Minute
  1516. // Warn if resolution does not evenly divide window
  1517. if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
  1518. log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
  1519. }
  1520. // Convert to Prometheus-style duration string in terms of m or h
  1521. resStr := fmt.Sprintf("%dm", resMins)
  1522. if resMins%60 == 0 {
  1523. resStr = fmt.Sprintf("%dh", resMins/60)
  1524. }
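// e.g. a 5 minute resolution becomes "5m", while 120 minutes becomes "2h".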
  1525. if remoteEnabled {
  1526. remoteLayout := "2006-01-02T15:04:05Z"
  1527. remoteStartStr := window.Start().Format(remoteLayout)
  1528. remoteEndStr := window.End().Format(remoteLayout)
  1529. log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
  1530. return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
  1531. }
  1532. scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
  1533. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)
  1534. queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
  1535. queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
  1536. queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
  1537. queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1538. queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
  1539. queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1540. queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", resolution.Hours(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1541. queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
  1542. queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
  1543. queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
  1544. queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1545. queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1546. queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
  1547. queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
  1548. // Submit all queries for concurrent evaluation
  1549. resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
  1550. resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
  1551. resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
  1552. resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
  1553. resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
  1554. resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
  1555. resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
  1556. resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
  1557. resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
  1558. resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
  1559. resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
  1560. resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
  1561. resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
  1562. resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1563. resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1564. resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1565. resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1566. resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1567. resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1568. resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
  1569. resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
  1570. resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
  1571. resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
  1572. // Pull k8s pod, controller, service, and namespace details
  1573. podlist := cm.Cache.GetAllPods()
  1574. podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
  1575. if err != nil {
  1576. return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
  1577. }
  1578. podStatefulsetsMapping, err := getPodStatefulsets(cm.Cache, podlist, clusterID)
  1579. if err != nil {
  1580. return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
  1581. }
  1582. podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
  1583. if err != nil {
  1584. return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
  1585. }
  1586. namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
  1587. if err != nil {
  1588. return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
  1589. }
  1590. namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
  1591. if err != nil {
  1592. return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
  1593. }
  1594. // Process query results. Handle errors afterwards using ctx.Errors.
  1595. resRAMRequests, _ := resChRAMRequests.Await()
  1596. resRAMUsage, _ := resChRAMUsage.Await()
  1597. resRAMAlloc, _ := resChRAMAlloc.Await()
  1598. resCPURequests, _ := resChCPURequests.Await()
  1599. resCPUUsage, _ := resChCPUUsage.Await()
  1600. resCPUAlloc, _ := resChCPUAlloc.Await()
  1601. resGPURequests, _ := resChGPURequests.Await()
  1602. resPVRequests, _ := resChPVRequests.Await()
  1603. resPVCAlloc, _ := resChPVCAlloc.Await()
  1604. resPVHourlyCost, _ := resChPVHourlyCost.Await()
  1605. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  1606. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  1607. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  1608. resNSLabels, _ := resChNSLabels.Await()
  1609. resPodLabels, _ := resChPodLabels.Await()
  1610. resNSAnnotations, _ := resChNSAnnotations.Await()
  1611. resPodAnnotations, _ := resChPodAnnotations.Await()
  1612. resServiceLabels, _ := resChServiceLabels.Await()
  1613. resDeploymentLabels, _ := resChDeploymentLabels.Await()
  1614. resStatefulsetLabels, _ := resChStatefulsetLabels.Await()
  1615. resDaemonsets, _ := resChDaemonsets.Await()
  1616. resJobs, _ := resChJobs.Await()
  1617. resNormalization, _ := resChNormalization.Await()
  1618. // NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
  1619. // NOTE: will not propagate unless coupled with errors.
  1620. if ctx.HasErrors() {
1621. // To keep the context of where the errors occur, we log the errors here and then pass the
1622. // error back to the caller. The caller should handle the specific case where the error is an ErrorCollection.
  1623. for _, promErr := range ctx.Errors() {
  1624. if promErr.Error != nil {
  1625. log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
  1626. }
  1627. if promErr.ParseError != nil {
  1628. log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
  1629. }
  1630. }
1631. // ErrorCollection is a collection of errors wrapped in a single error implementation
  1632. return nil, ctx.ErrorCollection()
  1633. }
  1634. normalizationValue, err := getNormalizations(resNormalization)
  1635. if err != nil {
  1636. msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
  1637. return nil, prom.WrapError(err, msg)
  1638. }
  1639. pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
  1640. if err != nil {
  1641. // Just log for compatibility with KSM less than 1.6
  1642. log.Infof("Unable to get PV Data: %s", err.Error())
  1643. }
  1644. if pvClaimMapping != nil {
  1645. err = addPVData(cm.Cache, pvClaimMapping, cp)
  1646. if err != nil {
  1647. return nil, fmt.Errorf("pvClaimMapping: %s", err)
  1648. }
  1649. }
  1650. pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
  1651. if err != nil {
  1652. log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
  1653. }
  1654. unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
  1655. pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
  1656. if err != nil {
  1657. log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
  1658. }
  1659. if pvAllocationMapping != nil {
  1660. addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
  1661. for k, v := range pvAllocationMapping {
  1662. unmountedPVs[k] = v
  1663. }
  1664. }
  1665. nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
  1666. if err != nil {
  1667. log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
  1668. }
  1669. if nsLabels != nil {
  1670. mergeStringMap(namespaceLabelsMapping, nsLabels)
  1671. }
  1672. podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
  1673. if err != nil {
  1674. log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
  1675. }
  1676. nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
  1677. if err != nil {
  1678. log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
  1679. }
  1680. if nsAnnotations != nil {
  1681. mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
  1682. }
  1683. podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
  1684. if err != nil {
  1685. log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
  1686. }
  1687. serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
  1688. if err != nil {
  1689. log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
  1690. }
  1691. deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
  1692. if err != nil {
  1693. log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
  1694. }
  1695. statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
  1696. if err != nil {
  1697. log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
  1698. }
  1699. podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
  1700. if err != nil {
  1701. log.Errorf("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
  1702. }
  1703. appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)
  1704. podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
  1705. if err != nil {
  1706. log.Errorf("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
  1707. }
  1708. appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)
  1709. podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
  1710. if err != nil {
  1711. log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
  1712. }
  1713. podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
  1714. if err != nil {
  1715. log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
  1716. }
  1717. podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
  1718. if err != nil {
  1719. log.Errorf("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
  1720. }
  1721. appendLabelsList(podServicesMapping, podServicesMetricsMapping)
  1722. networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
  1723. if err != nil {
  1724. log.Errorf("Unable to get Network Cost Data: %s", err.Error())
  1725. networkUsageMap = make(map[string]*NetworkUsageData)
  1726. }
  1727. containerNameCost := make(map[string]*CostData)
  1728. containers := make(map[string]bool)
  1729. otherClusterPVRecorded := make(map[string]bool)
  1730. RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
  1731. if err != nil {
  1732. return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
  1733. }
  1734. for key := range RAMReqMap {
  1735. containers[key] = true
  1736. }
  1737. RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
  1738. if err != nil {
  1739. return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
  1740. }
  1741. for key := range RAMUsedMap {
  1742. containers[key] = true
  1743. }
  1744. CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
  1745. if err != nil {
  1746. return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
  1747. }
  1748. for key := range CPUReqMap {
  1749. containers[key] = true
  1750. }
  1751. // No need to normalize here, as this comes from a counter, namely:
  1752. // rate(container_cpu_usage_seconds_total) which properly accounts for normalized rates
  1753. CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
  1754. if err != nil {
  1755. return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
  1756. }
  1757. for key := range CPUUsedMap {
  1758. containers[key] = true
  1759. }
  1760. RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
  1761. if err != nil {
  1762. return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
  1763. }
  1764. for key := range RAMAllocMap {
  1765. containers[key] = true
  1766. }
  1767. CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
  1768. if err != nil {
  1769. return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
  1770. }
  1771. for key := range CPUAllocMap {
  1772. containers[key] = true
  1773. }
  1774. GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
  1775. if err != nil {
  1776. return nil, prom.WrapError(err, "GetContainerMetricVectors(GPURequests)")
  1777. }
  1778. for key := range GPUReqMap {
  1779. containers[key] = true
  1780. }
  1781. // Request metrics can show up after pod eviction and completion.
  1782. // This method synchronizes requests to allocations such that when
  1783. // allocation is 0, so are requests
  1784. applyAllocationToRequests(RAMAllocMap, RAMReqMap)
  1785. applyAllocationToRequests(CPUAllocMap, CPUReqMap)
  1786. missingNodes := make(map[string]*costAnalyzerCloud.Node)
  1787. missingContainers := make(map[string]*CostData)
  1788. for key := range containers {
  1789. if _, ok := containerNameCost[key]; ok {
  1790. continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
  1791. }
  1792. c, _ := NewContainerMetricFromKey(key)
  1793. RAMReqV, ok := RAMReqMap[key]
  1794. if !ok {
  1795. log.Debug("no RAM requests for " + key)
  1796. RAMReqV = []*util.Vector{}
  1797. }
  1798. RAMUsedV, ok := RAMUsedMap[key]
  1799. if !ok {
  1800. log.Debug("no RAM usage for " + key)
  1801. RAMUsedV = []*util.Vector{}
  1802. }
  1803. CPUReqV, ok := CPUReqMap[key]
  1804. if !ok {
  1805. log.Debug("no CPU requests for " + key)
  1806. CPUReqV = []*util.Vector{}
  1807. }
  1808. CPUUsedV, ok := CPUUsedMap[key]
  1809. if !ok {
  1810. log.Debug("no CPU usage for " + key)
  1811. CPUUsedV = []*util.Vector{}
  1812. }
  1813. RAMAllocsV, ok := RAMAllocMap[key]
  1814. if !ok {
  1815. log.Debug("no RAM allocation for " + key)
  1816. RAMAllocsV = []*util.Vector{}
  1817. }
  1818. CPUAllocsV, ok := CPUAllocMap[key]
  1819. if !ok {
  1820. log.Debug("no CPU allocation for " + key)
  1821. CPUAllocsV = []*util.Vector{}
  1822. }
  1823. GPUReqV, ok := GPUReqMap[key]
  1824. if !ok {
  1825. log.Debug("no GPU requests for " + key)
  1826. GPUReqV = []*util.Vector{}
  1827. }
  1828. var node *costAnalyzerCloud.Node
  1829. if n, ok := missingNodes[c.NodeName]; ok {
  1830. node = n
  1831. } else {
  1832. node = &costAnalyzerCloud.Node{}
  1833. missingNodes[c.NodeName] = node
  1834. }
  1835. nsKey := c.Namespace + "," + c.ClusterID
  1836. podKey := c.Namespace + "," + c.PodName + "," + c.ClusterID
  1837. namespaceLabels, _ := namespaceLabelsMapping[nsKey]
  1838. pLabels := podLabels[podKey]
  1839. if pLabels == nil {
  1840. pLabels = make(map[string]string)
  1841. }
  1842. for k, v := range namespaceLabels {
  1843. if _, ok := pLabels[k]; !ok {
  1844. pLabels[k] = v
  1845. }
  1846. }
  1847. namespaceAnnotations, _ := namespaceAnnotationsMapping[nsKey]
  1848. pAnnotations := podAnnotations[podKey]
  1849. if pAnnotations == nil {
  1850. pAnnotations = make(map[string]string)
  1851. }
  1852. for k, v := range namespaceAnnotations {
  1853. if _, ok := pAnnotations[k]; !ok {
  1854. pAnnotations[k] = v
  1855. }
  1856. }
  1857. var podDeployments []string
  1858. if _, ok := podDeploymentsMapping[nsKey]; ok {
  1859. if ds, ok := podDeploymentsMapping[nsKey][c.PodName]; ok {
  1860. podDeployments = ds
  1861. } else {
  1862. podDeployments = []string{}
  1863. }
  1864. }
  1865. var podStatefulSets []string
  1866. if _, ok := podStatefulsetsMapping[nsKey]; ok {
  1867. if ss, ok := podStatefulsetsMapping[nsKey][c.PodName]; ok {
  1868. podStatefulSets = ss
  1869. } else {
  1870. podStatefulSets = []string{}
  1871. }
  1872. }
  1873. var podServices []string
  1874. if _, ok := podServicesMapping[nsKey]; ok {
  1875. if svcs, ok := podServicesMapping[nsKey][c.PodName]; ok {
  1876. podServices = svcs
  1877. } else {
  1878. podServices = []string{}
  1879. }
  1880. }
  1881. var podPVs []*PersistentVolumeClaimData
  1882. var podNetCosts []*util.Vector
  1883. // For PVC data, we'll need to find the claim mapping and cost data. Will need to append
  1884. // cost data since that was populated by cluster data previously. We do this with
  1885. // the pod_pvc_allocation metric
  1886. podPVData, ok := pvAllocationMapping[podKey]
  1887. if !ok {
  1888. log.Debugf("Failed to locate pv allocation mapping for missing pod.")
  1889. }
  1890. // Delete the current pod key from potentially unmounted pvs
  1891. delete(unmountedPVs, podKey)
  1892. // For network costs, we'll use existing map since it should still contain the
  1893. // correct data.
  1894. var podNetworkCosts []*util.Vector
  1895. if usage, ok := networkUsageMap[podKey]; ok {
  1896. netCosts, err := GetNetworkCost(usage, cp)
  1897. if err != nil {
  1898. log.Errorf("Error pulling network costs: %s", err.Error())
  1899. } else {
  1900. podNetworkCosts = netCosts
  1901. }
  1902. }
1903. // Check to see if any other data has been recorded for this namespace, pod, clusterId.
1904. // Follow the pattern of only allowing one set of claims data per pod.
  1905. if !otherClusterPVRecorded[podKey] {
  1906. otherClusterPVRecorded[podKey] = true
  1907. podPVs = podPVData
  1908. podNetCosts = podNetworkCosts
  1909. }
  1910. pds := []string{}
  1911. if ds, ok := podDaemonsets[podKey]; ok {
  1912. pds = []string{ds}
  1913. }
  1914. jobs := []string{}
  1915. if job, ok := podJobs[podKey]; ok {
  1916. jobs = []string{job}
  1917. }
  1918. costs := &CostData{
  1919. Name: c.ContainerName,
  1920. PodName: c.PodName,
  1921. NodeName: c.NodeName,
  1922. NodeData: node,
  1923. Namespace: c.Namespace,
  1924. Services: podServices,
  1925. Deployments: podDeployments,
  1926. Daemonsets: pds,
  1927. Statefulsets: podStatefulSets,
  1928. Jobs: jobs,
  1929. RAMReq: RAMReqV,
  1930. RAMUsed: RAMUsedV,
  1931. CPUReq: CPUReqV,
  1932. CPUUsed: CPUUsedV,
  1933. RAMAllocation: RAMAllocsV,
  1934. CPUAllocation: CPUAllocsV,
  1935. GPUReq: GPUReqV,
  1936. Annotations: pAnnotations,
  1937. Labels: pLabels,
  1938. NamespaceLabels: namespaceLabels,
  1939. PVCData: podPVs,
  1940. NetworkData: podNetCosts,
  1941. ClusterID: c.ClusterID,
  1942. ClusterName: cm.ClusterMap.NameFor(c.ClusterID),
  1943. }
  1944. if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
  1945. containerNameCost[key] = costs
  1946. missingContainers[key] = costs
  1947. }
  1948. }
  1949. unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
  1950. for k, costs := range unmounted {
  1951. log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
  1952. if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
  1953. containerNameCost[k] = costs
  1954. }
  1955. }
  1956. if window.Minutes() > 0 {
  1957. dur, off := window.DurationOffsetStrings()
  1958. err = findDeletedNodeInfo(cli, missingNodes, dur, off)
  1959. if err != nil {
  1960. log.Errorf("Error fetching historical node data: %s", err.Error())
  1961. }
  1962. }
  1963. return containerNameCost, nil
  1964. }
  1965. func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestMap map[string][]*util.Vector) {
  1966. // The result of the normalize operation will be a new []*util.Vector to replace the requests
  1967. normalizeOp := func(r *util.Vector, x *float64, y *float64) bool {
  1968. // Omit data (return false) if both x and y inputs don't exist
  1969. if x == nil || y == nil {
  1970. return false
  1971. }
  1972. // If the allocation value is 0, 0 out request value
  1973. if *x == 0 {
  1974. r.Value = 0
  1975. } else {
  1976. r.Value = *y
  1977. }
  1978. return true
  1979. }
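// For example (assuming the first operand is the allocation and the second the request, as
// wired below): allocation 0 with request 0.5 yields 0, allocation 2 with request 0.5 keeps 0.5,
// and timestamps missing from either series are dropped.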
  1980. // Run normalization on all request vectors in the mapping
  1981. for k, requests := range requestMap {
  1982. // Only run normalization where there are valid allocations
  1983. allocations, ok := allocationMap[k]
  1984. if !ok {
  1985. delete(requestMap, k)
  1986. continue
  1987. }
  1988. // Replace request map with normalized
  1989. requestMap[k] = util.ApplyVectorOp(allocations, requests, normalizeOp)
  1990. }
  1991. }
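// addMetricPVData attaches a PV price to every PVC allocation entry, looking the volume up in
// pvCostMap by "volumeName,clusterID" and falling back to the provider config's default storage
// price when no per-volume cost is found.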
  1992. func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
  1993. cfg, err := cp.GetConfig()
  1994. if err != nil {
  1995. log.Errorf("Failed to get provider config while adding pv metrics data.")
  1996. return
  1997. }
  1998. for _, pvcDataArray := range pvAllocationMap {
  1999. for _, pvcData := range pvcDataArray {
  2000. costKey := fmt.Sprintf("%s,%s", pvcData.VolumeName, pvcData.ClusterID)
  2001. pvCost, ok := pvCostMap[costKey]
  2002. if !ok {
  2003. pvcData.Volume = &costAnalyzerCloud.PV{
  2004. Cost: cfg.Storage,
  2005. }
  2006. continue
  2007. }
  2008. pvcData.Volume = pvCost
  2009. }
  2010. }
  2011. }
  2012. // Add values that don't already exist in origMap from mergeMap into origMap
  2013. func mergeStringMap(origMap map[string]map[string]string, mergeMap map[string]map[string]string) {
  2014. for k, v := range mergeMap {
  2015. if _, ok := origMap[k]; !ok {
  2016. origMap[k] = v
  2017. }
  2018. }
  2019. }
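// appendLabelsList copies the metric-derived mappings into mainLabels; note that an existing
// entry for the same "namespace,clusterID" key is overwritten wholesale rather than merged.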
  2020. func appendLabelsList(mainLabels map[string]map[string][]string, labels map[string]map[string][]string) {
  2021. for k, v := range labels {
  2022. mainLabels[k] = v
  2023. }
  2024. }
  2025. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  2026. nsToLabels := make(map[string]map[string]string)
  2027. nss := cache.GetAllNamespaces()
  2028. for _, ns := range nss {
  2029. labels := make(map[string]string)
  2030. for k, v := range ns.Labels {
  2031. labels[promutil.SanitizeLabelName(k)] = v
  2032. }
  2033. nsToLabels[ns.Name+","+clusterID] = labels
  2034. }
  2035. return nsToLabels, nil
  2036. }
  2037. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  2038. nsToAnnotations := make(map[string]map[string]string)
  2039. nss := cache.GetAllNamespaces()
  2040. for _, ns := range nss {
  2041. annotations := make(map[string]string)
  2042. for k, v := range ns.Annotations {
  2043. annotations[promutil.SanitizeLabelName(k)] = v
  2044. }
  2045. nsToAnnotations[ns.Name+","+clusterID] = annotations
  2046. }
  2047. return nsToAnnotations, nil
  2048. }
  2049. func getDaemonsetsOfPod(pod v1.Pod) []string {
  2050. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  2051. if ownerReference.Kind == "DaemonSet" {
  2052. return []string{ownerReference.Name}
  2053. }
  2054. }
  2055. return []string{}
  2056. }
  2057. func getJobsOfPod(pod v1.Pod) []string {
  2058. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  2059. if ownerReference.Kind == "Job" {
  2060. return []string{ownerReference.Name}
  2061. }
  2062. }
  2063. return []string{}
  2064. }
  2065. func getStatefulSetsOfPod(pod v1.Pod) []string {
  2066. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  2067. if ownerReference.Kind == "StatefulSet" {
  2068. return []string{ownerReference.Name}
  2069. }
  2070. }
  2071. return []string{}
  2072. }
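// getAllocatableVGPUs scans DaemonSet container args for a "--vgpu=" flag (as exposed by some
// GPU-sharing device plugins) and returns the first value it can parse, or 0 if none is found.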
  2073. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  2074. daemonsets := cache.GetAllDaemonSets()
  2075. vgpuCount := 0.0
  2076. for _, ds := range daemonsets {
  2077. dsContainerList := &ds.Spec.Template.Spec.Containers
  2078. for _, ctnr := range *dsContainerList {
  2079. if ctnr.Args != nil {
  2080. for _, arg := range ctnr.Args {
  2081. if strings.Contains(arg, "--vgpu=") {
  2082. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  2083. if err != nil {
  2084. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  2085. continue
  2086. }
  2087. vgpuCount = vgpus
  2088. return vgpuCount, nil
  2089. }
  2090. }
  2091. }
  2092. }
  2093. }
  2094. return vgpuCount, nil
  2095. }
  2096. type PersistentVolumeClaimData struct {
  2097. Class string `json:"class"`
  2098. Claim string `json:"claim"`
  2099. Namespace string `json:"namespace"`
  2100. ClusterID string `json:"clusterId"`
  2101. TimesClaimed int `json:"timesClaimed"`
  2102. VolumeName string `json:"volumeName"`
  2103. Volume *costAnalyzerCloud.PV `json:"persistentVolume"`
  2104. Values []*util.Vector `json:"values"`
  2105. }
  2106. func measureTime(start time.Time, threshold time.Duration, name string) {
  2107. elapsed := time.Since(start)
  2108. if elapsed > threshold {
  2109. log.Infof("[Profiler] %s: %s", elapsed, name)
  2110. }
  2111. }
  2112. func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch chan string) {
  2113. elapsed := time.Since(start)
  2114. if elapsed > threshold {
  2115. ch <- fmt.Sprintf("%s took %s", name, time.Since(start))
  2116. }
  2117. }
  2118. func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption) (*opencost.AllocationSetRange, error) {
  2119. // Validate window is legal
  2120. if window.IsOpen() || window.IsNegative() {
  2121. return nil, fmt.Errorf("illegal window: %s", window)
  2122. }
  2123. var totalsStore opencost.TotalsStore
  2124. // Idle is required for proportional asset costs
  2125. if includeProportionalAssetResourceCosts {
  2126. if !includeIdle {
  2127. return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
  2128. }
  2129. totalsStore = opencost.NewMemoryTotalsStore()
  2130. }
  2131. // Begin with empty response
  2132. asr := opencost.NewAllocationSetRange()
  2133. // Query for AllocationSets in increments of the given step duration,
  2134. // appending each to the response.
  2135. stepStart := *window.Start()
  2136. stepEnd := stepStart.Add(step)
  2137. var isAKS bool
  2138. for window.End().After(stepStart) {
  2139. allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
  2140. if err != nil {
  2141. return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  2142. }
  2143. if includeIdle {
  2144. assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
  2145. if err != nil {
  2146. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  2147. }
  2148. if includeProportionalAssetResourceCosts {
2149. // AKS is a special case - there can be a maximum of 2
2150. // load balancers (1 public and 1 private) in an AKS cluster;
2151. // therefore, when calculating PARCs for load balancers,
2152. // we must know if this is an AKS cluster.
  2153. for _, node := range assetSet.Nodes {
  2154. if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
  2155. isAKS = true
  2156. break
  2157. }
  2158. }
  2159. _, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  2160. if err != nil {
  2161. log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
  2162. }
  2163. }
  2164. idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
  2165. if err != nil {
  2166. return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  2167. }
  2168. for _, idleAlloc := range idleSet.Allocations {
  2169. allocSet.Insert(idleAlloc)
  2170. }
  2171. }
  2172. asr.Append(allocSet)
  2173. stepStart = stepEnd
  2174. stepEnd = stepStart.Add(step)
  2175. }
  2176. // Set aggregation options and aggregate
  2177. opts := &opencost.AllocationAggregationOptions{
  2178. IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
  2179. IdleByNode: idleByNode,
  2180. IncludeAggregatedMetadata: includeAggregatedMetadata,
  2181. }
  2182. // Aggregate
  2183. err := asr.AggregateBy(aggregate, opts)
  2184. if err != nil {
  2185. return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
  2186. }
  2187. // Accumulate, if requested
  2188. if accumulateBy != opencost.AccumulateOptionNone {
  2189. asr, err = asr.Accumulate(accumulateBy)
  2190. if err != nil {
  2191. log.Errorf("error accumulating by %v: %s", accumulateBy, err)
  2192. return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
  2193. }
  2194. // when accumulating and returning PARCs, we need the totals for the
  2195. // accumulated windows to accurately compute a fraction
  2196. if includeProportionalAssetResourceCosts {
  2197. assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
  2198. if err != nil {
  2199. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  2200. }
  2201. _, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  2202. if err != nil {
  2203. log.Errorf("ETL: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  2204. }
  2205. }
  2206. }
  2207. if includeProportionalAssetResourceCosts {
  2208. for _, as := range asr.Allocations {
  2209. totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
  2210. if !ok {
  2211. log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  2212. return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  2213. }
  2214. totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
  2215. if !ok {
  2216. log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  2217. return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  2218. }
  2219. var totalPublicLbCost, totalPrivateLbCost float64
  2220. if isAKS && sharedLoadBalancer {
  2221. // loop through all assetTotals, adding all load balancer costs by public and private
  2222. for _, tot := range totalStoreByNode {
  2223. if tot.PrivateLoadBalancer {
  2224. totalPrivateLbCost += tot.LoadBalancerCost
  2225. } else {
  2226. totalPublicLbCost += tot.LoadBalancerCost
  2227. }
  2228. }
  2229. }
  2230. // loop through each allocation set, using total cost from totals store
  2231. for _, alloc := range as.Allocations {
  2232. for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
  2233. key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
2234. // for each PARC, check the totals store;
2235. // on a hit, set the corresponding totals and calculate the percentages
  2236. var totals *opencost.AssetTotals
  2237. if totalsLoc, found := totalStoreByCluster[key]; found {
  2238. totals = totalsLoc
  2239. }
  2240. if totalsLoc, found := totalStoreByNode[key]; found {
  2241. totals = totalsLoc
  2242. }
  2243. if totals == nil {
  2244. log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
  2245. continue
  2246. }
  2247. parc.CPUTotalCost = totals.CPUCost
  2248. parc.GPUTotalCost = totals.GPUCost
  2249. parc.RAMTotalCost = totals.RAMCost
  2250. parc.PVTotalCost = totals.PersistentVolumeCost
  2251. if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
  2252. // Azure is a special case - use computed totals above
  2253. // use the lbAllocations in the object to determine if
  2254. // this PARC is a public or private load balancer
  2255. // then set the total accordingly
  2256. // AKS only has 1 public and 1 private load balancer
  2257. lbAlloc, found := alloc.LoadBalancers[key]
  2258. if found {
  2259. if lbAlloc.Private {
  2260. parc.LoadBalancerTotalCost = totalPrivateLbCost
  2261. } else {
  2262. parc.LoadBalancerTotalCost = totalPublicLbCost
  2263. }
  2264. }
  2265. } else {
  2266. parc.LoadBalancerTotalCost = totals.LoadBalancerCost
  2267. }
  2268. opencost.ComputePercentages(&parc)
  2269. alloc.ProportionalAssetResourceCosts[rawKey] = parc
  2270. }
  2271. }
  2272. }
  2273. }
  2274. return asr, nil
  2275. }
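// computeIdleAllocations derives one idle Allocation per node (or per cluster, when idleByNode is
// false) over the shared window, costed as the asset's total CPU/GPU/RAM cost minus the cost
// already attributed to allocations on that key.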
  2276. func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
  2277. if !allocSet.Window.Equal(assetSet.Window) {
  2278. return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
  2279. }
  2280. var allocTotals map[string]*opencost.AllocationTotals
  2281. var assetTotals map[string]*opencost.AssetTotals
  2282. if idleByNode {
  2283. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
  2284. assetTotals = opencost.ComputeAssetTotals(assetSet, true)
  2285. } else {
  2286. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
  2287. assetTotals = opencost.ComputeAssetTotals(assetSet, false)
  2288. }
  2289. start, end := *allocSet.Window.Start(), *allocSet.Window.End()
  2290. idleSet := opencost.NewAllocationSet(start, end)
  2291. for key, assetTotal := range assetTotals {
  2292. allocTotal, ok := allocTotals[key]
  2293. if !ok {
  2294. log.Warnf("ETL: did not find allocations for asset key: %s", key)
  2295. // Use a zero-value set of totals. This indicates either (1) an
  2296. // error computing totals, or (2) that no allocations ran on the
  2297. // given node for the given window.
  2298. allocTotal = &opencost.AllocationTotals{
  2299. Cluster: assetTotal.Cluster,
  2300. Node: assetTotal.Node,
  2301. Start: assetTotal.Start,
  2302. End: assetTotal.End,
  2303. }
  2304. }
  2305. // Insert one idle allocation for each key (whether by node or
  2306. // by cluster), defined as the difference between the total
  2307. // asset cost and the allocated cost per-resource.
  2308. name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
  2309. err := idleSet.Insert(&opencost.Allocation{
  2310. Name: name,
  2311. Window: idleSet.Window.Clone(),
  2312. Properties: &opencost.AllocationProperties{
  2313. Cluster: assetTotal.Cluster,
  2314. Node: assetTotal.Node,
  2315. ProviderID: assetTotal.Node,
  2316. },
  2317. Start: assetTotal.Start,
  2318. End: assetTotal.End,
  2319. CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
  2320. GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
  2321. RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
  2322. })
  2323. if err != nil {
  2324. return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
  2325. }
  2326. }
  2327. return idleSet, nil
  2328. }