costmodel.go

  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "math"
  6. "regexp"
  7. "strconv"
  8. "strings"
  9. "time"
  10. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  11. "github.com/opencost/opencost/pkg/clustercache"
  12. "github.com/opencost/opencost/pkg/costmodel/clusters"
  13. "github.com/opencost/opencost/pkg/env"
  14. "github.com/opencost/opencost/pkg/kubecost"
  15. "github.com/opencost/opencost/pkg/log"
  16. "github.com/opencost/opencost/pkg/prom"
  17. "github.com/opencost/opencost/pkg/util"
  18. prometheus "github.com/prometheus/client_golang/api"
  19. prometheusClient "github.com/prometheus/client_golang/api"
  20. v1 "k8s.io/api/core/v1"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/labels"
  23. "golang.org/x/sync/singleflight"
  24. )
  25. const (
  26. statusAPIError = 422
  27. profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)
  28. apiPrefix = "/api/v1"
  29. epAlertManagers = apiPrefix + "/alertmanagers"
  30. epLabelValues = apiPrefix + "/label/:name/values"
  31. epSeries = apiPrefix + "/series"
  32. epTargets = apiPrefix + "/targets"
  33. epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
  34. epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
  35. epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
  36. epConfig = apiPrefix + "/status/config"
  37. epFlags = apiPrefix + "/status/flags"
  38. )
  39. // isCron matches the name of a Job created by a CronJob and captures the non-timestamp portion of the name.
  40. //
  41. // We support either a 10-character timestamp OR an 8-character timestamp,
  42. // because batch/v1beta1 CronJobs create Jobs with 10-character timestamps
  43. // and batch/v1 CronJobs create Jobs with 8-character timestamps.
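// For example (illustrative names): "report-27736892" -> "report" and
// "report-1586400000" -> "report", while "report" itself does not match.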
  44. var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
  45. type CostModel struct {
  46. Cache clustercache.ClusterCache
  47. ClusterMap clusters.ClusterMap
  48. MaxPrometheusQueryDuration time.Duration
  49. RequestGroup *singleflight.Group
  50. ScrapeInterval time.Duration
  51. PrometheusClient prometheus.Client
  52. Provider costAnalyzerCloud.Provider
  53. pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
  54. }
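// NewCostModel constructs a CostModel wired to a Prometheus client, a cloud
// provider implementation, and a cluster cache. A minimal usage sketch, with
// promClient, provider, cache, and clusterMap assumed to be constructed
// elsewhere (they are not defined here):
//
//	cm := NewCostModel(promClient, provider, cache, clusterMap, time.Minute)
//	costs, err := cm.ComputeCostData(promClient, provider, "24h", "", "")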
  55. func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
  56. // request grouping to prevent over-requesting the same data prior to caching
  57. requestGroup := new(singleflight.Group)
  58. return &CostModel{
  59. Cache: cache,
  60. ClusterMap: clusterMap,
  61. MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
  62. PrometheusClient: client,
  63. Provider: provider,
  64. RequestGroup: requestGroup,
  65. ScrapeInterval: scrapeInterval,
  66. }
  67. }
  68. type CostData struct {
  69. Name string `json:"name,omitempty"`
  70. PodName string `json:"podName,omitempty"`
  71. NodeName string `json:"nodeName,omitempty"`
  72. NodeData *costAnalyzerCloud.Node `json:"node,omitempty"`
  73. Namespace string `json:"namespace,omitempty"`
  74. Deployments []string `json:"deployments,omitempty"`
  75. Services []string `json:"services,omitempty"`
  76. Daemonsets []string `json:"daemonsets,omitempty"`
  77. Statefulsets []string `json:"statefulsets,omitempty"`
  78. Jobs []string `json:"jobs,omitempty"`
  79. RAMReq []*util.Vector `json:"ramreq,omitempty"`
  80. RAMUsed []*util.Vector `json:"ramused,omitempty"`
  81. RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
  82. CPUReq []*util.Vector `json:"cpureq,omitempty"`
  83. CPUUsed []*util.Vector `json:"cpuused,omitempty"`
  84. CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
  85. GPUReq []*util.Vector `json:"gpureq,omitempty"`
  86. PVCData []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
  87. NetworkData []*util.Vector `json:"network,omitempty"`
  88. Annotations map[string]string `json:"annotations,omitempty"`
  89. Labels map[string]string `json:"labels,omitempty"`
  90. NamespaceLabels map[string]string `json:"namespaceLabels,omitempty"`
  91. ClusterID string `json:"clusterId"`
  92. ClusterName string `json:"clusterName"`
  93. }
  94. func (cd *CostData) String() string {
  95. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  96. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  97. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  98. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  99. }
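// GetController returns the name and kind of the pod's controller, preferring
// deployments, then statefulsets, daemonsets, and finally jobs. Job names created
// by CronJobs have their trailing timestamp stripped via isCron, so (for example)
// a job named "report-27736892" is reported as controller ("report", "job", true).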
  100. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  101. hasController = false
  102. if len(cd.Deployments) > 0 {
  103. name = cd.Deployments[0]
  104. kind = "deployment"
  105. hasController = true
  106. } else if len(cd.Statefulsets) > 0 {
  107. name = cd.Statefulsets[0]
  108. kind = "statefulset"
  109. hasController = true
  110. } else if len(cd.Daemonsets) > 0 {
  111. name = cd.Daemonsets[0]
  112. kind = "daemonset"
  113. hasController = true
  114. } else if len(cd.Jobs) > 0 {
  115. name = cd.Jobs[0]
  116. kind = "job"
  117. hasController = true
  118. match := isCron.FindStringSubmatch(name)
  119. if match != nil {
  120. name = match[1]
  121. }
  122. }
  123. return name, kind, hasController
  124. }
  125. const (
  126. queryRAMRequestsStr = `avg(
  127. label_replace(
  128. label_replace(
  129. avg(
  130. sum_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!="", %s}[%s] %s)
  131. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  132. ), "pod_name","$1","pod","(.+)"
  133. )
  134. ) by (namespace,container_name,pod_name,node,%s)`
  135. queryRAMUsageStr = `avg(
  136. label_replace(
  137. label_replace(
  138. label_replace(
  139. sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
  140. ), "container_name", "$1", "container", "(.+)"
  141. ), "pod_name", "$1", "pod", "(.+)"
  142. )
  143. ) by (namespace, container_name, pod_name, node, %s)`
  144. queryCPURequestsStr = `avg(
  145. label_replace(
  146. label_replace(
  147. avg(
  148. sum_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!="", %s}[%s] %s)
  149. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  150. ), "pod_name","$1","pod","(.+)"
  151. )
  152. ) by (namespace,container_name,pod_name,node,%s)`
  153. queryCPUUsageStr = `avg(
  154. label_replace(
  155. label_replace(
  156. label_replace(
  157. rate(
  158. container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
  159. ), "node", "$1", "instance", "(.+)"
  160. ), "container_name", "$1", "container", "(.+)"
  161. ), "pod_name", "$1", "pod", "(.+)"
  162. )
  163. ) by (namespace, container_name, pod_name, node, %s)`
  164. queryGPURequestsStr = `avg(
  165. label_replace(
  166. label_replace(
  167. avg(
  168. sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s)
  169. * %f
  170. ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
  171. ), "pod_name","$1","pod","(.+)"
  172. )
  173. ) by (namespace,container_name,pod_name,node,%s)
  174. * on (pod_name, namespace, %s) group_left(container) label_replace(avg(avg_over_time(kube_pod_status_phase{phase="Running", %s}[%s] %s)) by (pod,namespace,%s), "pod_name","$1","pod","(.+)")`
  175. queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
  176. *
  177. on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
  178. sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`
  179. // queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
  180. // window, aggregated by container.
  181. // [line 3] sum_over_time(each byte) = [byte*scrape] by metric
  182. // [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
  183. // [lines 2,4] sum(") by unique container key and multiply [byte*scrape] * [hours/scrape] for byte*hours
  184. // [lines 1,5] relabeling
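// Illustrative arithmetic (assuming a 60s scrape interval and a 1h window): a
// container holding a steady 2GiB allocation produces ~60 samples, so
// sum_over_time = 60 * 2GiB = 120 GiB*scrapes; multiplying by the
// [hours/scrape] factor 60/3600 = 1/60 yields 2 GiB*hours, as expected.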
  185. queryRAMAllocationByteHours = `
  186. label_replace(label_replace(
  187. sum(
  188. sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!="", %s}[%s])
  189. ) by (namespace,container,pod,node,%s) * %f / 60 / 60
  190. , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
  191. // queryCPUAllocationVCPUHours yields the total VCPU-hour CPU allocation over the given
  192. // window, aggregated by container.
  193. // [line 3] sum_over_time(each VCPU*mins in window) = [VCPU*scrape] by metric
  194. // [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
  195. // [lines 2,4] sum(") by unique container key and multiply [VCPU*scrape] * [hours/scrape] for VCPU*hours
  196. // [lines 1,5] relabeling
  197. queryCPUAllocationVCPUHours = `
  198. label_replace(label_replace(
  199. sum(
  200. sum_over_time(container_cpu_allocation{container!="",container!="POD", node!="", %s}[%s])
  201. ) by (namespace,container,pod,node,%s) * %f / 60 / 60
  202. , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
  203. // queryPVCAllocationFmt yields the total byte-hour PVC allocation over the given window.
  204. // sum_over_time(each byte) = [byte*scrape] by metric *(scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by pod
  205. queryPVCAllocationFmt = `sum(sum_over_time(pod_pvc_allocation{%s}[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
  206. queryPVHourlyCostFmt = `avg_over_time(pv_hourly_cost{%s}[%s])`
  207. queryNSLabels = `avg_over_time(kube_namespace_labels{%s}[%s])`
  208. queryPodLabels = `avg_over_time(kube_pod_labels{%s}[%s])`
  209. queryNSAnnotations = `avg_over_time(kube_namespace_annotations{%s}[%s])`
  210. queryPodAnnotations = `avg_over_time(kube_pod_annotations{%s}[%s])`
  211. queryDeploymentLabels = `avg_over_time(deployment_match_labels{%s}[%s])`
  212. queryStatefulsetLabels = `avg_over_time(statefulSet_match_labels{%s}[%s])`
  213. queryPodDaemonsets = `sum(kube_pod_owner{owner_kind="DaemonSet", %s}) by (namespace,pod,owner_name,%s)`
  214. queryPodJobs = `sum(kube_pod_owner{owner_kind="Job", %s}) by (namespace,pod,owner_name,%s)`
  215. queryServiceLabels = `avg_over_time(service_selector_labels{%s}[%s])`
  216. queryZoneNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  217. queryRegionNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  218. queryInternetNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
  219. normalizationStr = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
  220. )
  221. func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
  222. queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  223. queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
  224. queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  225. queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  226. queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
  227. queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)
  228. // Cluster ID is specific to the source cluster
  229. clusterID := env.GetClusterID()
  230. // Submit all Prometheus queries asynchronously
  231. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  232. resChRAMUsage := ctx.Query(queryRAMUsage)
  233. resChCPUUsage := ctx.Query(queryCPUUsage)
  234. resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
  235. resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
  236. resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
  237. resChNormalization := ctx.Query(queryNormalization)
  238. // Pull pod information from k8s API
  239. podlist := cm.Cache.GetAllPods()
  240. podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
  241. if err != nil {
  242. return nil, err
  243. }
  244. podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
  245. if err != nil {
  246. return nil, err
  247. }
  248. namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
  249. if err != nil {
  250. return nil, err
  251. }
  252. namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
  253. if err != nil {
  254. return nil, err
  255. }
  256. // Process Prometheus query results. Handle errors using ctx.Errors.
  257. resRAMUsage, _ := resChRAMUsage.Await()
  258. resCPUUsage, _ := resChCPUUsage.Await()
  259. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  260. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  261. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  262. resNormalization, _ := resChNormalization.Await()
  263. // NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
  264. // NOTE: will not propagate unless coupled with errors.
  265. if ctx.HasErrors() {
  266. // To keep the context of where the errors occurred, we log the errors here and pass the error
  267. // back to the caller. The caller should handle the specific case where the error is an ErrorCollection.
  268. for _, promErr := range ctx.Errors() {
  269. if promErr.Error != nil {
  270. log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
  271. }
  272. if promErr.ParseError != nil {
  273. log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
  274. }
  275. }
  276. // ErrorCollection is a collection of errors wrapped in a single error implementation.
  277. // We opt to not return an error for the sake of running as a pure exporter.
  278. log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
  279. }
  280. defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
  281. normalizationValue, err := getNormalization(resNormalization)
  282. if err != nil {
  283. // We opt to not return an error for the sake of running as a pure exporter.
  284. log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
  285. }
  286. // Determine if there are vgpus configured and if so get the total allocatable number
  287. // If no vGPUs are allocatable, the default coefficient below is used.
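// For example, with the default coefficient of 10, a container requesting
// 5 "k8s.amazonaws.com/vgpu" further below is treated as 0.5 physical GPUs.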
  288. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  289. if err != nil {
  290. log.Warnf("getAllocatableVGCPUs error: %s", err.Error())
  291. }
  292. vgpuCoeff := 10.0
  293. if vgpuCount > 0.0 {
  294. vgpuCoeff = vgpuCount
  295. }
  296. nodes, err := cm.GetNodeCost(cp)
  297. if err != nil {
  298. log.Warnf("GetNodeCost: no node cost model available: " + err.Error())
  299. return nil, err
  300. }
  301. // Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
  302. unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
  303. pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
  304. if err != nil {
  305. log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
  306. }
  307. if pvClaimMapping != nil {
  308. err = addPVData(cm.Cache, pvClaimMapping, cp)
  309. if err != nil {
  310. return nil, err
  311. }
  312. // copy claim mappings into the unmounted set; entries are removed below as their mounts are discovered
  313. for k, v := range pvClaimMapping {
  314. unmountedPVs[k] = []*PersistentVolumeClaimData{v}
  315. }
  316. }
  317. networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
  318. if err != nil {
  319. log.Warnf("Unable to get Network Cost Data: %s", err.Error())
  320. networkUsageMap = make(map[string]*NetworkUsageData)
  321. }
  322. containerNameCost := make(map[string]*CostData)
  323. containers := make(map[string]bool)
  324. RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, true, normalizationValue, clusterID)
  325. if err != nil {
  326. return nil, err
  327. }
  328. for key := range RAMUsedMap {
  329. containers[key] = true
  330. }
  331. CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, false, 0, clusterID) // No need to normalize here, as this comes from a counter
  332. if err != nil {
  333. return nil, err
  334. }
  335. for key := range CPUUsedMap {
  336. containers[key] = true
  337. }
  338. currentContainers := make(map[string]v1.Pod)
  339. for _, pod := range podlist {
  340. if pod.Status.Phase != v1.PodRunning {
  341. continue
  342. }
  343. cs, err := NewContainerMetricsFromPod(pod, clusterID)
  344. if err != nil {
  345. return nil, err
  346. }
  347. for _, c := range cs {
  348. containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
  349. currentContainers[c.Key()] = *pod
  350. }
  351. }
  352. missingNodes := make(map[string]*costAnalyzerCloud.Node)
  353. missingContainers := make(map[string]*CostData)
  354. for key := range containers {
  355. if _, ok := containerNameCost[key]; ok {
  356. continue // because ordering is important for the allocation model (all PVs are applied to the first container), just dedupe if it's already been added.
  357. }
  358. // The _else_ case for this statement is the case in which the container has been
  359. // deleted so we have usage information but not request information. In that case,
  360. // we return partial data for CPU and RAM: only usage and not requests.
  361. if pod, ok := currentContainers[key]; ok {
  362. podName := pod.GetObjectMeta().GetName()
  363. ns := pod.GetObjectMeta().GetNamespace()
  364. nsLabels := namespaceLabelsMapping[ns+","+clusterID]
  365. podLabels := pod.GetObjectMeta().GetLabels()
  366. if podLabels == nil {
  367. podLabels = make(map[string]string)
  368. }
  369. for k, v := range nsLabels {
  370. if _, ok := podLabels[k]; !ok {
  371. podLabels[k] = v
  372. }
  373. }
  374. nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  375. podAnnotations := pod.GetObjectMeta().GetAnnotations()
  376. if podAnnotations == nil {
  377. podAnnotations = make(map[string]string)
  378. }
  379. for k, v := range nsAnnotations {
  380. if _, ok := podAnnotations[k]; !ok {
  381. podAnnotations[k] = v
  382. }
  383. }
  384. nodeName := pod.Spec.NodeName
  385. var nodeData *costAnalyzerCloud.Node
  386. if _, ok := nodes[nodeName]; ok {
  387. nodeData = nodes[nodeName]
  388. }
  389. nsKey := ns + "," + clusterID
  390. var podDeployments []string
  391. if _, ok := podDeploymentsMapping[nsKey]; ok {
  392. if ds, ok := podDeploymentsMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  393. podDeployments = ds
  394. } else {
  395. podDeployments = []string{}
  396. }
  397. }
  398. var podPVs []*PersistentVolumeClaimData
  399. podClaims := pod.Spec.Volumes
  400. for _, vol := range podClaims {
  401. if vol.PersistentVolumeClaim != nil {
  402. name := vol.PersistentVolumeClaim.ClaimName
  403. key := ns + "," + name + "," + clusterID
  404. if pvClaim, ok := pvClaimMapping[key]; ok {
  405. pvClaim.TimesClaimed++
  406. podPVs = append(podPVs, pvClaim)
  407. // Remove entry from potential unmounted pvs
  408. delete(unmountedPVs, key)
  409. }
  410. }
  411. }
  412. var podNetCosts []*util.Vector
  413. if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
  414. netCosts, err := GetNetworkCost(usage, cp)
  415. if err != nil {
  416. log.Debugf("Error pulling network costs: %s", err.Error())
  417. } else {
  418. podNetCosts = netCosts
  419. }
  420. }
  421. var podServices []string
  422. if _, ok := podServicesMapping[nsKey]; ok {
  423. if svcs, ok := podServicesMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  424. podServices = svcs
  425. } else {
  426. podServices = []string{}
  427. }
  428. }
  429. for i, container := range pod.Spec.Containers {
  430. containerName := container.Name
  431. // recreate the key and look up data for this container
  432. newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
  433. // See k8s.io/apimachinery/pkg/api/resource/amount.go and
  434. // k8s.io/apimachinery/pkg/api/resource/quantity.go for
  435. // details on the "amount" API. See
  436. // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
  437. // for the units of memory and CPU.
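// For example, a memory request of "256Mi" yields Memory().Value() == 268435456
// bytes, and a CPU request of "500m" yields Cpu().MilliValue() == 500, i.e.
// 0.5 cores after the division below.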
  438. ramRequestBytes := container.Resources.Requests.Memory().Value()
  439. // Because information on container RAM & CPU requests isn't
  440. // coming from Prometheus, it won't have a timestamp associated
  441. // with it. We need to provide a timestamp.
  442. RAMReqV := []*util.Vector{
  443. {
  444. Value: float64(ramRequestBytes),
  445. Timestamp: float64(time.Now().UTC().Unix()),
  446. },
  447. }
  448. // use millicores so we can convert to cores in a float64 format
  449. cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
  450. CPUReqV := []*util.Vector{
  451. {
  452. Value: float64(cpuRequestMilliCores) / 1000,
  453. Timestamp: float64(time.Now().UTC().Unix()),
  454. },
  455. }
  456. gpuReqCount := 0.0
  457. if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
  458. gpuReqCount = g.AsApproximateFloat64()
  459. } else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
  460. gpuReqCount = g.AsApproximateFloat64()
  461. } else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
  462. // divide vgpu request/limits by total vgpus to get the portion of physical gpus requested
  463. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  464. } else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
  465. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  466. }
  467. GPUReqV := []*util.Vector{
  468. {
  469. Value: float64(gpuReqCount),
  470. Timestamp: float64(time.Now().UTC().Unix()),
  471. },
  472. }
  473. RAMUsedV, ok := RAMUsedMap[newKey]
  474. if !ok {
  475. log.Debug("no RAM usage for " + newKey)
  476. RAMUsedV = []*util.Vector{{}}
  477. }
  478. CPUUsedV, ok := CPUUsedMap[newKey]
  479. if !ok {
  480. log.Debug("no CPU usage for " + newKey)
  481. CPUUsedV = []*util.Vector{{}}
  482. }
  483. var pvReq []*PersistentVolumeClaimData
  484. var netReq []*util.Vector
  485. if i == 0 { // avoid duplicating by just assigning all claims to the first container.
  486. pvReq = podPVs
  487. netReq = podNetCosts
  488. }
  489. costs := &CostData{
  490. Name: containerName,
  491. PodName: podName,
  492. NodeName: nodeName,
  493. Namespace: ns,
  494. Deployments: podDeployments,
  495. Services: podServices,
  496. Daemonsets: getDaemonsetsOfPod(pod),
  497. Jobs: getJobsOfPod(pod),
  498. Statefulsets: getStatefulSetsOfPod(pod),
  499. NodeData: nodeData,
  500. RAMReq: RAMReqV,
  501. RAMUsed: RAMUsedV,
  502. CPUReq: CPUReqV,
  503. CPUUsed: CPUUsedV,
  504. GPUReq: GPUReqV,
  505. PVCData: pvReq,
  506. NetworkData: netReq,
  507. Annotations: podAnnotations,
  508. Labels: podLabels,
  509. NamespaceLabels: nsLabels,
  510. ClusterID: clusterID,
  511. ClusterName: cm.ClusterMap.NameFor(clusterID),
  512. }
  513. var cpuReq, cpuUse *util.Vector
  514. if len(costs.CPUReq) > 0 {
  515. cpuReq = costs.CPUReq[0]
  516. }
  517. if len(costs.CPUUsed) > 0 {
  518. cpuUse = costs.CPUUsed[0]
  519. }
  520. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  521. var ramReq, ramUse *util.Vector
  522. if len(costs.RAMReq) > 0 {
  523. ramReq = costs.RAMReq[0]
  524. }
  525. if len(costs.RAMUsed) > 0 {
  526. ramUse = costs.RAMUsed[0]
  527. }
  528. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  529. if filterNamespace == "" {
  530. containerNameCost[newKey] = costs
  531. } else if costs.Namespace == filterNamespace {
  532. containerNameCost[newKey] = costs
  533. }
  534. }
  535. } else {
  536. // The container has been deleted. Not all information is sent to Prometheus via KSM, so fill out what we can without the k8s API.
  537. log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
  538. c, err := NewContainerMetricFromKey(key)
  539. if err != nil {
  540. return nil, err
  541. }
  542. // CPU and RAM requests are obtained from the Kubernetes API.
  543. // If this case has been reached, the Kubernetes API will not
  544. // have information about the pod because it no longer exists.
  545. //
  546. // The case where this matters is minimal, mainly in environments
  547. // with very short-lived pods that over-request resources.
  548. RAMReqV := []*util.Vector{{}}
  549. CPUReqV := []*util.Vector{{}}
  550. GPUReqV := []*util.Vector{{}}
  551. RAMUsedV, ok := RAMUsedMap[key]
  552. if !ok {
  553. log.Debug("no RAM usage for " + key)
  554. RAMUsedV = []*util.Vector{{}}
  555. }
  556. CPUUsedV, ok := CPUUsedMap[key]
  557. if !ok {
  558. log.Debug("no CPU usage for " + key)
  559. CPUUsedV = []*util.Vector{{}}
  560. }
  561. node, ok := nodes[c.NodeName]
  562. if !ok {
  563. log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
  564. if n, ok := missingNodes[c.NodeName]; ok {
  565. node = n
  566. } else {
  567. node = &costAnalyzerCloud.Node{}
  568. missingNodes[c.NodeName] = node
  569. }
  570. }
  571. namespacelabels, _ := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
  572. namespaceAnnotations, _ := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
  573. costs := &CostData{
  574. Name: c.ContainerName,
  575. PodName: c.PodName,
  576. NodeName: c.NodeName,
  577. NodeData: node,
  578. Namespace: c.Namespace,
  579. RAMReq: RAMReqV,
  580. RAMUsed: RAMUsedV,
  581. CPUReq: CPUReqV,
  582. CPUUsed: CPUUsedV,
  583. GPUReq: GPUReqV,
  584. Annotations: namespaceAnnotations,
  585. NamespaceLabels: namespacelabels,
  586. ClusterID: c.ClusterID,
  587. ClusterName: cm.ClusterMap.NameFor(c.ClusterID),
  588. }
  589. var cpuReq, cpuUse *util.Vector
  590. if len(costs.CPUReq) > 0 {
  591. cpuReq = costs.CPUReq[0]
  592. }
  593. if len(costs.CPUUsed) > 0 {
  594. cpuUse = costs.CPUUsed[0]
  595. }
  596. costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
  597. var ramReq, ramUse *util.Vector
  598. if len(costs.RAMReq) > 0 {
  599. ramReq = costs.RAMReq[0]
  600. }
  601. if len(costs.RAMUsed) > 0 {
  602. ramUse = costs.RAMUsed[0]
  603. }
  604. costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
  605. if filterNamespace == "" {
  606. containerNameCost[key] = costs
  607. missingContainers[key] = costs
  608. } else if costs.Namespace == filterNamespace {
  609. containerNameCost[key] = costs
  610. missingContainers[key] = costs
  611. }
  612. }
  613. }
  614. // Use unmounted PVs to create synthetic "unmounted-pvs" cost entries per namespace
  615. // to pass along the cost data
  616. unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
  617. for k, costs := range unmounted {
  618. log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
  619. if filterNamespace == "" {
  620. containerNameCost[k] = costs
  621. } else if costs.Namespace == filterNamespace {
  622. containerNameCost[k] = costs
  623. }
  624. }
  625. err = findDeletedNodeInfo(cli, missingNodes, window, "")
  626. if err != nil {
  627. log.Errorf("Error fetching historical node data: %s", err.Error())
  628. }
  629. err = findDeletedPodInfo(cli, missingContainers, window)
  630. if err != nil {
  631. log.Errorf("Error fetching historical pod data: %s", err.Error())
  632. }
  633. return containerNameCost, err
  634. }
  635. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  636. costs := make(map[string]*CostData)
  637. if len(unmountedPVs) == 0 {
  638. return costs
  639. }
  640. for k, pv := range unmountedPVs {
  641. keyParts := strings.Split(k, ",")
  642. if len(keyParts) != 3 {
  643. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  644. continue
  645. }
  646. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  647. namespacelabels, _ := namespaceLabelsMapping[ns+","+clusterID]
  648. namespaceAnnotations, _ := namespaceAnnotationsMapping[ns+","+clusterID]
  649. // Use a synthetic "unmounted-pvs" name so these costs surface as their own CostData entry per namespace
  650. name := "unmounted-pvs"
  651. metric := NewContainerMetricFromValues(ns, name, name, "", clusterID)
  652. key := metric.Key()
  653. if costData, ok := costs[key]; !ok {
  654. costs[key] = &CostData{
  655. Name: name,
  656. PodName: name,
  657. NodeName: "",
  658. Annotations: namespaceAnnotations,
  659. Namespace: ns,
  660. NamespaceLabels: namespacelabels,
  661. Labels: namespacelabels,
  662. ClusterID: clusterID,
  663. ClusterName: clusterMap.NameFor(clusterID),
  664. PVCData: pv,
  665. }
  666. } else {
  667. costData.PVCData = append(costData.PVCData, pv...)
  668. }
  669. }
  670. return costs
  671. }
  672. func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
  673. if len(missingContainers) > 0 {
  674. queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{%s}[%s]`, env.GetPromClusterFilter(), window)
  675. podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
  676. if err != nil {
  677. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  678. }
  679. podLabels := make(map[string]map[string]string)
  680. if podLabelsResult != nil {
  681. podLabels, err = parsePodLabels(podLabelsResult)
  682. if err != nil {
  683. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  684. }
  685. }
  686. for key, costData := range missingContainers {
  687. cm, _ := NewContainerMetricFromKey(key)
  688. labels, ok := podLabels[cm.PodName]
  689. if !ok {
  690. labels = make(map[string]string)
  691. }
  692. for k, v := range costData.NamespaceLabels {
  693. labels[k] = v
  694. }
  695. costData.Labels = labels
  696. }
  697. }
  698. return nil
  699. }
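// findDeletedNodeInfo backfills cost fields (VCPUCost, RAMCost, GPUCost) for
// nodes that still appear in Prometheus metrics but are no longer present in
// the Kubernetes API, by averaging the node_*_hourly_cost metrics over the
// given window.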
  700. func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
  701. if len(missingNodes) > 0 {
  702. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  703. offsetStr := ""
  704. if offset != "" {
  705. offsetStr = fmt.Sprintf("offset %s", offset)
  706. }
  707. queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  708. queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  709. queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
  710. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  711. cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
  712. ramCostResCh := ctx.Query(queryHistoricalRAMCost)
  713. gpuCostResCh := ctx.Query(queryHistoricalGPUCost)
  714. cpuCostRes, _ := cpuCostResCh.Await()
  715. ramCostRes, _ := ramCostResCh.Await()
  716. gpuCostRes, _ := gpuCostResCh.Await()
  717. if ctx.HasErrors() {
  718. return ctx.ErrorCollection()
  719. }
  720. cpuCosts, err := getCost(cpuCostRes)
  721. if err != nil {
  722. return err
  723. }
  724. ramCosts, err := getCost(ramCostRes)
  725. if err != nil {
  726. return err
  727. }
  728. gpuCosts, err := getCost(gpuCostRes)
  729. if err != nil {
  730. return err
  731. }
  732. if len(cpuCosts) == 0 {
  733. log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  734. }
  735. for node, costv := range cpuCosts {
  736. if _, ok := missingNodes[node]; ok {
  737. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  738. } else {
  739. log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
  740. }
  741. }
  742. for node, costv := range ramCosts {
  743. if _, ok := missingNodes[node]; ok {
  744. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  745. }
  746. }
  747. for node, costv := range gpuCosts {
  748. if _, ok := missingNodes[node]; ok {
  749. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  750. }
  751. }
  752. }
  753. return nil
  754. }
  755. // getContainerAllocation takes the max between request and usage. This function
  756. // returns a slice containing a single element describing the container's
  757. // allocation.
  758. //
  759. // Additionally, the timestamp of the allocation will be the highest value
  760. // timestamp between the two vectors. This mitigates situations where
  761. // Timestamp=0. This should have no effect on the metrics emitted by the
  762. // CostModelMetricsEmitter
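// For example, a container requesting 0.5 CPU but using 0.75 CPU is allocated
// 0.75; one requesting 2GiB of RAM but using 512MiB is allocated 2GiB.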
  763. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  764. var result []*util.Vector
  765. if req != nil && used != nil {
  766. x1 := req.Value
  767. if math.IsNaN(x1) {
  768. log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
  769. x1 = 0.0
  770. }
  771. y1 := used.Value
  772. if math.IsNaN(y1) {
  773. log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
  774. y1 = 0.0
  775. }
  776. result = []*util.Vector{
  777. {
  778. Value: math.Max(x1, y1),
  779. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  780. },
  781. }
  782. if result[0].Value == 0 && result[0].Timestamp == 0 {
  783. log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  784. }
  785. } else if req != nil {
  786. result = []*util.Vector{
  787. {
  788. Value: req.Value,
  789. Timestamp: req.Timestamp,
  790. },
  791. }
  792. } else if used != nil {
  793. result = []*util.Vector{
  794. {
  795. Value: used.Value,
  796. Timestamp: used.Timestamp,
  797. },
  798. }
  799. } else {
  800. log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  801. result = []*util.Vector{
  802. {
  803. Value: 0,
  804. Timestamp: float64(time.Now().UTC().Unix()),
  805. },
  806. }
  807. }
  808. return result
  809. }
  810. func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
  811. cfg, err := cloud.GetConfig()
  812. if err != nil {
  813. return err
  814. }
  815. // Pull a region from the first node
  816. var defaultRegion string
  817. nodeList := cache.GetAllNodes()
  818. if len(nodeList) > 0 {
  819. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  820. }
  821. storageClasses := cache.GetAllStorageClasses()
  822. storageClassMap := make(map[string]map[string]string)
  823. for _, storageClass := range storageClasses {
  824. params := storageClass.Parameters
  825. storageClassMap[storageClass.ObjectMeta.Name] = params
  826. if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  827. storageClassMap["default"] = params
  828. storageClassMap[""] = params
  829. }
  830. }
  831. pvs := cache.GetAllPersistentVolumes()
  832. pvMap := make(map[string]*costAnalyzerCloud.PV)
  833. for _, pv := range pvs {
  834. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  835. if !ok {
  836. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  837. }
  838. var region string
  839. if r, ok := util.GetRegion(pv.Labels); ok {
  840. region = r
  841. } else {
  842. region = defaultRegion
  843. }
  844. cacPv := &costAnalyzerCloud.PV{
  845. Class: pv.Spec.StorageClassName,
  846. Region: region,
  847. Parameters: parameters,
  848. }
  849. err := GetPVCost(cacPv, pv, cloud, region)
  850. if err != nil {
  851. return err
  852. }
  853. pvMap[pv.Name] = cacPv
  854. }
  855. for _, pvc := range pvClaimMapping {
  856. if vol, ok := pvMap[pvc.VolumeName]; ok {
  857. pvc.Volume = vol
  858. } else {
  859. log.Debugf("PV not found, using default")
  860. pvc.Volume = &costAnalyzerCloud.PV{
  861. Cost: cfg.Storage,
  862. }
  863. }
  864. }
  865. return nil
  866. }
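// GetPVCost resolves a PV's hourly cost from the provider's pricing data,
// falling back to the configured default storage cost when pricing is
// unavailable or empty.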
  867. func GetPVCost(pv *costAnalyzerCloud.PV, kpv *v1.PersistentVolume, cp costAnalyzerCloud.Provider, defaultRegion string) error {
  868. cfg, err := cp.GetConfig()
  869. if err != nil {
  870. return err
  871. }
  872. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  873. pv.ProviderID = key.ID()
  874. pvWithCost, err := cp.PVPricing(key)
  875. if err != nil {
  876. pv.Cost = cfg.Storage
  877. return err
  878. }
  879. if pvWithCost == nil || pvWithCost.Cost == "" {
  880. pv.Cost = cfg.Storage
  881. return nil // set default cost
  882. }
  883. pv.Cost = pvWithCost.Cost
  884. return nil
  885. }
  886. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  887. if cm.pricingMetadata != nil {
  888. return cm.pricingMetadata, nil
  889. } else {
  890. return nil, fmt.Errorf("Node costs not yet calculated")
  891. }
  892. }
  893. func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.Node, error) {
  894. cfg, err := cp.GetConfig()
  895. if err != nil {
  896. return nil, err
  897. }
  898. nodeList := cm.Cache.GetAllNodes()
  899. nodes := make(map[string]*costAnalyzerCloud.Node)
  900. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  901. if err != nil {
  902. return nil, err
  903. }
  904. vgpuCoeff := 10.0
  905. if vgpuCount > 0.0 {
  906. vgpuCoeff = vgpuCount
  907. }
  908. pmd := &costAnalyzerCloud.PricingMatchMetadata{
  909. TotalNodes: 0,
  910. PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
  911. }
  912. for _, n := range nodeList {
  913. name := n.GetObjectMeta().GetName()
  914. nodeLabels := n.GetObjectMeta().GetLabels()
  915. nodeLabels["providerID"] = n.Spec.ProviderID
  916. pmd.TotalNodes++
  917. cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
  918. if err != nil {
  919. log.Infof("Error getting node pricing. Error: %s", err.Error())
  920. if cnode != nil {
  921. nodes[name] = cnode
  922. continue
  923. } else {
  924. cnode = &costAnalyzerCloud.Node{
  925. VCPUCost: cfg.CPU,
  926. RAMCost: cfg.RAM,
  927. }
  928. }
  929. }
  930. if _, ok := pmd.PricingTypeCounts[cnode.PricingType]; ok {
  931. pmd.PricingTypeCounts[cnode.PricingType]++
  932. } else {
  933. pmd.PricingTypeCounts[cnode.PricingType] = 1
  934. }
  935. newCnode := *cnode
  936. if newCnode.InstanceType == "" {
  937. it, _ := util.GetInstanceType(n.Labels)
  938. newCnode.InstanceType = it
  939. }
  940. if newCnode.Region == "" {
  941. region, _ := util.GetRegion(n.Labels)
  942. newCnode.Region = region
  943. }
  944. if newCnode.ArchType == "" {
  945. arch, _ := util.GetArchType(n.Labels)
  946. newCnode.ArchType = arch
  947. }
  948. newCnode.ProviderID = n.Spec.ProviderID
  949. var cpu float64
  950. if newCnode.VCPU == "" {
  951. cpu = float64(n.Status.Capacity.Cpu().Value())
  952. newCnode.VCPU = n.Status.Capacity.Cpu().String()
  953. } else {
  954. cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
  955. if err != nil {
  956. log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
  957. }
  958. }
  959. if math.IsNaN(cpu) {
  960. log.Warnf("cpu parsed as NaN. Setting to 0.")
  961. cpu = 0
  962. }
  963. var ram float64
  964. if newCnode.RAM == "" {
  965. newCnode.RAM = n.Status.Capacity.Memory().String()
  966. }
  967. ram = float64(n.Status.Capacity.Memory().Value())
  968. if math.IsNaN(ram) {
  969. log.Warnf("ram parsed as NaN. Setting to 0.")
  970. ram = 0
  971. }
  972. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  973. // Azure does not seem to provide a GPU count in its pricing API, and GKE supports attaching multiple GPUs,
  974. // so the k8s API often reports a more accurate GPU count under status > capacity > nvidia.com/gpu than the cloud provider's billing data does.
  975. // Not all providers are guaranteed to populate this, so don't overwrite the Provider's assignment if nothing is found under that capacity key.
  976. gpuc := 0.0
  977. q, ok := n.Status.Capacity["nvidia.com/gpu"]
  978. if ok {
  979. gpuCount := q.Value()
  980. if gpuCount != 0 {
  981. newCnode.GPU = fmt.Sprintf("%d", gpuCount)
  982. gpuc = float64(gpuCount)
  983. }
  984. } else if g, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
  985. gpuCount := g.Value()
  986. if gpuCount != 0 {
  987. newCnode.GPU = fmt.Sprintf("%d", int(float64(gpuCount)/vgpuCoeff))
  988. gpuc = float64(gpuCount) / vgpuCoeff
  989. }
  990. } else {
  991. gpuc, err = strconv.ParseFloat(newCnode.GPU, 64)
  992. if err != nil {
  993. gpuc = 0.0
  994. }
  995. }
  996. if math.IsNaN(gpuc) {
  997. log.Warnf("gpu count parsed as NaN. Setting to 0.")
  998. gpuc = 0.0
  999. }
  1000. if newCnode.GPU != "" && newCnode.GPUCost == "" {
  1001. // We couldn't find a GPU cost, so fix the CPU and RAM prices from defaults and derive the GPU price accordingly.
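// The node price is split by expressing everything in "RAM GB equivalents":
// each CPU counts as cpuToRAMRatio GBs and each GPU as gpuToRAMRatio GBs, so
// ramPrice = nodePrice / (gpu*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB).
// Illustrative numbers (not from any real price sheet): with ratios 10 and 100,
// a 4-CPU, 16GB, 1-GPU node priced at $1.56/hr gives ramPrice = 1.56/(100+40+16)
// = $0.01/GB-hr, cpuPrice = $0.10/hr, and gpuPrice = $1.00/hr.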
  1002. log.Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1003. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1004. if err != nil {
  1005. log.Errorf("Could not parse default cpu price")
  1006. defaultCPU = 0
  1007. }
  1008. if math.IsNaN(defaultCPU) {
  1009. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1010. defaultCPU = 0
  1011. }
  1012. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1013. if err != nil {
  1014. log.Errorf("Could not parse default ram price")
  1015. defaultRAM = 0
  1016. }
  1017. if math.IsNaN(defaultRAM) {
  1018. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1019. defaultRAM = 0
  1020. }
  1021. defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
  1022. if err != nil {
  1023. log.Errorf("Could not parse default gpu price")
  1024. defaultGPU = 0
  1025. }
  1026. if math.IsNaN(defaultGPU) {
  1027. log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
  1028. defaultGPU = 0
  1029. }
  1030. cpuToRAMRatio := defaultCPU / defaultRAM
  1031. if math.IsNaN(cpuToRAMRatio) {
  1032. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
  1033. cpuToRAMRatio = 10
  1034. }
  1035. gpuToRAMRatio := defaultGPU / defaultRAM
  1036. if math.IsNaN(gpuToRAMRatio) {
  1037. log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
  1038. gpuToRAMRatio = 100
  1039. }
  1040. ramGB := ram / 1024 / 1024 / 1024
  1041. if math.IsNaN(ramGB) {
  1042. log.Warnf("ramGB is NaN. Setting to 0.")
  1043. ramGB = 0
  1044. }
  1045. ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
  1046. if math.IsNaN(ramMultiple) {
  1047. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1048. ramMultiple = 0
  1049. }
  1050. var nodePrice float64
  1051. if newCnode.Cost != "" {
  1052. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1053. if err != nil {
  1054. log.Errorf("Could not parse total node price")
  1055. return nil, err
  1056. }
  1057. } else if newCnode.VCPUCost != "" {
  1058. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1059. if err != nil {
  1060. log.Errorf("Could not parse node vcpu price")
  1061. return nil, err
  1062. }
  1063. } else { // add case to use default pricing model when API data fails.
  1064. log.Debugf("No node price or CPUprice found, falling back to default")
  1065. nodePrice = defaultCPU*cpu + defaultRAM*ramGB + gpuc*defaultGPU // defaultRAM is priced per GB, so use ramGB rather than raw bytes
  1066. }
  1067. if math.IsNaN(nodePrice) {
  1068. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1069. nodePrice = 0
  1070. }
  1071. ramPrice := (nodePrice / ramMultiple)
  1072. if math.IsNaN(ramPrice) {
  1073. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1074. ramPrice = 0
  1075. }
  1076. cpuPrice := ramPrice * cpuToRAMRatio
  1077. gpuPrice := ramPrice * gpuToRAMRatio
  1078. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1079. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1080. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1081. newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
  1082. } else if newCnode.RAMCost == "" {
  1083. // We couldn't find a RAM cost, so fix the CPU price and allocate the RAM cost accordingly
  1084. log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1085. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1086. if err != nil {
  1087. log.Warnf("Could not parse default cpu price")
  1088. defaultCPU = 0
  1089. }
  1090. if math.IsNaN(defaultCPU) {
  1091. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1092. defaultCPU = 0
  1093. }
  1094. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1095. if err != nil {
  1096. log.Warnf("Could not parse default ram price")
  1097. defaultRAM = 0
  1098. }
  1099. if math.IsNaN(defaultRAM) {
  1100. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1101. defaultRAM = 0
  1102. }
  1103. cpuToRAMRatio := defaultCPU / defaultRAM
  1104. if math.IsNaN(cpuToRAMRatio) {
  1105. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
  1106. cpuToRAMRatio = 10
  1107. }
  1108. ramGB := ram / 1024 / 1024 / 1024
  1109. if math.IsNaN(ramGB) {
  1110. log.Warnf("ramGB is NaN. Setting to 0.")
  1111. ramGB = 0
  1112. }
  1113. ramMultiple := cpu*cpuToRAMRatio + ramGB
  1114. if math.IsNaN(ramMultiple) {
  1115. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1116. ramMultiple = 0
  1117. }
  1118. var nodePrice float64
  1119. if newCnode.Cost != "" {
  1120. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1121. if err != nil {
  1122. log.Warnf("Could not parse total node price")
  1123. return nil, err
  1124. }
  1125. if newCnode.GPUCost != "" {
  1126. gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
  1127. if err != nil {
  1128. log.Warnf("Could not parse node gpu price")
  1129. return nil, err
  1130. }
  1131. nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
  1132. }
  1133. } else if newCnode.VCPUCost != "" {
  1134. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1135. if err != nil {
  1136. log.Warnf("Could not parse node vcpu price")
  1137. return nil, err
  1138. }
  1139. } else { // add case to use default pricing model when API data fails.
  1140. log.Debugf("No node price or CPUprice found, falling back to default")
  1141. nodePrice = defaultCPU*cpu + defaultRAM*ramGB
  1142. }
  1143. if math.IsNaN(nodePrice) {
  1144. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1145. nodePrice = 0
  1146. }
  1147. ramPrice := (nodePrice / ramMultiple)
  1148. if math.IsNaN(ramPrice) {
  1149. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1150. ramPrice = 0
  1151. }
  1152. cpuPrice := ramPrice * cpuToRAMRatio
  1153. if defaultRAM != 0 {
  1154. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1155. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1156. } else { // just assign the full price to CPU
  1157. if cpu != 0 {
  1158. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
  1159. } else {
  1160. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
  1161. }
  1162. }
  1163. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1164. log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
  1165. }
  1166. nodes[name] = &newCnode
  1167. }
  1168. cm.pricingMetadata = pmd
  1169. cp.ApplyReservedInstancePricing(nodes)
  1170. return nodes, nil
  1171. }
// TODO: drop some logs
func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
	// for fetching prices from cloud provider
	// cfg, err := cp.GetConfig()
	// if err != nil {
	// 	return nil, err
	// }
	servicesList := cm.Cache.GetAllServices()
	loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
	for _, service := range servicesList {
		namespace := service.GetObjectMeta().GetNamespace()
		name := service.GetObjectMeta().GetName()
		key := serviceKey{
			Cluster:   env.GetClusterID(),
			Namespace: namespace,
			Service:   name,
		}
		if service.Spec.Type == "LoadBalancer" {
			loadBalancer, err := cp.LoadBalancerPricing()
			if err != nil {
				return nil, err
			}
			newLoadBalancer := *loadBalancer
			for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
				address := loadBalancerIngress.IP
				// Some cloud providers use hostname rather than IP
				if address == "" {
					address = loadBalancerIngress.Hostname
				}
				newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
			}
			loadBalancerMap[key] = &newLoadBalancer
		}
	}
	return loadBalancerMap, nil
}
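// getPodServices maps each pod to the services whose label selectors match it. The result is keyed
// first by "namespace,clusterID" and then by pod name; for example (names hypothetical),
// result["kube-system,cluster-one"]["coredns-abc12"] == []string{"kube-dns"}.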
func getPodServices(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	servicesList := cache.GetAllServices()
	podServicesMapping := make(map[string]map[string][]string)
	for _, service := range servicesList {
		namespace := service.GetObjectMeta().GetNamespace()
		name := service.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podServicesMapping[key]; !ok {
			podServicesMapping[key] = make(map[string][]string)
		}
		s := labels.Nothing()
		if service.Spec.Selector != nil && len(service.Spec.Selector) > 0 {
			s = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				services, ok := podServicesMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podServicesMapping[key][pod.GetObjectMeta().GetName()] = append(services, name)
				} else {
					podServicesMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podServicesMapping, nil
}
func getPodStatefulsets(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	ssList := cache.GetAllStatefulSets()
	podSSMapping := make(map[string]map[string][]string) // namespace: podName: [statefulsetNames]
	for _, ss := range ssList {
		namespace := ss.GetObjectMeta().GetNamespace()
		name := ss.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podSSMapping[key]; !ok {
			podSSMapping[key] = make(map[string][]string)
		}
		s, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
		if err != nil {
			log.Errorf("Error doing statefulset label conversion: " + err.Error())
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				sss, ok := podSSMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podSSMapping[key][pod.GetObjectMeta().GetName()] = append(sss, name)
				} else {
					podSSMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podSSMapping, nil
}
func getPodDeployments(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	deploymentsList := cache.GetAllDeployments()
	podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
	for _, deployment := range deploymentsList {
		namespace := deployment.GetObjectMeta().GetNamespace()
		name := deployment.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podDeploymentsMapping[key]; !ok {
			podDeploymentsMapping[key] = make(map[string][]string)
		}
		s, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
		if err != nil {
			log.Errorf("Error doing deployment label conversion: " + err.Error())
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				deployments, ok := podDeploymentsMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = append(deployments, name)
				} else {
					podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podDeploymentsMapping, nil
}
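// getPodDeploymentsWithMetrics builds the same "namespace,clusterID" -> pod -> owner-name mapping
// as getPodDeployments above, but from Prometheus label metrics rather than the cluster cache:
// deployment match labels are compared against pod labels, with the namespace, name, and cluster
// of each metric parsed via NewKeyTuple.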
func getPodDeploymentsWithMetrics(deploymentLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
	podDeploymentsMapping := make(map[string]map[string][]string)
	for depKey, depLabels := range deploymentLabels {
		kt, err := NewKeyTuple(depKey)
		if err != nil {
			continue
		}
		namespace := kt.Namespace()
		name := kt.Key()
		clusterID := kt.ClusterID()
		key := namespace + "," + clusterID
		if _, ok := podDeploymentsMapping[key]; !ok {
			podDeploymentsMapping[key] = make(map[string][]string)
		}
		s := labels.Set(depLabels).AsSelectorPreValidated()
		for podKey, pLabels := range podLabels {
			pkey, err := NewKeyTuple(podKey)
			if err != nil {
				continue
			}
			podNamespace := pkey.Namespace()
			podName := pkey.Key()
			podClusterID := pkey.ClusterID()
			labelSet := labels.Set(pLabels)
			if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
				deployments, ok := podDeploymentsMapping[key][podName]
				if ok {
					podDeploymentsMapping[key][podName] = append(deployments, name)
				} else {
					podDeploymentsMapping[key][podName] = []string{name}
				}
			}
		}
	}
	// Remove any duplicate data created by metric names
	pruneDuplicateData(podDeploymentsMapping)
	return podDeploymentsMapping, nil
}
func getPodServicesWithMetrics(serviceLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
	podServicesMapping := make(map[string]map[string][]string)
	for servKey, servLabels := range serviceLabels {
		kt, err := NewKeyTuple(servKey)
		if err != nil {
			continue
		}
		namespace := kt.Namespace()
		name := kt.Key()
		clusterID := kt.ClusterID()
		key := namespace + "," + clusterID
		if _, ok := podServicesMapping[key]; !ok {
			podServicesMapping[key] = make(map[string][]string)
		}
		s := labels.Nothing()
		if servLabels != nil && len(servLabels) > 0 {
			s = labels.Set(servLabels).AsSelectorPreValidated()
		}
		for podKey, pLabels := range podLabels {
			pkey, err := NewKeyTuple(podKey)
			if err != nil {
				continue
			}
			podNamespace := pkey.Namespace()
			podName := pkey.Key()
			podClusterID := pkey.ClusterID()
			labelSet := labels.Set(pLabels)
			if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
				services, ok := podServicesMapping[key][podName]
				if ok {
					podServicesMapping[key][podName] = append(services, name)
				} else {
					podServicesMapping[key][podName] = []string{name}
				}
			}
		}
	}
	// Remove any duplicate data created by metric names
	pruneDuplicateData(podServicesMapping)
	return podServicesMapping, nil
}
// This method alleviates an issue with metrics that used a '_' to replace '-' in deployment
// and service names. To avoid counting these as multiple deployments/services, we'll remove
// the '_' version. Not optimal, but takes care of the issue
func pruneDuplicateData(data map[string]map[string][]string) {
	for _, podMap := range data {
		for podName, values := range podMap {
			podMap[podName] = pruneDuplicates(values)
		}
	}
}
// Determine if there is an underscore in the value of a slice. If so, replace _ with -, and then
// check to see if the result exists in the slice. If both are true, then we DO NOT include that
// original value in the new slice.
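// For example, []string{"my_app", "my-app", "other"} prunes to {"my-app", "other"} (order not
// guaranteed, since the result is rebuilt from a set).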
func pruneDuplicates(s []string) []string {
	m := sliceToSet(s)
	for _, v := range s {
		if strings.Contains(v, "_") {
			name := strings.Replace(v, "_", "-", -1)
			if !m[name] {
				m[name] = true
			}
			delete(m, v)
		}
	}
	return setToSlice(m)
}
// Creates a map[string]bool containing the slice values as keys
func sliceToSet(s []string) map[string]bool {
	m := make(map[string]bool)
	for _, v := range s {
		m[v] = true
	}
	return m
}
func setToSlice(m map[string]bool) []string {
	var result []string
	for k := range m {
		result = append(result, k)
	}
	return result
}
func costDataPassesFilters(cm clusters.ClusterMap, costs *CostData, namespace string, cluster string) bool {
	passesNamespace := namespace == "" || costs.Namespace == namespace
	passesCluster := cluster == "" || costs.ClusterID == cluster || costs.ClusterName == cluster
	return passesNamespace && passesCluster
}
// Finds the closest multiple of `multiple` less than or equal to value
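// e.g. floorMultiple(17, 5) == 15; it is used below to snap request times onto 5-minute boundaries.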
func floorMultiple(value int64, multiple int64) int64 {
	return (value / multiple) * multiple
}
// Attempt to create a key for the request. Reduce the times to minutes in order to more easily group requests based on
// real time ranges.
func requestKeyFor(window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
	keyLayout := "2006-01-02T15:04Z"
	// We "snap" the start time and duration to the closest 5-minute multiple at or below them, by
	// applying a snapped duration to a snapped start time.
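	// For example, a 62-minute window starting at 10:03 is keyed as a 60-minute window starting at
	// 10:00, so near-identical requests made moments apart share a single cache entry.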
	durMins := int64(window.Minutes())
	durMins = floorMultiple(durMins, 5)
	sMins := int64(window.Start().Minute())
	sOffset := sMins - floorMultiple(sMins, 5)
	sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
	// Apply the snapped duration to the snapped start time so that the end key is snapped as well.
	eTime := sTime.Add(time.Duration(durMins) * time.Minute)
	startKey := sTime.Format(keyLayout)
	endKey := eTime.Format(keyLayout)
	return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
}
// ComputeCostDataRange executes a range query for cost data.
func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	// Create a request key for request grouping. This key will be used to represent the cost-model result
	// for the specific inputs to prevent multiple queries for identical data.
	key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
	log.Debugf("ComputeCostDataRange with Key: %s", key)
	// If there is already a request out that uses the same data, wait for it to return to share the results.
	// Otherwise, start executing.
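	// (This is effectively singleflight-style coalescing: concurrent callers that produce the same
	// key share one underlying computation.)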
	result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
		return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
	})
	data, ok := result.(map[string]*CostData)
	if !ok {
		return nil, fmt.Errorf("Failed to cast result as map[string]*CostData")
	}
	return data, err
}
func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	clusterID := env.GetClusterID()
	// durHrs := end.Sub(start).Hours() + 1
	if window.IsOpen() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	start := *window.Start()
	end := *window.End()
	// Snap resolution to the nearest minute
	resMins := int64(math.Trunc(resolution.Minutes()))
	if resMins == 0 {
		return nil, fmt.Errorf("resolution must be greater than 0.0")
	}
	resolution = time.Duration(resMins) * time.Minute
	// Warn if resolution does not evenly divide window
	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
	}
	// Convert to Prometheus-style duration string in terms of m or h
	resStr := fmt.Sprintf("%dm", resMins)
	if resMins%60 == 0 {
		resStr = fmt.Sprintf("%dh", resMins/60)
	}
	if remoteEnabled {
		remoteLayout := "2006-01-02T15:04:05Z"
		remoteStartStr := window.Start().Format(remoteLayout)
		remoteEndStr := window.End().Format(remoteLayout)
		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
	}
	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)
	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", resolution.Hours(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
	// Submit all queries for concurrent evaluation
	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
	// Pull k8s pod, controller, service, and namespace details
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podStatefulsetsMapping, err := getPodStatefulsets(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	// Process query results. Handle errors afterwards using ctx.Errors.
	resRAMRequests, _ := resChRAMRequests.Await()
	resRAMUsage, _ := resChRAMUsage.Await()
	resRAMAlloc, _ := resChRAMAlloc.Await()
	resCPURequests, _ := resChCPURequests.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resCPUAlloc, _ := resChCPUAlloc.Await()
	resGPURequests, _ := resChGPURequests.Await()
	resPVRequests, _ := resChPVRequests.Await()
	resPVCAlloc, _ := resChPVCAlloc.Await()
	resPVHourlyCost, _ := resChPVHourlyCost.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()
	resNSLabels, _ := resChNSLabels.Await()
	resPodLabels, _ := resChPodLabels.Await()
	resNSAnnotations, _ := resChNSAnnotations.Await()
	resPodAnnotations, _ := resChPodAnnotations.Await()
	resServiceLabels, _ := resChServiceLabels.Await()
	resDeploymentLabels, _ := resChDeploymentLabels.Await()
	resStatefulsetLabels, _ := resChStatefulsetLabels.Await()
	resDaemonsets, _ := resChDaemonsets.Await()
	resJobs, _ := resChJobs.Await()
	resNormalization, _ := resChNormalization.Await()
	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if ctx.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass the error
		// back to the caller. The caller should handle the specific case where the error is an ErrorCollection
		for _, promErr := range ctx.Errors() {
			if promErr.Error != nil {
				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
			}
			if promErr.ParseError != nil {
				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
			}
		}
		// ErrorCollection is a collection of errors wrapped in a single error implementation
		return nil, ctx.ErrorCollection()
	}
	normalizationValue, err := getNormalizations(resNormalization)
	if err != nil {
		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
		return nil, prom.WrapError(err, msg)
	}
	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
	if err != nil {
		// Just log for compatibility with KSM less than 1.6
		log.Infof("Unable to get PV Data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = addPVData(cm.Cache, pvClaimMapping, cp)
		if err != nil {
			return nil, fmt.Errorf("pvClaimMapping: %s", err)
		}
	}
	pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
	}
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
	}
	if pvAllocationMapping != nil {
		addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
		for k, v := range pvAllocationMapping {
			unmountedPVs[k] = v
		}
	}
	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
	}
	if nsLabels != nil {
		mergeStringMap(namespaceLabelsMapping, nsLabels)
	}
	podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
	}
	nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
	}
	if nsAnnotations != nil {
		mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
	}
	podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
	}
	serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
	}
	deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}
	statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Statefulset Match Labels for Metrics: %s", err.Error())
	}
	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)
	podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)
	podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
	}
	podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
	}
	podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podServicesMapping, podServicesMetricsMapping)
	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Errorf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}
	containerNameCost := make(map[string]*CostData)
	containers := make(map[string]bool)
	otherClusterPVRecorded := make(map[string]bool)
	RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
	}
	for key := range RAMReqMap {
		containers[key] = true
	}
	RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
	}
	for key := range CPUReqMap {
		containers[key] = true
	}
	// No need to normalize here, as this comes from a counter, namely:
	// rate(container_cpu_usage_seconds_total) which properly accounts for normalized rates
	CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}
	RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
	}
	for key := range RAMAllocMap {
		containers[key] = true
	}
	CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
	}
	for key := range CPUAllocMap {
		containers[key] = true
	}
	GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(GPURequests)")
	}
	for key := range GPUReqMap {
		containers[key] = true
	}
	// Request metrics can show up after pod eviction and completion.
	// This method synchronizes requests to allocations such that when
	// allocation is 0, so are requests
	applyAllocationToRequests(RAMAllocMap, RAMReqMap)
	applyAllocationToRequests(CPUAllocMap, CPUReqMap)
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		c, _ := NewContainerMetricFromKey(key)
		RAMReqV, ok := RAMReqMap[key]
		if !ok {
			log.Debug("no RAM requests for " + key)
			RAMReqV = []*util.Vector{}
		}
		RAMUsedV, ok := RAMUsedMap[key]
		if !ok {
			log.Debug("no RAM usage for " + key)
			RAMUsedV = []*util.Vector{}
		}
		CPUReqV, ok := CPUReqMap[key]
		if !ok {
			log.Debug("no CPU requests for " + key)
			CPUReqV = []*util.Vector{}
		}
		CPUUsedV, ok := CPUUsedMap[key]
		if !ok {
			log.Debug("no CPU usage for " + key)
			CPUUsedV = []*util.Vector{}
		}
		RAMAllocsV, ok := RAMAllocMap[key]
		if !ok {
			log.Debug("no RAM allocation for " + key)
			RAMAllocsV = []*util.Vector{}
		}
		CPUAllocsV, ok := CPUAllocMap[key]
		if !ok {
			log.Debug("no CPU allocation for " + key)
			CPUAllocsV = []*util.Vector{}
		}
		GPUReqV, ok := GPUReqMap[key]
		if !ok {
			log.Debug("no GPU requests for " + key)
			GPUReqV = []*util.Vector{}
		}
		var node *costAnalyzerCloud.Node
		if n, ok := missingNodes[c.NodeName]; ok {
			node = n
		} else {
			node = &costAnalyzerCloud.Node{}
			missingNodes[c.NodeName] = node
		}
		nsKey := c.Namespace + "," + c.ClusterID
		podKey := c.Namespace + "," + c.PodName + "," + c.ClusterID
		namespaceLabels := namespaceLabelsMapping[nsKey]
		pLabels := podLabels[podKey]
		if pLabels == nil {
			pLabels = make(map[string]string)
		}
		for k, v := range namespaceLabels {
			if _, ok := pLabels[k]; !ok {
				pLabels[k] = v
			}
		}
		namespaceAnnotations := namespaceAnnotationsMapping[nsKey]
		pAnnotations := podAnnotations[podKey]
		if pAnnotations == nil {
			pAnnotations = make(map[string]string)
		}
		for k, v := range namespaceAnnotations {
			if _, ok := pAnnotations[k]; !ok {
				pAnnotations[k] = v
			}
		}
		var podDeployments []string
		if _, ok := podDeploymentsMapping[nsKey]; ok {
			if ds, ok := podDeploymentsMapping[nsKey][c.PodName]; ok {
				podDeployments = ds
			} else {
				podDeployments = []string{}
			}
		}
		var podStatefulSets []string
		if _, ok := podStatefulsetsMapping[nsKey]; ok {
			if ss, ok := podStatefulsetsMapping[nsKey][c.PodName]; ok {
				podStatefulSets = ss
			} else {
				podStatefulSets = []string{}
			}
		}
		var podServices []string
		if _, ok := podServicesMapping[nsKey]; ok {
			if svcs, ok := podServicesMapping[nsKey][c.PodName]; ok {
				podServices = svcs
			} else {
				podServices = []string{}
			}
		}
		var podPVs []*PersistentVolumeClaimData
		var podNetCosts []*util.Vector
		// For PVC data, we'll need to find the claim mapping and cost data. Will need to append
		// cost data since that was populated by cluster data previously. We do this with
		// the pod_pvc_allocation metric
		podPVData, ok := pvAllocationMapping[podKey]
		if !ok {
			log.Debugf("Failed to locate pv allocation mapping for missing pod.")
		}
		// Delete the current pod key from potentially unmounted pvs
		delete(unmountedPVs, podKey)
		// For network costs, we'll use existing map since it should still contain the
		// correct data.
		var podNetworkCosts []*util.Vector
		if usage, ok := networkUsageMap[podKey]; ok {
			netCosts, err := GetNetworkCost(usage, cp)
			if err != nil {
				log.Errorf("Error pulling network costs: %s", err.Error())
			} else {
				podNetworkCosts = netCosts
			}
		}
		// Check to see if any other data has been recorded for this namespace, pod, clusterId
		// Follow the pattern of only allowing claims data per pod
		if !otherClusterPVRecorded[podKey] {
			otherClusterPVRecorded[podKey] = true
			podPVs = podPVData
			podNetCosts = podNetworkCosts
		}
		pds := []string{}
		if ds, ok := podDaemonsets[podKey]; ok {
			pds = []string{ds}
		}
		jobs := []string{}
		if job, ok := podJobs[podKey]; ok {
			jobs = []string{job}
		}
		costs := &CostData{
			Name:            c.ContainerName,
			PodName:         c.PodName,
			NodeName:        c.NodeName,
			NodeData:        node,
			Namespace:       c.Namespace,
			Services:        podServices,
			Deployments:     podDeployments,
			Daemonsets:      pds,
			Statefulsets:    podStatefulSets,
			Jobs:            jobs,
			RAMReq:          RAMReqV,
			RAMUsed:         RAMUsedV,
			CPUReq:          CPUReqV,
			CPUUsed:         CPUUsedV,
			RAMAllocation:   RAMAllocsV,
			CPUAllocation:   CPUAllocsV,
			GPUReq:          GPUReqV,
			Annotations:     pAnnotations,
			Labels:          pLabels,
			NamespaceLabels: namespaceLabels,
			PVCData:         podPVs,
			NetworkData:     podNetCosts,
			ClusterID:       c.ClusterID,
			ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
		}
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[k] = costs
		}
	}
	if window.Minutes() > 0 {
		dur, off := window.DurationOffsetStrings()
		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
		if err != nil {
			log.Errorf("Error fetching historical node data: %s", err.Error())
		}
	}
	return containerNameCost, nil
}
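// applyAllocationToRequests zeroes request values wherever the corresponding allocation value is 0,
// and drops request series that have no allocation series at all, so that requests never outlive
// their allocations (see the call sites above).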
func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestMap map[string][]*util.Vector) {
	// The result of the normalize operation will be a new []*util.Vector to replace the requests
	normalizeOp := func(r *util.Vector, x *float64, y *float64) bool {
		// Omit data (return false) if both x and y inputs don't exist
		if x == nil || y == nil {
			return false
		}
		// If the allocation value is 0, 0 out request value
		if *x == 0 {
			r.Value = 0
		} else {
			r.Value = *y
		}
		return true
	}
	// Run normalization on all request vectors in the mapping
	for k, requests := range requestMap {
		// Only run normalization where there are valid allocations
		allocations, ok := allocationMap[k]
		if !ok {
			delete(requestMap, k)
			continue
		}
		// Replace request map with normalized
		requestMap[k] = util.ApplyVectorOp(allocations, requests, normalizeOp)
	}
}
func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
	cfg, err := cp.GetConfig()
	if err != nil {
		log.Errorf("Failed to get provider config while adding pv metrics data.")
		return
	}
	for _, pvcDataArray := range pvAllocationMap {
		for _, pvcData := range pvcDataArray {
			costKey := fmt.Sprintf("%s,%s", pvcData.VolumeName, pvcData.ClusterID)
			pvCost, ok := pvCostMap[costKey]
			if !ok {
				pvcData.Volume = &costAnalyzerCloud.PV{
					Cost: cfg.Storage,
				}
				continue
			}
			pvcData.Volume = pvCost
		}
	}
}
// Add values that don't already exist in origMap from mergeMap into origMap
func mergeStringMap(origMap map[string]map[string]string, mergeMap map[string]map[string]string) {
	for k, v := range mergeMap {
		if _, ok := origMap[k]; !ok {
			origMap[k] = v
		}
	}
}
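// appendLabelsList copies every key from labels into mainLabels, replacing any existing entry for
// the same key rather than merging the two slices.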
func appendLabelsList(mainLabels map[string]map[string][]string, labels map[string]map[string][]string) {
	for k, v := range labels {
		mainLabels[k] = v
	}
}
func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
	nsToLabels := make(map[string]map[string]string)
	nss := cache.GetAllNamespaces()
	for _, ns := range nss {
		labels := make(map[string]string)
		for k, v := range ns.Labels {
			labels[prom.SanitizeLabelName(k)] = v
		}
		nsToLabels[ns.Name+","+clusterID] = labels
	}
	return nsToLabels, nil
}
func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
	nsToAnnotations := make(map[string]map[string]string)
	nss := cache.GetAllNamespaces()
	for _, ns := range nss {
		annotations := make(map[string]string)
		for k, v := range ns.Annotations {
			annotations[prom.SanitizeLabelName(k)] = v
		}
		nsToAnnotations[ns.Name+","+clusterID] = annotations
	}
	return nsToAnnotations, nil
}
func getDaemonsetsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "DaemonSet" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}
func getJobsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "Job" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}
func getStatefulSetsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "StatefulSet" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}
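// getAllocatableVGPUs scans DaemonSet container args for a "--vgpu=N" flag (as exposed by
// virtual-GPU device plugins) and returns the first value found, or 0 if none is present.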
func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
	daemonsets := cache.GetAllDaemonSets()
	vgpuCount := 0.0
	for _, ds := range daemonsets {
		dsContainerList := &ds.Spec.Template.Spec.Containers
		for _, ctnr := range *dsContainerList {
			if ctnr.Args != nil {
				for _, arg := range ctnr.Args {
					if strings.Contains(arg, "--vgpu=") {
						vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
						if err != nil {
							log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
							continue
						}
						vgpuCount = vgpus
						return vgpuCount, nil
					}
				}
			}
		}
	}
	return vgpuCount, nil
}
type PersistentVolumeClaimData struct {
	Class        string                `json:"class"`
	Claim        string                `json:"claim"`
	Namespace    string                `json:"namespace"`
	ClusterID    string                `json:"clusterId"`
	TimesClaimed int                   `json:"timesClaimed"`
	VolumeName   string                `json:"volumeName"`
	Volume       *costAnalyzerCloud.PV `json:"persistentVolume"`
	Values       []*util.Vector        `json:"values"`
}
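// measureTime logs how long a call took when it exceeds threshold. Typical usage is to defer it at
// the top of the function being profiled, e.g. defer measureTime(time.Now(), 100*time.Millisecond, "ComputeCostData").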
func measureTime(start time.Time, threshold time.Duration, name string) {
	elapsed := time.Since(start)
	if elapsed > threshold {
		log.Infof("[Profiler] %s: %s", elapsed, name)
	}
}
func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch chan string) {
	elapsed := time.Since(start)
	if elapsed > threshold {
		ch <- fmt.Sprintf("%s took %s", name, elapsed)
	}
}
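// QueryAllocation computes an AllocationSetRange covering window in increments of step, optionally
// inserting idle allocations and proportional asset resource costs (PARCs), then aggregates and,
// if requested, accumulates the result.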
func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy kubecost.AccumulateOption) (*kubecost.AllocationSetRange, error) {
	// Validate window is legal
	if window.IsOpen() || window.IsNegative() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	var totalsStore kubecost.TotalsStore
	// Idle is required for proportional asset costs
	if includeProportionalAssetResourceCosts {
		if !includeIdle {
			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
		}
		totalsStore = kubecost.NewMemoryTotalsStore()
	}
	// Begin with empty response
	asr := kubecost.NewAllocationSetRange()
	// Query for AllocationSets in increments of the given step duration,
	// appending each to the response.
	stepStart := *window.Start()
	stepEnd := stepStart.Add(step)
	var isAKS bool
	for window.End().After(stepStart) {
		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
		if err != nil {
			return nil, fmt.Errorf("error computing allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
		}
		if includeIdle {
			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
			}
			if includeProportionalAssetResourceCosts {
				// AKS is a special case - there can be a maximum of 2
				// load balancers (1 public and 1 private) in an AKS cluster
				// therefore, when calculating PARCs for load balancers,
				// we must know if this is an AKS cluster
				for _, node := range assetSet.Nodes {
					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
						isAKS = true
						break
					}
				}
				_, err := kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
				if err != nil {
					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
				}
			}
			idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
			if err != nil {
				return nil, fmt.Errorf("error computing idle allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
			}
			for _, idleAlloc := range idleSet.Allocations {
				allocSet.Insert(idleAlloc)
			}
		}
		asr.Append(allocSet)
		stepStart = stepEnd
		stepEnd = stepStart.Add(step)
	}
	// Set aggregation options and aggregate
	opts := &kubecost.AllocationAggregationOptions{
		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
		IdleByNode:                            idleByNode,
		IncludeAggregatedMetadata:             includeAggregatedMetadata,
	}
	// Aggregate
	err := asr.AggregateBy(aggregate, opts)
	if err != nil {
		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
	}
	// Accumulate, if requested
	if accumulateBy != kubecost.AccumulateOptionNone {
		asr, err = asr.Accumulate(accumulateBy)
		if err != nil {
			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
		}
		// when accumulating and returning PARCs, we need the totals for the
		// accumulated windows to accurately compute a fraction
		if includeProportionalAssetResourceCosts {
			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
			_, err = kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
			if err != nil {
				log.Errorf("ETL: error updating asset resource totals for %s: %s", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
		}
	}
	if includeProportionalAssetResourceCosts {
		for _, as := range asr.Allocations {
			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
			}
			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
			}
			var totalPublicLbCost, totalPrivateLbCost float64
			if isAKS && sharedLoadBalancer {
				// loop through all assetTotals, adding all load balancer costs by public and private
				for _, tot := range totalStoreByNode {
					if tot.PrivateLoadBalancer {
						totalPrivateLbCost += tot.LoadBalancerCost
					} else {
						totalPublicLbCost += tot.LoadBalancerCost
					}
				}
			}
			// loop through each allocation set, using total cost from totals store
			for _, alloc := range as.Allocations {
				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
					// for each parc, check the totals store for each
					// on a totals hit, set the corresponding total and calculate percentage
					var totals *kubecost.AssetTotals
					if totalsLoc, found := totalStoreByCluster[key]; found {
						totals = totalsLoc
					}
					if totalsLoc, found := totalStoreByNode[key]; found {
						totals = totalsLoc
					}
					if totals == nil {
						log.Errorf("unable to locate asset totals for allocation %s", key)
						return nil, fmt.Errorf("unable to locate allocation totals for allocation")
					}
					parc.CPUTotalCost = totals.CPUCost
					parc.GPUTotalCost = totals.GPUCost
					parc.RAMTotalCost = totals.RAMCost
					parc.PVTotalCost = totals.PersistentVolumeCost
					if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
						// Azure is a special case - use computed totals above
						// use the lbAllocations in the object to determine if
						// this PARC is a public or private load balancer
						// then set the total accordingly
						// AKS only has 1 public and 1 private load balancer
						lbAlloc, found := alloc.LoadBalancers[key]
						if found {
							if lbAlloc.Private {
								parc.LoadBalancerTotalCost = totalPrivateLbCost
							} else {
								parc.LoadBalancerTotalCost = totalPublicLbCost
							}
						}
					} else {
						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
					}
					kubecost.ComputePercentages(&parc)
					alloc.ProportionalAssetResourceCosts[rawKey] = parc
				}
			}
		}
	}
	return asr, nil
}
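// computeIdleAllocations derives one idle Allocation per node (or per cluster, when idleByNode is
// false) as the per-resource difference between asset totals and allocated totals for the same
// window, so that allocated cost plus idle cost adds back up to the asset cost.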
func computeIdleAllocations(allocSet *kubecost.AllocationSet, assetSet *kubecost.AssetSet, idleByNode bool) (*kubecost.AllocationSet, error) {
	if !allocSet.Window.Equal(assetSet.Window) {
		return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
	}
	var allocTotals map[string]*kubecost.AllocationTotals
	var assetTotals map[string]*kubecost.AssetTotals
	if idleByNode {
		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationNodeProp)
		assetTotals = kubecost.ComputeAssetTotals(assetSet, true)
	} else {
		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationClusterProp)
		assetTotals = kubecost.ComputeAssetTotals(assetSet, false)
	}
	start, end := *allocSet.Window.Start(), *allocSet.Window.End()
	idleSet := kubecost.NewAllocationSet(start, end)
	for key, assetTotal := range assetTotals {
		allocTotal, ok := allocTotals[key]
		if !ok {
			log.Warnf("ETL: did not find allocations for asset key: %s", key)
			// Use a zero-value set of totals. This indicates either (1) an
			// error computing totals, or (2) that no allocations ran on the
			// given node for the given window.
			allocTotal = &kubecost.AllocationTotals{
				Cluster: assetTotal.Cluster,
				Node:    assetTotal.Node,
				Start:   assetTotal.Start,
				End:     assetTotal.End,
			}
		}
		// Insert one idle allocation for each key (whether by node or
		// by cluster), defined as the difference between the total
		// asset cost and the allocated cost per-resource.
		name := fmt.Sprintf("%s/%s", key, kubecost.IdleSuffix)
		err := idleSet.Insert(&kubecost.Allocation{
			Name:   name,
			Window: idleSet.Window.Clone(),
			Properties: &kubecost.AllocationProperties{
				Cluster:    assetTotal.Cluster,
				Node:       assetTotal.Node,
				ProviderID: assetTotal.Node,
			},
			Start:   assetTotal.Start,
			End:     assetTotal.End,
			CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
			GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
			RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
		})
		if err != nil {
			return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
		}
	}
	return idleSet, nil
}