package costmodel

import (
    "errors"
    "fmt"
    "math"
    "regexp"
    "strconv"
    "strings"
    "time"

    costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
    "github.com/opencost/opencost/pkg/clustercache"
    "github.com/opencost/opencost/pkg/costmodel/clusters"
    "github.com/opencost/opencost/pkg/env"
    "github.com/opencost/opencost/pkg/kubecost"
    "github.com/opencost/opencost/pkg/log"
    "github.com/opencost/opencost/pkg/prom"
    "github.com/opencost/opencost/pkg/util"
    prometheus "github.com/prometheus/client_golang/api"
    prometheusClient "github.com/prometheus/client_golang/api"
    "golang.org/x/sync/singleflight"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
)

const (
    statusAPIError = 422

    profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

    unmountedPVsContainer = "unmounted-pvs"

    apiPrefix = "/api/v1"

    epAlertManagers   = apiPrefix + "/alertmanagers"
    epLabelValues     = apiPrefix + "/label/:name/values"
    epSeries          = apiPrefix + "/series"
    epTargets         = apiPrefix + "/targets"
    epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
    epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
    epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
    epConfig          = apiPrefix + "/status/config"
    epFlags           = apiPrefix + "/status/flags"
)

// isCron matches a CronJob-created Job name and captures the non-timestamp name.
//
// We support either a 10-character timestamp OR an 8-character timestamp
// because batch/v1beta1 CronJobs create Jobs with 10-character timestamps
// and batch/v1 CronJobs create Jobs with 8-character timestamps.
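//
// For example (illustrative names, not from the original source):
//
//	isCron.FindStringSubmatch("nightly-report-1234567890")
//	// -> ["nightly-report-1234567890", "nightly-report", "1234567890"]
//
// A name without a trailing timestamp, such as "nightly-report", does not match.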
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
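
// CostModel computes cost and allocation data for a single cluster, combining
// Prometheus query results with state from the Kubernetes cluster cache and
// pricing data from the cloud provider.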
type CostModel struct {
    Cache                      clustercache.ClusterCache
    ClusterMap                 clusters.ClusterMap
    MaxPrometheusQueryDuration time.Duration
    RequestGroup               *singleflight.Group
    ScrapeInterval             time.Duration
    PrometheusClient           prometheus.Client
    Provider                   costAnalyzerCloud.Provider

    pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
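
// NewCostModel constructs a CostModel backed by the given Prometheus client,
// cloud provider, cluster cache, and cluster map.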
func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
    // request grouping to prevent over-requesting the same data prior to caching
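    //
    // A minimal sketch of how callers can collapse concurrent identical queries
    // through the group (runExpensiveQuery is a hypothetical helper):
    //
    //	v, err, shared := requestGroup.Do(queryKey, func() (interface{}, error) {
    //		return runExpensiveQuery(queryKey)
    //	})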
    requestGroup := new(singleflight.Group)

    return &CostModel{
        Cache:                      cache,
        ClusterMap:                 clusterMap,
        MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
        PrometheusClient:           client,
        Provider:                   provider,
        RequestGroup:               requestGroup,
        ScrapeInterval:             scrapeInterval,
    }
}
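
// CostData holds the cost and usage data for a single container, including its
// owners (deployments, services, etc.), resource request and usage vectors,
// attached PVC data, and network costs.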
type CostData struct {
    Name            string                       `json:"name,omitempty"`
    PodName         string                       `json:"podName,omitempty"`
    NodeName        string                       `json:"nodeName,omitempty"`
    NodeData        *costAnalyzerCloud.Node      `json:"node,omitempty"`
    Namespace       string                       `json:"namespace,omitempty"`
    Deployments     []string                     `json:"deployments,omitempty"`
    Services        []string                     `json:"services,omitempty"`
    Daemonsets      []string                     `json:"daemonsets,omitempty"`
    Statefulsets    []string                     `json:"statefulsets,omitempty"`
    Jobs            []string                     `json:"jobs,omitempty"`
    RAMReq          []*util.Vector               `json:"ramreq,omitempty"`
    RAMUsed         []*util.Vector               `json:"ramused,omitempty"`
    RAMAllocation   []*util.Vector               `json:"ramallocated,omitempty"`
    CPUReq          []*util.Vector               `json:"cpureq,omitempty"`
    CPUUsed         []*util.Vector               `json:"cpuused,omitempty"`
    CPUAllocation   []*util.Vector               `json:"cpuallocated,omitempty"`
    GPUReq          []*util.Vector               `json:"gpureq,omitempty"`
    PVCData         []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
    NetworkData     []*util.Vector               `json:"network,omitempty"`
    Annotations     map[string]string            `json:"annotations,omitempty"`
    Labels          map[string]string            `json:"labels,omitempty"`
    NamespaceLabels map[string]string            `json:"namespaceLabels,omitempty"`
    ClusterID       string                       `json:"clusterId"`
    ClusterName     string                       `json:"clusterName"`
}

func (cd *CostData) String() string {
    return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
        cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
        len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
        len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
}
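
// GetController returns the name and kind of the container's controller, if
// any, checking Deployments, StatefulSets, DaemonSets, and Jobs in that order.
// Jobs created by CronJobs are collapsed to the CronJob's name via isCron.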
func (cd *CostData) GetController() (name string, kind string, hasController bool) {
    hasController = false

    if len(cd.Deployments) > 0 {
        name = cd.Deployments[0]
        kind = "deployment"
        hasController = true
    } else if len(cd.Statefulsets) > 0 {
        name = cd.Statefulsets[0]
        kind = "statefulset"
        hasController = true
    } else if len(cd.Daemonsets) > 0 {
        name = cd.Daemonsets[0]
        kind = "daemonset"
        hasController = true
    } else if len(cd.Jobs) > 0 {
        name = cd.Jobs[0]
        kind = "job"
        hasController = true

        match := isCron.FindStringSubmatch(name)
        if match != nil {
            name = match[1]
        }
    }

    return name, kind, hasController
}

const (
    queryRAMRequestsStr = `avg(
        label_replace(
            label_replace(
                avg(
                    sum_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!="", %s}[%s] %s)
                ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
            ), "pod_name","$1","pod","(.+)"
        )
    ) by (namespace,container_name,pod_name,node,%s)`

    queryRAMUsageStr = `avg(
        label_replace(
            label_replace(
                label_replace(
                    sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
                ), "container_name", "$1", "container", "(.+)"
            ), "pod_name", "$1", "pod", "(.+)"
        )
    ) by (namespace, container_name, pod_name, node, %s)`

    queryCPURequestsStr = `avg(
        label_replace(
            label_replace(
                avg(
                    sum_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!="", %s}[%s] %s)
                ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
            ), "pod_name","$1","pod","(.+)"
        )
    ) by (namespace,container_name,pod_name,node,%s)`

    queryCPUUsageStr = `avg(
        label_replace(
            label_replace(
                label_replace(
                    rate(
                        container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
                    ), "node", "$1", "instance", "(.+)"
                ), "container_name", "$1", "container", "(.+)"
            ), "pod_name", "$1", "pod", "(.+)"
        )
    ) by (namespace, container_name, pod_name, node, %s)`

    queryGPURequestsStr = `avg(
        label_replace(
            label_replace(
                avg(
                    sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s)
                    * %f
                ) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
            ), "pod_name","$1","pod","(.+)"
        )
    ) by (namespace,container_name,pod_name,node,%s)
    * on (pod_name, namespace, %s) group_left(container) label_replace(avg(avg_over_time(kube_pod_status_phase{phase="Running", %s}[%s] %s)) by (pod,namespace,%s), "pod_name","$1","pod","(.+)")`

    queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
    *
    on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
    sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`

    // queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
    // window, aggregated by container.
    // [line 3] sum_over_time(each byte) = [byte*scrape] by metric
    // [line 4] scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape]; / 60 / 60 = [hours/scrape] by container
    // [lines 2,4] sum(") by unique container key and multiply [byte*scrape] * [hours/scrape] for byte*hours
    // [lines 1,5] relabeling
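    //
    // A worked example with assumed numbers: a container holding 2 GiB steadily
    // for one hour, scraped every 60s, contributes 60 samples of 2 GiB, so
    // sum_over_time = 120 GiB*scrapes; multiplying by 60 [seconds/scrape] / 60 / 60
    // = 1/60 [hours/scrape] yields the expected 2 GiB*hours.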
    queryRAMAllocationByteHours = `
    label_replace(label_replace(
        sum(
            sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!="", %s}[%s])
        ) by (namespace,container,pod,node,%s) * %f / 60 / 60
    , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`

    // queryCPUAllocationVCPUHours yields the total VCPU-hour CPU allocation over the given
    // window, aggregated by container.
    // [line 3] sum_over_time(each VCPU*mins in window) = [VCPU*scrape] by metric
    // [line 4] scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape]; / 60 / 60 = [hours/scrape] by container
    // [lines 2,4] sum(") by unique container key and multiply [VCPU*scrape] * [hours/scrape] for VCPU*hours
    // [lines 1,5] relabeling
    queryCPUAllocationVCPUHours = `
    label_replace(label_replace(
        sum(
            sum_over_time(container_cpu_allocation{container!="",container!="POD", node!="", %s}[%s])
        ) by (namespace,container,pod,node,%s) * %f / 60 / 60
    , "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`

    // queryPVCAllocationFmt yields the total byte-hour PVC allocation over the given window,
    // aggregated by pod:
    //   sum_over_time(each byte) = [byte*scrape] by metric, then
    //   * scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape]
    queryPVCAllocationFmt = `sum(sum_over_time(pod_pvc_allocation{%s}[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
    queryPVHourlyCostFmt   = `avg_over_time(pv_hourly_cost{%s}[%s])`
    queryNSLabels          = `avg_over_time(kube_namespace_labels{%s}[%s])`
    queryPodLabels         = `avg_over_time(kube_pod_labels{%s}[%s])`
    queryNSAnnotations     = `avg_over_time(kube_namespace_annotations{%s}[%s])`
    queryPodAnnotations    = `avg_over_time(kube_pod_annotations{%s}[%s])`
    queryDeploymentLabels  = `avg_over_time(deployment_match_labels{%s}[%s])`
    queryStatefulsetLabels = `avg_over_time(statefulSet_match_labels{%s}[%s])`
    queryPodDaemonsets     = `sum(kube_pod_owner{owner_kind="DaemonSet", %s}) by (namespace,pod,owner_name,%s)`
    queryPodJobs           = `sum(kube_pod_owner{owner_kind="Job", %s}) by (namespace,pod,owner_name,%s)`
    queryServiceLabels     = `avg_over_time(service_selector_labels{%s}[%s])`

    queryZoneNetworkUsage     = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
    queryRegionNetworkUsage   = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
    queryInternetNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`

    normalizationStr = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
)
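
// ComputeCostData runs the usage, network, and normalization queries against
// Prometheus, joins the results with pod, controller, PV, and namespace state
// from the cluster cache, and returns a map of per-container CostData keyed by
// container metric key. If filterNamespace is non-empty, only containers in
// that namespace are returned.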
func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
    queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
    queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
    queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
    queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
    queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
    queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)

    // Cluster ID is specific to the source cluster
    clusterID := env.GetClusterID()

    // Submit all Prometheus queries asynchronously
    ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
    resChRAMUsage := ctx.Query(queryRAMUsage)
    resChCPUUsage := ctx.Query(queryCPUUsage)
    resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
    resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
    resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
    resChNormalization := ctx.Query(queryNormalization)

    // Pull pod information from k8s API
    podlist := cm.Cache.GetAllPods()

    podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
    if err != nil {
        return nil, err
    }

    podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
    if err != nil {
        return nil, err
    }

    namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
    if err != nil {
        return nil, err
    }

    namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
    if err != nil {
        return nil, err
    }

    // Process Prometheus query results. Handle errors using ctx.Errors.
    resRAMUsage, _ := resChRAMUsage.Await()
    resCPUUsage, _ := resChCPUUsage.Await()
    resNetZoneRequests, _ := resChNetZoneRequests.Await()
    resNetRegionRequests, _ := resChNetRegionRequests.Await()
    resNetInternetRequests, _ := resChNetInternetRequests.Await()
    resNormalization, _ := resChNormalization.Await()

    // NOTE: The way we currently handle errors and warnings only early-returns if there
    // NOTE: is an error. Warnings will not propagate unless coupled with errors.
    if ctx.HasErrors() {
        // To keep the context of where the errors occurred, we log them here and pass
        // the error back to the caller. The caller should handle the specific case where
        // the error is an ErrorCollection.
        for _, promErr := range ctx.Errors() {
            if promErr.Error != nil {
                log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
            }
            if promErr.ParseError != nil {
                log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
            }
        }

        // ErrorCollection is a collection of errors wrapped in a single error implementation.
        // We opt to not return an error for the sake of running as a pure exporter.
        log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
    }

    defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")

    normalizationValue, err := getNormalization(resNormalization)
    if err != nil {
        // We opt to not return an error for the sake of running as a pure exporter.
        log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
    }

    // Determine whether there are vGPUs configured and, if so, get the total allocatable
    // number. If there are no vGPUs, a default coefficient of 10.0 is used.
    vgpuCount, err := getAllocatableVGPUs(cm.Cache)
    if err != nil {
        log.Warnf("getAllocatableVGPUs error: %s", err.Error())
    }

    vgpuCoeff := 10.0
    if vgpuCount > 0.0 {
        vgpuCoeff = vgpuCount
    }

    nodes, err := cm.GetNodeCost(cp)
    if err != nil {
        log.Warnf("GetNodeCost: no node cost model available: " + err.Error())
        return nil, err
    }

    // Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
    unmountedPVs := make(map[string][]*PersistentVolumeClaimData)

    pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
    if err != nil {
        log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
    }
    if pvClaimMapping != nil {
        err = addPVData(cm.Cache, pvClaimMapping, cp)
        if err != nil {
            return nil, err
        }
        // copy claim mappings into zombies, then remove as they're discovered
        for k, v := range pvClaimMapping {
            unmountedPVs[k] = []*PersistentVolumeClaimData{v}
        }
    }

    networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
    if err != nil {
        log.Warnf("Unable to get Network Cost Data: %s", err.Error())
        networkUsageMap = make(map[string]*NetworkUsageData)
    }

    containerNameCost := make(map[string]*CostData)
    containers := make(map[string]bool)

    RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, true, normalizationValue, clusterID)
    if err != nil {
        return nil, err
    }
    for key := range RAMUsedMap {
        containers[key] = true
    }

    CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, false, 0, clusterID) // No need to normalize here, as this comes from a counter
    if err != nil {
        return nil, err
    }
    for key := range CPUUsedMap {
        containers[key] = true
    }

    currentContainers := make(map[string]v1.Pod)
    for _, pod := range podlist {
        if pod.Status.Phase != v1.PodRunning {
            continue
        }
        cs, err := NewContainerMetricsFromPod(pod, clusterID)
        if err != nil {
            return nil, err
        }
        for _, c := range cs {
            containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
            currentContainers[c.Key()] = *pod
        }
    }

    missingNodes := make(map[string]*costAnalyzerCloud.Node)
    missingContainers := make(map[string]*CostData)

    for key := range containers {
        if _, ok := containerNameCost[key]; ok {
            continue // because ordering is important for the allocation model (all PVs applied to the first container), just dedupe if it's already been added.
        }

        // The _else_ case for this statement is the case in which the container has been
        // deleted, so we have usage information but not request information. In that case,
        // we return partial data for CPU and RAM: only usage and not requests.
        if pod, ok := currentContainers[key]; ok {
            podName := pod.GetObjectMeta().GetName()
            ns := pod.GetObjectMeta().GetNamespace()

            nsLabels := namespaceLabelsMapping[ns+","+clusterID]
            podLabels := pod.GetObjectMeta().GetLabels()
            if podLabels == nil {
                podLabels = make(map[string]string)
            }
            for k, v := range nsLabels {
                if _, ok := podLabels[k]; !ok {
                    podLabels[k] = v
                }
            }

            nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
            podAnnotations := pod.GetObjectMeta().GetAnnotations()
            if podAnnotations == nil {
                podAnnotations = make(map[string]string)
            }
            for k, v := range nsAnnotations {
                if _, ok := podAnnotations[k]; !ok {
                    podAnnotations[k] = v
                }
            }

            nodeName := pod.Spec.NodeName
            var nodeData *costAnalyzerCloud.Node
            if _, ok := nodes[nodeName]; ok {
                nodeData = nodes[nodeName]
            }

            nsKey := ns + "," + clusterID

            var podDeployments []string
            if _, ok := podDeploymentsMapping[nsKey]; ok {
                if ds, ok := podDeploymentsMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
                    podDeployments = ds
                } else {
                    podDeployments = []string{}
                }
            }

            var podPVs []*PersistentVolumeClaimData
            podClaims := pod.Spec.Volumes
            for _, vol := range podClaims {
                if vol.PersistentVolumeClaim != nil {
                    name := vol.PersistentVolumeClaim.ClaimName
                    key := ns + "," + name + "," + clusterID
                    if pvClaim, ok := pvClaimMapping[key]; ok {
                        pvClaim.TimesClaimed++
                        podPVs = append(podPVs, pvClaim)

                        // Remove entry from potential unmounted pvs
                        delete(unmountedPVs, key)
                    }
                }
            }

            var podNetCosts []*util.Vector
            if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
                netCosts, err := GetNetworkCost(usage, cp)
                if err != nil {
                    log.Debugf("Error pulling network costs: %s", err.Error())
                } else {
                    podNetCosts = netCosts
                }
            }

            var podServices []string
            if _, ok := podServicesMapping[nsKey]; ok {
                if svcs, ok := podServicesMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
                    podServices = svcs
                } else {
                    podServices = []string{}
                }
            }

            for i, container := range pod.Spec.Containers {
                containerName := container.Name

                // recreate the key and look up data for this container
                newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()

                // See k8s.io/apimachinery/pkg/api/resource/amount.go and
                // k8s.io/apimachinery/pkg/api/resource/quantity.go for
                // details on the "amount" API, and
                // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
                // for the units of memory and CPU.
                ramRequestBytes := container.Resources.Requests.Memory().Value()

                // Because information on container RAM & CPU requests isn't
                // coming from Prometheus, it won't have a timestamp associated
                // with it. We need to provide a timestamp.
                RAMReqV := []*util.Vector{
                    {
                        Value:     float64(ramRequestBytes),
                        Timestamp: float64(time.Now().UTC().Unix()),
                    },
                }

                // use millicores so we can convert to cores in a float64 format
                cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
                CPUReqV := []*util.Vector{
                    {
                        Value:     float64(cpuRequestMilliCores) / 1000,
                        Timestamp: float64(time.Now().UTC().Unix()),
                    },
                }

                gpuReqCount := 0.0
                if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
                    gpuReqCount = g.AsApproximateFloat64()
                } else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
                    gpuReqCount = g.AsApproximateFloat64()
                } else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
                    // divide vgpu request/limits by total vgpus to get the portion of physical gpus requested
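                    // For example (illustrative numbers): with vgpuCoeff=10, a
                    // request of 5 vGPUs counts as 5/10 = 0.5 physical GPUs.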
                    gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
                } else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
                    gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
                }
                GPUReqV := []*util.Vector{
                    {
                        Value:     gpuReqCount,
                        Timestamp: float64(time.Now().UTC().Unix()),
                    },
                }

                RAMUsedV, ok := RAMUsedMap[newKey]
                if !ok {
                    log.Debug("no RAM usage for " + newKey)
                    RAMUsedV = []*util.Vector{{}}
                }

                CPUUsedV, ok := CPUUsedMap[newKey]
                if !ok {
                    log.Debug("no CPU usage for " + newKey)
                    CPUUsedV = []*util.Vector{{}}
                }

                var pvReq []*PersistentVolumeClaimData
                var netReq []*util.Vector
                if i == 0 { // avoid duplicating by just assigning all claims to the first container.
                    pvReq = podPVs
                    netReq = podNetCosts
                }

                costs := &CostData{
                    Name:            containerName,
                    PodName:         podName,
                    NodeName:        nodeName,
                    Namespace:       ns,
                    Deployments:     podDeployments,
                    Services:        podServices,
                    Daemonsets:      getDaemonsetsOfPod(pod),
                    Jobs:            getJobsOfPod(pod),
                    Statefulsets:    getStatefulSetsOfPod(pod),
                    NodeData:        nodeData,
                    RAMReq:          RAMReqV,
                    RAMUsed:         RAMUsedV,
                    CPUReq:          CPUReqV,
                    CPUUsed:         CPUUsedV,
                    GPUReq:          GPUReqV,
                    PVCData:         pvReq,
                    NetworkData:     netReq,
                    Annotations:     podAnnotations,
                    Labels:          podLabels,
                    NamespaceLabels: nsLabels,
                    ClusterID:       clusterID,
                    ClusterName:     cm.ClusterMap.NameFor(clusterID),
                }

                var cpuReq, cpuUse *util.Vector
                if len(costs.CPUReq) > 0 {
                    cpuReq = costs.CPUReq[0]
                }
                if len(costs.CPUUsed) > 0 {
                    cpuUse = costs.CPUUsed[0]
                }
                costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

                var ramReq, ramUse *util.Vector
                if len(costs.RAMReq) > 0 {
                    ramReq = costs.RAMReq[0]
                }
                if len(costs.RAMUsed) > 0 {
                    ramUse = costs.RAMUsed[0]
                }
                costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

                if filterNamespace == "" {
                    containerNameCost[newKey] = costs
                } else if costs.Namespace == filterNamespace {
                    containerNameCost[newKey] = costs
                }
            }
        } else {
            // The container has been deleted. Not all information is sent to prometheus
            // via ksm, so fill out what we can without the k8s API.
            log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
            c, err := NewContainerMetricFromKey(key)
            if err != nil {
                return nil, err
            }

            // CPU and RAM requests are obtained from the Kubernetes API.
            // If this case has been reached, the Kubernetes API will not
            // have information about the pod because it no longer exists.
            //
            // The case where this matters is minimal, mainly in environments
            // with very short-lived pods that over-request resources.
            RAMReqV := []*util.Vector{{}}
            CPUReqV := []*util.Vector{{}}
            GPUReqV := []*util.Vector{{}}

            RAMUsedV, ok := RAMUsedMap[key]
            if !ok {
                log.Debug("no RAM usage for " + key)
                RAMUsedV = []*util.Vector{{}}
            }

            CPUUsedV, ok := CPUUsedMap[key]
            if !ok {
                log.Debug("no CPU usage for " + key)
                CPUUsedV = []*util.Vector{{}}
            }

            node, ok := nodes[c.NodeName]
            if !ok {
                log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
                if n, ok := missingNodes[c.NodeName]; ok {
                    node = n
                } else {
                    node = &costAnalyzerCloud.Node{}
                    missingNodes[c.NodeName] = node
                }
            }

            namespacelabels := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
            namespaceAnnotations := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]

            costs := &CostData{
                Name:            c.ContainerName,
                PodName:         c.PodName,
                NodeName:        c.NodeName,
                NodeData:        node,
                Namespace:       c.Namespace,
                RAMReq:          RAMReqV,
                RAMUsed:         RAMUsedV,
                CPUReq:          CPUReqV,
                CPUUsed:         CPUUsedV,
                GPUReq:          GPUReqV,
                Annotations:     namespaceAnnotations,
                NamespaceLabels: namespacelabels,
                ClusterID:       c.ClusterID,
                ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
            }

            var cpuReq, cpuUse *util.Vector
            if len(costs.CPUReq) > 0 {
                cpuReq = costs.CPUReq[0]
            }
            if len(costs.CPUUsed) > 0 {
                cpuUse = costs.CPUUsed[0]
            }
            costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

            var ramReq, ramUse *util.Vector
            if len(costs.RAMReq) > 0 {
                ramReq = costs.RAMReq[0]
            }
            if len(costs.RAMUsed) > 0 {
                ramUse = costs.RAMUsed[0]
            }
            costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

            if filterNamespace == "" {
                containerNameCost[key] = costs
                missingContainers[key] = costs
            } else if costs.Namespace == filterNamespace {
                containerNameCost[key] = costs
                missingContainers[key] = costs
            }
        }
    }

    // Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
    // to pass along the cost data
    unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
    for k, costs := range unmounted {
        log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)

        if filterNamespace == "" {
            containerNameCost[k] = costs
        } else if costs.Namespace == filterNamespace {
            containerNameCost[k] = costs
        }
    }

    err = findDeletedNodeInfo(cli, missingNodes, window, "")
    if err != nil {
        log.Errorf("Error fetching historical node data: %s", err.Error())
    }

    err = findDeletedPodInfo(cli, missingContainers, window)
    if err != nil {
        log.Errorf("Error fetching historical pod data: %s", err.Error())
    }

    return containerNameCost, err
}

func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
    costs := make(map[string]*CostData)
    if len(unmountedPVs) == 0 {
        return costs
    }

    for k, pv := range unmountedPVs {
        keyParts := strings.Split(k, ",")
        if len(keyParts) != 3 {
            log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
            continue
        }
        ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]

        namespacelabels := namespaceLabelsMapping[ns+","+clusterID]
        namespaceAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]

        metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
        key := metric.Key()

        if costData, ok := costs[key]; !ok {
            costs[key] = &CostData{
                Name:            unmountedPVsContainer,
                PodName:         unmountedPVsContainer,
                NodeName:        "",
                Annotations:     namespaceAnnotations,
                Namespace:       ns,
                NamespaceLabels: namespacelabels,
                Labels:          namespacelabels,
                ClusterID:       clusterID,
                ClusterName:     clusterMap.NameFor(clusterID),
                PVCData:         pv,
            }
        } else {
            costData.PVCData = append(costData.PVCData, pv...)
        }
    }

    return costs
}

func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
    if len(missingContainers) > 0 {
        queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{%s}[%s]`, env.GetPromClusterFilter(), window)

        podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
        if err != nil {
            log.Errorf("failed to query historical pod labels: %s", err.Error())
        }

        podLabels := make(map[string]map[string]string)
        if podLabelsResult != nil {
            podLabels, err = parsePodLabels(podLabelsResult)
            if err != nil {
                log.Errorf("failed to parse historical pod labels: %s", err.Error())
            }
        }

        for key, costData := range missingContainers {
            cm, _ := NewContainerMetricFromKey(key)
            labels, ok := podLabels[cm.PodName]
            if !ok {
                labels = make(map[string]string)
            }
            for k, v := range costData.NamespaceLabels {
                labels[k] = v
            }
            costData.Labels = labels
        }
    }

    return nil
}

func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
    if len(missingNodes) > 0 {
        defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")

        offsetStr := ""
        if offset != "" {
            offsetStr = fmt.Sprintf("offset %s", offset)
        }

        queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
        queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
        queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())

        ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
        cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
        ramCostResCh := ctx.Query(queryHistoricalRAMCost)
        gpuCostResCh := ctx.Query(queryHistoricalGPUCost)

        cpuCostRes, _ := cpuCostResCh.Await()
        ramCostRes, _ := ramCostResCh.Await()
        gpuCostRes, _ := gpuCostResCh.Await()
        if ctx.HasErrors() {
            return ctx.ErrorCollection()
        }

        cpuCosts, err := getCost(cpuCostRes)
        if err != nil {
            return err
        }
        ramCosts, err := getCost(ramCostRes)
        if err != nil {
            return err
        }
        gpuCosts, err := getCost(gpuCostRes)
        if err != nil {
            return err
        }

        if len(cpuCosts) == 0 {
            log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
        }

        for node, costv := range cpuCosts {
            if _, ok := missingNodes[node]; ok {
                missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
            } else {
                log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
            }
        }
        for node, costv := range ramCosts {
            if _, ok := missingNodes[node]; ok {
                missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
            }
        }
        for node, costv := range gpuCosts {
            if _, ok := missingNodes[node]; ok {
                missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
            }
        }
    }

    return nil
}

// getContainerAllocation takes the max between request and usage. This function
// returns a slice containing a single element describing the container's
// allocation.
//
// Additionally, the timestamp of the allocation will be the higher of the two
// vectors' timestamps. This mitigates situations where Timestamp=0, and should
// have no effect on the metrics emitted by the CostModelMetricsEmitter.
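//
// For example (illustrative values): given req={Value: 2, Timestamp: 100} and
// used={Value: 3, Timestamp: 90}, the result is [{Value: 3, Timestamp: 100}];
// if only one vector is non-nil, its value and timestamp are used as-is.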
func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
    var result []*util.Vector

    if req != nil && used != nil {
        x1 := req.Value
        if math.IsNaN(x1) {
            log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
            x1 = 0.0
        }
        y1 := used.Value
        if math.IsNaN(y1) {
            log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
            y1 = 0.0
        }
        result = []*util.Vector{
            {
                Value:     math.Max(x1, y1),
                Timestamp: math.Max(req.Timestamp, used.Timestamp),
            },
        }
        if result[0].Value == 0 && result[0].Timestamp == 0 {
            log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
        }
    } else if req != nil {
        result = []*util.Vector{
            {
                Value:     req.Value,
                Timestamp: req.Timestamp,
            },
        }
    } else if used != nil {
        result = []*util.Vector{
            {
                Value:     used.Value,
                Timestamp: used.Timestamp,
            },
        }
    } else {
        log.Warnf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
        result = []*util.Vector{
            {
                Value:     0,
                Timestamp: float64(time.Now().UTC().Unix()),
            },
        }
    }

    return result
}

func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
    cfg, err := cloud.GetConfig()
    if err != nil {
        return err
    }

    // Pull a region from the first node
    var defaultRegion string
    nodeList := cache.GetAllNodes()
    if len(nodeList) > 0 {
        defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
    }

    storageClasses := cache.GetAllStorageClasses()
    storageClassMap := make(map[string]map[string]string)
    for _, storageClass := range storageClasses {
        params := storageClass.Parameters
        storageClassMap[storageClass.ObjectMeta.Name] = params
        if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
            storageClassMap["default"] = params
            storageClassMap[""] = params
        }
    }

    pvs := cache.GetAllPersistentVolumes()
    pvMap := make(map[string]*costAnalyzerCloud.PV)
    for _, pv := range pvs {
        parameters, ok := storageClassMap[pv.Spec.StorageClassName]
        if !ok {
            log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
        }

        var region string
        if r, ok := util.GetRegion(pv.Labels); ok {
            region = r
        } else {
            region = defaultRegion
        }

        cacPv := &costAnalyzerCloud.PV{
            Class:      pv.Spec.StorageClassName,
            Region:     region,
            Parameters: parameters,
        }
        err := GetPVCost(cacPv, pv, cloud, region)
        if err != nil {
            return err
        }
        pvMap[pv.Name] = cacPv
    }

    for _, pvc := range pvClaimMapping {
        if vol, ok := pvMap[pvc.VolumeName]; ok {
            pvc.Volume = vol
        } else {
            log.Debugf("PV not found, using default")
            pvc.Volume = &costAnalyzerCloud.PV{
                Cost: cfg.Storage,
            }
        }
    }

    return nil
}
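
// GetPVCost populates pv's provider ID and cost from the provider's PV
// pricing, falling back to the configured default storage cost when no
// pricing is available.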
func GetPVCost(pv *costAnalyzerCloud.PV, kpv *v1.PersistentVolume, cp costAnalyzerCloud.Provider, defaultRegion string) error {
    cfg, err := cp.GetConfig()
    if err != nil {
        return err
    }

    key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
    pv.ProviderID = key.ID()
    pvWithCost, err := cp.PVPricing(key)
    if err != nil {
        pv.Cost = cfg.Storage
        return err
    }
    if pvWithCost == nil || pvWithCost.Cost == "" {
        pv.Cost = cfg.Storage
        return nil // set default cost
    }
    pv.Cost = pvWithCost.Cost
    return nil
}
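
// GetPricingSourceCounts returns metadata describing which pricing sources
// matched nodes during the most recent node cost calculation, if one has run.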
func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
    if cm.pricingMetadata != nil {
        return cm.pricingMetadata, nil
    }
    return nil, errors.New("node costs not yet calculated")
}
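
// GetNodeCost computes per-node pricing for the cluster, pulling provider
// pricing where available and falling back to the configured defaults,
// including ratio-based splits of a total node price into CPU, RAM, and GPU
// components when provider data is missing or incomplete.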
func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.Node, error) {
    cfg, err := cp.GetConfig()
    if err != nil {
        return nil, err
    }

    nodeList := cm.Cache.GetAllNodes()
    nodes := make(map[string]*costAnalyzerCloud.Node)

    vgpuCount, err := getAllocatableVGPUs(cm.Cache)
    if err != nil {
        return nil, err
    }
    vgpuCoeff := 10.0
    if vgpuCount > 0.0 {
        vgpuCoeff = vgpuCount
    }

    pmd := &costAnalyzerCloud.PricingMatchMetadata{
        TotalNodes:        0,
        PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
    }

    for _, n := range nodeList {
        name := n.GetObjectMeta().GetName()
        nodeLabels := n.GetObjectMeta().GetLabels()
        nodeLabels["providerID"] = n.Spec.ProviderID

        pmd.TotalNodes++

        cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
        if err != nil {
            log.Infof("Error getting node pricing. Error: %s", err.Error())
            if cnode != nil {
                nodes[name] = cnode
                continue
            } else {
                cnode = &costAnalyzerCloud.Node{
                    VCPUCost: cfg.CPU,
                    RAMCost:  cfg.RAM,
                }
            }
        }

        if _, ok := pmd.PricingTypeCounts[cnode.PricingType]; ok {
            pmd.PricingTypeCounts[cnode.PricingType]++
        } else {
            pmd.PricingTypeCounts[cnode.PricingType] = 1
        }

        newCnode := *cnode
        if newCnode.InstanceType == "" {
            it, _ := util.GetInstanceType(n.Labels)
            newCnode.InstanceType = it
        }
        if newCnode.Region == "" {
            region, _ := util.GetRegion(n.Labels)
            newCnode.Region = region
        }
        if newCnode.ArchType == "" {
            arch, _ := util.GetArchType(n.Labels)
            newCnode.ArchType = arch
        }
        newCnode.ProviderID = n.Spec.ProviderID

        var cpu float64
        if newCnode.VCPU == "" {
            cpu = float64(n.Status.Capacity.Cpu().Value())
            newCnode.VCPU = n.Status.Capacity.Cpu().String()
        } else {
            cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
            if err != nil {
                log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
            }
        }
        if math.IsNaN(cpu) {
            log.Warnf("cpu parsed as NaN. Setting to 0.")
            cpu = 0
        }

        var ram float64
        if newCnode.RAM == "" {
            newCnode.RAM = n.Status.Capacity.Memory().String()
        }
        ram = float64(n.Status.Capacity.Memory().Value())
        if math.IsNaN(ram) {
            log.Warnf("ram parsed as NaN. Setting to 0.")
            ram = 0
        }
        newCnode.RAMBytes = fmt.Sprintf("%f", ram)

        // Azure does not seem to provide a GPU count in its pricing API, and GKE supports
        // attaching multiple GPUs, so the k8s API will often report a more accurate GPU
        // count under status > capacity > nvidia.com/gpu than the cloud provider's billing
        // data. Not all providers are guaranteed to populate this, so don't overwrite a
        // Provider assignment if we can't find anything under that capacity key.
        gpuc := 0.0
        q, ok := n.Status.Capacity["nvidia.com/gpu"]
        if ok {
            gpuCount := q.Value()
            if gpuCount != 0 {
                newCnode.GPU = fmt.Sprintf("%d", gpuCount)
                gpuc = float64(gpuCount)
            }
        } else if g, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
            gpuCount := g.Value()
            if gpuCount != 0 {
                newCnode.GPU = fmt.Sprintf("%d", int(float64(gpuCount)/vgpuCoeff))
                gpuc = float64(gpuCount) / vgpuCoeff
            }
        } else {
            gpuc, err = strconv.ParseFloat(newCnode.GPU, 64)
            if err != nil {
                gpuc = 0.0
            }
        }
        if math.IsNaN(gpuc) {
            log.Warnf("gpu count parsed as NaN. Setting to 0.")
            gpuc = 0.0
        }

        // Special case for SUSE Rancher, which won't behave with the normal calculations
        // because its instance type is not a "real" (recognizable) AWS instance type.
        if newCnode.InstanceType == "rke2" {
            log.Infof(
                "Found a SUSE Rancher node %s, defaulting and skipping math",
                cp.GetKey(nodeLabels, n).Features(),
            )

            defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
            if err != nil {
                log.Errorf("Could not parse default cpu price")
                defaultCPUCorePrice = 0
            }
            if math.IsNaN(defaultCPUCorePrice) {
                log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
                defaultCPUCorePrice = 0
            }

            defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
            if err != nil {
                log.Errorf("Could not parse default ram price")
                defaultRAMPrice = 0
            }
            if math.IsNaN(defaultRAMPrice) {
                log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
                defaultRAMPrice = 0
            }

            defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
            if err != nil {
                log.Errorf("Could not parse default gpu price")
                defaultGPUPrice = 0
            }
            if math.IsNaN(defaultGPUPrice) {
                log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
                defaultGPUPrice = 0
            }

            // Just say no to doing the ratios!
            cpuCost := defaultCPUCorePrice * cpu
            gpuCost := defaultGPUPrice * gpuc
            ramCost := defaultRAMPrice * ram

            nodeCost := cpuCost + gpuCost + ramCost
            newCnode.Cost = fmt.Sprintf("%f", nodeCost)
            newCnode.VCPUCost = fmt.Sprintf("%f", cpuCost)
            newCnode.GPUCost = fmt.Sprintf("%f", gpuCost)
            newCnode.RAMCost = fmt.Sprintf("%f", ramCost)
            newCnode.RAMBytes = fmt.Sprintf("%f", ram)
        } else if newCnode.GPU != "" && newCnode.GPUCost == "" {
            // The ratio math below is what was interfering with SUSE Rancher's default
            // pricing, hence the special case above. Here we couldn't find a GPU cost,
            // so fix the CPU and RAM prices and allocate the remainder to GPU accordingly.
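            //
            // A worked example with assumed numbers: defaultCPU=0.03, defaultRAM=0.004,
            // defaultGPU=0.70 gives cpuToRAMRatio=7.5 and gpuToRAMRatio=175. For a node
            // with 4 CPUs, 16 GiB RAM, and 1 GPU, ramMultiple = 1*175 + 4*7.5 + 16 = 221
            // "RAM-equivalent" units; a $1.105/hr node price then yields ramPrice=0.005,
            // cpuPrice=0.0375, and gpuPrice=0.875, which sum back to the node price.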
  1048. log.Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1049. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1050. if err != nil {
  1051. log.Errorf("Could not parse default cpu price")
  1052. defaultCPU = 0
  1053. }
  1054. if math.IsNaN(defaultCPU) {
  1055. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1056. defaultCPU = 0
  1057. }
  1058. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1059. if err != nil {
  1060. log.Errorf("Could not parse default ram price")
  1061. defaultRAM = 0
  1062. }
  1063. if math.IsNaN(defaultRAM) {
  1064. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1065. defaultRAM = 0
  1066. }
  1067. defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
  1068. if err != nil {
  1069. log.Errorf("Could not parse default gpu price")
  1070. defaultGPU = 0
  1071. }
  1072. if math.IsNaN(defaultGPU) {
  1073. log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
  1074. defaultGPU = 0
  1075. }
  1076. cpuToRAMRatio := defaultCPU / defaultRAM
  1077. if math.IsNaN(cpuToRAMRatio) {
  1078. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
  1079. cpuToRAMRatio = 10
  1080. }
  1081. gpuToRAMRatio := defaultGPU / defaultRAM
  1082. if math.IsNaN(gpuToRAMRatio) {
  1083. log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
  1084. gpuToRAMRatio = 100
  1085. }
  1086. ramGB := ram / 1024 / 1024 / 1024
  1087. if math.IsNaN(ramGB) {
  1088. log.Warnf("ramGB is NaN. Setting to 0.")
  1089. ramGB = 0
  1090. }
  1091. ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
  1092. if math.IsNaN(ramMultiple) {
  1093. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1094. ramMultiple = 0
  1095. }
  1096. var nodePrice float64
  1097. if newCnode.Cost != "" {
  1098. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1099. if err != nil {
  1100. log.Errorf("Could not parse total node price")
  1101. return nil, err
  1102. }
  1103. } else if newCnode.VCPUCost != "" {
  1104. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1105. if err != nil {
  1106. log.Errorf("Could not parse node vcpu price")
  1107. return nil, err
  1108. }
  1109. } else { // add case to use default pricing model when API data fails.
  1110. log.Debugf("No node price or CPUprice found, falling back to default")
  1111. nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
  1112. }
  1113. if math.IsNaN(nodePrice) {
  1114. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1115. nodePrice = 0
  1116. }
  1117. ramPrice := (nodePrice / ramMultiple)
  1118. if math.IsNaN(ramPrice) {
  1119. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1120. ramPrice = 0
  1121. }
  1122. cpuPrice := ramPrice * cpuToRAMRatio
  1123. gpuPrice := ramPrice * gpuToRAMRatio
  1124. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1125. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1126. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1127. newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
		} else if newCnode.RAMCost == "" {
			// We couldn't find a RAM cost, so fix the CPU price and allocate RAM accordingly
			log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					// Remove the GPU price from the total; we're only costing out RAM and CPU here.
					nodePrice = nodePrice - gpuPrice
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // fall back to the default pricing model when API data is missing
				log.Debugf("No node price or CPU price found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := nodePrice / ramMultiple
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}

// TODO: drop some logs
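// GetLBCost returns load-balancer pricing for every Service of type LoadBalancer in the
// cluster cache, keyed by cluster, namespace, and service name.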
func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
	// for fetching prices from cloud provider
	// cfg, err := cp.GetConfig()
	// if err != nil {
	// 	return nil, err
	// }
	servicesList := cm.Cache.GetAllServices()
	loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
	for _, service := range servicesList {
		namespace := service.GetObjectMeta().GetNamespace()
		name := service.GetObjectMeta().GetName()
		key := serviceKey{
			Cluster:   env.GetClusterID(),
			Namespace: namespace,
			Service:   name,
		}
		if service.Spec.Type == "LoadBalancer" {
			loadBalancer, err := cp.LoadBalancerPricing()
			if err != nil {
				return nil, err
			}
			newLoadBalancer := *loadBalancer
			for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
				address := loadBalancerIngress.IP
				// Some cloud providers use hostname rather than IP
				if address == "" {
					address = loadBalancerIngress.Hostname
				}
				newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
			}
			loadBalancerMap[key] = &newLoadBalancer
		}
	}
	return loadBalancerMap, nil
}

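// getPodServices maps "namespace,clusterID" -> pod name -> the Services whose label
// selectors match that pod.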
func getPodServices(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	servicesList := cache.GetAllServices()
	podServicesMapping := make(map[string]map[string][]string)
	for _, service := range servicesList {
		namespace := service.GetObjectMeta().GetNamespace()
		name := service.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podServicesMapping[key]; !ok {
			podServicesMapping[key] = make(map[string][]string)
		}
		s := labels.Nothing()
		if len(service.Spec.Selector) > 0 {
			s = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				services, ok := podServicesMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podServicesMapping[key][pod.GetObjectMeta().GetName()] = append(services, name)
				} else {
					podServicesMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podServicesMapping, nil
}

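// getPodStatefulsets maps "namespace,clusterID" -> pod name -> the StatefulSets whose
// label selectors match that pod.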
func getPodStatefulsets(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	ssList := cache.GetAllStatefulSets()
	podSSMapping := make(map[string]map[string][]string) // namespace: podName: [statefulsetNames]
	for _, ss := range ssList {
		namespace := ss.GetObjectMeta().GetNamespace()
		name := ss.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podSSMapping[key]; !ok {
			podSSMapping[key] = make(map[string][]string)
		}
		s, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
		if err != nil {
			log.Errorf("Error doing statefulset label conversion: " + err.Error())
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				sss, ok := podSSMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podSSMapping[key][pod.GetObjectMeta().GetName()] = append(sss, name)
				} else {
					podSSMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podSSMapping, nil
}

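// getPodDeployments maps "namespace,clusterID" -> pod name -> the Deployments whose
// label selectors match that pod.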
func getPodDeployments(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
	deploymentsList := cache.GetAllDeployments()
	podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
	for _, deployment := range deploymentsList {
		namespace := deployment.GetObjectMeta().GetNamespace()
		name := deployment.GetObjectMeta().GetName()
		key := namespace + "," + clusterID
		if _, ok := podDeploymentsMapping[key]; !ok {
			podDeploymentsMapping[key] = make(map[string][]string)
		}
		s, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
		if err != nil {
			log.Errorf("Error doing deployment label conversion: " + err.Error())
		}
		for _, pod := range podList {
			labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
			if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
				deployments, ok := podDeploymentsMapping[key][pod.GetObjectMeta().GetName()]
				if ok {
					podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = append(deployments, name)
				} else {
					podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
				}
			}
		}
	}
	return podDeploymentsMapping, nil
}

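// getPodDeploymentsWithMetrics builds the same "namespace,clusterID" -> pod -> deployments
// mapping as getPodDeployments, but from metric-derived label maps keyed by KeyTuple strings.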
func getPodDeploymentsWithMetrics(deploymentLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
	podDeploymentsMapping := make(map[string]map[string][]string)
	for depKey, depLabels := range deploymentLabels {
		kt, err := NewKeyTuple(depKey)
		if err != nil {
			continue
		}
		namespace := kt.Namespace()
		name := kt.Key()
		clusterID := kt.ClusterID()
		key := namespace + "," + clusterID
		if _, ok := podDeploymentsMapping[key]; !ok {
			podDeploymentsMapping[key] = make(map[string][]string)
		}
		s := labels.Set(depLabels).AsSelectorPreValidated()
		for podKey, pLabels := range podLabels {
			pkey, err := NewKeyTuple(podKey)
			if err != nil {
				continue
			}
			podNamespace := pkey.Namespace()
			podName := pkey.Key()
			podClusterID := pkey.ClusterID()
			labelSet := labels.Set(pLabels)
			if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
				deployments, ok := podDeploymentsMapping[key][podName]
				if ok {
					podDeploymentsMapping[key][podName] = append(deployments, name)
				} else {
					podDeploymentsMapping[key][podName] = []string{name}
				}
			}
		}
	}
	// Remove any duplicate data created by metric names
	pruneDuplicateData(podDeploymentsMapping)
	return podDeploymentsMapping, nil
}

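// getPodServicesWithMetrics builds the same "namespace,clusterID" -> pod -> services
// mapping as getPodServices, but from metric-derived label maps keyed by KeyTuple strings.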
func getPodServicesWithMetrics(serviceLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
	podServicesMapping := make(map[string]map[string][]string)
	for servKey, servLabels := range serviceLabels {
		kt, err := NewKeyTuple(servKey)
		if err != nil {
			continue
		}
		namespace := kt.Namespace()
		name := kt.Key()
		clusterID := kt.ClusterID()
		key := namespace + "," + clusterID
		if _, ok := podServicesMapping[key]; !ok {
			podServicesMapping[key] = make(map[string][]string)
		}
		s := labels.Nothing()
		if len(servLabels) > 0 {
			s = labels.Set(servLabels).AsSelectorPreValidated()
		}
		for podKey, pLabels := range podLabels {
			pkey, err := NewKeyTuple(podKey)
			if err != nil {
				continue
			}
			podNamespace := pkey.Namespace()
			podName := pkey.Key()
			podClusterID := pkey.ClusterID()
			labelSet := labels.Set(pLabels)
			if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
				services, ok := podServicesMapping[key][podName]
				if ok {
					podServicesMapping[key][podName] = append(services, name)
				} else {
					podServicesMapping[key][podName] = []string{name}
				}
			}
		}
	}
	// Remove any duplicate data created by metric names
	pruneDuplicateData(podServicesMapping)
	return podServicesMapping, nil
}

// This method alleviates an issue with metrics that used a '_' to replace '-' in deployment
// and service names. To avoid counting these as multiple deployments/services, we remove
// the '_' version. Not optimal, but it takes care of the issue.
func pruneDuplicateData(data map[string]map[string][]string) {
	for _, podMap := range data {
		for podName, values := range podMap {
			podMap[podName] = pruneDuplicates(values)
		}
	}
}

// If a value in the slice contains an underscore, replace '_' with '-' and keep only the
// '-' version, dropping the original. This prevents metric-mangled names ('_' substituted
// for '-') from being counted twice.
func pruneDuplicates(s []string) []string {
	m := sliceToSet(s)
	for _, v := range s {
		if strings.Contains(v, "_") {
			name := strings.Replace(v, "_", "-", -1)
			m[name] = true
			delete(m, v)
		}
	}
	return setToSlice(m)
}

// Creates a map[string]bool containing the slice values as keys
func sliceToSet(s []string) map[string]bool {
	m := make(map[string]bool)
	for _, v := range s {
		m[v] = true
	}
	return m
}

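// setToSlice returns the keys of m as a slice; ordering is not deterministic.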
func setToSlice(m map[string]bool) []string {
	var result []string
	for k := range m {
		result = append(result, k)
	}
	return result
}

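// costDataPassesFilters reports whether costs matches the given namespace and cluster
// filters; an empty filter matches everything, and cluster may match by ID or by name.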
func costDataPassesFilters(cm clusters.ClusterMap, costs *CostData, namespace string, cluster string) bool {
	passesNamespace := namespace == "" || costs.Namespace == namespace
	passesCluster := cluster == "" || costs.ClusterID == cluster || costs.ClusterName == cluster
	return passesNamespace && passesCluster
}

// Finds the closest multiple of `multiple` less than or equal to value
func floorMultiple(value int64, multiple int64) int64 {
	return (value / multiple) * multiple
}

// Create a key for the request. Times are reduced to minutes and snapped to 5-minute
// multiples so that requests over effectively identical time ranges group together.
func requestKeyFor(window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
	keyLayout := "2006-01-02T15:04Z"
	// We "snap" the start time and duration to the closest 5-minute multiple at or below
	// the actual value, then apply the snapped duration to the snapped start time.
	durMins := int64(window.Minutes())
	durMins = floorMultiple(durMins, 5)
	sMins := int64(window.Start().Minute())
	sOffset := sMins - floorMultiple(sMins, 5)
	sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
	eTime := window.Start().Add(time.Duration(durMins) * time.Minute)
	startKey := sTime.Format(keyLayout)
	endKey := eTime.Format(keyLayout)
	return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
}

// ComputeCostDataRange executes a range query for cost data.
func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	// Create a request key for request grouping. This key will be used to represent the cost-model result
	// for the specific inputs to prevent multiple queries for identical data.
	key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
	log.Debugf("ComputeCostDataRange with Key: %s", key)
	// If there is already a request in flight that uses the same data, wait for it to return
	// and share the results. Otherwise, start executing.
	result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
		return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
	})
	data, ok := result.(map[string]*CostData)
	if !ok {
		return nil, fmt.Errorf("failed to cast result as map[string]*CostData")
	}
	return data, err
}

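// costDataRange is the uncached implementation behind ComputeCostDataRange: it issues the
// Prometheus range queries concurrently, joins the results with cluster-cache data, and
// assembles a CostData entry per container.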
func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	clusterID := env.GetClusterID()
	// durHrs := end.Sub(start).Hours() + 1
	if window.IsOpen() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	start := *window.Start()
	end := *window.End()

	// Snap resolution to the nearest minute
	resMins := int64(math.Trunc(resolution.Minutes()))
	if resMins == 0 {
		return nil, fmt.Errorf("resolution must be greater than 0.0")
	}
	resolution = time.Duration(resMins) * time.Minute

	// Warn if resolution does not evenly divide window
	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
	}

	// Convert to a Prometheus-style duration string in terms of m or h
	resStr := fmt.Sprintf("%dm", resMins)
	if resMins%60 == 0 {
		resStr = fmt.Sprintf("%dh", resMins/60)
	}

	if remoteEnabled {
		remoteLayout := "2006-01-02T15:04:05Z"
		remoteStartStr := window.Start().Format(remoteLayout)
		remoteEndStr := window.End().Format(remoteLayout)
		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
	}

	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)

	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", resolution.Hours(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
	queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
	// Submit all queries for concurrent evaluation
	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
	resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
	// Pull k8s pod, controller, service, and namespace details
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podStatefulsetsMapping, err := getPodStatefulsets(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}
	// Process query results. Handle errors afterwards using ctx.Errors.
	resRAMRequests, _ := resChRAMRequests.Await()
	resRAMUsage, _ := resChRAMUsage.Await()
	resRAMAlloc, _ := resChRAMAlloc.Await()
	resCPURequests, _ := resChCPURequests.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resCPUAlloc, _ := resChCPUAlloc.Await()
	resGPURequests, _ := resChGPURequests.Await()
	resPVRequests, _ := resChPVRequests.Await()
	resPVCAlloc, _ := resChPVCAlloc.Await()
	resPVHourlyCost, _ := resChPVHourlyCost.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()
	resNSLabels, _ := resChNSLabels.Await()
	resPodLabels, _ := resChPodLabels.Await()
	resNSAnnotations, _ := resChNSAnnotations.Await()
	resPodAnnotations, _ := resChPodAnnotations.Await()
	resServiceLabels, _ := resChServiceLabels.Await()
	resDeploymentLabels, _ := resChDeploymentLabels.Await()
	resStatefulsetLabels, _ := resChStatefulsetLabels.Await()
	resDaemonsets, _ := resChDaemonsets.Await()
	resJobs, _ := resChJobs.Await()
	resNormalization, _ := resChNormalization.Await()

	// NOTE: The way we currently handle errors and warnings only early-returns if there
	// NOTE: is an error; warnings will not propagate unless coupled with errors.
	if ctx.HasErrors() {
		// To keep the context of where the errors occurred, we log them here and pass
		// the error back to the caller. The caller should handle the specific case
		// where the error is an ErrorCollection.
		for _, promErr := range ctx.Errors() {
			if promErr.Error != nil {
				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
			}
			if promErr.ParseError != nil {
				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
			}
		}
		// ErrorCollection is a collection of errors wrapped in a single error implementation
		return nil, ctx.ErrorCollection()
	}
	normalizationValue, err := getNormalizations(resNormalization)
	if err != nil {
		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
		return nil, prom.WrapError(err, msg)
	}

	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
	if err != nil {
		// Just log for compatibility with KSM less than 1.6
		log.Infof("Unable to get PV Data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = addPVData(cm.Cache, pvClaimMapping, cp)
		if err != nil {
			return nil, fmt.Errorf("pvClaimMapping: %s", err)
		}
	}

	pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
	}

	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
	}
	if pvAllocationMapping != nil {
		addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
		for k, v := range pvAllocationMapping {
			unmountedPVs[k] = v
		}
	}

	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
	}
	if nsLabels != nil {
		mergeStringMap(namespaceLabelsMapping, nsLabels)
	}

	podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
	}

	nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
	}
	if nsAnnotations != nil {
		mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
	}

	podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
	}

	serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
	}

	deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}

	statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Statefulset Match Labels for Metrics: %s", err.Error())
	}

	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to match Statefulset Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)

	podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to match Deployment Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)

	podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
	}

	podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
	}

	podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to match Service Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podServicesMapping, podServicesMetricsMapping)

	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Errorf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}

	containerNameCost := make(map[string]*CostData)
	containers := make(map[string]bool)
	otherClusterPVRecorded := make(map[string]bool)
	RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
	}
	for key := range RAMReqMap {
		containers[key] = true
	}

	RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}

	CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
	}
	for key := range CPUReqMap {
		containers[key] = true
	}

	// No need to normalize here, as this comes from a counter, namely:
	// rate(container_cpu_usage_seconds_total), which properly accounts for normalized rates
	CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}

	RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
	}
	for key := range RAMAllocMap {
		containers[key] = true
	}

	CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
	}
	for key := range CPUAllocMap {
		containers[key] = true
	}

	GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(GPURequests)")
	}
	for key := range GPUReqMap {
		containers[key] = true
	}

	// Request metrics can show up after pod eviction and completion.
	// This method synchronizes requests to allocations such that when
	// allocation is 0, so are requests.
	applyAllocationToRequests(RAMAllocMap, RAMReqMap)
	applyAllocationToRequests(CPUAllocMap, CPUReqMap)
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			// Ordering is important for the allocation model (all PVs are applied to the
			// first container), so just dedupe if the key has already been added.
			continue
		}
		c, _ := NewContainerMetricFromKey(key)
		RAMReqV, ok := RAMReqMap[key]
		if !ok {
			log.Debug("no RAM requests for " + key)
			RAMReqV = []*util.Vector{}
		}
		RAMUsedV, ok := RAMUsedMap[key]
		if !ok {
			log.Debug("no RAM usage for " + key)
			RAMUsedV = []*util.Vector{}
		}
		CPUReqV, ok := CPUReqMap[key]
		if !ok {
			log.Debug("no CPU requests for " + key)
			CPUReqV = []*util.Vector{}
		}
		CPUUsedV, ok := CPUUsedMap[key]
		if !ok {
			log.Debug("no CPU usage for " + key)
			CPUUsedV = []*util.Vector{}
		}
		RAMAllocsV, ok := RAMAllocMap[key]
		if !ok {
			log.Debug("no RAM allocation for " + key)
			RAMAllocsV = []*util.Vector{}
		}
		CPUAllocsV, ok := CPUAllocMap[key]
		if !ok {
			log.Debug("no CPU allocation for " + key)
			CPUAllocsV = []*util.Vector{}
		}
		GPUReqV, ok := GPUReqMap[key]
		if !ok {
			log.Debug("no GPU requests for " + key)
			GPUReqV = []*util.Vector{}
		}

		var node *costAnalyzerCloud.Node
		if n, ok := missingNodes[c.NodeName]; ok {
			node = n
		} else {
			node = &costAnalyzerCloud.Node{}
			missingNodes[c.NodeName] = node
		}

		nsKey := c.Namespace + "," + c.ClusterID
		podKey := c.Namespace + "," + c.PodName + "," + c.ClusterID

		namespaceLabels := namespaceLabelsMapping[nsKey]
		pLabels := podLabels[podKey]
		if pLabels == nil {
			pLabels = make(map[string]string)
		}
		for k, v := range namespaceLabels {
			if _, ok := pLabels[k]; !ok {
				pLabels[k] = v
			}
		}

		namespaceAnnotations := namespaceAnnotationsMapping[nsKey]
		pAnnotations := podAnnotations[podKey]
		if pAnnotations == nil {
			pAnnotations = make(map[string]string)
		}
		for k, v := range namespaceAnnotations {
			if _, ok := pAnnotations[k]; !ok {
				pAnnotations[k] = v
			}
		}

		var podDeployments []string
		if _, ok := podDeploymentsMapping[nsKey]; ok {
			if ds, ok := podDeploymentsMapping[nsKey][c.PodName]; ok {
				podDeployments = ds
			} else {
				podDeployments = []string{}
			}
		}

		var podStatefulSets []string
		if _, ok := podStatefulsetsMapping[nsKey]; ok {
			if ss, ok := podStatefulsetsMapping[nsKey][c.PodName]; ok {
				podStatefulSets = ss
			} else {
				podStatefulSets = []string{}
			}
		}

		var podServices []string
		if _, ok := podServicesMapping[nsKey]; ok {
			if svcs, ok := podServicesMapping[nsKey][c.PodName]; ok {
				podServices = svcs
			} else {
				podServices = []string{}
			}
		}

		var podPVs []*PersistentVolumeClaimData
		var podNetCosts []*util.Vector

		// For PVC data, we need to find the claim mapping and cost data, appending the
		// cost data that was populated by cluster data previously. We do this with
		// the pod_pvc_allocation metric.
		podPVData, ok := pvAllocationMapping[podKey]
		if !ok {
			log.Debugf("Failed to locate pv allocation mapping for missing pod.")
		}

		// Delete the current pod key from potentially unmounted pvs
		delete(unmountedPVs, podKey)

		// For network costs, we'll use the existing map since it should still contain the
		// correct data.
		var podNetworkCosts []*util.Vector
		if usage, ok := networkUsageMap[podKey]; ok {
			netCosts, err := GetNetworkCost(usage, cp)
			if err != nil {
				log.Errorf("Error pulling network costs: %s", err.Error())
			} else {
				podNetworkCosts = netCosts
			}
		}

		// Check to see if any other data has been recorded for this namespace, pod, clusterID.
		// Follow the pattern of only allowing claims data per pod.
		if !otherClusterPVRecorded[podKey] {
			otherClusterPVRecorded[podKey] = true
			podPVs = podPVData
			podNetCosts = podNetworkCosts
		}

		pds := []string{}
		if ds, ok := podDaemonsets[podKey]; ok {
			pds = []string{ds}
		}

		jobs := []string{}
		if job, ok := podJobs[podKey]; ok {
			jobs = []string{job}
		}

		costs := &CostData{
			Name:            c.ContainerName,
			PodName:         c.PodName,
			NodeName:        c.NodeName,
			NodeData:        node,
			Namespace:       c.Namespace,
			Services:        podServices,
			Deployments:     podDeployments,
			Daemonsets:      pds,
			Statefulsets:    podStatefulSets,
			Jobs:            jobs,
			RAMReq:          RAMReqV,
			RAMUsed:         RAMUsedV,
			CPUReq:          CPUReqV,
			CPUUsed:         CPUUsedV,
			RAMAllocation:   RAMAllocsV,
			CPUAllocation:   CPUAllocsV,
			GPUReq:          GPUReqV,
			Annotations:     pAnnotations,
			Labels:          pLabels,
			NamespaceLabels: namespaceLabels,
			PVCData:         podPVs,
			NetworkData:     podNetCosts,
			ClusterID:       c.ClusterID,
			ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
		}
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[k] = costs
		}
	}

	if window.Minutes() > 0 {
		dur, off := window.DurationOffsetStrings()
		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
		if err != nil {
			log.Errorf("Error fetching historical node data: %s", err.Error())
		}
	}

	return containerNameCost, nil
}

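// applyAllocationToRequests synchronizes request vectors with allocation vectors: request
// keys with no allocation entry are dropped, and request values are zeroed wherever the
// corresponding allocation value is 0.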
func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestMap map[string][]*util.Vector) {
	// The result of the normalize operation will be a new []*util.Vector to replace the requests
	normalizeOp := func(r *util.Vector, x *float64, y *float64) bool {
		// Omit data (return false) if either input doesn't exist
		if x == nil || y == nil {
			return false
		}
		// If the allocation value is 0, zero out the request value
		if *x == 0 {
			r.Value = 0
		} else {
			r.Value = *y
		}
		return true
	}
	// Run normalization on all request vectors in the mapping
	for k, requests := range requestMap {
		// Only run normalization where there are valid allocations
		allocations, ok := allocationMap[k]
		if !ok {
			delete(requestMap, k)
			continue
		}
		// Replace request map with normalized
		requestMap[k] = util.ApplyVectorOp(allocations, requests, normalizeOp)
	}
}

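// addMetricPVData attaches volume cost data to each PVC allocation, falling back to the
// provider's configured storage price when no cost metric exists for the volume.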
func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
	cfg, err := cp.GetConfig()
	if err != nil {
		log.Errorf("Failed to get provider config while adding pv metrics data.")
		return
	}
	for _, pvcDataArray := range pvAllocationMap {
		for _, pvcData := range pvcDataArray {
			costKey := fmt.Sprintf("%s,%s", pvcData.VolumeName, pvcData.ClusterID)
			pvCost, ok := pvCostMap[costKey]
			if !ok {
				pvcData.Volume = &costAnalyzerCloud.PV{
					Cost: cfg.Storage,
				}
				continue
			}
			pvcData.Volume = pvCost
		}
	}
}

// Add values that don't already exist in origMap from mergeMap into origMap
func mergeStringMap(origMap map[string]map[string]string, mergeMap map[string]map[string]string) {
	for k, v := range mergeMap {
		if _, ok := origMap[k]; !ok {
			origMap[k] = v
		}
	}
}

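// appendLabelsList copies every entry from labels into mainLabels, overwriting any
// existing entry for the same key.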
func appendLabelsList(mainLabels map[string]map[string][]string, labels map[string]map[string][]string) {
	for k, v := range labels {
		mainLabels[k] = v
	}
}

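// getNamespaceLabels returns each namespace's labels, with Prometheus-sanitized label
// names, keyed by "namespace,clusterID".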
func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
	nsToLabels := make(map[string]map[string]string)
	nss := cache.GetAllNamespaces()
	for _, ns := range nss {
		labels := make(map[string]string)
		for k, v := range ns.Labels {
			labels[prom.SanitizeLabelName(k)] = v
		}
		nsToLabels[ns.Name+","+clusterID] = labels
	}
	return nsToLabels, nil
}

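// getNamespaceAnnotations returns each namespace's annotations, with Prometheus-sanitized
// names, keyed by "namespace,clusterID".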
func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
	nsToAnnotations := make(map[string]map[string]string)
	nss := cache.GetAllNamespaces()
	for _, ns := range nss {
		annotations := make(map[string]string)
		for k, v := range ns.Annotations {
			annotations[prom.SanitizeLabelName(k)] = v
		}
		nsToAnnotations[ns.Name+","+clusterID] = annotations
	}
	return nsToAnnotations, nil
}

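// getDaemonsetsOfPod returns the name of the DaemonSet that owns pod, if any.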
func getDaemonsetsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "DaemonSet" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}

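// getJobsOfPod returns the name of the Job that owns pod, if any.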
func getJobsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "Job" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}

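// getStatefulSetsOfPod returns the name of the StatefulSet that owns pod, if any.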
func getStatefulSetsOfPod(pod v1.Pod) []string {
	for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
		if ownerReference.Kind == "StatefulSet" {
			return []string{ownerReference.Name}
		}
	}
	return []string{}
}

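// getAllocatableVGPUs scans daemonset container args for a "--vgpu=" flag and returns the
// first value it can parse.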
func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
	daemonsets := cache.GetAllDaemonSets()
	vgpuCount := 0.0
	for _, ds := range daemonsets {
		dsContainerList := &ds.Spec.Template.Spec.Containers
		for _, ctnr := range *dsContainerList {
			if ctnr.Args != nil {
				for _, arg := range ctnr.Args {
					if strings.Contains(arg, "--vgpu=") {
						vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
						if err != nil {
							log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
							continue
						}
						vgpuCount = vgpus
						return vgpuCount, nil
					}
				}
			}
		}
	}
	return vgpuCount, nil
}

type PersistentVolumeClaimData struct {
	Class        string                `json:"class"`
	Claim        string                `json:"claim"`
	Namespace    string                `json:"namespace"`
	ClusterID    string                `json:"clusterId"`
	TimesClaimed int                   `json:"timesClaimed"`
	VolumeName   string                `json:"volumeName"`
	Volume       *costAnalyzerCloud.PV `json:"persistentVolume"`
	Values       []*util.Vector        `json:"values"`
}

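// measureTime logs the elapsed time since start when it exceeds threshold.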
func measureTime(start time.Time, threshold time.Duration, name string) {
	elapsed := time.Since(start)
	if elapsed > threshold {
		log.Infof("[Profiler] %s: %s", elapsed, name)
	}
}

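// measureTimeAsync sends a profiling message on ch when the elapsed time since start
// exceeds threshold.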
func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch chan string) {
	elapsed := time.Since(start)
	if elapsed > threshold {
		ch <- fmt.Sprintf("%s took %s", name, elapsed)
	}
}

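// QueryAllocation computes an AllocationSetRange for the given window in increments of the
// given step duration, optionally inserting idle allocations, then aggregates, accumulates,
// and fills in proportional asset resource costs (PARCs) as requested.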
func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy kubecost.AccumulateOption) (*kubecost.AllocationSetRange, error) {
	// Validate window is legal
	if window.IsOpen() || window.IsNegative() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}

	var totalsStore kubecost.TotalsStore
	// Idle is required for proportional asset costs
	if includeProportionalAssetResourceCosts {
		if !includeIdle {
			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
		}
		totalsStore = kubecost.NewMemoryTotalsStore()
	}

	// Begin with empty response
	asr := kubecost.NewAllocationSetRange()

	// Query for AllocationSets in increments of the given step duration,
	// appending each to the response.
	stepStart := *window.Start()
	stepEnd := stepStart.Add(step)
	var isAKS bool
	for window.End().After(stepStart) {
		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
		if err != nil {
			return nil, fmt.Errorf("error computing allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
		}
		if includeIdle {
			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
			}
			if includeProportionalAssetResourceCosts {
				// AKS is a special case: there can be a maximum of 2 load balancers
				// (1 public and 1 private) in an AKS cluster; therefore, when
				// calculating PARCs for load balancers, we must know if this is
				// an AKS cluster.
				for _, node := range assetSet.Nodes {
					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
						isAKS = true
						break
					}
				}
				_, err := kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
				if err != nil {
					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
				}
			}
			idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
			if err != nil {
				return nil, fmt.Errorf("error computing idle allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
			}
			for _, idleAlloc := range idleSet.Allocations {
				allocSet.Insert(idleAlloc)
			}
		}
		asr.Append(allocSet)
		stepStart = stepEnd
		stepEnd = stepStart.Add(step)
	}
	// Set aggregation options and aggregate
	opts := &kubecost.AllocationAggregationOptions{
		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
		IdleByNode:                            idleByNode,
		IncludeAggregatedMetadata:             includeAggregatedMetadata,
	}
	err := asr.AggregateBy(aggregate, opts)
	if err != nil {
		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
	}

	// Accumulate, if requested
	if accumulateBy != kubecost.AccumulateOptionNone {
		asr, err = asr.Accumulate(accumulateBy)
		if err != nil {
			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
		}
		// When accumulating and returning PARCs, we need the totals for the
		// accumulated windows to accurately compute a fraction
		if includeProportionalAssetResourceCosts {
			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
			_, err = kubecost.UpdateAssetTotalsStore(totalsStore, assetSet)
			if err != nil {
				log.Errorf("ETL: error updating asset resource totals for %s: %s", kubecost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
		}
	}
	if includeProportionalAssetResourceCosts {
		for _, as := range asr.Allocations {
			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
			}
			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
			}

			var totalPublicLbCost, totalPrivateLbCost float64
			if isAKS && sharedLoadBalancer {
				// Loop through all asset totals, summing load balancer costs by public and private
				for _, tot := range totalStoreByNode {
					if tot.PrivateLoadBalancer {
						totalPrivateLbCost += tot.LoadBalancerCost
					} else {
						totalPublicLbCost += tot.LoadBalancerCost
					}
				}
			}

			// Loop through each allocation set, using the total cost from the totals store
			for _, alloc := range as.Allocations {
				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
					// For each PARC, check the totals store; on a hit, set the
					// corresponding totals and calculate percentages
					var totals *kubecost.AssetTotals
					if totalsLoc, found := totalStoreByCluster[key]; found {
						totals = totalsLoc
					}
					if totalsLoc, found := totalStoreByNode[key]; found {
						totals = totalsLoc
					}
					if totals == nil {
						log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
						continue
					}
					parc.CPUTotalCost = totals.CPUCost
					parc.GPUTotalCost = totals.GPUCost
					parc.RAMTotalCost = totals.RAMCost
					parc.PVTotalCost = totals.PersistentVolumeCost
					if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
						// Azure is a special case: use the totals computed above. The
						// lbAllocations on the object tell us whether this PARC is a
						// public or private load balancer, and the total is set
						// accordingly (AKS has at most one of each).
						lbAlloc, found := alloc.LoadBalancers[key]
						if found {
							if lbAlloc.Private {
								parc.LoadBalancerTotalCost = totalPrivateLbCost
							} else {
								parc.LoadBalancerTotalCost = totalPublicLbCost
							}
						}
					} else {
						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
					}
					kubecost.ComputePercentages(&parc)
					alloc.ProportionalAssetResourceCosts[rawKey] = parc
				}
			}
		}
	}
	return asr, nil
}

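// computeIdleAllocations derives one idle allocation per node (or per cluster), defined as
// the total asset cost minus the allocated cost for each resource over the shared window.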
func computeIdleAllocations(allocSet *kubecost.AllocationSet, assetSet *kubecost.AssetSet, idleByNode bool) (*kubecost.AllocationSet, error) {
	if !allocSet.Window.Equal(assetSet.Window) {
		return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
	}

	var allocTotals map[string]*kubecost.AllocationTotals
	var assetTotals map[string]*kubecost.AssetTotals
	if idleByNode {
		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationNodeProp)
		assetTotals = kubecost.ComputeAssetTotals(assetSet, true)
	} else {
		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationClusterProp)
		assetTotals = kubecost.ComputeAssetTotals(assetSet, false)
	}

	start, end := *allocSet.Window.Start(), *allocSet.Window.End()
	idleSet := kubecost.NewAllocationSet(start, end)
	for key, assetTotal := range assetTotals {
		allocTotal, ok := allocTotals[key]
		if !ok {
			log.Warnf("ETL: did not find allocations for asset key: %s", key)
			// Use a zero-value set of totals. This indicates either (1) an
			// error computing totals, or (2) that no allocations ran on the
			// given node for the given window.
			allocTotal = &kubecost.AllocationTotals{
				Cluster: assetTotal.Cluster,
				Node:    assetTotal.Node,
				Start:   assetTotal.Start,
				End:     assetTotal.End,
			}
		}
		// Insert one idle allocation for each key (whether by node or
		// by cluster), defined as the difference between the total
		// asset cost and the allocated cost per-resource.
		name := fmt.Sprintf("%s/%s", key, kubecost.IdleSuffix)
		err := idleSet.Insert(&kubecost.Allocation{
			Name:   name,
			Window: idleSet.Window.Clone(),
			Properties: &kubecost.AllocationProperties{
				Cluster:    assetTotal.Cluster,
				Node:       assetTotal.Node,
				ProviderID: assetTotal.Node,
			},
			Start:   assetTotal.Start,
			End:     assetTotal.End,
			CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
			GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
			RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
		})
		if err != nil {
			return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
		}
	}
	return idleSet, nil
}