costmodel.go 79 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273
  1. package costmodel
  2. import (
  3. "fmt"
  4. "math"
  5. "regexp"
  6. "strconv"
  7. "strings"
  8. "time"
  9. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud"
  10. "github.com/opencost/opencost/pkg/clustercache"
  11. "github.com/opencost/opencost/pkg/costmodel/clusters"
  12. "github.com/opencost/opencost/pkg/env"
  13. "github.com/opencost/opencost/pkg/kubecost"
  14. "github.com/opencost/opencost/pkg/log"
  15. "github.com/opencost/opencost/pkg/prom"
  16. "github.com/opencost/opencost/pkg/util"
  17. prometheus "github.com/prometheus/client_golang/api"
  18. prometheusClient "github.com/prometheus/client_golang/api"
  19. v1 "k8s.io/api/core/v1"
  20. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  21. "k8s.io/apimachinery/pkg/labels"
  22. "golang.org/x/sync/singleflight"
  23. )
// Prometheus HTTP API constants.
const (
	// statusAPIError is the HTTP status code the Prometheus API returns
	// for request errors (422 Unprocessable Entity).
	statusAPIError = 422

	// profileThreshold is the elapsed-time threshold, in nanoseconds,
	// above which profiled operations are reported.
	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

	// apiPrefix is the base path of the Prometheus v1 HTTP API; the ep*
	// constants below are endpoint paths under it.
	apiPrefix = "/api/v1"

	epAlertManagers   = apiPrefix + "/alertmanagers"
	epLabelValues     = apiPrefix + "/label/:name/values"
	epSeries          = apiPrefix + "/series"
	epTargets         = apiPrefix + "/targets"
	epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
	epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
	epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
	epConfig          = apiPrefix + "/status/config"
	epFlags           = apiPrefix + "/status/flags"
)
// isCron matches a CronJob name and captures the non-timestamp name.
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel computes cost-allocation data by combining Kubernetes cluster
// state (from the cluster cache) with metrics queried from Prometheus and
// pricing information from a cloud Provider.
type CostModel struct {
	// Cache serves locally cached Kubernetes API objects (pods, nodes, ...).
	Cache clustercache.ClusterCache
	// ClusterMap resolves cluster IDs to cluster names (see NameFor).
	ClusterMap clusters.ClusterMap
	// MaxPrometheusQueryDuration caps the duration of a single Prometheus
	// query; populated from env.GetETLMaxPrometheusQueryDuration().
	MaxPrometheusQueryDuration time.Duration
	// RequestGroup deduplicates concurrent identical requests
	// (singleflight) to prevent over-requesting the same data.
	RequestGroup *singleflight.Group
	// ScrapeInterval is the Prometheus scrape interval supplied at
	// construction time.
	ScrapeInterval time.Duration
	// PrometheusClient issues queries against the Prometheus HTTP API.
	PrometheusClient prometheus.Client
	// Provider supplies cloud-specific pricing data.
	Provider costAnalyzerCloud.Provider
	// pricingMetadata records pricing-match details; unexported, internal use.
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  54. func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
  55. // request grouping to prevent over-requesting the same data prior to caching
  56. requestGroup := new(singleflight.Group)
  57. return &CostModel{
  58. Cache: cache,
  59. ClusterMap: clusterMap,
  60. MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
  61. PrometheusClient: client,
  62. Provider: provider,
  63. RequestGroup: requestGroup,
  64. ScrapeInterval: scrapeInterval,
  65. }
  66. }
// CostData holds the cost-allocation inputs for a single container: its
// identity (container/pod/node/namespace), owning workloads, resource
// request/usage/allocation vectors, PVC and network data, and labels and
// annotations (with namespace values merged in by the caller).
type CostData struct {
	Name      string                  `json:"name,omitempty"`    // container name
	PodName   string                  `json:"podName,omitempty"` // owning pod
	NodeName  string                  `json:"nodeName,omitempty"`
	NodeData  *costAnalyzerCloud.Node `json:"node,omitempty"` // pricing data for the node, if known
	Namespace string                  `json:"namespace,omitempty"`
	// Owning workload names, by kind.
	Deployments  []string `json:"deployments,omitempty"`
	Services     []string `json:"services,omitempty"`
	Daemonsets   []string `json:"daemonsets,omitempty"`
	Statefulsets []string `json:"statefulsets,omitempty"`
	Jobs         []string `json:"jobs,omitempty"`
	// Time-series vectors of requested, used, and allocated resources.
	RAMReq        []*util.Vector `json:"ramreq,omitempty"`
	RAMUsed       []*util.Vector `json:"ramused,omitempty"`
	RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
	CPUReq        []*util.Vector `json:"cpureq,omitempty"`
	CPUUsed       []*util.Vector `json:"cpuused,omitempty"`
	CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
	GPUReq        []*util.Vector `json:"gpureq,omitempty"`
	// PVCData and NetworkData are assigned to only one container per pod
	// to avoid double counting.
	PVCData         []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
	NetworkData     []*util.Vector               `json:"network,omitempty"`
	Annotations     map[string]string            `json:"annotations,omitempty"`
	Labels          map[string]string            `json:"labels,omitempty"`
	NamespaceLabels map[string]string            `json:"namespaceLabels,omitempty"`
	ClusterID       string                       `json:"clusterId"`
	ClusterName     string                       `json:"clusterName"`
}
  93. func (cd *CostData) String() string {
  94. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  95. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  96. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  97. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  98. }
  99. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  100. hasController = false
  101. if len(cd.Deployments) > 0 {
  102. name = cd.Deployments[0]
  103. kind = "deployment"
  104. hasController = true
  105. } else if len(cd.Statefulsets) > 0 {
  106. name = cd.Statefulsets[0]
  107. kind = "statefulset"
  108. hasController = true
  109. } else if len(cd.Daemonsets) > 0 {
  110. name = cd.Daemonsets[0]
  111. kind = "daemonset"
  112. hasController = true
  113. } else if len(cd.Jobs) > 0 {
  114. name = cd.Jobs[0]
  115. kind = "job"
  116. hasController = true
  117. match := isCron.FindStringSubmatch(name)
  118. if match != nil {
  119. name = match[1]
  120. }
  121. }
  122. return name, kind, hasController
  123. }
// PromQL query format strings. The %s verbs are filled with the query
// window, an optional offset clause, and the configured Prometheus cluster
// label (env.GetPromClusterLabel()); %f verbs take scalar coefficients.
const (
	// queryRAMRequestsStr averages RAM requests (bytes) per container over
	// the window, relabeling container/pod to container_name/pod_name.
	queryRAMRequestsStr = `avg(
label_replace(
label_replace(
avg(
count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!=""}[%s] %s)
*
avg_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!=""}[%s] %s)
) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryRAMUsageStr averages working-set RAM usage (bytes) per
	// container, mapping the instance label onto node.
	queryRAMUsageStr = `sort_desc(
avg(
label_replace(count_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)")
*
label_replace(avg_over_time(container_memory_working_set_bytes{container_name!="",container_name!="POD", instance!=""}[%s] %s), "node", "$1", "instance","(.+)")
) by (namespace,container_name,pod_name,node,%s)
)`
	// queryCPURequestsStr averages CPU requests (cores) per container over
	// the window; same relabeling scheme as the RAM requests query.
	queryCPURequestsStr = `avg(
label_replace(
label_replace(
avg(
count_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!=""}[%s] %s)
*
avg_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!=""}[%s] %s)
) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryCPUUsageStr rates CPU usage (cores) per container from the
	// cumulative usage counter.
	queryCPUUsageStr = `avg(
label_replace(
rate(
container_cpu_usage_seconds_total{container_name!="",container_name!="POD",instance!=""}[%s] %s
) , "node", "$1", "instance", "(.+)"
)
) by (namespace,container_name,pod_name,node,%s)`
	// queryGPURequestsStr averages nvidia.com/gpu requests per container,
	// scaled by a coefficient (%f) and restricted to Running pods by the
	// trailing join on kube_pod_status_phase.
	queryGPURequestsStr = `avg(
label_replace(
label_replace(
avg(
count_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s] %s)
*
avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s] %s)
* %f
) by (namespace,container,pod,node,%s) , "container_name","$1","container","(.+)"
), "pod_name","$1","pod","(.+)"
)
) by (namespace,container_name,pod_name,node,%s)
* on (pod_name, namespace, %s) group_left(container) label_replace(avg(avg_over_time(kube_pod_status_phase{phase="Running"}[%s] %s)) by (pod,namespace,%s), "pod_name","$1","pod","(.+)")`
	// queryPVRequestsStr joins PVC info with PVC storage requests to yield
	// requested bytes per claim, keyed by storageclass and volumename.
	queryPVRequestsStr = `avg(avg(kube_persistentvolumeclaim_info{volumename != ""}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
*
on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`
	// queryRAMAllocationByteHours yields the total byte-hour RAM allocation over the given
	// window, aggregated by container.
	// [line 3] sum_over_time(each byte) = [byte*scrape] by metric
	// [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
	// [lines 2,4] sum(") by unique container key and multiply [byte*scrape] * [hours/scrape] for byte*hours
	// [lines 1,5] relabeling
	queryRAMAllocationByteHours = `
label_replace(label_replace(
sum(
sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!=""}[%s])
) by (namespace,container,pod,node,%s) * %f / 60 / 60
, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
	// queryCPUAllocationVCPUHours yields the total VCPU-hour CPU allocation over the given
	// window, aggregated by container.
	// [line 3] sum_over_time(each VCPU*mins in window) = [VCPU*scrape] by metric
	// [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by container
	// [lines 2,4] sum(") by unique container key and multiply [VCPU*scrape] * [hours/scrape] for VCPU*hours
	// [lines 1,5] relabeling
	queryCPUAllocationVCPUHours = `
label_replace(label_replace(
sum(
sum_over_time(container_cpu_allocation{container!="",container!="POD", node!=""}[%s])
) by (namespace,container,pod,node,%s) * %f / 60 / 60
, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
	// queryPVCAllocationFmt yields the total byte-hour PVC allocation over the given window.
	// sum_over_time(each byte) = [byte*scrape] by metric *(scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 = [hours/scrape] by pod
	queryPVCAllocationFmt = `sum(sum_over_time(pod_pvc_allocation[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
	queryPVHourlyCostFmt  = `avg_over_time(pv_hourly_cost[%s])`
	// Label/annotation lookups averaged over the window.
	queryNSLabels       = `avg_over_time(kube_namespace_labels[%s])`
	queryPodLabels      = `avg_over_time(kube_pod_labels[%s])`
	queryNSAnnotations  = `avg_over_time(kube_namespace_annotations[%s])`
	queryPodAnnotations = `avg_over_time(kube_pod_annotations[%s])`
	// Workload selector-label lookups. NOTE: "statefulSet_match_labels"
	// (capital S) is the metric name as emitted; do not "fix" the casing.
	queryDeploymentLabels  = `avg_over_time(deployment_match_labels[%s])`
	queryStatefulsetLabels = `avg_over_time(statefulSet_match_labels[%s])`
	// Pod ownership by DaemonSet / Job, via kube_pod_owner.
	queryPodDaemonsets = `sum(kube_pod_owner{owner_kind="DaemonSet"}) by (namespace,pod,owner_name,%s)`
	queryPodJobs       = `sum(kube_pod_owner{owner_kind="Job"}) by (namespace,pod,owner_name,%s)`
	queryServiceLabels = `avg_over_time(service_selector_labels[%s])`
	// Network egress (GiB) per pod, split by destination: same-region
	// cross-zone, cross-region, and internet.
	queryZoneNetworkUsage     = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true"}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	queryRegionNetworkUsage   = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false"}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	queryInternetNetworkUsage = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true"}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
	// normalizationStr counts scrape samples in the window, used to
	// normalize count_over_time-based averages.
	normalizationStr = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte"}[%s] %s))`
)
  220. func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
  221. queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, window, offset, window, offset, env.GetPromClusterLabel())
  222. queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, window, offset, env.GetPromClusterLabel())
  223. queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, window, "", env.GetPromClusterLabel())
  224. queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, window, "", env.GetPromClusterLabel())
  225. queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, window, "", env.GetPromClusterLabel())
  226. queryNormalization := fmt.Sprintf(normalizationStr, window, offset)
  227. // Cluster ID is specific to the source cluster
  228. clusterID := env.GetClusterID()
  229. // Submit all Prometheus queries asynchronously
  230. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  231. resChRAMUsage := ctx.Query(queryRAMUsage)
  232. resChCPUUsage := ctx.Query(queryCPUUsage)
  233. resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
  234. resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
  235. resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
  236. resChNormalization := ctx.Query(queryNormalization)
  237. // Pull pod information from k8s API
  238. podlist := cm.Cache.GetAllPods()
  239. podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
  240. if err != nil {
  241. return nil, err
  242. }
  243. podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
  244. if err != nil {
  245. return nil, err
  246. }
  247. namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
  248. if err != nil {
  249. return nil, err
  250. }
  251. namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
  252. if err != nil {
  253. return nil, err
  254. }
  255. // Process Prometheus query results. Handle errors using ctx.Errors.
  256. resRAMUsage, _ := resChRAMUsage.Await()
  257. resCPUUsage, _ := resChCPUUsage.Await()
  258. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  259. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  260. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  261. resNormalization, _ := resChNormalization.Await()
  262. // NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
  263. // NOTE: will not propagate unless coupled with errors.
  264. if ctx.HasErrors() {
  265. // To keep the context of where the errors are occurring, we log the errors here and pass them the error
  266. // back to the caller. The caller should handle the specific case where error is an ErrorCollection
  267. for _, promErr := range ctx.Errors() {
  268. if promErr.Error != nil {
  269. log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
  270. }
  271. if promErr.ParseError != nil {
  272. log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
  273. }
  274. }
  275. // ErrorCollection is an collection of errors wrapped in a single error implementation
  276. // We opt to not return an error for the sake of running as a pure exporter.
  277. log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
  278. }
  279. defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
  280. normalizationValue, err := getNormalization(resNormalization)
  281. if err != nil {
  282. // We opt to not return an error for the sake of running as a pure exporter.
  283. log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
  284. }
  285. // Determine if there are vgpus configured and if so get the total allocatable number
  286. // If there are no vgpus, the coefficient is set to 1.0
  287. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  288. vgpuCoeff := 10.0
  289. if vgpuCount > 0.0 {
  290. vgpuCoeff = vgpuCount
  291. }
  292. nodes, err := cm.GetNodeCost(cp)
  293. if err != nil {
  294. log.Warnf("GetNodeCost: no node cost model available: " + err.Error())
  295. return nil, err
  296. }
  297. // Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
  298. unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
  299. pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
  300. if err != nil {
  301. log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
  302. }
  303. if pvClaimMapping != nil {
  304. err = addPVData(cm.Cache, pvClaimMapping, cp)
  305. if err != nil {
  306. return nil, err
  307. }
  308. // copy claim mappings into zombies, then remove as they're discovered
  309. for k, v := range pvClaimMapping {
  310. unmountedPVs[k] = []*PersistentVolumeClaimData{v}
  311. }
  312. }
  313. networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
  314. if err != nil {
  315. log.Warnf("Unable to get Network Cost Data: %s", err.Error())
  316. networkUsageMap = make(map[string]*NetworkUsageData)
  317. }
  318. containerNameCost := make(map[string]*CostData)
  319. containers := make(map[string]bool)
  320. RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, true, normalizationValue, clusterID)
  321. if err != nil {
  322. return nil, err
  323. }
  324. for key := range RAMUsedMap {
  325. containers[key] = true
  326. }
  327. CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, false, 0, clusterID) // No need to normalize here, as this comes from a counter
  328. if err != nil {
  329. return nil, err
  330. }
  331. for key := range CPUUsedMap {
  332. containers[key] = true
  333. }
  334. currentContainers := make(map[string]v1.Pod)
  335. for _, pod := range podlist {
  336. if pod.Status.Phase != v1.PodRunning {
  337. continue
  338. }
  339. cs, err := NewContainerMetricsFromPod(pod, clusterID)
  340. if err != nil {
  341. return nil, err
  342. }
  343. for _, c := range cs {
  344. containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
  345. currentContainers[c.Key()] = *pod
  346. }
  347. }
  348. missingNodes := make(map[string]*costAnalyzerCloud.Node)
  349. missingContainers := make(map[string]*CostData)
  350. for key := range containers {
  351. if _, ok := containerNameCost[key]; ok {
  352. continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
  353. }
  354. // The _else_ case for this statement is the case in which the container has been
  355. // deleted so we have usage information but not request information. In that case,
  356. // we return partial data for CPU and RAM: only usage and not requests.
  357. if pod, ok := currentContainers[key]; ok {
  358. podName := pod.GetObjectMeta().GetName()
  359. ns := pod.GetObjectMeta().GetNamespace()
  360. nsLabels := namespaceLabelsMapping[ns+","+clusterID]
  361. podLabels := pod.GetObjectMeta().GetLabels()
  362. if podLabels == nil {
  363. podLabels = make(map[string]string)
  364. }
  365. for k, v := range nsLabels {
  366. if _, ok := podLabels[k]; !ok {
  367. podLabels[k] = v
  368. }
  369. }
  370. nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  371. podAnnotations := pod.GetObjectMeta().GetAnnotations()
  372. if podAnnotations == nil {
  373. podAnnotations = make(map[string]string)
  374. }
  375. for k, v := range nsAnnotations {
  376. if _, ok := podAnnotations[k]; !ok {
  377. podAnnotations[k] = v
  378. }
  379. }
  380. nodeName := pod.Spec.NodeName
  381. var nodeData *costAnalyzerCloud.Node
  382. if _, ok := nodes[nodeName]; ok {
  383. nodeData = nodes[nodeName]
  384. }
  385. nsKey := ns + "," + clusterID
  386. var podDeployments []string
  387. if _, ok := podDeploymentsMapping[nsKey]; ok {
  388. if ds, ok := podDeploymentsMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  389. podDeployments = ds
  390. } else {
  391. podDeployments = []string{}
  392. }
  393. }
  394. var podPVs []*PersistentVolumeClaimData
  395. podClaims := pod.Spec.Volumes
  396. for _, vol := range podClaims {
  397. if vol.PersistentVolumeClaim != nil {
  398. name := vol.PersistentVolumeClaim.ClaimName
  399. key := ns + "," + name + "," + clusterID
  400. if pvClaim, ok := pvClaimMapping[key]; ok {
  401. pvClaim.TimesClaimed++
  402. podPVs = append(podPVs, pvClaim)
  403. // Remove entry from potential unmounted pvs
  404. delete(unmountedPVs, key)
  405. }
  406. }
  407. }
  408. var podNetCosts []*util.Vector
  409. if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
  410. netCosts, err := GetNetworkCost(usage, cp)
  411. if err != nil {
  412. log.Debugf("Error pulling network costs: %s", err.Error())
  413. } else {
  414. podNetCosts = netCosts
  415. }
  416. }
  417. var podServices []string
  418. if _, ok := podServicesMapping[nsKey]; ok {
  419. if svcs, ok := podServicesMapping[nsKey][pod.GetObjectMeta().GetName()]; ok {
  420. podServices = svcs
  421. } else {
  422. podServices = []string{}
  423. }
  424. }
  425. for i, container := range pod.Spec.Containers {
  426. containerName := container.Name
  427. // recreate the key and look up data for this container
  428. newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
  429. // k8s.io/apimachinery/pkg/api/resource/amount.go and
  430. // k8s.io/apimachinery/pkg/api/resource/quantity.go for
  431. // details on the "amount" API. See
  432. // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
  433. // for the units of memory and CPU.
  434. ramRequestBytes := container.Resources.Requests.Memory().Value()
  435. // Because RAM (and CPU) information isn't coming from Prometheus, it won't
  436. // have a timestamp associated with it. We need to provide a timestamp,
  437. // otherwise the vector op that gets applied to take the max of usage
  438. // and request won't work properly and will only take into account
  439. // usage.
  440. RAMReqV := []*util.Vector{
  441. {
  442. Value: float64(ramRequestBytes),
  443. Timestamp: float64(time.Now().UTC().Unix()),
  444. },
  445. }
  446. // use millicores so we can convert to cores in a float64 format
  447. cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
  448. CPUReqV := []*util.Vector{
  449. {
  450. Value: float64(cpuRequestMilliCores) / 1000,
  451. Timestamp: float64(time.Now().UTC().Unix()),
  452. },
  453. }
  454. gpuReqCount := 0.0
  455. if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
  456. gpuReqCount = g.AsApproximateFloat64()
  457. } else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
  458. gpuReqCount = g.AsApproximateFloat64()
  459. } else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
  460. // divide vgpu request/limits by total vgpus to get the portion of physical gpus requested
  461. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  462. } else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
  463. gpuReqCount = g.AsApproximateFloat64() / vgpuCoeff
  464. }
  465. GPUReqV := []*util.Vector{
  466. {
  467. Value: float64(gpuReqCount),
  468. Timestamp: float64(time.Now().UTC().Unix()),
  469. },
  470. }
  471. RAMUsedV, ok := RAMUsedMap[newKey]
  472. if !ok {
  473. log.Debug("no RAM usage for " + newKey)
  474. RAMUsedV = []*util.Vector{{}}
  475. }
  476. CPUUsedV, ok := CPUUsedMap[newKey]
  477. if !ok {
  478. log.Debug("no CPU usage for " + newKey)
  479. CPUUsedV = []*util.Vector{{}}
  480. }
  481. var pvReq []*PersistentVolumeClaimData
  482. var netReq []*util.Vector
  483. if i == 0 { // avoid duplicating by just assigning all claims to the first container.
  484. pvReq = podPVs
  485. netReq = podNetCosts
  486. }
  487. costs := &CostData{
  488. Name: containerName,
  489. PodName: podName,
  490. NodeName: nodeName,
  491. Namespace: ns,
  492. Deployments: podDeployments,
  493. Services: podServices,
  494. Daemonsets: getDaemonsetsOfPod(pod),
  495. Jobs: getJobsOfPod(pod),
  496. Statefulsets: getStatefulSetsOfPod(pod),
  497. NodeData: nodeData,
  498. RAMReq: RAMReqV,
  499. RAMUsed: RAMUsedV,
  500. CPUReq: CPUReqV,
  501. CPUUsed: CPUUsedV,
  502. GPUReq: GPUReqV,
  503. PVCData: pvReq,
  504. NetworkData: netReq,
  505. Annotations: podAnnotations,
  506. Labels: podLabels,
  507. NamespaceLabels: nsLabels,
  508. ClusterID: clusterID,
  509. ClusterName: cm.ClusterMap.NameFor(clusterID),
  510. }
  511. costs.CPUAllocation = getContainerAllocation(costs.CPUReq, costs.CPUUsed, "CPU")
  512. costs.RAMAllocation = getContainerAllocation(costs.RAMReq, costs.RAMUsed, "RAM")
  513. if filterNamespace == "" {
  514. containerNameCost[newKey] = costs
  515. } else if costs.Namespace == filterNamespace {
  516. containerNameCost[newKey] = costs
  517. }
  518. }
  519. } else {
  520. // The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
  521. log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
  522. c, err := NewContainerMetricFromKey(key)
  523. if err != nil {
  524. return nil, err
  525. }
  526. // CPU and RAM requests are obtained from the Kubernetes API.
  527. // If this case has been reached, the Kubernetes API will not
  528. // have information about the pod because it no longer exists.
  529. //
  530. // The case where this matters is minimal, mainly in environments
  531. // with very short-lived pods that over-request resources.
  532. RAMReqV := []*util.Vector{{}}
  533. CPUReqV := []*util.Vector{{}}
  534. GPUReqV := []*util.Vector{{}}
  535. RAMUsedV, ok := RAMUsedMap[key]
  536. if !ok {
  537. log.Debug("no RAM usage for " + key)
  538. RAMUsedV = []*util.Vector{{}}
  539. }
  540. CPUUsedV, ok := CPUUsedMap[key]
  541. if !ok {
  542. log.Debug("no CPU usage for " + key)
  543. CPUUsedV = []*util.Vector{{}}
  544. }
  545. node, ok := nodes[c.NodeName]
  546. if !ok {
  547. log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
  548. if n, ok := missingNodes[c.NodeName]; ok {
  549. node = n
  550. } else {
  551. node = &costAnalyzerCloud.Node{}
  552. missingNodes[c.NodeName] = node
  553. }
  554. }
  555. namespacelabels, _ := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
  556. namespaceAnnotations, _ := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
  557. costs := &CostData{
  558. Name: c.ContainerName,
  559. PodName: c.PodName,
  560. NodeName: c.NodeName,
  561. NodeData: node,
  562. Namespace: c.Namespace,
  563. RAMReq: RAMReqV,
  564. RAMUsed: RAMUsedV,
  565. CPUReq: CPUReqV,
  566. CPUUsed: CPUUsedV,
  567. GPUReq: GPUReqV,
  568. Annotations: namespaceAnnotations,
  569. NamespaceLabels: namespacelabels,
  570. ClusterID: c.ClusterID,
  571. ClusterName: cm.ClusterMap.NameFor(c.ClusterID),
  572. }
  573. costs.CPUAllocation = getContainerAllocation(costs.CPUReq, costs.CPUUsed, "CPU")
  574. costs.RAMAllocation = getContainerAllocation(costs.RAMReq, costs.RAMUsed, "RAM")
  575. if filterNamespace == "" {
  576. containerNameCost[key] = costs
  577. missingContainers[key] = costs
  578. } else if costs.Namespace == filterNamespace {
  579. containerNameCost[key] = costs
  580. missingContainers[key] = costs
  581. }
  582. }
  583. }
  584. // Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
  585. // to pass along the cost data
  586. unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
  587. for k, costs := range unmounted {
  588. log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
  589. if filterNamespace == "" {
  590. containerNameCost[k] = costs
  591. } else if costs.Namespace == filterNamespace {
  592. containerNameCost[k] = costs
  593. }
  594. }
  595. err = findDeletedNodeInfo(cli, missingNodes, window, "")
  596. if err != nil {
  597. log.Errorf("Error fetching historical node data: %s", err.Error())
  598. }
  599. err = findDeletedPodInfo(cli, missingContainers, window)
  600. if err != nil {
  601. log.Errorf("Error fetching historical pod data: %s", err.Error())
  602. }
  603. return containerNameCost, err
  604. }
  605. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  606. costs := make(map[string]*CostData)
  607. if len(unmountedPVs) == 0 {
  608. return costs
  609. }
  610. for k, pv := range unmountedPVs {
  611. keyParts := strings.Split(k, ",")
  612. if len(keyParts) != 3 {
  613. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  614. continue
  615. }
  616. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  617. namespacelabels, _ := namespaceLabelsMapping[ns+","+clusterID]
  618. namespaceAnnotations, _ := namespaceAnnotationsMapping[ns+","+clusterID]
  619. // Should be a unique "Unmounted" cost data type
  620. name := "unmounted-pvs"
  621. metric := NewContainerMetricFromValues(ns, name, name, "", clusterID)
  622. key := metric.Key()
  623. if costData, ok := costs[key]; !ok {
  624. costs[key] = &CostData{
  625. Name: name,
  626. PodName: name,
  627. NodeName: "",
  628. Annotations: namespaceAnnotations,
  629. Namespace: ns,
  630. NamespaceLabels: namespacelabels,
  631. Labels: namespacelabels,
  632. ClusterID: clusterID,
  633. ClusterName: clusterMap.NameFor(clusterID),
  634. PVCData: pv,
  635. }
  636. } else {
  637. costData.PVCData = append(costData.PVCData, pv...)
  638. }
  639. }
  640. return costs
  641. }
  642. func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
  643. if len(missingContainers) > 0 {
  644. queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{}[%s]`, window)
  645. podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
  646. if err != nil {
  647. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  648. }
  649. podLabels := make(map[string]map[string]string)
  650. if podLabelsResult != nil {
  651. podLabels, err = parsePodLabels(podLabelsResult)
  652. if err != nil {
  653. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  654. }
  655. }
  656. for key, costData := range missingContainers {
  657. cm, _ := NewContainerMetricFromKey(key)
  658. labels, ok := podLabels[cm.PodName]
  659. if !ok {
  660. labels = make(map[string]string)
  661. }
  662. for k, v := range costData.NamespaceLabels {
  663. labels[k] = v
  664. }
  665. costData.Labels = labels
  666. }
  667. }
  668. return nil
  669. }
  670. func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
  671. if len(missingNodes) > 0 {
  672. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  673. offsetStr := ""
  674. if offset != "" {
  675. offsetStr = fmt.Sprintf("offset %s", offset)
  676. }
  677. queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost[%s] %s)) by (node, instance, %s)`, window, offsetStr, env.GetPromClusterLabel())
  678. queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost[%s] %s)) by (node, instance, %s)`, window, offsetStr, env.GetPromClusterLabel())
  679. queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost[%s] %s)) by (node, instance, %s)`, window, offsetStr, env.GetPromClusterLabel())
  680. ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
  681. cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
  682. ramCostResCh := ctx.Query(queryHistoricalRAMCost)
  683. gpuCostResCh := ctx.Query(queryHistoricalGPUCost)
  684. cpuCostRes, _ := cpuCostResCh.Await()
  685. ramCostRes, _ := ramCostResCh.Await()
  686. gpuCostRes, _ := gpuCostResCh.Await()
  687. if ctx.HasErrors() {
  688. return ctx.ErrorCollection()
  689. }
  690. cpuCosts, err := getCost(cpuCostRes)
  691. if err != nil {
  692. return err
  693. }
  694. ramCosts, err := getCost(ramCostRes)
  695. if err != nil {
  696. return err
  697. }
  698. gpuCosts, err := getCost(gpuCostRes)
  699. if err != nil {
  700. return err
  701. }
  702. if len(cpuCosts) == 0 {
  703. log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  704. }
  705. for node, costv := range cpuCosts {
  706. if _, ok := missingNodes[node]; ok {
  707. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  708. } else {
  709. log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
  710. }
  711. }
  712. for node, costv := range ramCosts {
  713. if _, ok := missingNodes[node]; ok {
  714. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  715. }
  716. }
  717. for node, costv := range gpuCosts {
  718. if _, ok := missingNodes[node]; ok {
  719. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  720. }
  721. }
  722. }
  723. return nil
  724. }
  725. func getContainerAllocation(req []*util.Vector, used []*util.Vector, allocationType string) []*util.Vector {
  726. // The result of the normalize operation will be a new []*util.Vector to replace the requests
  727. allocationOp := func(r *util.Vector, x *float64, y *float64) bool {
  728. if x != nil && y != nil {
  729. x1 := *x
  730. if math.IsNaN(x1) {
  731. log.Warnf("NaN value found during %s allocation calculation for requests.", allocationType)
  732. x1 = 0.0
  733. }
  734. y1 := *y
  735. if math.IsNaN(y1) {
  736. log.Warnf("NaN value found during %s allocation calculation for used.", allocationType)
  737. y1 = 0.0
  738. }
  739. r.Value = math.Max(x1, y1)
  740. } else if x != nil {
  741. r.Value = *x
  742. } else if y != nil {
  743. r.Value = *y
  744. }
  745. return true
  746. }
  747. return util.ApplyVectorOp(req, used, allocationOp)
  748. }
  749. func addPVData(cache clustercache.ClusterCache, pvClaimMapping map[string]*PersistentVolumeClaimData, cloud costAnalyzerCloud.Provider) error {
  750. cfg, err := cloud.GetConfig()
  751. if err != nil {
  752. return err
  753. }
  754. // Pull a region from the first node
  755. var defaultRegion string
  756. nodeList := cache.GetAllNodes()
  757. if len(nodeList) > 0 {
  758. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  759. }
  760. storageClasses := cache.GetAllStorageClasses()
  761. storageClassMap := make(map[string]map[string]string)
  762. for _, storageClass := range storageClasses {
  763. params := storageClass.Parameters
  764. storageClassMap[storageClass.ObjectMeta.Name] = params
  765. if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  766. storageClassMap["default"] = params
  767. storageClassMap[""] = params
  768. }
  769. }
  770. pvs := cache.GetAllPersistentVolumes()
  771. pvMap := make(map[string]*costAnalyzerCloud.PV)
  772. for _, pv := range pvs {
  773. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  774. if !ok {
  775. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  776. }
  777. var region string
  778. if r, ok := util.GetRegion(pv.Labels); ok {
  779. region = r
  780. } else {
  781. region = defaultRegion
  782. }
  783. cacPv := &costAnalyzerCloud.PV{
  784. Class: pv.Spec.StorageClassName,
  785. Region: region,
  786. Parameters: parameters,
  787. }
  788. err := GetPVCost(cacPv, pv, cloud, region)
  789. if err != nil {
  790. return err
  791. }
  792. pvMap[pv.Name] = cacPv
  793. }
  794. for _, pvc := range pvClaimMapping {
  795. if vol, ok := pvMap[pvc.VolumeName]; ok {
  796. pvc.Volume = vol
  797. } else {
  798. log.Debugf("PV not found, using default")
  799. pvc.Volume = &costAnalyzerCloud.PV{
  800. Cost: cfg.Storage,
  801. }
  802. }
  803. }
  804. return nil
  805. }
  806. func GetPVCost(pv *costAnalyzerCloud.PV, kpv *v1.PersistentVolume, cp costAnalyzerCloud.Provider, defaultRegion string) error {
  807. cfg, err := cp.GetConfig()
  808. if err != nil {
  809. return err
  810. }
  811. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  812. pv.ProviderID = key.ID()
  813. pvWithCost, err := cp.PVPricing(key)
  814. if err != nil {
  815. pv.Cost = cfg.Storage
  816. return err
  817. }
  818. if pvWithCost == nil || pvWithCost.Cost == "" {
  819. pv.Cost = cfg.Storage
  820. return nil // set default cost
  821. }
  822. pv.Cost = pvWithCost.Cost
  823. return nil
  824. }
  825. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  826. if cm.pricingMetadata != nil {
  827. return cm.pricingMetadata, nil
  828. } else {
  829. return nil, fmt.Errorf("Node costs not yet calculated")
  830. }
  831. }
  832. func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*costAnalyzerCloud.Node, error) {
  833. cfg, err := cp.GetConfig()
  834. if err != nil {
  835. return nil, err
  836. }
  837. nodeList := cm.Cache.GetAllNodes()
  838. nodes := make(map[string]*costAnalyzerCloud.Node)
  839. vgpuCount, err := getAllocatableVGPUs(cm.Cache)
  840. vgpuCoeff := 10.0
  841. if vgpuCount > 0.0 {
  842. vgpuCoeff = vgpuCount
  843. }
  844. pmd := &costAnalyzerCloud.PricingMatchMetadata{
  845. TotalNodes: 0,
  846. PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
  847. }
  848. for _, n := range nodeList {
  849. name := n.GetObjectMeta().GetName()
  850. nodeLabels := n.GetObjectMeta().GetLabels()
  851. nodeLabels["providerID"] = n.Spec.ProviderID
  852. pmd.TotalNodes++
  853. cnode, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
  854. if err != nil {
  855. log.Infof("Error getting node pricing. Error: %s", err.Error())
  856. if cnode != nil {
  857. nodes[name] = cnode
  858. continue
  859. } else {
  860. cnode = &costAnalyzerCloud.Node{
  861. VCPUCost: cfg.CPU,
  862. RAMCost: cfg.RAM,
  863. }
  864. }
  865. }
  866. if _, ok := pmd.PricingTypeCounts[cnode.PricingType]; ok {
  867. pmd.PricingTypeCounts[cnode.PricingType]++
  868. } else {
  869. pmd.PricingTypeCounts[cnode.PricingType] = 1
  870. }
  871. newCnode := *cnode
  872. if newCnode.InstanceType == "" {
  873. it, _ := util.GetInstanceType(n.Labels)
  874. newCnode.InstanceType = it
  875. }
  876. if newCnode.Region == "" {
  877. region, _ := util.GetRegion(n.Labels)
  878. newCnode.Region = region
  879. }
  880. newCnode.ProviderID = n.Spec.ProviderID
  881. var cpu float64
  882. if newCnode.VCPU == "" {
  883. cpu = float64(n.Status.Capacity.Cpu().Value())
  884. newCnode.VCPU = n.Status.Capacity.Cpu().String()
  885. } else {
  886. cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
  887. if err != nil {
  888. log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
  889. }
  890. }
  891. if math.IsNaN(cpu) {
  892. log.Warnf("cpu parsed as NaN. Setting to 0.")
  893. cpu = 0
  894. }
  895. var ram float64
  896. if newCnode.RAM == "" {
  897. newCnode.RAM = n.Status.Capacity.Memory().String()
  898. }
  899. ram = float64(n.Status.Capacity.Memory().Value())
  900. if math.IsNaN(ram) {
  901. log.Warnf("ram parsed as NaN. Setting to 0.")
  902. ram = 0
  903. }
  904. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  905. // Azure does not seem to provide a GPU count in its pricing API. GKE supports attaching multiple GPUs
  906. // So the k8s api will often report more accurate results for GPU count under status > capacity > nvidia.com/gpu than the cloud providers billing data
  907. // not all providers are guaranteed to use this, so don't overwrite a Provider assignment if we can't find something under that capacity exists
  908. gpuc := 0.0
  909. q, ok := n.Status.Capacity["nvidia.com/gpu"]
  910. if ok {
  911. gpuCount := q.Value()
  912. if gpuCount != 0 {
  913. newCnode.GPU = fmt.Sprintf("%d", q.Value())
  914. gpuc = float64(gpuCount)
  915. }
  916. } else if g, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
  917. gpuCount := g.Value()
  918. if gpuCount != 0 {
  919. newCnode.GPU = fmt.Sprintf("%d", int(float64(q.Value())/vgpuCoeff))
  920. gpuc = float64(gpuCount) / vgpuCoeff
  921. }
  922. } else {
  923. gpuc, err = strconv.ParseFloat(newCnode.GPU, 64)
  924. if err != nil {
  925. gpuc = 0.0
  926. }
  927. }
  928. if math.IsNaN(gpuc) {
  929. log.Warnf("gpu count parsed as NaN. Setting to 0.")
  930. gpuc = 0.0
  931. }
  932. if newCnode.GPU != "" && newCnode.GPUCost == "" {
  933. // We couldn't find a gpu cost, so fix cpu and ram, then accordingly
  934. log.Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  935. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  936. if err != nil {
  937. log.Errorf("Could not parse default cpu price")
  938. defaultCPU = 0
  939. }
  940. if math.IsNaN(defaultCPU) {
  941. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  942. defaultCPU = 0
  943. }
  944. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  945. if err != nil {
  946. log.Errorf("Could not parse default ram price")
  947. defaultRAM = 0
  948. }
  949. if math.IsNaN(defaultRAM) {
  950. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  951. defaultRAM = 0
  952. }
  953. defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
  954. if err != nil {
  955. log.Errorf("Could not parse default gpu price")
  956. defaultGPU = 0
  957. }
  958. if math.IsNaN(defaultGPU) {
  959. log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
  960. defaultGPU = 0
  961. }
  962. cpuToRAMRatio := defaultCPU / defaultRAM
  963. if math.IsNaN(cpuToRAMRatio) {
  964. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
  965. cpuToRAMRatio = 0
  966. }
  967. gpuToRAMRatio := defaultGPU / defaultRAM
  968. if math.IsNaN(gpuToRAMRatio) {
  969. log.Warnf("gpuToRAMRatio is NaN. Setting to 0.")
  970. gpuToRAMRatio = 0
  971. }
  972. ramGB := ram / 1024 / 1024 / 1024
  973. if math.IsNaN(ramGB) {
  974. log.Warnf("ramGB is NaN. Setting to 0.")
  975. ramGB = 0
  976. }
  977. ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
  978. if math.IsNaN(ramMultiple) {
  979. log.Warnf("ramMultiple is NaN. Setting to 0.")
  980. ramMultiple = 0
  981. }
  982. var nodePrice float64
  983. if newCnode.Cost != "" {
  984. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  985. if err != nil {
  986. log.Errorf("Could not parse total node price")
  987. return nil, err
  988. }
  989. } else {
  990. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  991. if err != nil {
  992. log.Errorf("Could not parse node vcpu price")
  993. return nil, err
  994. }
  995. }
  996. if math.IsNaN(nodePrice) {
  997. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  998. nodePrice = 0
  999. }
  1000. ramPrice := (nodePrice / ramMultiple)
  1001. if math.IsNaN(ramPrice) {
  1002. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1003. ramPrice = 0
  1004. }
  1005. cpuPrice := ramPrice * cpuToRAMRatio
  1006. gpuPrice := ramPrice * gpuToRAMRatio
  1007. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1008. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1009. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1010. newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
  1011. } else if newCnode.RAMCost == "" {
  1012. // We couldn't find a ramcost, so fix cpu and allocate ram accordingly
  1013. log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
  1014. defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
  1015. if err != nil {
  1016. log.Warnf("Could not parse default cpu price")
  1017. defaultCPU = 0
  1018. }
  1019. if math.IsNaN(defaultCPU) {
  1020. log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
  1021. defaultCPU = 0
  1022. }
  1023. defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
  1024. if err != nil {
  1025. log.Warnf("Could not parse default ram price")
  1026. defaultRAM = 0
  1027. }
  1028. if math.IsNaN(defaultRAM) {
  1029. log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
  1030. defaultRAM = 0
  1031. }
  1032. cpuToRAMRatio := defaultCPU / defaultRAM
  1033. if math.IsNaN(cpuToRAMRatio) {
  1034. log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
  1035. cpuToRAMRatio = 0
  1036. }
  1037. ramGB := ram / 1024 / 1024 / 1024
  1038. if math.IsNaN(ramGB) {
  1039. log.Warnf("ramGB is NaN. Setting to 0.")
  1040. ramGB = 0
  1041. }
  1042. ramMultiple := cpu*cpuToRAMRatio + ramGB
  1043. if math.IsNaN(ramMultiple) {
  1044. log.Warnf("ramMultiple is NaN. Setting to 0.")
  1045. ramMultiple = 0
  1046. }
  1047. var nodePrice float64
  1048. if newCnode.Cost != "" {
  1049. nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
  1050. if err != nil {
  1051. log.Warnf("Could not parse total node price")
  1052. return nil, err
  1053. }
  1054. if newCnode.GPUCost != "" {
  1055. gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
  1056. if err != nil {
  1057. log.Warnf("Could not parse node gpu price")
  1058. return nil, err
  1059. }
  1060. nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
  1061. }
  1062. } else {
  1063. nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
  1064. if err != nil {
  1065. log.Warnf("Could not parse node vcpu price")
  1066. return nil, err
  1067. }
  1068. }
  1069. if math.IsNaN(nodePrice) {
  1070. log.Warnf("nodePrice parsed as NaN. Setting to 0.")
  1071. nodePrice = 0
  1072. }
  1073. ramPrice := (nodePrice / ramMultiple)
  1074. if math.IsNaN(ramPrice) {
  1075. log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
  1076. ramPrice = 0
  1077. }
  1078. cpuPrice := ramPrice * cpuToRAMRatio
  1079. if defaultRAM != 0 {
  1080. newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
  1081. newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
  1082. } else { // just assign the full price to CPU
  1083. if cpu != 0 {
  1084. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
  1085. } else {
  1086. newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
  1087. }
  1088. }
  1089. newCnode.RAMBytes = fmt.Sprintf("%f", ram)
  1090. log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
  1091. }
  1092. nodes[name] = &newCnode
  1093. }
  1094. cm.pricingMetadata = pmd
  1095. cp.ApplyReservedInstancePricing(nodes)
  1096. return nodes, nil
  1097. }
  1098. // TODO: drop some logs
  1099. func (cm *CostModel) GetLBCost(cp costAnalyzerCloud.Provider) (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1100. // for fetching prices from cloud provider
  1101. // cfg, err := cp.GetConfig()
  1102. // if err != nil {
  1103. // return nil, err
  1104. // }
  1105. servicesList := cm.Cache.GetAllServices()
  1106. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1107. for _, service := range servicesList {
  1108. namespace := service.GetObjectMeta().GetNamespace()
  1109. name := service.GetObjectMeta().GetName()
  1110. key := serviceKey{
  1111. Cluster: env.GetClusterID(),
  1112. Namespace: namespace,
  1113. Service: name,
  1114. }
  1115. if service.Spec.Type == "LoadBalancer" {
  1116. loadBalancer, err := cp.LoadBalancerPricing()
  1117. if err != nil {
  1118. return nil, err
  1119. }
  1120. newLoadBalancer := *loadBalancer
  1121. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1122. address := loadBalancerIngress.IP
  1123. // Some cloud providers use hostname rather than IP
  1124. if address == "" {
  1125. address = loadBalancerIngress.Hostname
  1126. }
  1127. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1128. }
  1129. loadBalancerMap[key] = &newLoadBalancer
  1130. }
  1131. }
  1132. return loadBalancerMap, nil
  1133. }
  1134. func getPodServices(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1135. servicesList := cache.GetAllServices()
  1136. podServicesMapping := make(map[string]map[string][]string)
  1137. for _, service := range servicesList {
  1138. namespace := service.GetObjectMeta().GetNamespace()
  1139. name := service.GetObjectMeta().GetName()
  1140. key := namespace + "," + clusterID
  1141. if _, ok := podServicesMapping[key]; !ok {
  1142. podServicesMapping[key] = make(map[string][]string)
  1143. }
  1144. s := labels.Nothing()
  1145. if service.Spec.Selector != nil && len(service.Spec.Selector) > 0 {
  1146. s = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
  1147. }
  1148. for _, pod := range podList {
  1149. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1150. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1151. services, ok := podServicesMapping[key][pod.GetObjectMeta().GetName()]
  1152. if ok {
  1153. podServicesMapping[key][pod.GetObjectMeta().GetName()] = append(services, name)
  1154. } else {
  1155. podServicesMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1156. }
  1157. }
  1158. }
  1159. }
  1160. return podServicesMapping, nil
  1161. }
  1162. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1163. ssList := cache.GetAllStatefulSets()
  1164. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1165. for _, ss := range ssList {
  1166. namespace := ss.GetObjectMeta().GetNamespace()
  1167. name := ss.GetObjectMeta().GetName()
  1168. key := namespace + "," + clusterID
  1169. if _, ok := podSSMapping[key]; !ok {
  1170. podSSMapping[key] = make(map[string][]string)
  1171. }
  1172. s, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
  1173. if err != nil {
  1174. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1175. }
  1176. for _, pod := range podList {
  1177. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1178. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1179. sss, ok := podSSMapping[key][pod.GetObjectMeta().GetName()]
  1180. if ok {
  1181. podSSMapping[key][pod.GetObjectMeta().GetName()] = append(sss, name)
  1182. } else {
  1183. podSSMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1184. }
  1185. }
  1186. }
  1187. }
  1188. return podSSMapping, nil
  1189. }
  1190. func getPodDeployments(cache clustercache.ClusterCache, podList []*v1.Pod, clusterID string) (map[string]map[string][]string, error) {
  1191. deploymentsList := cache.GetAllDeployments()
  1192. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1193. for _, deployment := range deploymentsList {
  1194. namespace := deployment.GetObjectMeta().GetNamespace()
  1195. name := deployment.GetObjectMeta().GetName()
  1196. key := namespace + "," + clusterID
  1197. if _, ok := podDeploymentsMapping[key]; !ok {
  1198. podDeploymentsMapping[key] = make(map[string][]string)
  1199. }
  1200. s, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
  1201. if err != nil {
  1202. log.Errorf("Error doing deployment label conversion: " + err.Error())
  1203. }
  1204. for _, pod := range podList {
  1205. labelSet := labels.Set(pod.GetObjectMeta().GetLabels())
  1206. if s.Matches(labelSet) && pod.GetObjectMeta().GetNamespace() == namespace {
  1207. deployments, ok := podDeploymentsMapping[key][pod.GetObjectMeta().GetName()]
  1208. if ok {
  1209. podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = append(deployments, name)
  1210. } else {
  1211. podDeploymentsMapping[key][pod.GetObjectMeta().GetName()] = []string{name}
  1212. }
  1213. }
  1214. }
  1215. }
  1216. return podDeploymentsMapping, nil
  1217. }
  1218. func getPodDeploymentsWithMetrics(deploymentLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1219. podDeploymentsMapping := make(map[string]map[string][]string)
  1220. for depKey, depLabels := range deploymentLabels {
  1221. kt, err := NewKeyTuple(depKey)
  1222. if err != nil {
  1223. continue
  1224. }
  1225. namespace := kt.Namespace()
  1226. name := kt.Key()
  1227. clusterID := kt.ClusterID()
  1228. key := namespace + "," + clusterID
  1229. if _, ok := podDeploymentsMapping[key]; !ok {
  1230. podDeploymentsMapping[key] = make(map[string][]string)
  1231. }
  1232. s := labels.Set(depLabels).AsSelectorPreValidated()
  1233. for podKey, pLabels := range podLabels {
  1234. pkey, err := NewKeyTuple(podKey)
  1235. if err != nil {
  1236. continue
  1237. }
  1238. podNamespace := pkey.Namespace()
  1239. podName := pkey.Key()
  1240. podClusterID := pkey.ClusterID()
  1241. labelSet := labels.Set(pLabels)
  1242. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1243. deployments, ok := podDeploymentsMapping[key][podName]
  1244. if ok {
  1245. podDeploymentsMapping[key][podName] = append(deployments, name)
  1246. } else {
  1247. podDeploymentsMapping[key][podName] = []string{name}
  1248. }
  1249. }
  1250. }
  1251. }
  1252. // Remove any duplicate data created by metric names
  1253. pruneDuplicateData(podDeploymentsMapping)
  1254. return podDeploymentsMapping, nil
  1255. }
  1256. func getPodServicesWithMetrics(serviceLabels map[string]map[string]string, podLabels map[string]map[string]string) (map[string]map[string][]string, error) {
  1257. podServicesMapping := make(map[string]map[string][]string)
  1258. for servKey, servLabels := range serviceLabels {
  1259. kt, err := NewKeyTuple(servKey)
  1260. if err != nil {
  1261. continue
  1262. }
  1263. namespace := kt.Namespace()
  1264. name := kt.Key()
  1265. clusterID := kt.ClusterID()
  1266. key := namespace + "," + clusterID
  1267. if _, ok := podServicesMapping[key]; !ok {
  1268. podServicesMapping[key] = make(map[string][]string)
  1269. }
  1270. s := labels.Nothing()
  1271. if servLabels != nil && len(servLabels) > 0 {
  1272. s = labels.Set(servLabels).AsSelectorPreValidated()
  1273. }
  1274. for podKey, pLabels := range podLabels {
  1275. pkey, err := NewKeyTuple(podKey)
  1276. if err != nil {
  1277. continue
  1278. }
  1279. podNamespace := pkey.Namespace()
  1280. podName := pkey.Key()
  1281. podClusterID := pkey.ClusterID()
  1282. labelSet := labels.Set(pLabels)
  1283. if s.Matches(labelSet) && podNamespace == namespace && podClusterID == clusterID {
  1284. services, ok := podServicesMapping[key][podName]
  1285. if ok {
  1286. podServicesMapping[key][podName] = append(services, name)
  1287. } else {
  1288. podServicesMapping[key][podName] = []string{name}
  1289. }
  1290. }
  1291. }
  1292. }
  1293. // Remove any duplicate data created by metric names
  1294. pruneDuplicateData(podServicesMapping)
  1295. return podServicesMapping, nil
  1296. }
  1297. // This method alleviates an issue with metrics that used a '_' to replace '-' in deployment
  1298. // and service names. To avoid counting these as multiple deployments/services, we'll remove
  1299. // the '_' version. Not optimal, but takes care of the issue
  1300. func pruneDuplicateData(data map[string]map[string][]string) {
  1301. for _, podMap := range data {
  1302. for podName, values := range podMap {
  1303. podMap[podName] = pruneDuplicates(values)
  1304. }
  1305. }
  1306. }
  1307. // Determine if there is an underscore in the value of a slice. If so, replace _ with -, and then
  1308. // check to see if the result exists in the slice. If both are true, then we DO NOT include that
  1309. // original value in the new slice.
  1310. func pruneDuplicates(s []string) []string {
  1311. m := sliceToSet(s)
  1312. for _, v := range s {
  1313. if strings.Contains(v, "_") {
  1314. name := strings.Replace(v, "_", "-", -1)
  1315. if !m[name] {
  1316. m[name] = true
  1317. }
  1318. delete(m, v)
  1319. }
  1320. }
  1321. return setToSlice(m)
  1322. }
  1323. // Creates a map[string]bool containing the slice values as keys
  1324. func sliceToSet(s []string) map[string]bool {
  1325. m := make(map[string]bool)
  1326. for _, v := range s {
  1327. m[v] = true
  1328. }
  1329. return m
  1330. }
  1331. func setToSlice(m map[string]bool) []string {
  1332. var result []string
  1333. for k := range m {
  1334. result = append(result, k)
  1335. }
  1336. return result
  1337. }
  1338. func costDataPassesFilters(cm clusters.ClusterMap, costs *CostData, namespace string, cluster string) bool {
  1339. passesNamespace := namespace == "" || costs.Namespace == namespace
  1340. passesCluster := cluster == "" || costs.ClusterID == cluster || costs.ClusterName == cluster
  1341. return passesNamespace && passesCluster
  1342. }
  1343. // Finds the a closest multiple less than value
  1344. func floorMultiple(value int64, multiple int64) int64 {
  1345. return (value / multiple) * multiple
  1346. }
  1347. // Attempt to create a key for the request. Reduce the times to minutes in order to more easily group requests based on
  1348. // real time ranges. If for any reason, the key generation fails, return a uuid to ensure uniqueness.
  1349. func requestKeyFor(window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
  1350. keyLayout := "2006-01-02T15:04Z"
  1351. // We "snap" start time and duration to their closest 5 min multiple less than itself, by
  1352. // applying a snapped duration to a snapped start time.
  1353. durMins := int64(window.Minutes())
  1354. durMins = floorMultiple(durMins, 5)
  1355. sMins := int64(window.Start().Minute())
  1356. sOffset := sMins - floorMultiple(sMins, 5)
  1357. sTime := window.Start().Add(-time.Duration(sOffset) * time.Minute)
  1358. eTime := window.Start().Add(time.Duration(durMins) * time.Minute)
  1359. startKey := sTime.Format(keyLayout)
  1360. endKey := eTime.Format(keyLayout)
  1361. return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
  1362. }
  1363. // ComputeCostDataRange executes a range query for cost data.
  1364. // Note that "offset" represents the time between the function call and "endString", and is also passed for convenience
  1365. func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
  1366. // Create a request key for request grouping. This key will be used to represent the cost-model result
  1367. // for the specific inputs to prevent multiple queries for identical data.
  1368. key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1369. log.Debugf("ComputeCostDataRange with Key: %s", key)
  1370. // If there is already a request out that uses the same data, wait for it to return to share the results.
  1371. // Otherwise, start executing.
  1372. result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
  1373. return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
  1374. })
  1375. data, ok := result.(map[string]*CostData)
  1376. if !ok {
  1377. return nil, fmt.Errorf("Failed to cast result as map[string]*CostData")
  1378. }
  1379. return data, err
  1380. }
// costDataRange is the uncached implementation behind ComputeCostDataRange.
// It concurrently issues all Prometheus range queries for the window, joins
// the results with kubernetes cache data (deployments, statefulsets,
// services, namespace labels/annotations), prices PV and network usage, and
// returns one CostData entry per container key that passes the
// namespace/cluster filters.
func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window kubecost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
	clusterID := env.GetClusterID()

	// durHrs := end.Sub(start).Hours() + 1

	if window.IsOpen() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	start := *window.Start()
	end := *window.End()

	// Snap resolution to the nearest minute
	resMins := int64(math.Trunc(resolution.Minutes()))
	if resMins == 0 {
		return nil, fmt.Errorf("resolution must be greater than 0.0")
	}
	resolution = time.Duration(resMins) * time.Minute

	// Warn if resolution does not evenly divide window
	if int64(window.Minutes())%int64(resolution.Minutes()) != 0 {
		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
	}

	// Convert to Prometheus-style duration string in terms of m or h
	resStr := fmt.Sprintf("%dm", resMins)
	if resMins%60 == 0 {
		resStr = fmt.Sprintf("%dh", resMins/60)
	}

	// Remote mode bypasses Prometheus entirely and reads from the SQL store.
	if remoteEnabled {
		remoteLayout := "2006-01-02T15:04:05Z"
		remoteStartStr := window.Start().Format(remoteLayout)
		remoteEndStr := window.End().Format(remoteLayout)
		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
	}

	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()

	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)

	// Render the query templates. The empty-string arguments fill optional
	// filter slots in the format strings — presumably additional label
	// filters; confirm against the query template definitions.
	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, resStr, "", resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, resStr, "", resStr, "", env.GetPromClusterLabel())
	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, resStr, "", resStr, "", env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, resStr, "", env.GetPromClusterLabel())
	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, resStr, "", resStr, "", resolution.Hours(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), resStr, "", env.GetPromClusterLabel())
	queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, resStr)
	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, resStr, "", env.GetPromClusterLabel())
	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, resStr, "", env.GetPromClusterLabel())
	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, resStr, "", env.GetPromClusterLabel())
	queryNormalization := fmt.Sprintf(normalizationStr, resStr, "")

	// Submit all queries for concurrent evaluation
	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, resStr), start, end, resolution)
	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, resStr), start, end, resolution)
	resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, resStr), start, end, resolution)
	resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, resStr), start, end, resolution)
	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, resStr), start, end, resolution)
	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, resStr), start, end, resolution)
	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, resStr), start, end, resolution)
	resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterLabel()), start, end, resolution)
	resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterLabel()), start, end, resolution)
	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)

	// Pull k8s pod, controller, service, and namespace details
	// (from the local cluster cache while the Prometheus queries run).
	podlist := cm.Cache.GetAllPods()

	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}

	podStatefulsetsMapping, err := getPodStatefulsets(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}

	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}

	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}

	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, fmt.Errorf("error querying the kubernetes API: %s", err)
	}

	// Process query results. Handle errors afterwards using ctx.Errors.
	resRAMRequests, _ := resChRAMRequests.Await()
	resRAMUsage, _ := resChRAMUsage.Await()
	resRAMAlloc, _ := resChRAMAlloc.Await()
	resCPURequests, _ := resChCPURequests.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resCPUAlloc, _ := resChCPUAlloc.Await()
	resGPURequests, _ := resChGPURequests.Await()
	resPVRequests, _ := resChPVRequests.Await()
	resPVCAlloc, _ := resChPVCAlloc.Await()
	resPVHourlyCost, _ := resChPVHourlyCost.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()
	resNSLabels, _ := resChNSLabels.Await()
	resPodLabels, _ := resChPodLabels.Await()
	resNSAnnotations, _ := resChNSAnnotations.Await()
	resPodAnnotations, _ := resChPodAnnotations.Await()
	resServiceLabels, _ := resChServiceLabels.Await()
	resDeploymentLabels, _ := resChDeploymentLabels.Await()
	resStatefulsetLabels, _ := resChStatefulsetLabels.Await()
	resDaemonsets, _ := resChDaemonsets.Await()
	resJobs, _ := resChJobs.Await()
	resNormalization, _ := resChNormalization.Await()

	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if ctx.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass them the error
		// back to the caller. The caller should handle the specific case where error is an ErrorCollection
		for _, promErr := range ctx.Errors() {
			if promErr.Error != nil {
				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
			}
			if promErr.ParseError != nil {
				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
			}
		}

		// ErrorCollection is an collection of errors wrapped in a single error implementation
		return nil, ctx.ErrorCollection()
	}

	normalizationValue, err := getNormalizations(resNormalization)
	if err != nil {
		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
		return nil, prom.WrapError(err, msg)
	}

	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
	if err != nil {
		// Just log for compatibility with KSM less than 1.6
		log.Infof("Unable to get PV Data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = addPVData(cm.Cache, pvClaimMapping, cp)
		if err != nil {
			return nil, fmt.Errorf("pvClaimMapping: %s", err)
		}
	}

	pvCostMapping, err := GetPVCostMetrics(resPVHourlyCost, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Hourly Cost Data: %s", err.Error())
	}

	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvAllocationMapping, err := GetPVAllocationMetrics(resPVCAlloc, clusterID)
	if err != nil {
		log.Errorf("Unable to get PV Allocation Cost Data: %s", err.Error())
	}
	if pvAllocationMapping != nil {
		addMetricPVData(pvAllocationMapping, pvCostMapping, cp)
		// Start by assuming every PVC is unmounted; pods encountered in the
		// main loop below delete their keys from unmountedPVs.
		for k, v := range pvAllocationMapping {
			unmountedPVs[k] = v
		}
	}

	nsLabels, err := GetNamespaceLabelsMetrics(resNSLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Labels for Metrics: %s", err.Error())
	}
	if nsLabels != nil {
		mergeStringMap(namespaceLabelsMapping, nsLabels)
	}

	podLabels, err := GetPodLabelsMetrics(resPodLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Labels for Metrics: %s", err.Error())
	}

	nsAnnotations, err := GetNamespaceAnnotationsMetrics(resNSAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Namespace Annotations for Metrics: %s", err.Error())
	}
	if nsAnnotations != nil {
		mergeStringMap(namespaceAnnotationsMapping, nsAnnotations)
	}

	podAnnotations, err := GetPodAnnotationsMetrics(resPodAnnotations, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Annotations for Metrics: %s", err.Error())
	}

	serviceLabels, err := GetServiceSelectorLabelsMetrics(resServiceLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Service Selector Labels for Metrics: %s", err.Error())
	}

	deploymentLabels, err := GetDeploymentMatchLabelsMetrics(resDeploymentLabels, clusterID)
	if err != nil {
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}

	statefulsetLabels, err := GetStatefulsetMatchLabelsMetrics(resStatefulsetLabels, clusterID)
	if err != nil {
		// NOTE(review): message says "Deployment" but this is the statefulset
		// query — left unchanged in this documentation-only pass.
		log.Errorf("Unable to get Deployment Match Labels for Metrics: %s", err.Error())
	}

	// Statefulsets reuse the deployment label-matching helper: both map
	// selector labels to the pods they match.
	podStatefulsetMetricsMapping, err := getPodDeploymentsWithMetrics(statefulsetLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Statefulset Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podStatefulsetsMapping, podStatefulsetMetricsMapping)

	podDeploymentsMetricsMapping, err := getPodDeploymentsWithMetrics(deploymentLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Deployment Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podDeploymentsMapping, podDeploymentsMetricsMapping)

	podDaemonsets, err := GetPodDaemonsetsWithMetrics(resDaemonsets, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Daemonsets for Metrics: %s", err.Error())
	}

	podJobs, err := GetPodJobsWithMetrics(resJobs, clusterID)
	if err != nil {
		log.Errorf("Unable to get Pod Jobs for Metrics: %s", err.Error())
	}

	podServicesMetricsMapping, err := getPodServicesWithMetrics(serviceLabels, podLabels)
	if err != nil {
		log.Errorf("Unable to get match Service Labels Metrics to Pods: %s", err.Error())
	}
	appendLabelsList(podServicesMapping, podServicesMetricsMapping)

	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Errorf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}

	containerNameCost := make(map[string]*CostData)
	// containers is the union of every container key seen by any metric, so a
	// container missing one metric type still produces a CostData entry.
	containers := make(map[string]bool)
	otherClusterPVRecorded := make(map[string]bool)

	RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
	}
	for key := range RAMReqMap {
		containers[key] = true
	}

	RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}

	CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
	}
	for key := range CPUReqMap {
		containers[key] = true
	}

	// No need to normalize here, as this comes from a counter, namely:
	// rate(container_cpu_usage_seconds_total) which properly accounts for normalized rates
	CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}

	RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
	}
	for key := range RAMAllocMap {
		containers[key] = true
	}

	CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
	}
	for key := range CPUAllocMap {
		containers[key] = true
	}

	GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
	if err != nil {
		return nil, prom.WrapError(err, "GetContainerMetricVectors(GPURequests)")
	}
	for key := range GPUReqMap {
		containers[key] = true
	}

	// Request metrics can show up after pod eviction and completion.
	// This method synchronizes requests to allocations such that when
	// allocation is 0, so are requests
	applyAllocationToRequests(RAMAllocMap, RAMReqMap)
	applyAllocationToRequests(CPUAllocMap, CPUReqMap)

	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	// NOTE(review): missingContainers is populated below but never read in
	// this function — possibly vestigial; confirm before removing.
	missingContainers := make(map[string]*CostData)

	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		c, _ := NewContainerMetricFromKey(key)

		// Look up each metric series for this container, defaulting to an
		// empty vector slice when a series is absent.
		RAMReqV, ok := RAMReqMap[key]
		if !ok {
			log.Debug("no RAM requests for " + key)
			RAMReqV = []*util.Vector{}
		}

		RAMUsedV, ok := RAMUsedMap[key]
		if !ok {
			log.Debug("no RAM usage for " + key)
			RAMUsedV = []*util.Vector{}
		}

		CPUReqV, ok := CPUReqMap[key]
		if !ok {
			log.Debug("no CPU requests for " + key)
			CPUReqV = []*util.Vector{}
		}

		CPUUsedV, ok := CPUUsedMap[key]
		if !ok {
			log.Debug("no CPU usage for " + key)
			CPUUsedV = []*util.Vector{}
		}

		RAMAllocsV, ok := RAMAllocMap[key]
		if !ok {
			log.Debug("no RAM allocation for " + key)
			RAMAllocsV = []*util.Vector{}
		}

		CPUAllocsV, ok := CPUAllocMap[key]
		if !ok {
			log.Debug("no CPU allocation for " + key)
			CPUAllocsV = []*util.Vector{}
		}

		GPUReqV, ok := GPUReqMap[key]
		if !ok {
			log.Debug("no GPU requests for " + key)
			GPUReqV = []*util.Vector{}
		}

		// Nodes not yet seen get an empty placeholder that is filled in by
		// findDeletedNodeInfo after the loop.
		var node *costAnalyzerCloud.Node
		if n, ok := missingNodes[c.NodeName]; ok {
			node = n
		} else {
			node = &costAnalyzerCloud.Node{}
			missingNodes[c.NodeName] = node
		}

		nsKey := c.Namespace + "," + c.ClusterID
		podKey := c.Namespace + "," + c.PodName + "," + c.ClusterID

		// Pod labels/annotations inherit namespace-level values without
		// overriding keys the pod already defines.
		namespaceLabels, _ := namespaceLabelsMapping[nsKey]

		pLabels := podLabels[podKey]
		if pLabels == nil {
			pLabels = make(map[string]string)
		}

		for k, v := range namespaceLabels {
			if _, ok := pLabels[k]; !ok {
				pLabels[k] = v
			}
		}

		namespaceAnnotations, _ := namespaceAnnotationsMapping[nsKey]

		pAnnotations := podAnnotations[podKey]
		if pAnnotations == nil {
			pAnnotations = make(map[string]string)
		}

		for k, v := range namespaceAnnotations {
			if _, ok := pAnnotations[k]; !ok {
				pAnnotations[k] = v
			}
		}

		var podDeployments []string
		if _, ok := podDeploymentsMapping[nsKey]; ok {
			if ds, ok := podDeploymentsMapping[nsKey][c.PodName]; ok {
				podDeployments = ds
			} else {
				podDeployments = []string{}
			}
		}

		var podStatefulSets []string
		if _, ok := podStatefulsetsMapping[nsKey]; ok {
			if ss, ok := podStatefulsetsMapping[nsKey][c.PodName]; ok {
				podStatefulSets = ss
			} else {
				podStatefulSets = []string{}
			}
		}

		var podServices []string
		if _, ok := podServicesMapping[nsKey]; ok {
			if svcs, ok := podServicesMapping[nsKey][c.PodName]; ok {
				podServices = svcs
			} else {
				podServices = []string{}
			}
		}

		var podPVs []*PersistentVolumeClaimData
		var podNetCosts []*util.Vector

		// For PVC data, we'll need to find the claim mapping and cost data. Will need to append
		// cost data since that was populated by cluster data previously. We do this with
		// the pod_pvc_allocation metric
		podPVData, ok := pvAllocationMapping[podKey]
		if !ok {
			log.Debugf("Failed to locate pv allocation mapping for missing pod.")
		}

		// Delete the current pod key from potentially unmounted pvs
		delete(unmountedPVs, podKey)

		// For network costs, we'll use existing map since it should still contain the
		// correct data.
		var podNetworkCosts []*util.Vector
		if usage, ok := networkUsageMap[podKey]; ok {
			netCosts, err := GetNetworkCost(usage, cp)
			if err != nil {
				log.Errorf("Error pulling network costs: %s", err.Error())
			} else {
				podNetworkCosts = netCosts
			}
		}

		// Check to see if any other data has been recorded for this namespace, pod, clusterId
		// Follow the pattern of only allowing claims data per pod
		if !otherClusterPVRecorded[podKey] {
			otherClusterPVRecorded[podKey] = true

			podPVs = podPVData
			podNetCosts = podNetworkCosts
		}

		pds := []string{}
		if ds, ok := podDaemonsets[podKey]; ok {
			pds = []string{ds}
		}

		jobs := []string{}
		if job, ok := podJobs[podKey]; ok {
			jobs = []string{job}
		}

		costs := &CostData{
			Name:            c.ContainerName,
			PodName:         c.PodName,
			NodeName:        c.NodeName,
			NodeData:        node,
			Namespace:       c.Namespace,
			Services:        podServices,
			Deployments:     podDeployments,
			Daemonsets:      pds,
			Statefulsets:    podStatefulSets,
			Jobs:            jobs,
			RAMReq:          RAMReqV,
			RAMUsed:         RAMUsedV,
			CPUReq:          CPUReqV,
			CPUUsed:         CPUUsedV,
			RAMAllocation:   RAMAllocsV,
			CPUAllocation:   CPUAllocsV,
			GPUReq:          GPUReqV,
			Annotations:     pAnnotations,
			Labels:          pLabels,
			NamespaceLabels: namespaceLabels,
			PVCData:         podPVs,
			NetworkData:     podNetCosts,
			ClusterID:       c.ClusterID,
			ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
		}

		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}

	// PVCs that no surviving pod claimed are emitted as synthetic
	// "unmounted" cost entries.
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)

		if costDataPassesFilters(cm.ClusterMap, costs, filterNamespace, filterCluster) {
			containerNameCost[k] = costs
		}
	}

	// Backfill pricing data for nodes that have since been deleted.
	if window.Minutes() > 0 {
		dur, off := window.DurationOffsetStrings()
		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
		if err != nil {
			log.Errorf("Error fetching historical node data: %s", err.Error())
		}
	}

	return containerNameCost, nil
}
  1844. func applyAllocationToRequests(allocationMap map[string][]*util.Vector, requestMap map[string][]*util.Vector) {
  1845. // The result of the normalize operation will be a new []*util.Vector to replace the requests
  1846. normalizeOp := func(r *util.Vector, x *float64, y *float64) bool {
  1847. // Omit data (return false) if both x and y inputs don't exist
  1848. if x == nil || y == nil {
  1849. return false
  1850. }
  1851. // If the allocation value is 0, 0 out request value
  1852. if *x == 0 {
  1853. r.Value = 0
  1854. } else {
  1855. r.Value = *y
  1856. }
  1857. return true
  1858. }
  1859. // Run normalization on all request vectors in the mapping
  1860. for k, requests := range requestMap {
  1861. // Only run normalization where there are valid allocations
  1862. allocations, ok := allocationMap[k]
  1863. if !ok {
  1864. delete(requestMap, k)
  1865. continue
  1866. }
  1867. // Replace request map with normalized
  1868. requestMap[k] = util.ApplyVectorOp(allocations, requests, normalizeOp)
  1869. }
  1870. }
  1871. func addMetricPVData(pvAllocationMap map[string][]*PersistentVolumeClaimData, pvCostMap map[string]*costAnalyzerCloud.PV, cp costAnalyzerCloud.Provider) {
  1872. cfg, err := cp.GetConfig()
  1873. if err != nil {
  1874. log.Errorf("Failed to get provider config while adding pv metrics data.")
  1875. return
  1876. }
  1877. for _, pvcDataArray := range pvAllocationMap {
  1878. for _, pvcData := range pvcDataArray {
  1879. costKey := fmt.Sprintf("%s,%s", pvcData.VolumeName, pvcData.ClusterID)
  1880. pvCost, ok := pvCostMap[costKey]
  1881. if !ok {
  1882. pvcData.Volume = &costAnalyzerCloud.PV{
  1883. Cost: cfg.Storage,
  1884. }
  1885. continue
  1886. }
  1887. pvcData.Volume = pvCost
  1888. }
  1889. }
  1890. }
  1891. // Add values that don't already exist in origMap from mergeMap into origMap
  1892. func mergeStringMap(origMap map[string]map[string]string, mergeMap map[string]map[string]string) {
  1893. for k, v := range mergeMap {
  1894. if _, ok := origMap[k]; !ok {
  1895. origMap[k] = v
  1896. }
  1897. }
  1898. }
  1899. func appendLabelsList(mainLabels map[string]map[string][]string, labels map[string]map[string][]string) {
  1900. for k, v := range labels {
  1901. mainLabels[k] = v
  1902. }
  1903. }
  1904. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1905. nsToLabels := make(map[string]map[string]string)
  1906. nss := cache.GetAllNamespaces()
  1907. for _, ns := range nss {
  1908. labels := make(map[string]string)
  1909. for k, v := range ns.Labels {
  1910. labels[prom.SanitizeLabelName(k)] = v
  1911. }
  1912. nsToLabels[ns.Name+","+clusterID] = labels
  1913. }
  1914. return nsToLabels, nil
  1915. }
  1916. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1917. nsToAnnotations := make(map[string]map[string]string)
  1918. nss := cache.GetAllNamespaces()
  1919. for _, ns := range nss {
  1920. annotations := make(map[string]string)
  1921. for k, v := range ns.Annotations {
  1922. annotations[prom.SanitizeLabelName(k)] = v
  1923. }
  1924. nsToAnnotations[ns.Name+","+clusterID] = annotations
  1925. }
  1926. return nsToAnnotations, nil
  1927. }
  1928. func getDaemonsetsOfPod(pod v1.Pod) []string {
  1929. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  1930. if ownerReference.Kind == "DaemonSet" {
  1931. return []string{ownerReference.Name}
  1932. }
  1933. }
  1934. return []string{}
  1935. }
  1936. func getJobsOfPod(pod v1.Pod) []string {
  1937. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  1938. if ownerReference.Kind == "Job" {
  1939. return []string{ownerReference.Name}
  1940. }
  1941. }
  1942. return []string{}
  1943. }
  1944. func getStatefulSetsOfPod(pod v1.Pod) []string {
  1945. for _, ownerReference := range pod.ObjectMeta.OwnerReferences {
  1946. if ownerReference.Kind == "StatefulSet" {
  1947. return []string{ownerReference.Name}
  1948. }
  1949. }
  1950. return []string{}
  1951. }
  1952. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  1953. daemonsets := cache.GetAllDaemonSets()
  1954. vgpuCount := 0.0
  1955. for _, ds := range daemonsets {
  1956. dsContainerList := &ds.Spec.Template.Spec.Containers
  1957. for _, ctnr := range *dsContainerList {
  1958. if ctnr.Args != nil {
  1959. for _, arg := range ctnr.Args {
  1960. if strings.Contains(arg, "--vgpu=") {
  1961. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  1962. if err != nil {
  1963. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  1964. continue
  1965. }
  1966. vgpuCount = vgpus
  1967. return vgpuCount, nil
  1968. }
  1969. }
  1970. }
  1971. }
  1972. }
  1973. return vgpuCount, nil
  1974. }
// PersistentVolumeClaimData couples a persistent volume claim with the
// priced volume that backs it and the claim's cost/usage vectors over the
// queried window.
type PersistentVolumeClaimData struct {
	// Class is the storage class of the claim.
	Class string `json:"class"`
	// Claim is the PVC name.
	Claim string `json:"claim"`
	// Namespace is the namespace containing the claim.
	Namespace string `json:"namespace"`
	// ClusterID identifies the cluster the claim belongs to.
	ClusterID string `json:"clusterId"`
	// TimesClaimed counts claims against the volume — presumably the number
	// of pods mounting it; confirm against the code that populates it.
	TimesClaimed int `json:"timesClaimed"`
	// VolumeName is the name of the bound persistent volume.
	VolumeName string `json:"volumeName"`
	// Volume carries the priced PV backing the claim (see addMetricPVData).
	Volume *costAnalyzerCloud.PV `json:"persistentVolume"`
	// Values holds the claim's time-series samples for the window.
	Values []*util.Vector `json:"values"`
}
  1985. func measureTime(start time.Time, threshold time.Duration, name string) {
  1986. elapsed := time.Since(start)
  1987. if elapsed > threshold {
  1988. log.Infof("[Profiler] %s: %s", elapsed, name)
  1989. }
  1990. }
  1991. func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch chan string) {
  1992. elapsed := time.Since(start)
  1993. if elapsed > threshold {
  1994. ch <- fmt.Sprintf("%s took %s", name, time.Since(start))
  1995. }
  1996. }