costmodel.go 72 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189
  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "maps"
  6. "math"
  7. "regexp"
  8. "strconv"
  9. "strings"
  10. "time"
  11. "github.com/opencost/opencost/core/pkg/clustercache"
  12. "github.com/opencost/opencost/core/pkg/clusters"
  13. coreenv "github.com/opencost/opencost/core/pkg/env"
  14. "github.com/opencost/opencost/core/pkg/filter/allocation"
  15. "github.com/opencost/opencost/core/pkg/log"
  16. "github.com/opencost/opencost/core/pkg/model/kubemodel"
  17. "github.com/opencost/opencost/core/pkg/opencost"
  18. "github.com/opencost/opencost/core/pkg/source"
  19. "github.com/opencost/opencost/core/pkg/util"
  20. "github.com/opencost/opencost/core/pkg/util/promutil"
  21. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  22. km "github.com/opencost/opencost/pkg/kubemodel"
  23. v1 "k8s.io/api/core/v1"
  24. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  25. "k8s.io/apimachinery/pkg/labels"
  26. "golang.org/x/sync/singleflight"
  27. )
const (
	// profileThreshold is the elapsed-time threshold passed to measureTime;
	// sections slower than this are reported. 1s (in ns)
	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

	// unmountedPVsContainer is the synthetic container name used to attribute
	// the cost of PersistentVolumes that no pod container has mounted.
	unmountedPVsContainer = "unmounted-pvs"

	// opencost.io annotation keys for hourly cost values
	// (presumably read elsewhere in the package to override pricing — the
	// consumers are not visible in this chunk).
	annotationDomain      = "opencost.io"
	annotationStorageCost = annotationDomain + "/storage-hourly-cost"
	annotationNodeCPUCost = annotationDomain + "/node-cpu-hourly-cost"
	annotationNodeRAMCost = annotationDomain + "/node-ram-hourly-cost"
)
// isCron matches a CronJob name and captures the non-timestamp name
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
//
// e.g. "backup-1612345678" -> capture group 1 is "backup".
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel computes cost data for a Kubernetes cluster by combining live
// cluster state (Cache), cloud pricing (Provider), and historical metrics
// (DataSource).
type CostModel struct {
	// Cache is the live view of Kubernetes objects (pods, namespaces, PVs).
	Cache clustercache.ClusterCache
	// ClusterMap resolves cluster IDs to human-readable cluster names.
	ClusterMap clusters.ClusterMap
	// BatchDuration controls query batching — TODO confirm consumers; not used
	// in the code visible in this chunk.
	BatchDuration time.Duration
	// RequestGroup deduplicates concurrent identical requests (singleflight).
	RequestGroup *singleflight.Group
	// DataSource provides metric queries (Prometheus-style) for usage data.
	DataSource source.OpenCostDataSource
	// Provider supplies cloud pricing information.
	Provider costAnalyzerCloud.Provider
	// KubeModel is the model built over DataSource; required when DataSource
	// is non-nil (see NewCostModel).
	KubeModel *km.KubeModel
	// pricingMetadata records pricing-match metadata — set elsewhere in the file.
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  52. func NewCostModel(
  53. clusterUID string,
  54. dataSource source.OpenCostDataSource,
  55. provider costAnalyzerCloud.Provider,
  56. cache clustercache.ClusterCache,
  57. clusterMap clusters.ClusterMap,
  58. batchDuration time.Duration,
  59. ) *CostModel {
  60. // request grouping to prevent over-requesting the same data prior to caching
  61. requestGroup := new(singleflight.Group)
  62. var kubeModel *km.KubeModel
  63. var err error
  64. if dataSource != nil {
  65. kubeModel, err = km.NewKubeModel(clusterUID, dataSource)
  66. if err != nil {
  67. // KubeModel is required. Log a fatal error if we fail to init.
  68. log.Fatalf("error initializing KubeModel: %s", err)
  69. }
  70. }
  71. return &CostModel{
  72. Cache: cache,
  73. ClusterMap: clusterMap,
  74. BatchDuration: batchDuration,
  75. DataSource: dataSource,
  76. Provider: provider,
  77. RequestGroup: requestGroup,
  78. KubeModel: kubeModel,
  79. }
  80. }
  81. func (cm *CostModel) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
  82. if cm.KubeModel == nil {
  83. return nil, fmt.Errorf("KubeModel not initialized")
  84. }
  85. return cm.KubeModel.ComputeKubeModelSet(start, end)
  86. }
// CostData bundles everything known about the cost of a single container:
// its identity, the workloads that reference it, request/usage/allocation
// vectors per resource, attached PV claims, network cost vectors, and the
// labels/annotations used for aggregation.
type CostData struct {
	// Identity of the container and where it runs.
	Name      string                  `json:"name,omitempty"`
	PodName   string                  `json:"podName,omitempty"`
	NodeName  string                  `json:"nodeName,omitempty"`
	NodeData  *costAnalyzerCloud.Node `json:"node,omitempty"`
	Namespace string                  `json:"namespace,omitempty"`

	// Controllers/services associated with the pod (names only).
	Deployments  []string `json:"deployments,omitempty"`
	Services     []string `json:"services,omitempty"`
	Daemonsets   []string `json:"daemonsets,omitempty"`
	Statefulsets []string `json:"statefulsets,omitempty"`
	Jobs         []string `json:"jobs,omitempty"`

	// Time-series vectors: requested, used, and allocated amounts per resource.
	RAMReq        []*util.Vector `json:"ramreq,omitempty"`
	RAMUsed       []*util.Vector `json:"ramused,omitempty"`
	RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
	CPUReq        []*util.Vector `json:"cpureq,omitempty"`
	CPUUsed       []*util.Vector `json:"cpuused,omitempty"`
	CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
	GPUReq        []*util.Vector `json:"gpureq,omitempty"`

	// Storage and network cost inputs (assigned to the pod's first container
	// by ComputeCostData to avoid double counting).
	PVCData     []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
	NetworkData []*util.Vector               `json:"network,omitempty"`

	// Metadata used for grouping/aggregation.
	Annotations     map[string]string `json:"annotations,omitempty"`
	Labels          map[string]string `json:"labels,omitempty"`
	NamespaceLabels map[string]string `json:"namespaceLabels,omitempty"`
	ClusterID       string            `json:"clusterId"`
	ClusterName     string            `json:"clusterName"`
}
  113. func (cd *CostData) String() string {
  114. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  115. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  116. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  117. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  118. }
  119. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  120. hasController = false
  121. if len(cd.Deployments) > 0 {
  122. name = cd.Deployments[0]
  123. kind = "deployment"
  124. hasController = true
  125. } else if len(cd.Statefulsets) > 0 {
  126. name = cd.Statefulsets[0]
  127. kind = "statefulset"
  128. hasController = true
  129. } else if len(cd.Daemonsets) > 0 {
  130. name = cd.Daemonsets[0]
  131. kind = "daemonset"
  132. hasController = true
  133. } else if len(cd.Jobs) > 0 {
  134. name = cd.Jobs[0]
  135. kind = "job"
  136. hasController = true
  137. match := isCron.FindStringSubmatch(name)
  138. if match != nil {
  139. name = match[1]
  140. }
  141. }
  142. return name, kind, hasController
  143. }
// ComputeCostData assembles per-container cost data for the window
// [start, end]. It merges three inputs: live Kubernetes state from the
// cluster cache (pods, namespaces, PVs, controllers), metric usage data from
// the data source (RAM/CPU usage, network transfer), and node pricing from
// GetNodeCost. The result maps container keys (see ContainerMetric.Key) to
// CostData. Containers that appear in metrics but no longer exist in the
// cluster get partial data (usage only, no requests) and are queued for
// historical backfill via findDeletedNodeInfo/findDeletedPodInfo. PVs never
// claimed by a running pod are emitted under synthetic "unmounted-pvs"
// containers per namespace.
func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
	// Cluster ID is specific to the source cluster
	clusterID := coreenv.GetClusterID()
	cp := cm.Provider
	ds := cm.DataSource
	mq := ds.Metrics()

	// Get Kubernetes data
	// Pull pod information from k8s API
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}

	// Get metrics data. Metric errors are non-fatal: we proceed with whatever
	// partial results came back (pure-exporter behavior, see queryMetrics).
	resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayRequests, resNetNatGatewayIngressRequests, err := queryMetrics(mq, start, end)
	if err != nil {
		log.Warnf("ComputeCostData: continuing despite metrics errors: %s", err)
	}

	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")

	nodes, err := cm.GetNodeCost()
	if err != nil {
		log.Warnf("GetNodeCost: no node cost model available: %s", err)
		return nil, err
	}

	// Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
	if err != nil {
		log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = cm.addPVData(pvClaimMapping)
		if err != nil {
			return nil, err
		}
		// copy claim mappings into zombies, then remove as they're discovered
		for k, v := range pvClaimMapping {
			unmountedPVs[k] = []*PersistentVolumeClaimData{v}
		}
	}

	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayRequests, resNetNatGatewayIngressRequests, clusterID)
	if err != nil {
		log.Warnf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}

	containerNameCost := make(map[string]*CostData)
	// containers is the union of every container key seen in metrics or in the
	// live pod list; it drives the main loop below.
	containers := make(map[string]bool)

	RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, clusterID)
	if err != nil {
		return nil, err
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, clusterID) // No need to normalize here, as this comes from a counter
	if err != nil {
		return nil, err
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}

	// currentContainers maps container keys to the (running) pod that owns
	// them, distinguishing live containers from deleted ones below.
	currentContainers := make(map[string]clustercache.Pod)
	for _, pod := range podlist {
		if pod.Status.Phase != v1.PodRunning {
			continue
		}
		cs, err := NewContainerMetricsFromPod(pod, clusterID)
		if err != nil {
			return nil, err
		}
		for _, c := range cs {
			containers[c.Key()] = true // captures any containers that existed for a time < a metrics scrape interval. We currently charge 0 for this but should charge something.
			currentContainers[c.Key()] = *pod
		}
	}

	// missingNodes/missingContainers collect entries whose node or pod no
	// longer exists; they are backfilled from historical metrics after the loop.
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		// The _else_ case for this statement is the case in which the container has been
		// deleted so we have usage information but not request information. In that case,
		// we return partial data for CPU and RAM: only usage and not requests.
		if pod, ok := currentContainers[key]; ok {
			podName := pod.Name
			ns := pod.Namespace

			// Merge namespace labels/annotations into the pod's, with the
			// pod's own entries taking precedence on key conflicts.
			nsLabels := namespaceLabelsMapping[ns+","+clusterID]
			podLabels := maps.Clone(pod.Labels)
			if podLabels == nil {
				podLabels = make(map[string]string)
			}
			for k, v := range nsLabels {
				if _, ok := podLabels[k]; !ok {
					podLabels[k] = v
				}
			}

			nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
			podAnnotations := pod.Annotations
			if podAnnotations == nil {
				podAnnotations = make(map[string]string)
			}
			for k, v := range nsAnnotations {
				if _, ok := podAnnotations[k]; !ok {
					podAnnotations[k] = v
				}
			}

			nodeName := pod.Spec.NodeName
			var nodeData *costAnalyzerCloud.Node
			if _, ok := nodes[nodeName]; ok {
				nodeData = nodes[nodeName]
			}

			nsKey := ns + "," + clusterID

			var podDeployments []string
			if _, ok := podDeploymentsMapping[nsKey]; ok {
				if ds, ok := podDeploymentsMapping[nsKey][pod.Name]; ok {
					podDeployments = ds
				} else {
					podDeployments = []string{}
				}
			}

			// Resolve PV claims mounted by this pod; each resolved claim is
			// removed from the unmounted-PV candidate set.
			var podPVs []*PersistentVolumeClaimData
			podClaims := pod.Spec.Volumes
			for _, vol := range podClaims {
				if vol.PersistentVolumeClaim != nil {
					name := vol.PersistentVolumeClaim.ClaimName
					key := ns + "," + name + "," + clusterID
					if pvClaim, ok := pvClaimMapping[key]; ok {
						pvClaim.TimesClaimed++
						podPVs = append(podPVs, pvClaim)

						// Remove entry from potential unmounted pvs
						delete(unmountedPVs, key)
					}
				}
			}

			var podNetCosts []*util.Vector
			if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
				netCosts, err := GetNetworkCost(usage, cp)
				if err != nil {
					log.Debugf("Error pulling network costs: %s", err.Error())
				} else {
					podNetCosts = netCosts
				}
			}

			var podServices []string
			if _, ok := podServicesMapping[nsKey]; ok {
				if svcs, ok := podServicesMapping[nsKey][pod.Name]; ok {
					podServices = svcs
				} else {
					podServices = []string{}
				}
			}

			for i, container := range pod.Spec.Containers {
				containerName := container.Name

				// recreate the key and look up data for this container
				newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()

				// k8s.io/apimachinery/pkg/api/resource/amount.go and
				// k8s.io/apimachinery/pkg/api/resource/quantity.go for
				// details on the "amount" API. See
				// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
				// for the units of memory and CPU.
				ramRequestBytes := container.Resources.Requests.Memory().Value()

				// Because information on container RAM & CPU requests isn't
				// coming from metrics, it won't have a timestamp associated
				// with it. We need to provide a timestamp.
				RAMReqV := []*util.Vector{
					{
						Value:     float64(ramRequestBytes),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				// use millicores so we can convert to cores in a float64 format
				cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
				CPUReqV := []*util.Vector{
					{
						Value:     float64(cpuRequestMilliCores) / 1000,
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				// GPU request: first matching source wins, requests before
				// limits, NVIDIA before AWS vGPU.
				gpuReqCount := 0.0
				if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				}
				GPUReqV := []*util.Vector{
					{
						Value:     float64(gpuReqCount),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				RAMUsedV, ok := RAMUsedMap[newKey]
				if !ok {
					log.Debug("no RAM usage for " + newKey)
					RAMUsedV = []*util.Vector{{}}
				}
				CPUUsedV, ok := CPUUsedMap[newKey]
				if !ok {
					log.Debug("no CPU usage for " + newKey)
					CPUUsedV = []*util.Vector{{}}
				}

				var pvReq []*PersistentVolumeClaimData
				var netReq []*util.Vector
				if i == 0 { // avoid duplicating by just assigning all claims to the first container.
					pvReq = podPVs
					netReq = podNetCosts
				}

				costs := &CostData{
					Name:            containerName,
					PodName:         podName,
					NodeName:        nodeName,
					Namespace:       ns,
					Deployments:     podDeployments,
					Services:        podServices,
					Daemonsets:      getDaemonsetsOfPod(pod),
					Jobs:            getJobsOfPod(pod),
					Statefulsets:    getStatefulSetsOfPod(pod),
					NodeData:        nodeData,
					RAMReq:          RAMReqV,
					RAMUsed:         RAMUsedV,
					CPUReq:          CPUReqV,
					CPUUsed:         CPUUsedV,
					GPUReq:          GPUReqV,
					PVCData:         pvReq,
					NetworkData:     netReq,
					Annotations:     podAnnotations,
					Labels:          podLabels,
					NamespaceLabels: nsLabels,
					ClusterID:       clusterID,
					ClusterName:     cm.ClusterMap.NameFor(clusterID),
				}

				// Allocation is derived from the first (request, usage) vector
				// pair only — see getContainerAllocation.
				var cpuReq, cpuUse *util.Vector
				if len(costs.CPUReq) > 0 {
					cpuReq = costs.CPUReq[0]
				}
				if len(costs.CPUUsed) > 0 {
					cpuUse = costs.CPUUsed[0]
				}
				costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

				var ramReq, ramUse *util.Vector
				if len(costs.RAMReq) > 0 {
					ramReq = costs.RAMReq[0]
				}
				if len(costs.RAMUsed) > 0 {
					ramUse = costs.RAMUsed[0]
				}
				costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

				containerNameCost[newKey] = costs
			}
		} else {
			// The container has been deleted. Not all information is sent to metrics via ksm, so fill out what we can without k8s api
			log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
			c, err := NewContainerMetricFromKey(key)
			if err != nil {
				return nil, err
			}

			// CPU and RAM requests are obtained from the Kubernetes API.
			// If this case has been reached, the Kubernetes API will not
			// have information about the pod because it no longer exists.
			//
			// The case where this matters is minimal, mainly in environments
			// with very short-lived pods that over-request resources.
			RAMReqV := []*util.Vector{{}}
			CPUReqV := []*util.Vector{{}}
			GPUReqV := []*util.Vector{{}}

			RAMUsedV, ok := RAMUsedMap[key]
			if !ok {
				log.Debug("no RAM usage for " + key)
				RAMUsedV = []*util.Vector{{}}
			}
			CPUUsedV, ok := CPUUsedMap[key]
			if !ok {
				log.Debug("no CPU usage for " + key)
				CPUUsedV = []*util.Vector{{}}
			}

			// Node is also gone from the cluster cache: register a shared
			// placeholder in missingNodes to be filled by findDeletedNodeInfo.
			node, ok := nodes[c.NodeName]
			if !ok {
				log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
				if n, ok := missingNodes[c.NodeName]; ok {
					node = n
				} else {
					node = &costAnalyzerCloud.Node{}
					missingNodes[c.NodeName] = node
				}
			}
			namespacelabels := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
			namespaceAnnotations := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]

			costs := &CostData{
				Name:            c.ContainerName,
				PodName:         c.PodName,
				NodeName:        c.NodeName,
				NodeData:        node,
				Namespace:       c.Namespace,
				RAMReq:          RAMReqV,
				RAMUsed:         RAMUsedV,
				CPUReq:          CPUReqV,
				CPUUsed:         CPUUsedV,
				GPUReq:          GPUReqV,
				Annotations:     namespaceAnnotations,
				NamespaceLabels: namespacelabels,
				ClusterID:       c.ClusterID,
				ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
			}

			var cpuReq, cpuUse *util.Vector
			if len(costs.CPUReq) > 0 {
				cpuReq = costs.CPUReq[0]
			}
			if len(costs.CPUUsed) > 0 {
				cpuUse = costs.CPUUsed[0]
			}
			costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

			var ramReq, ramUse *util.Vector
			if len(costs.RAMReq) > 0 {
				ramReq = costs.RAMReq[0]
			}
			if len(costs.RAMUsed) > 0 {
				ramUse = costs.RAMUsed[0]
			}
			costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}

	// Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
	// to pass along the cost data
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		containerNameCost[k] = costs
	}

	// Backfill deleted nodes/pods from historical metrics; failures here are
	// logged but do not discard the computed data.
	err = findDeletedNodeInfo(cm.DataSource, missingNodes, start, end)
	if err != nil {
		log.Errorf("Error fetching historical node data: %s", err.Error())
	}
	err = findDeletedPodInfo(cm.DataSource, missingContainers, start, end)
	if err != nil {
		log.Errorf("Error fetching historical pod data: %s", err.Error())
	}
	return containerNameCost, err
}
// queryMetrics fans out the seven metric queries needed by ComputeCostData
// (RAM usage, CPU usage, and five network-transfer series) concurrently via a
// QueryGroup, awaits them all, and returns whatever results came back. On
// query/parse errors it still returns the (possibly partial) results, plus
// the group's combined error, so callers can run in a degraded "pure
// exporter" mode rather than failing outright.
func queryMetrics(mq source.MetricsQuerier, start, end time.Time) ([]*source.ContainerMetricResult, []*source.ContainerMetricResult, []*source.NetZoneGiBResult, []*source.NetRegionGiBResult, []*source.NetInternetGiBResult, []*source.NetNatGatewayGiBResult, []*source.NetNatGatewayIngressGiBResult, error) {
	grp := source.NewQueryGroup()

	// Kick off all queries concurrently; each returns an awaitable channel.
	resChRAMUsage := source.WithGroup(grp, mq.QueryRAMUsageAvg(start, end))
	resChCPUUsage := source.WithGroup(grp, mq.QueryCPUUsageAvg(start, end))
	resChNetZoneRequests := source.WithGroup(grp, mq.QueryNetZoneGiB(start, end))
	resChNetRegionRequests := source.WithGroup(grp, mq.QueryNetRegionGiB(start, end))
	resChNetInternetRequests := source.WithGroup(grp, mq.QueryNetInternetGiB(start, end))
	resChNetNatGatewayEgressRequests := source.WithGroup(grp, mq.QueryNetNatGatewayGiB(start, end))
	resChNetNatGatewayIngressRequests := source.WithGroup(grp, mq.QueryNetNatGatewayIngressGiB(start, end))

	// Process metrics query results. Handle errors using ctx.Errors.
	// Per-await errors are ignored here because the group collects them.
	resRAMUsage, _ := resChRAMUsage.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()
	resNetNatGatewayEgressRequests, _ := resChNetNatGatewayEgressRequests.Await()
	resNetNatGatewayIngressRequests, _ := resChNetNatGatewayIngressRequests.Await()

	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if grp.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass them the error
		// back to the caller. The caller should handle the specific case where error is an ErrorCollection
		for _, queryErr := range grp.Errors() {
			if queryErr.Error != nil {
				log.Errorf("ComputeCostData: Request Error: %s", queryErr.Error)
			}
			if queryErr.ParseError != nil {
				log.Errorf("ComputeCostData: Parsing Error: %s", queryErr.ParseError)
			}
		}
		// ErrorCollection is an collection of errors wrapped in a single error implementation
		// We opt to not return an error for the sake of running as a pure exporter.
		return resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayEgressRequests, resNetNatGatewayIngressRequests, grp.Error()
	}
	return resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayEgressRequests, resNetNatGatewayIngressRequests, nil
}
  536. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  537. costs := make(map[string]*CostData)
  538. if len(unmountedPVs) == 0 {
  539. return costs
  540. }
  541. for k, pv := range unmountedPVs {
  542. keyParts := strings.Split(k, ",")
  543. if len(keyParts) != 3 {
  544. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  545. continue
  546. }
  547. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  548. namespacelabels := namespaceLabelsMapping[ns+","+clusterID]
  549. namespaceAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  550. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  551. key := metric.Key()
  552. if costData, ok := costs[key]; !ok {
  553. costs[key] = &CostData{
  554. Name: unmountedPVsContainer,
  555. PodName: unmountedPVsContainer,
  556. NodeName: "",
  557. Annotations: namespaceAnnotations,
  558. Namespace: ns,
  559. NamespaceLabels: namespacelabels,
  560. Labels: namespacelabels,
  561. ClusterID: clusterID,
  562. ClusterName: clusterMap.NameFor(clusterID),
  563. PVCData: pv,
  564. }
  565. } else {
  566. costData.PVCData = append(costData.PVCData, pv...)
  567. }
  568. }
  569. return costs
  570. }
  571. func findDeletedPodInfo(dataSource source.OpenCostDataSource, missingContainers map[string]*CostData, start, end time.Time) error {
  572. if len(missingContainers) > 0 {
  573. mq := dataSource.Metrics()
  574. podLabelsResCh := mq.QueryPodLabels(start, end)
  575. podLabelsResult, err := podLabelsResCh.Await()
  576. if err != nil {
  577. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  578. }
  579. podLabels := make(map[string]map[string]string)
  580. if podLabelsResult != nil {
  581. podLabels, err = parsePodLabels(podLabelsResult)
  582. if err != nil {
  583. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  584. }
  585. }
  586. for key, costData := range missingContainers {
  587. cm, _ := NewContainerMetricFromKey(key)
  588. labels, ok := podLabels[cm.PodName]
  589. if !ok {
  590. labels = make(map[string]string)
  591. }
  592. for k, v := range costData.NamespaceLabels {
  593. labels[k] = v
  594. }
  595. costData.Labels = labels
  596. }
  597. }
  598. return nil
  599. }
  600. func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[string]*costAnalyzerCloud.Node, start, end time.Time) error {
  601. if len(missingNodes) > 0 {
  602. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  603. grp := source.NewQueryGroup()
  604. mq := dataSource.Metrics()
  605. cpuCostResCh := source.WithGroup(grp, mq.QueryNodeCPUPricePerHr(start, end))
  606. ramCostResCh := source.WithGroup(grp, mq.QueryNodeRAMPricePerGiBHr(start, end))
  607. gpuCostResCh := source.WithGroup(grp, mq.QueryNodeGPUPricePerHr(start, end))
  608. cpuCostRes, _ := cpuCostResCh.Await()
  609. ramCostRes, _ := ramCostResCh.Await()
  610. gpuCostRes, _ := gpuCostResCh.Await()
  611. if grp.HasErrors() {
  612. return grp.Error()
  613. }
  614. cpuCosts, err := getCost(cpuCostRes, cpuCostNode, cpuCostData)
  615. if err != nil {
  616. return err
  617. }
  618. ramCosts, err := getCost(ramCostRes, ramCostNode, ramCostData)
  619. if err != nil {
  620. return err
  621. }
  622. gpuCosts, err := getCost(gpuCostRes, gpuCostNode, gpuCostData)
  623. if err != nil {
  624. return err
  625. }
  626. if len(cpuCosts) == 0 {
  627. log.Infof("Opencost metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  628. }
  629. for node, costv := range cpuCosts {
  630. if _, ok := missingNodes[node]; ok {
  631. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  632. } else {
  633. log.DedupedWarningf(5, "Node `%s` in metrics but not k8s api", node)
  634. }
  635. }
  636. for node, costv := range ramCosts {
  637. if _, ok := missingNodes[node]; ok {
  638. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  639. }
  640. }
  641. for node, costv := range gpuCosts {
  642. if _, ok := missingNodes[node]; ok {
  643. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  644. }
  645. }
  646. }
  647. return nil
  648. }
  649. // getContainerAllocation takes the max between request and usage. This function
  650. // returns a slice containing a single element describing the container's
  651. // allocation.
  652. //
  653. // Additionally, the timestamp of the allocation will be the highest value
  654. // timestamp between the two vectors. This mitigates situations where
  655. // Timestamp=0. This should have no effect on the metrics emitted by the
  656. // CostModelMetricsEmitter
  657. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  658. var result []*util.Vector
  659. if req != nil && used != nil {
  660. x1 := req.Value
  661. if math.IsNaN(x1) {
  662. log.Debugf("NaN value found during %s allocation calculation for requests.", allocationType)
  663. x1 = 0.0
  664. }
  665. y1 := used.Value
  666. if math.IsNaN(y1) {
  667. log.Debugf("NaN value found during %s allocation calculation for used.", allocationType)
  668. y1 = 0.0
  669. }
  670. result = []*util.Vector{
  671. {
  672. Value: math.Max(x1, y1),
  673. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  674. },
  675. }
  676. if result[0].Value == 0 && result[0].Timestamp == 0 {
  677. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  678. }
  679. } else if req != nil {
  680. result = []*util.Vector{
  681. {
  682. Value: req.Value,
  683. Timestamp: req.Timestamp,
  684. },
  685. }
  686. } else if used != nil {
  687. result = []*util.Vector{
  688. {
  689. Value: used.Value,
  690. Timestamp: used.Timestamp,
  691. },
  692. }
  693. } else {
  694. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  695. result = []*util.Vector{
  696. {
  697. Value: 0,
  698. Timestamp: float64(time.Now().UTC().Unix()),
  699. },
  700. }
  701. }
  702. return result
  703. }
  704. func (cm *CostModel) addPVData(pvClaimMapping map[string]*PersistentVolumeClaimData) error {
  705. cache := cm.Cache
  706. cloud := cm.Provider
  707. cfg, err := cloud.GetConfig()
  708. if err != nil {
  709. return err
  710. }
  711. // Pull a region from the first node
  712. var defaultRegion string
  713. nodeList := cache.GetAllNodes()
  714. if len(nodeList) > 0 {
  715. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  716. }
  717. storageClasses := cache.GetAllStorageClasses()
  718. storageClassMap := make(map[string]map[string]string)
  719. for _, storageClass := range storageClasses {
  720. params := storageClass.Parameters
  721. storageClassMap[storageClass.Name] = params
  722. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  723. storageClassMap["default"] = params
  724. storageClassMap[""] = params
  725. }
  726. // Add custom cost annotation to storage class map
  727. if key, found := storageClass.Annotations[annotationStorageCost]; found && params != nil {
  728. params[annotationStorageCost] = key
  729. }
  730. }
  731. pvs := cache.GetAllPersistentVolumes()
  732. pvMap := make(map[string]*costAnalyzerCloud.PV)
  733. for _, pv := range pvs {
  734. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  735. if !ok {
  736. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  737. }
  738. var region string
  739. if r, ok := util.GetRegion(pv.Labels); ok {
  740. region = r
  741. } else {
  742. region = defaultRegion
  743. }
  744. cacPv := &costAnalyzerCloud.PV{
  745. Class: pv.Spec.StorageClassName,
  746. Region: region,
  747. Parameters: parameters,
  748. }
  749. err := cm.GetPVCost(cacPv, pv, region)
  750. if err != nil {
  751. return err
  752. }
  753. pvMap[pv.Name] = cacPv
  754. }
  755. for _, pvc := range pvClaimMapping {
  756. if vol, ok := pvMap[pvc.VolumeName]; ok {
  757. pvc.Volume = vol
  758. } else {
  759. log.Debugf("PV not found, using default")
  760. pvc.Volume = &costAnalyzerCloud.PV{
  761. Cost: cfg.Storage,
  762. }
  763. }
  764. }
  765. return nil
  766. }
  767. // Checks if the provided cost string can be parsed into a finite, non-negative float64.
  768. // If the cost is invalid, it logs a warning with the cost value and the reason.
  769. func (cm *CostModel) costIsValid(cost string) bool {
  770. parsedCost, err := strconv.ParseFloat(cost, 64)
  771. if err != nil {
  772. log.Warnf("Invalid cost value: %s. Error: %s", cost, err.Error())
  773. return false
  774. }
  775. // Check if the parsed cost is a valid number (not NaN, not Inf, and non-negative)
  776. if math.IsNaN(parsedCost) || math.IsInf(parsedCost, 0) || parsedCost < 0 {
  777. log.Warnf("Invalid cost value: %s. Error: cost must be a finite, non-negative number", cost)
  778. return false
  779. }
  780. return true
  781. }
  782. func (cm *CostModel) GetPVCost(pv *costAnalyzerCloud.PV, kpv *clustercache.PersistentVolume, defaultRegion string) error {
  783. cp := cm.Provider
  784. cfg, err := cp.GetConfig()
  785. if err != nil {
  786. return err
  787. }
  788. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  789. pv.ProviderID = key.ID()
  790. // If PV has a custom cost annotation, use that to mandate the cost
  791. if cost, found := kpv.Annotations[annotationStorageCost]; found && cm.costIsValid(cost) {
  792. log.Infof("Found custom cost from annotation for PV %s: %s", kpv.Name, cost)
  793. pv.Cost = cost
  794. return nil
  795. }
  796. // If SC has a custom cost annotation, use that to mandate the cost
  797. if cost, found := pv.Parameters[annotationStorageCost]; found && cm.costIsValid(cost) {
  798. log.Infof("Found custom cost from Storage Class annotation for PV %s: %s", kpv.Name, cost)
  799. pv.Cost = cost
  800. return nil
  801. }
  802. pvWithCost, err := cp.PVPricing(key)
  803. if err != nil {
  804. pv.Cost = cfg.Storage
  805. return err
  806. }
  807. if pvWithCost == nil || pvWithCost.Cost == "" {
  808. pv.Cost = cfg.Storage
  809. return nil // set default cost
  810. }
  811. pv.Cost = pvWithCost.Cost
  812. return nil
  813. }
  814. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  815. if cm.pricingMetadata != nil {
  816. return cm.pricingMetadata, nil
  817. } else {
  818. return nil, fmt.Errorf("Node costs not yet calculated")
  819. }
  820. }
// GetNodeCost builds a pricing entry for every node in the cluster cache,
// keyed by node name.
//
// For each node it: queries the cloud provider for on-demand pricing (falling
// back to the configured default CPU/RAM prices when the lookup fails),
// applies any custom cost annotations on the node, fills in instance type /
// region / arch / capacity details from the k8s API where the provider left
// them blank, and derives missing per-resource (CPU/RAM/GPU) prices from
// whatever breakdown the provider did supply. Pricing-match metadata is
// recorded for GetPricingSourceCounts, and reserved-instance pricing is
// applied to the result before returning.
func (cm *CostModel) GetNodeCost() (map[string]*costAnalyzerCloud.Node, error) {
	cp := cm.Provider
	cfg, err := cp.GetConfig()
	if err != nil {
		return nil, err
	}
	nodeList := cm.Cache.GetAllNodes()
	nodes := make(map[string]*costAnalyzerCloud.Node)
	pmd := &costAnalyzerCloud.PricingMatchMetadata{
		TotalNodes:        0,
		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
	}
	for _, n := range nodeList {
		name := n.Name
		nodeLabels := n.Labels
		if nodeLabels == nil {
			log.Warnf("GetNodeCost: Found node '%s' with no labels", name)
			nodeLabels = make(map[string]string)
		}
		nodeLabels["providerID"] = n.SpecProviderID
		pmd.TotalNodes++
		cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
		if err != nil {
			log.DedupedInfof(10, "Could not get node pricing for node %s: %s. Falling back to default pricing.", name, err.Error())
			if cnode != nil {
				// Provider returned partial pricing alongside the error;
				// use it as-is and skip the fill-in logic below.
				nodes[name] = cnode
				continue
			} else {
				// No pricing at all; start from the configured defaults.
				cnode = &costAnalyzerCloud.Node{
					VCPUCost: cfg.CPU,
					RAMCost:  cfg.RAM,
				}
			}
		}
		// Custom cost annotations on the node override provider pricing.
		if cost, found := n.Annotations[annotationNodeCPUCost]; found && cm.costIsValid(cost) {
			log.Infof("Found custom CPU cost from annotation for Node %s: %s", n.Name, cost)
			cnode.VCPUCost = cost
		}
		if cost, found := n.Annotations[annotationNodeRAMCost]; found && cm.costIsValid(cost) {
			log.Infof("Found custom RAM cost from annotation for Node %s: %s", n.Name, cost)
			cnode.RAMCost = cost
		}
		pmd.PricingTypeCounts[cnode.PricingType]++
		// newCnode builds upon cnode but populates/overrides certain fields.
		// cnode was populated leveraging cloud provider public pricing APIs.
		newCnode := *cnode
		if newCnode.InstanceType == "" {
			it, _ := util.GetInstanceType(n.Labels)
			newCnode.InstanceType = it
		}
		if newCnode.Region == "" {
			region, _ := util.GetRegion(n.Labels)
			newCnode.Region = region
		}
		if newCnode.ArchType == "" {
			arch, _ := util.GetArchType(n.Labels)
			newCnode.ArchType = arch
		}
		newCnode.ProviderID = n.SpecProviderID
		// CPU core count: prefer the provider's value, else node capacity.
		var cpu float64
		if newCnode.VCPU == "" {
			cpu = float64(n.Status.Capacity.Cpu().Value())
			newCnode.VCPU = n.Status.Capacity.Cpu().String()
		} else {
			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
			if err != nil {
				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
			}
		}
		if math.IsNaN(cpu) {
			log.Warnf("cpu parsed as NaN. Setting to 0.")
			cpu = 0
		}
		// RAM: prefer the provider's values, else node capacity.
		if newCnode.RAM == "" {
			newCnode.RAM = n.Status.Capacity.Memory().String()
		}
		if newCnode.RAMBytes == "" {
			newCnode.RAMBytes = fmt.Sprintf("%v", n.Status.Capacity.Memory().Value())
		}
		ram, _ := strconv.ParseFloat(newCnode.RAMBytes, 64)
		if math.IsNaN(ram) {
			log.Warnf("ram parsed as NaN. Setting to 0.")
			ram = 0
		}
		// GPU count from the provider; unparseable means zero GPUs.
		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
		if err != nil {
			gpuc = 0.0
		}
		// The k8s API will often report more accurate results for GPU count
		// than cloud provider public pricing APIs. If found, override the
		// original value.
		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
		if err != nil {
			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
		}
		if gpuOverride > 0 {
			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
			gpuc = gpuOverride
		}
		if vgpuOverride > 0 {
			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
		}
		// Special case for SUSE rancher, since it won't behave with normal
		// calculations, courtesy of the instance type not being "real" (a
		// recognizable AWS instance type.)
		if newCnode.InstanceType == "rke2" {
			log.Infof(
				"Found a SUSE Rancher node %s, defaulting and skipping math",
				cp.GetKey(nodeLabels, n).Features(),
			)
			defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPUCorePrice = 0
			}
			if math.IsNaN(defaultCPUCorePrice) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPUCorePrice = 0
			}
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			var gpuPrice float64
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
				gpuPrice = 0
			} else if len(gpuPricing) > 0 {
				gpuPrice, err = strconv.ParseFloat(gpuPricing, 64)
				if err != nil {
					log.Errorf("Could not parse custom GPU pricing: %s", err)
					gpuPrice = 0
				} else if math.IsNaN(gpuPrice) {
					log.Warnf("Custom GPU pricing parsed as NaN. Setting to 0.")
					gpuPrice = 0
				} else {
					log.Infof("Using custom GPU pricing for node \"%s\": %f", name, gpuPrice)
				}
			} else {
				gpuPrice, err = strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					gpuPrice = 0
				}
				if math.IsNaN(gpuPrice) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					gpuPrice = 0
				}
			}
			defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAMPrice = 0
			}
			if math.IsNaN(defaultRAMPrice) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAMPrice = 0
			}
			// NOTE(review): defaultGPUPrice is parsed and NaN-checked but is
			// not used in the cost math below (gpuPrice is used instead) —
			// confirm whether this is intentional or leftover.
			defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPUPrice = 0
			}
			if math.IsNaN(defaultGPUPrice) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPUPrice = 0
			}
			// Just say no to doing the ratios!
			cpuCost := defaultCPUCorePrice * cpu
			gpuCost := gpuPrice * gpuc
			ramCost := defaultRAMPrice * ram
			nodeCost := cpuCost + gpuCost + ramCost
			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
			// was the big thing to investigate. All the funky ratio math
			// we were doing was messing with their default pricing. for SUSE Rancher.
			// We reach this when a GPU is detected on a node, but no cost for
			// the GPU is defined in the OnDemand pricing. Calculate ratios of
			// CPU to RAM and GPU to RAM costs, then distribute the total node
			// cost among the CPU, RAM, and GPU.
			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
			} else if len(gpuPricing) > 0 {
				newCnode.GPUCost = gpuPricing
				log.Infof("Using custom GPU pricing for node \"%s\": %s", name, gpuPricing)
			}
			if newCnode.GPUCost == "" {
				// No label-based GPU price either; distribute the node's
				// total cost across CPU/RAM/GPU via default-price ratios.
				defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
				if err != nil {
					log.Errorf("Could not parse default cpu price")
					defaultCPU = 0
				}
				if math.IsNaN(defaultCPU) {
					log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
					defaultCPU = 0
				}
				defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
				if err != nil {
					log.Errorf("Could not parse default ram price")
					defaultRAM = 0
				}
				if math.IsNaN(defaultRAM) {
					log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
					defaultRAM = 0
				}
				defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					defaultGPU = 0
				}
				if math.IsNaN(defaultGPU) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					defaultGPU = 0
				}
				cpuToRAMRatio := defaultCPU / defaultRAM
				if math.IsNaN(cpuToRAMRatio) {
					log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
					cpuToRAMRatio = 10
				}
				gpuToRAMRatio := defaultGPU / defaultRAM
				if math.IsNaN(gpuToRAMRatio) {
					log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
					gpuToRAMRatio = 100
				}
				ramGB := ram / 1024 / 1024 / 1024
				if math.IsNaN(ramGB) {
					log.Warnf("ramGB is NaN. Setting to 0.")
					ramGB = 0
				}
				// ramMultiple expresses the node's resources in "RAM GB
				// equivalents" so the total price can be apportioned.
				ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
				if math.IsNaN(ramMultiple) {
					log.Warnf("ramMultiple is NaN. Setting to 0.")
					ramMultiple = 0
				}
				var nodePrice float64
				if newCnode.Cost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
					if err != nil {
						log.Errorf("Could not parse total node price")
						return nil, err
					}
				} else if newCnode.VCPUCost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
					if err != nil {
						log.Errorf("Could not parse node vcpu price")
						return nil, err
					}
				} else { // add case to use default pricing model when API data fails.
					log.Debugf("No node price or CPUprice found, falling back to default")
					nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
				}
				if math.IsNaN(nodePrice) {
					log.Warnf("nodePrice parsed as NaN. Setting to 0.")
					nodePrice = 0
				}
				ramPrice := (nodePrice / ramMultiple)
				if math.IsNaN(ramPrice) {
					log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
					ramPrice = 0
				}
				cpuPrice := ramPrice * cpuToRAMRatio
				gpuPrice := ramPrice * gpuToRAMRatio
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
				newCnode.RAMBytes = fmt.Sprintf("%f", ram)
				newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			}
		} else if newCnode.RAMCost == "" {
			// We reach this when no RAM cost is defined in the OnDemand
			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
			// distrubte the total node cost among CPU and RAM costs.
			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}
  1191. // TODO: drop some logs
  1192. func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1193. // for fetching prices from cloud provider
  1194. // cfg, err := cp.GetConfig()
  1195. // if err != nil {
  1196. // return nil, err
  1197. // }
  1198. cp := cm.Provider
  1199. servicesList := cm.Cache.GetAllServices()
  1200. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1201. for _, service := range servicesList {
  1202. namespace := service.Namespace
  1203. name := service.Name
  1204. key := serviceKey{
  1205. Cluster: coreenv.GetClusterID(),
  1206. Namespace: namespace,
  1207. Service: name,
  1208. }
  1209. if service.Type == "LoadBalancer" {
  1210. loadBalancer, err := cp.LoadBalancerPricing()
  1211. if err != nil {
  1212. return nil, err
  1213. }
  1214. newLoadBalancer := *loadBalancer
  1215. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1216. address := loadBalancerIngress.IP
  1217. // Some cloud providers use hostname rather than IP
  1218. if address == "" {
  1219. address = loadBalancerIngress.Hostname
  1220. }
  1221. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1222. }
  1223. loadBalancerMap[key] = &newLoadBalancer
  1224. }
  1225. }
  1226. return loadBalancerMap, nil
  1227. }
  1228. func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1229. servicesList := cache.GetAllServices()
  1230. podServicesMapping := make(map[string]map[string][]string)
  1231. for _, service := range servicesList {
  1232. namespace := service.Namespace
  1233. name := service.Name
  1234. key := namespace + "," + clusterID
  1235. if _, ok := podServicesMapping[key]; !ok {
  1236. podServicesMapping[key] = make(map[string][]string)
  1237. }
  1238. s := labels.Nothing()
  1239. if len(service.SpecSelector) > 0 {
  1240. s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
  1241. }
  1242. for _, pod := range podList {
  1243. labelSet := labels.Set(pod.Labels)
  1244. if s.Matches(labelSet) && pod.Namespace == namespace {
  1245. services, ok := podServicesMapping[key][pod.Name]
  1246. if ok {
  1247. podServicesMapping[key][pod.Name] = append(services, name)
  1248. } else {
  1249. podServicesMapping[key][pod.Name] = []string{name}
  1250. }
  1251. }
  1252. }
  1253. }
  1254. return podServicesMapping, nil
  1255. }
  1256. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1257. ssList := cache.GetAllStatefulSets()
  1258. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1259. for _, ss := range ssList {
  1260. namespace := ss.Namespace
  1261. name := ss.Name
  1262. key := namespace + "," + clusterID
  1263. if _, ok := podSSMapping[key]; !ok {
  1264. podSSMapping[key] = make(map[string][]string)
  1265. }
  1266. s, err := metav1.LabelSelectorAsSelector(ss.SpecSelector)
  1267. if err != nil {
  1268. log.Errorf("Error doing deployment label conversion: %s", err.Error())
  1269. }
  1270. for _, pod := range podList {
  1271. labelSet := labels.Set(pod.Labels)
  1272. if s.Matches(labelSet) && pod.Namespace == namespace {
  1273. sss, ok := podSSMapping[key][pod.Name]
  1274. if ok {
  1275. podSSMapping[key][pod.Name] = append(sss, name)
  1276. } else {
  1277. podSSMapping[key][pod.Name] = []string{name}
  1278. }
  1279. }
  1280. }
  1281. }
  1282. return podSSMapping, nil
  1283. }
  1284. func getPodDeployments(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1285. deploymentsList := cache.GetAllDeployments()
  1286. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1287. for _, deployment := range deploymentsList {
  1288. namespace := deployment.Namespace
  1289. name := deployment.Name
  1290. key := namespace + "," + clusterID
  1291. if _, ok := podDeploymentsMapping[key]; !ok {
  1292. podDeploymentsMapping[key] = make(map[string][]string)
  1293. }
  1294. s, err := metav1.LabelSelectorAsSelector(deployment.SpecSelector)
  1295. if err != nil {
  1296. log.Errorf("Error doing deployment label conversion: %s", err)
  1297. }
  1298. for _, pod := range podList {
  1299. labelSet := labels.Set(pod.Labels)
  1300. if s.Matches(labelSet) && pod.Namespace == namespace {
  1301. deployments, ok := podDeploymentsMapping[key][pod.Name]
  1302. if ok {
  1303. podDeploymentsMapping[key][pod.Name] = append(deployments, name)
  1304. } else {
  1305. podDeploymentsMapping[key][pod.Name] = []string{name}
  1306. }
  1307. }
  1308. }
  1309. }
  1310. return podDeploymentsMapping, nil
  1311. }
  1312. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1313. nsToLabels := make(map[string]map[string]string)
  1314. nss := cache.GetAllNamespaces()
  1315. for _, ns := range nss {
  1316. labels := make(map[string]string)
  1317. for k, v := range ns.Labels {
  1318. labels[promutil.SanitizeLabelName(k)] = v
  1319. }
  1320. nsToLabels[ns.Name+","+clusterID] = labels
  1321. }
  1322. return nsToLabels, nil
  1323. }
  1324. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1325. nsToAnnotations := make(map[string]map[string]string)
  1326. nss := cache.GetAllNamespaces()
  1327. for _, ns := range nss {
  1328. annotations := make(map[string]string)
  1329. for k, v := range ns.Annotations {
  1330. annotations[promutil.SanitizeLabelName(k)] = v
  1331. }
  1332. nsToAnnotations[ns.Name+","+clusterID] = annotations
  1333. }
  1334. return nsToAnnotations, nil
  1335. }
  1336. func getDaemonsetsOfPod(pod clustercache.Pod) []string {
  1337. for _, ownerReference := range pod.OwnerReferences {
  1338. if ownerReference.Kind == "DaemonSet" {
  1339. return []string{ownerReference.Name}
  1340. }
  1341. }
  1342. return []string{}
  1343. }
  1344. func getJobsOfPod(pod clustercache.Pod) []string {
  1345. for _, ownerReference := range pod.OwnerReferences {
  1346. if ownerReference.Kind == "Job" {
  1347. return []string{ownerReference.Name}
  1348. }
  1349. }
  1350. return []string{}
  1351. }
  1352. func getStatefulSetsOfPod(pod clustercache.Pod) []string {
  1353. for _, ownerReference := range pod.OwnerReferences {
  1354. if ownerReference.Kind == "StatefulSet" {
  1355. return []string{ownerReference.Name}
  1356. }
  1357. }
  1358. return []string{}
  1359. }
// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
// the number of GPUs and vGPUs are equipped on the node. If unable to identify
// a GPU count, it will return -1.
//
// Returns (physical GPU count, vGPU count, error). The three cases below are
// evaluated in order; the presence of the "nvidia.com/gpu.replicas" label
// routes a node to Case 2 even when "nvidia.com/gpu" capacity is set.
func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
	// Case 1: Standard NVIDIA GPU — no time-slicing replicas configured, so
	// physical GPUs and vGPUs are the same count.
	if hasGpu && g.Value() != 0 && !hasReplicas {
		return float64(g.Value()), float64(g.Value()), nil
	}
	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
	if hasReplicas {
		resultGPU := 0.0
		resultVGPU := 0.0
		// Physical GPU count comes from the GFD "gpu.count" label when present.
		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
			var err error
			resultGPU, err = strconv.ParseFloat(c, 64)
			if err != nil {
				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
			}
		}
		// vGPU count depends on how GFD renames shared resources; fall back to
		// the physical count when neither capacity entry exists.
		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
			resultVGPU = float64(s.Value())
		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
			resultVGPU = float64(g.Value())
		} else {
			resultVGPU = resultGPU
		}
		return resultGPU, resultVGPU, nil
	}
	// Case 3: AWS vGPU — physical count is derived by dividing the vGPU
	// capacity by the per-GPU vGPU coefficient (from the device-plugin
	// DaemonSet args, defaulting to 10 when not discoverable).
	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
		vgpuCount, err := getAllocatableVGPUs(cache)
		if err != nil {
			return -1, -1, err
		}
		vgpuCoeff := 10.0
		if vgpuCount > 0.0 {
			vgpuCoeff = vgpuCount
		}
		if vgpu.Value() != 0 {
			resultGPU := float64(vgpu.Value()) / vgpuCoeff
			resultVGPU := float64(vgpu.Value())
			return resultGPU, resultVGPU, nil
		}
	}
	// No GPU found
	return -1, -1, nil
}
  1412. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  1413. daemonsets := cache.GetAllDaemonSets()
  1414. vgpuCount := 0.0
  1415. for _, ds := range daemonsets {
  1416. dsContainerList := &ds.SpecContainers
  1417. for _, ctnr := range *dsContainerList {
  1418. if ctnr.Args != nil {
  1419. for _, arg := range ctnr.Args {
  1420. if strings.Contains(arg, "--vgpu=") {
  1421. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  1422. if err != nil {
  1423. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  1424. continue
  1425. }
  1426. vgpuCount = vgpus
  1427. return vgpuCount, nil
  1428. }
  1429. }
  1430. }
  1431. }
  1432. }
  1433. return vgpuCount, nil
  1434. }
// PersistentVolumeClaimData describes a PersistentVolumeClaim, the
// persistent volume backing it, and its cost/usage vectors over time.
type PersistentVolumeClaimData struct {
	Class        string                `json:"class"`        // storage class of the claim
	Claim        string                `json:"claim"`        // PVC name
	Namespace    string                `json:"namespace"`    // namespace the claim lives in
	ClusterID    string                `json:"clusterId"`    // cluster the claim belongs to
	TimesClaimed int                   `json:"timesClaimed"` // number of pods mounting this claim
	VolumeName   string                `json:"volumeName"`   // name of the bound PersistentVolume
	Volume       *costAnalyzerCloud.PV `json:"persistentVolume"`
	Values       []*util.Vector        `json:"values"` // time-series cost/usage values
}
  1445. func measureTime(start time.Time, threshold time.Duration, name string) {
  1446. elapsed := time.Since(start)
  1447. if elapsed > threshold {
  1448. log.Infof("[Profiler] %s: %s", elapsed, name)
  1449. }
  1450. }
// QueryAllocation computes an AllocationSetRange covering window in
// increments of step. Optionally it inserts idle allocations (includeIdle,
// keyed per node when idleByNode), applies an allocation filter BEFORE
// aggregation (filterString), aggregates by the given properties, and
// accumulates the stepped sets (accumulateBy). When
// includeProportionalAssetResourceCosts is set, includeIdle must also be
// set, and per-allocation PARC totals are populated from an in-memory
// asset totals store after aggregation.
func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool, filterString string) (*opencost.AllocationSetRange, error) {
	// Validate window is legal
	if window.IsOpen() || window.IsNegative() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}
	var totalsStore opencost.TotalsStore
	// Idle is required for proportional asset costs
	if includeProportionalAssetResourceCosts {
		if !includeIdle {
			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
		}
		totalsStore = opencost.NewMemoryTotalsStore()
	}
	// Begin with empty response
	asr := opencost.NewAllocationSetRange()
	// The query window may be widened beyond the requested window so that
	// accumulation boundaries line up; it is trimmed back after accumulating.
	queryWindow, err := resolveQueryWindowForAccumulate(window, accumulateBy)
	if err != nil {
		return nil, fmt.Errorf("invalid accumulation configuration: %w", err)
	}
	// Query for AllocationSets in increments of the given step duration,
	// appending each to the response.
	stepStart := *queryWindow.Start()
	stepEnd := stepStart.Add(step)
	var isAKS bool
	for queryWindow.End().After(stepStart) {
		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd)
		if err != nil {
			return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
		}
		if includeIdle {
			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}
			if includeProportionalAssetResourceCosts {
				// AKS is a special case - there can be a maximum of 2
				// load balancers (1 public and 1 private) in an AKS cluster
				// therefore, when calculating PARCs for load balancers,
				// we must know if this is an AKS cluster
				for _, node := range assetSet.Nodes {
					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
						isAKS = true
						break
					}
				}
				_, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
				if err != nil {
					log.Errorf("Allocation: error updating asset resource totals for %s: %s", assetSet.Window, err)
				}
			}
			idleSet, err := computeIdleAllocations(allocSet, assetSet, idleByNode)
			if err != nil {
				return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}
			for _, idleAlloc := range idleSet.Allocations {
				allocSet.Insert(idleAlloc)
			}
		}
		asr.Append(allocSet)
		stepStart = stepEnd
		stepEnd = stepStart.Add(step)
	}
	// Apply allocation filter BEFORE aggregation if provided
	if filterString != "" {
		parser := allocation.NewAllocationFilterParser()
		filterNode, err := parser.Parse(filterString)
		if err != nil {
			return nil, fmt.Errorf("invalid filter: %w", err)
		}
		compiler := opencost.NewAllocationMatchCompiler(nil)
		matcher, err := compiler.Compile(filterNode)
		if err != nil {
			return nil, fmt.Errorf("failed to compile filter: %w", err)
		}
		// Rebuild the range keeping only matching allocations; sets left
		// empty by the filter are dropped entirely.
		filteredASR := opencost.NewAllocationSetRange()
		for _, as := range asr.Allocations {
			filteredAS := opencost.NewAllocationSet(as.Start(), as.End())
			for _, alloc := range as.Allocations {
				if matcher.Matches(alloc) {
					filteredAS.Set(alloc)
				}
			}
			if filteredAS.Length() > 0 {
				filteredASR.Append(filteredAS)
			}
		}
		asr = filteredASR
	}
	// Set aggregation options and aggregate
	var shareIdleOpt string
	if shareIdle {
		shareIdleOpt = opencost.ShareWeighted
	} else {
		shareIdleOpt = opencost.ShareNone
	}
	opts := &opencost.AllocationAggregationOptions{
		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
		IdleByNode:                            idleByNode,
		IncludeAggregatedMetadata:             includeAggregatedMetadata,
		ShareIdle:                             shareIdleOpt,
	}
	// Aggregate
	err = asr.AggregateBy(aggregate, opts)
	if err != nil {
		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
	}
	// Accumulate, if requested
	if accumulateBy != opencost.AccumulateOptionNone {
		asr, err = asr.Accumulate(accumulateBy)
		if err != nil {
			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
		}
		asr = trimAllocationSetRangeToRequestWindow(asr, window)
		// when accumulating and returning PARCs, we need the totals for the
		// accumulated windows to accurately compute a fraction
		if includeProportionalAssetResourceCosts {
			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
			_, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
			if err != nil {
				log.Errorf("Allocation: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
		}
	}
	if includeProportionalAssetResourceCosts {
		for _, as := range asr.Allocations {
			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
			}
			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
			}
			var totalPublicLbCost, totalPrivateLbCost float64
			if isAKS && sharedLoadBalancer {
				// loop through all assetTotals, adding all load balancer costs by public and private
				for _, tot := range totalStoreByNode {
					if tot.PrivateLoadBalancer {
						totalPrivateLbCost += tot.LoadBalancerCost
					} else {
						totalPublicLbCost += tot.LoadBalancerCost
					}
				}
			}
			// loop through each allocation set, using total cost from totals store
			for _, alloc := range as.Allocations {
				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
					// for each parc , check the totals store for each
					// on a totals hit, set the corresponding total and calculate percentage
					// NOTE: node-level totals take precedence over cluster-level
					// totals because they are checked second.
					var totals *opencost.AssetTotals
					if totalsLoc, found := totalStoreByCluster[key]; found {
						totals = totalsLoc
					}
					if totalsLoc, found := totalStoreByNode[key]; found {
						totals = totalsLoc
					}
					if totals == nil {
						log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
						continue
					}
					parc.CPUTotalCost = totals.CPUCost
					parc.GPUTotalCost = totals.GPUCost
					parc.RAMTotalCost = totals.RAMCost
					parc.PVTotalCost = totals.PersistentVolumeCost
					if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
						// Azure is a special case - use computed totals above
						// use the lbAllocations in the object to determine if
						// this PARC is a public or private load balancer
						// then set the total accordingly
						// AKS only has 1 public and 1 private load balancer
						lbAlloc, found := alloc.LoadBalancers[key]
						if found {
							if lbAlloc.Private {
								parc.LoadBalancerTotalCost = totalPrivateLbCost
							} else {
								parc.LoadBalancerTotalCost = totalPublicLbCost
							}
						}
					} else {
						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
					}
					opencost.ComputePercentages(&parc)
					// parc is a copy; write the updated value back into the map.
					alloc.ProportionalAssetResourceCosts[rawKey] = parc
				}
			}
		}
	}
	return asr, nil
}
  1647. // debugAssetAllocationMismatch analyzes and logs discrepancies between asset and allocation data
  1648. // This helps diagnose pricing issues and negative idle costs
  1649. func debugAssetAllocationMismatch(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet) {
  1650. log.Debugf("=== Asset-Allocation Debug Analysis for window %s ===", allocSet.Window)
  1651. // Build maps for efficient lookup
  1652. assetsByProviderID := make(map[string]*opencost.Node)
  1653. assetsByNode := make(map[string]*opencost.Node)
  1654. for _, asset := range assetSet.Nodes {
  1655. if asset.Properties != nil && asset.Properties.ProviderID != "" {
  1656. assetsByProviderID[asset.Properties.ProviderID] = asset
  1657. }
  1658. if asset.Properties != nil && asset.Properties.Name != "" {
  1659. assetsByNode[asset.Properties.Name] = asset
  1660. }
  1661. }
  1662. // 1) Find allocations without matching assets (by ProviderID)
  1663. allocsWithoutAssets := make([]*opencost.Allocation, 0)
  1664. for _, alloc := range allocSet.Allocations {
  1665. if alloc.Properties == nil {
  1666. continue
  1667. }
  1668. providerID := alloc.Properties.ProviderID
  1669. if providerID == "" {
  1670. continue
  1671. }
  1672. if _, found := assetsByProviderID[providerID]; !found {
  1673. allocsWithoutAssets = append(allocsWithoutAssets, alloc)
  1674. }
  1675. }
  1676. if len(allocsWithoutAssets) > 0 {
  1677. log.Debugf("Found %d allocations without matching assets:", len(allocsWithoutAssets))
  1678. for _, alloc := range allocsWithoutAssets {
  1679. log.Debugf(" - Allocation: %s, Node: %s, ProviderID: %s, TotalCost: %.4f",
  1680. alloc.Name,
  1681. alloc.Properties.Node,
  1682. alloc.Properties.ProviderID,
  1683. alloc.TotalCost())
  1684. }
  1685. }
  1686. // 2) Sum allocations per node and compare to node asset costs
  1687. allocTotalsByNode := make(map[string]*struct {
  1688. CPUCost float64
  1689. GPUCost float64
  1690. RAMCost float64
  1691. TotalCost float64
  1692. CPUCoreHours float64
  1693. GPUHours float64
  1694. RAMByteHours float64
  1695. Count int
  1696. })
  1697. for _, alloc := range allocSet.Allocations {
  1698. if alloc.Properties == nil || alloc.Properties.Node == "" {
  1699. continue
  1700. }
  1701. node := alloc.Properties.Node
  1702. if _, exists := allocTotalsByNode[node]; !exists {
  1703. allocTotalsByNode[node] = &struct {
  1704. CPUCost float64
  1705. GPUCost float64
  1706. RAMCost float64
  1707. TotalCost float64
  1708. CPUCoreHours float64
  1709. GPUHours float64
  1710. RAMByteHours float64
  1711. Count int
  1712. }{}
  1713. }
  1714. allocTotalsByNode[node].CPUCost += alloc.CPUCost
  1715. allocTotalsByNode[node].GPUCost += alloc.GPUCost
  1716. allocTotalsByNode[node].RAMCost += alloc.RAMCost
  1717. allocTotalsByNode[node].TotalCost += alloc.TotalCost()
  1718. allocTotalsByNode[node].CPUCoreHours += alloc.CPUCoreHours
  1719. allocTotalsByNode[node].GPUHours += alloc.GPUHours
  1720. allocTotalsByNode[node].RAMByteHours += alloc.RAMByteHours
  1721. allocTotalsByNode[node].Count++
  1722. }
  1723. log.Debugf("Per-Node Asset vs Allocation Comparison:")
  1724. for node, allocTotals := range allocTotalsByNode {
  1725. asset, hasAsset := assetsByNode[node]
  1726. if !hasAsset {
  1727. log.Debugf(" Node %s: Has allocations but NO ASSET (allocations: %d, total cost: %.4f)",
  1728. node, allocTotals.Count, allocTotals.TotalCost)
  1729. continue
  1730. }
  1731. assetCPU := asset.CPUCost
  1732. assetGPU := asset.GPUCost
  1733. assetRAM := asset.RAMCost
  1734. assetTotal := asset.TotalCost()
  1735. cpuDiff := assetCPU - allocTotals.CPUCost
  1736. gpuDiff := assetGPU - allocTotals.GPUCost
  1737. ramDiff := assetRAM - allocTotals.RAMCost
  1738. totalDiff := assetTotal - allocTotals.TotalCost
  1739. status := "OK"
  1740. if cpuDiff < 0 || gpuDiff < 0 || ramDiff < 0 {
  1741. status = "NEGATIVE_IDLE"
  1742. }
  1743. log.Debugf(" Node %s [%s]:", node, status)
  1744. log.Debugf(" Asset: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f",
  1745. assetCPU, assetGPU, assetRAM, assetTotal)
  1746. log.Debugf(" Allocation: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f (%d allocs)",
  1747. allocTotals.CPUCost, allocTotals.GPUCost, allocTotals.RAMCost, allocTotals.TotalCost, allocTotals.Count)
  1748. log.Debugf(" Difference: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f",
  1749. cpuDiff, gpuDiff, ramDiff, totalDiff)
  1750. if asset.Adjustment != 0 {
  1751. log.Debugf(" Adjustment: %.4f", asset.Adjustment)
  1752. }
  1753. // Compare resource amounts vs costs: higher resources should have higher costs
  1754. assetCPUHours := asset.CPUCoreHours
  1755. assetGPUHours := asset.GPUHours
  1756. assetRAMBytes := asset.RAMByteHours
  1757. allocCPUHours := allocTotals.CPUCoreHours
  1758. allocGPUHours := allocTotals.GPUHours
  1759. allocRAMBytes := allocTotals.RAMByteHours
  1760. // Warn if resource amounts and costs are inverted (higher resources but lower costs)
  1761. if assetCPUHours > 0 && allocCPUHours > 0 {
  1762. if assetCPUHours > allocCPUHours && assetCPU < allocTotals.CPUCost {
  1763. log.Warnf("Resource-cost inversion for %s CPU: asset has MORE hours (%.2f) but LESS cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1764. node, assetCPUHours, assetCPU, allocCPUHours, allocTotals.CPUCost)
  1765. } else if assetCPUHours < allocCPUHours && assetCPU > allocTotals.CPUCost {
  1766. log.Warnf("Resource-cost inversion for %s CPU: asset has LESS hours (%.2f) but MORE cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1767. node, assetCPUHours, assetCPU, allocCPUHours, allocTotals.CPUCost)
  1768. }
  1769. }
  1770. if assetGPUHours > 0 && allocGPUHours > 0 {
  1771. if assetGPUHours > allocGPUHours && assetGPU < allocTotals.GPUCost {
  1772. log.Warnf("Resource-cost inversion for %s GPU: asset has MORE hours (%.2f) but LESS cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1773. node, assetGPUHours, assetGPU, allocGPUHours, allocTotals.GPUCost)
  1774. } else if assetGPUHours < allocGPUHours && assetGPU > allocTotals.GPUCost {
  1775. log.Warnf("Resource-cost inversion for %s GPU: asset has LESS hours (%.2f) but MORE cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1776. node, assetGPUHours, assetGPU, allocGPUHours, allocTotals.GPUCost)
  1777. }
  1778. }
  1779. if assetRAMBytes > 0 && allocRAMBytes > 0 {
  1780. if assetRAMBytes > allocRAMBytes && assetRAM < allocTotals.RAMCost {
  1781. log.Warnf("Resource-cost inversion for %s RAM: asset has MORE byte-hours (%.2f) but LESS cost (%.4f) than allocations (byte-hours: %.2f, cost: %.4f)",
  1782. node, assetRAMBytes, assetRAM, allocRAMBytes, allocTotals.RAMCost)
  1783. } else if assetRAMBytes < allocRAMBytes && assetRAM > allocTotals.RAMCost {
  1784. log.Warnf("Resource-cost inversion for %s RAM: asset has LESS byte-hours (%.2f) but MORE cost (%.4f) than allocations (byte-hours: %.2f, cost: %.4f)",
  1785. node, assetRAMBytes, assetRAM, allocRAMBytes, allocTotals.RAMCost)
  1786. }
  1787. }
  1788. // Log resource amounts for debugging
  1789. log.Debugf(" Resource Hours:")
  1790. log.Debugf(" Asset: CPU=%.2f hours, GPU=%.2f hours, RAM=%.2f byte-hours",
  1791. assetCPUHours, assetGPUHours, assetRAMBytes)
  1792. log.Debugf(" Allocation: CPU=%.2f hours, GPU=%.2f hours, RAM=%.2f byte-hours",
  1793. allocCPUHours, allocGPUHours, allocRAMBytes)
  1794. }
  1795. // 3) Sum total of all node costs
  1796. totalNodeCPU := 0.0
  1797. totalNodeGPU := 0.0
  1798. totalNodeRAM := 0.0
  1799. totalNodeCost := 0.0
  1800. nodeCount := 0
  1801. for _, asset := range assetSet.Nodes {
  1802. totalNodeCPU += asset.CPUCost
  1803. totalNodeGPU += asset.GPUCost
  1804. totalNodeRAM += asset.RAMCost
  1805. totalNodeCost += asset.TotalCost()
  1806. nodeCount++
  1807. }
  1808. log.Debugf("Total Node Asset Costs:")
  1809. log.Debugf(" Nodes: %d", nodeCount)
  1810. log.Debugf(" CPU: %.4f", totalNodeCPU)
  1811. log.Debugf(" GPU: %.4f", totalNodeGPU)
  1812. log.Debugf(" RAM: %.4f", totalNodeRAM)
  1813. log.Debugf(" Total: %.4f", totalNodeCost)
  1814. // 4) Sum total of all allocation costs
  1815. totalAllocCPU := 0.0
  1816. totalAllocGPU := 0.0
  1817. totalAllocRAM := 0.0
  1818. totalAllocCost := 0.0
  1819. allocCount := 0
  1820. for _, alloc := range allocSet.Allocations {
  1821. totalAllocCPU += alloc.CPUCost
  1822. totalAllocGPU += alloc.GPUCost
  1823. totalAllocRAM += alloc.RAMCost
  1824. totalAllocCost += alloc.TotalCost()
  1825. allocCount++
  1826. }
  1827. log.Debugf("Total Allocation Costs:")
  1828. log.Debugf(" Allocations: %d", allocCount)
  1829. log.Debugf(" CPU: %.4f", totalAllocCPU)
  1830. log.Debugf(" GPU: %.4f", totalAllocGPU)
  1831. log.Debugf(" RAM: %.4f", totalAllocRAM)
  1832. log.Debugf(" Total: %.4f", totalAllocCost)
  1833. // Overall comparison
  1834. log.Debugf("Overall Asset vs Allocation:")
  1835. log.Debugf(" CPU Difference: %.4f (Asset - Allocation)", totalNodeCPU-totalAllocCPU)
  1836. log.Debugf(" GPU Difference: %.4f (Asset - Allocation)", totalNodeGPU-totalAllocGPU)
  1837. log.Debugf(" RAM Difference: %.4f (Asset - Allocation)", totalNodeRAM-totalAllocRAM)
  1838. log.Debugf(" Total Difference: %.4f (Asset - Allocation)", totalNodeCost-totalAllocCost)
  1839. log.Debugf("=== End Asset-Allocation Debug Analysis ===")
  1840. }
// computeIdleAllocations derives one idle Allocation per key (node or
// cluster, depending on idleByNode) as the per-resource difference between
// asset totals and allocation totals. Both sets must cover the same window.
// Negative differences are clamped to zero (see inline comments for why
// they can occur). Returns the resulting idle AllocationSet.
func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
	if !allocSet.Window.Equal(assetSet.Window) {
		return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
	}
	// Run debug analysis when log level is debug
	debugAssetAllocationMismatch(allocSet, assetSet)
	var allocTotals map[string]*opencost.AllocationTotals
	var assetTotals map[string]*opencost.AssetTotals
	// Totals are keyed by node name or by cluster, matching the idle
	// granularity requested by the caller.
	if idleByNode {
		allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
		assetTotals = opencost.ComputeAssetTotals(assetSet, true)
	} else {
		allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
		assetTotals = opencost.ComputeAssetTotals(assetSet, false)
	}
	start, end := *allocSet.Window.Start(), *allocSet.Window.End()
	idleSet := opencost.NewAllocationSet(start, end)
	for key, assetTotal := range assetTotals {
		allocTotal, ok := allocTotals[key]
		if !ok {
			log.Warnf("Allocation: did not find allocations for asset key: %s", key)
			// Use a zero-value set of totals. This indicates either (1) an
			// error computing totals, or (2) that no allocations ran on the
			// given node for the given window.
			allocTotal = &opencost.AllocationTotals{
				Cluster: assetTotal.Cluster,
				Node:    assetTotal.Node,
				Start:   assetTotal.Start,
				End:     assetTotal.End,
			}
		}
		// Insert one idle allocation for each key (whether by node or
		// by cluster), defined as the difference between the total
		// asset cost and the allocated cost per-resource.
		// Idle costs are clamped to zero to prevent negative values that can occur
		// when asset total costs are less than allocated costs. This can happen when:
		// - Pricing data is unavailable (promless mode, API failures, missing price data)
		// - Custom pricing is misconfigured or returns zero values
		// - Cloud billing adjustments reduce asset costs below allocation costs
		// - Allocation calculations exceed asset costs due to timing or rounding
		name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
		cpuIdleCost := assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost()
		gpuIdleCost := assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost()
		ramIdleCost := assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost()
		// Clamp idle costs to zero to prevent negative idle allocations
		if cpuIdleCost < 0 {
			log.Warnf("Negative CPU idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
				key, assetTotal.TotalCPUCost(), allocTotal.TotalCPUCost())
			cpuIdleCost = 0
		}
		if gpuIdleCost < 0 {
			log.Warnf("Negative GPU idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
				key, assetTotal.TotalGPUCost(), allocTotal.TotalGPUCost())
			gpuIdleCost = 0
		}
		if ramIdleCost < 0 {
			log.Warnf("Negative RAM idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
				key, assetTotal.TotalRAMCost(), allocTotal.TotalRAMCost())
			ramIdleCost = 0
		}
		err := idleSet.Insert(&opencost.Allocation{
			Name:   name,
			Window: idleSet.Window.Clone(),
			Properties: &opencost.AllocationProperties{
				Cluster:    assetTotal.Cluster,
				Node:       assetTotal.Node,
				ProviderID: assetTotal.ProviderID,
			},
			Start:   assetTotal.Start,
			End:     assetTotal.End,
			CPUCost: cpuIdleCost,
			GPUCost: gpuIdleCost,
			RAMCost: ramIdleCost,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
		}
	}
	return idleSet, nil
}
// GetDataSource returns the OpenCost data source backing this CostModel.
func (cm *CostModel) GetDataSource() source.OpenCostDataSource {
	return cm.DataSource
}