costmodel.go 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125
  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "maps"
  6. "math"
  7. "regexp"
  8. "strconv"
  9. "strings"
  10. "time"
  11. "github.com/opencost/opencost/core/pkg/clustercache"
  12. "github.com/opencost/opencost/core/pkg/clusters"
  13. coreenv "github.com/opencost/opencost/core/pkg/env"
  14. "github.com/opencost/opencost/core/pkg/filter/allocation"
  15. "github.com/opencost/opencost/core/pkg/log"
  16. "github.com/opencost/opencost/core/pkg/model/kubemodel"
  17. "github.com/opencost/opencost/core/pkg/opencost"
  18. "github.com/opencost/opencost/core/pkg/source"
  19. "github.com/opencost/opencost/core/pkg/util"
  20. "github.com/opencost/opencost/core/pkg/util/promutil"
  21. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  22. km "github.com/opencost/opencost/pkg/kubemodel"
  23. v1 "k8s.io/api/core/v1"
  24. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  25. "k8s.io/apimachinery/pkg/labels"
  26. "golang.org/x/sync/singleflight"
  27. )
const (
	// profileThreshold is the duration threshold, in nanoseconds, above which
	// measureTime logs a profiled step.
	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)
	// unmountedPVsContainer is the synthetic container/pod name used to carry
	// cost data for PVs that are not mounted by any container
	// (see findUnmountedPVCostData).
	unmountedPVsContainer = "unmounted-pvs"
)
// isCron matches a CronJob name and captures the non-timestamp name
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
//
// Capture group 1 is the CronJob name with the trailing "-<timestamp>"
// suffix removed (used by CostData.GetController).
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel computes per-container cost data for a cluster by combining
// cached Kubernetes state with usage metrics and provider pricing.
type CostModel struct {
	// Cache is the in-memory cache of Kubernetes objects (pods, namespaces,
	// services, PVs, ...).
	Cache      clustercache.ClusterCache
	// ClusterMap resolves cluster IDs to display names (see NameFor).
	ClusterMap clusters.ClusterMap
	BatchDuration time.Duration
	// RequestGroup deduplicates concurrent identical requests prior to caching.
	RequestGroup *singleflight.Group
	// DataSource supplies the metrics queries used to compute usage.
	DataSource source.OpenCostDataSource
	// Provider supplies cloud pricing (e.g. network egress costs).
	Provider costAnalyzerCloud.Provider
	KubeModel *km.KubeModel
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  48. func NewCostModel(
  49. clusterUID string,
  50. dataSource source.OpenCostDataSource,
  51. provider costAnalyzerCloud.Provider,
  52. cache clustercache.ClusterCache,
  53. clusterMap clusters.ClusterMap,
  54. batchDuration time.Duration,
  55. ) *CostModel {
  56. // request grouping to prevent over-requesting the same data prior to caching
  57. requestGroup := new(singleflight.Group)
  58. var kubeModel *km.KubeModel
  59. var err error
  60. if dataSource != nil {
  61. kubeModel, err = km.NewKubeModel(clusterUID, dataSource)
  62. if err != nil {
  63. // KubeModel is required. Log a fatal error if we fail to init.
  64. log.Fatalf("error initializing KubeModel: %s", err)
  65. }
  66. }
  67. return &CostModel{
  68. Cache: cache,
  69. ClusterMap: clusterMap,
  70. BatchDuration: batchDuration,
  71. DataSource: dataSource,
  72. Provider: provider,
  73. RequestGroup: requestGroup,
  74. KubeModel: kubeModel,
  75. }
  76. }
  77. func (cm *CostModel) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
  78. if cm.KubeModel == nil {
  79. return nil, fmt.Errorf("KubeModel not initialized")
  80. }
  81. return cm.KubeModel.ComputeKubeModelSet(start, end)
  82. }
// CostData holds the cost-relevant state of a single container: identifying
// metadata, owning workloads, resource request/usage/allocation vectors, and
// any PV and network data assigned to it.
type CostData struct {
	Name     string `json:"name,omitempty"`
	PodName  string `json:"podName,omitempty"`
	NodeName string `json:"nodeName,omitempty"`
	// NodeData carries pricing info for the node this container ran on.
	NodeData  *costAnalyzerCloud.Node `json:"node,omitempty"`
	Namespace string                  `json:"namespace,omitempty"`
	// Owning workloads, by kind. GetController consults these in order:
	// deployments, statefulsets, daemonsets, jobs.
	Deployments  []string `json:"deployments,omitempty"`
	Services     []string `json:"services,omitempty"`
	Daemonsets   []string `json:"daemonsets,omitempty"`
	Statefulsets []string `json:"statefulsets,omitempty"`
	Jobs         []string `json:"jobs,omitempty"`
	// Request/usage/allocation time series. Requests come from the K8s API
	// (single synthetic point); usage comes from metrics.
	RAMReq        []*util.Vector `json:"ramreq,omitempty"`
	RAMUsed       []*util.Vector `json:"ramused,omitempty"`
	RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
	CPUReq        []*util.Vector `json:"cpureq,omitempty"`
	CPUUsed       []*util.Vector `json:"cpuused,omitempty"`
	CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
	GPUReq        []*util.Vector `json:"gpureq,omitempty"`
	// PVCData is assigned to the first container of a pod only, to avoid
	// double counting (see ComputeCostData).
	PVCData     []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
	NetworkData []*util.Vector               `json:"network,omitempty"`
	Annotations map[string]string            `json:"annotations,omitempty"`
	// Labels are the pod labels merged with (non-overriding) namespace labels.
	Labels          map[string]string `json:"labels,omitempty"`
	NamespaceLabels map[string]string `json:"namespaceLabels,omitempty"`
	ClusterID       string            `json:"clusterId"`
	ClusterName     string            `json:"clusterName"`
}
  109. func (cd *CostData) String() string {
  110. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  111. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  112. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  113. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  114. }
  115. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  116. hasController = false
  117. if len(cd.Deployments) > 0 {
  118. name = cd.Deployments[0]
  119. kind = "deployment"
  120. hasController = true
  121. } else if len(cd.Statefulsets) > 0 {
  122. name = cd.Statefulsets[0]
  123. kind = "statefulset"
  124. hasController = true
  125. } else if len(cd.Daemonsets) > 0 {
  126. name = cd.Daemonsets[0]
  127. kind = "daemonset"
  128. hasController = true
  129. } else if len(cd.Jobs) > 0 {
  130. name = cd.Jobs[0]
  131. kind = "job"
  132. hasController = true
  133. match := isCron.FindStringSubmatch(name)
  134. if match != nil {
  135. name = match[1]
  136. }
  137. }
  138. return name, kind, hasController
  139. }
// ComputeCostData assembles per-container cost data for the window
// [start, end] by joining Kubernetes API state from the cluster cache (pods,
// deployments, services, namespace labels/annotations, PV claims) with usage
// metrics from the configured data source.
//
// The returned map is keyed by container metric key. Metrics-query errors are
// logged as warnings and computation continues with partial results; hard
// failures (node cost model, container metric parsing) abort with an error.
func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
	// Cluster ID is specific to the source cluster
	clusterID := coreenv.GetClusterID()
	cp := cm.Provider
	ds := cm.DataSource
	mq := ds.Metrics()
	// Get Kubernetes data
	// Pull pod information from k8s API
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}
	// Get metrics data
	resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayRequests, resNetNatGatewayIngressRequests, err := queryMetrics(mq, start, end)
	if err != nil {
		// Metrics errors are non-fatal: proceed with whatever partial results
		// queryMetrics returned.
		log.Warnf("ComputeCostData: continuing despite metrics errors: %s", err)
	}
	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
	nodes, err := cm.GetNodeCost()
	if err != nil {
		log.Warnf("GetNodeCost: no node cost model available: %s", err)
		return nil, err
	}
	// Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
	if err != nil {
		log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = cm.addPVData(pvClaimMapping)
		if err != nil {
			return nil, err
		}
		// copy claim mappings into zombies, then remove as they're discovered
		for k, v := range pvClaimMapping {
			unmountedPVs[k] = []*PersistentVolumeClaimData{v}
		}
	}
	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayRequests, resNetNatGatewayIngressRequests, clusterID)
	if err != nil {
		log.Warnf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}
	containerNameCost := make(map[string]*CostData)
	// containers is the union of every container key seen in RAM metrics,
	// CPU metrics, or the live pod list.
	containers := make(map[string]bool)
	RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, clusterID)
	if err != nil {
		return nil, err
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, clusterID) // No need to normalize here, as this comes from a counter
	if err != nil {
		return nil, err
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}
	// currentContainers maps container keys to their (running) pod so we can
	// distinguish live containers from deleted ones below.
	currentContainers := make(map[string]clustercache.Pod)
	for _, pod := range podlist {
		if pod.Status.Phase != v1.PodRunning {
			continue
		}
		cs, err := NewContainerMetricsFromPod(pod, clusterID)
		if err != nil {
			return nil, err
		}
		for _, c := range cs {
			containers[c.Key()] = true // captures any containers that existed for a time < a metrics scrape interval. We currently charge 0 for this but should charge something.
			currentContainers[c.Key()] = *pod
		}
	}
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		// The _else_ case for this statement is the case in which the container has been
		// deleted so we have usage information but not request information. In that case,
		// we return partial data for CPU and RAM: only usage and not requests.
		if pod, ok := currentContainers[key]; ok {
			podName := pod.Name
			ns := pod.Namespace
			// Merge namespace labels/annotations into the pod's, without
			// overriding anything the pod set itself.
			nsLabels := namespaceLabelsMapping[ns+","+clusterID]
			podLabels := maps.Clone(pod.Labels)
			if podLabels == nil {
				podLabels = make(map[string]string)
			}
			for k, v := range nsLabels {
				if _, ok := podLabels[k]; !ok {
					podLabels[k] = v
				}
			}
			nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
			podAnnotations := pod.Annotations
			if podAnnotations == nil {
				podAnnotations = make(map[string]string)
			}
			for k, v := range nsAnnotations {
				if _, ok := podAnnotations[k]; !ok {
					podAnnotations[k] = v
				}
			}
			nodeName := pod.Spec.NodeName
			var nodeData *costAnalyzerCloud.Node
			if _, ok := nodes[nodeName]; ok {
				nodeData = nodes[nodeName]
			}
			nsKey := ns + "," + clusterID
			var podDeployments []string
			if _, ok := podDeploymentsMapping[nsKey]; ok {
				if ds, ok := podDeploymentsMapping[nsKey][pod.Name]; ok {
					podDeployments = ds
				} else {
					podDeployments = []string{}
				}
			}
			// Collect the pod's PV claims; any claim found here is mounted,
			// so remove it from the unmounted set.
			var podPVs []*PersistentVolumeClaimData
			podClaims := pod.Spec.Volumes
			for _, vol := range podClaims {
				if vol.PersistentVolumeClaim != nil {
					name := vol.PersistentVolumeClaim.ClaimName
					key := ns + "," + name + "," + clusterID
					if pvClaim, ok := pvClaimMapping[key]; ok {
						pvClaim.TimesClaimed++
						podPVs = append(podPVs, pvClaim)
						// Remove entry from potential unmounted pvs
						delete(unmountedPVs, key)
					}
				}
			}
			var podNetCosts []*util.Vector
			if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
				netCosts, err := GetNetworkCost(usage, cp)
				if err != nil {
					log.Debugf("Error pulling network costs: %s", err.Error())
				} else {
					podNetCosts = netCosts
				}
			}
			var podServices []string
			if _, ok := podServicesMapping[nsKey]; ok {
				if svcs, ok := podServicesMapping[nsKey][pod.Name]; ok {
					podServices = svcs
				} else {
					podServices = []string{}
				}
			}
			for i, container := range pod.Spec.Containers {
				containerName := container.Name
				// recreate the key and look up data for this container
				newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
				// k8s.io/apimachinery/pkg/api/resource/amount.go and
				// k8s.io/apimachinery/pkg/api/resource/quantity.go for
				// details on the "amount" API. See
				// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
				// for the units of memory and CPU.
				ramRequestBytes := container.Resources.Requests.Memory().Value()
				// Because information on container RAM & CPU requests isn't
				// coming from metrics, it won't have a timestamp associated
				// with it. We need to provide a timestamp.
				RAMReqV := []*util.Vector{
					{
						Value:     float64(ramRequestBytes),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				// use millicores so we can convert to cores in a float64 format
				cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
				CPUReqV := []*util.Vector{
					{
						Value:     float64(cpuRequestMilliCores) / 1000,
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				// GPU request is taken from the first of: nvidia request,
				// nvidia limit, AWS vGPU request, AWS vGPU limit.
				gpuReqCount := 0.0
				if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				}
				GPUReqV := []*util.Vector{
					{
						Value:     float64(gpuReqCount),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				RAMUsedV, ok := RAMUsedMap[newKey]
				if !ok {
					log.Debug("no RAM usage for " + newKey)
					RAMUsedV = []*util.Vector{{}}
				}
				CPUUsedV, ok := CPUUsedMap[newKey]
				if !ok {
					log.Debug("no CPU usage for " + newKey)
					CPUUsedV = []*util.Vector{{}}
				}
				var pvReq []*PersistentVolumeClaimData
				var netReq []*util.Vector
				if i == 0 { // avoid duplicating by just assigning all claims to the first container.
					pvReq = podPVs
					netReq = podNetCosts
				}
				costs := &CostData{
					Name:            containerName,
					PodName:         podName,
					NodeName:        nodeName,
					Namespace:       ns,
					Deployments:     podDeployments,
					Services:        podServices,
					Daemonsets:      getDaemonsetsOfPod(pod),
					Jobs:            getJobsOfPod(pod),
					Statefulsets:    getStatefulSetsOfPod(pod),
					NodeData:        nodeData,
					RAMReq:          RAMReqV,
					RAMUsed:         RAMUsedV,
					CPUReq:          CPUReqV,
					CPUUsed:         CPUUsedV,
					GPUReq:          GPUReqV,
					PVCData:         pvReq,
					NetworkData:     netReq,
					Annotations:     podAnnotations,
					Labels:          podLabels,
					NamespaceLabels: nsLabels,
					ClusterID:       clusterID,
					ClusterName:     cm.ClusterMap.NameFor(clusterID),
				}
				var cpuReq, cpuUse *util.Vector
				if len(costs.CPUReq) > 0 {
					cpuReq = costs.CPUReq[0]
				}
				if len(costs.CPUUsed) > 0 {
					cpuUse = costs.CPUUsed[0]
				}
				costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
				var ramReq, ramUse *util.Vector
				if len(costs.RAMReq) > 0 {
					ramReq = costs.RAMReq[0]
				}
				if len(costs.RAMUsed) > 0 {
					ramUse = costs.RAMUsed[0]
				}
				costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
				containerNameCost[newKey] = costs
			}
		} else {
			// The container has been deleted. Not all information is sent to metrics via ksm, so fill out what we can without k8s api
			log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
			c, err := NewContainerMetricFromKey(key)
			if err != nil {
				return nil, err
			}
			// CPU and RAM requests are obtained from the Kubernetes API.
			// If this case has been reached, the Kubernetes API will not
			// have information about the pod because it no longer exists.
			//
			// The case where this matters is minimal, mainly in environments
			// with very short-lived pods that over-request resources.
			RAMReqV := []*util.Vector{{}}
			CPUReqV := []*util.Vector{{}}
			GPUReqV := []*util.Vector{{}}
			RAMUsedV, ok := RAMUsedMap[key]
			if !ok {
				log.Debug("no RAM usage for " + key)
				RAMUsedV = []*util.Vector{{}}
			}
			CPUUsedV, ok := CPUUsedMap[key]
			if !ok {
				log.Debug("no CPU usage for " + key)
				CPUUsedV = []*util.Vector{{}}
			}
			node, ok := nodes[c.NodeName]
			if !ok {
				log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
				// Track the missing node once; findDeletedNodeInfo fills in
				// its pricing from historical metrics below.
				if n, ok := missingNodes[c.NodeName]; ok {
					node = n
				} else {
					node = &costAnalyzerCloud.Node{}
					missingNodes[c.NodeName] = node
				}
			}
			namespacelabels := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
			namespaceAnnotations := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
			costs := &CostData{
				Name:            c.ContainerName,
				PodName:         c.PodName,
				NodeName:        c.NodeName,
				NodeData:        node,
				Namespace:       c.Namespace,
				RAMReq:          RAMReqV,
				RAMUsed:         RAMUsedV,
				CPUReq:          CPUReqV,
				CPUUsed:         CPUUsedV,
				GPUReq:          GPUReqV,
				Annotations:     namespaceAnnotations,
				NamespaceLabels: namespacelabels,
				ClusterID:       c.ClusterID,
				ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
			}
			var cpuReq, cpuUse *util.Vector
			if len(costs.CPUReq) > 0 {
				cpuReq = costs.CPUReq[0]
			}
			if len(costs.CPUUsed) > 0 {
				cpuUse = costs.CPUUsed[0]
			}
			costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
			var ramReq, ramUse *util.Vector
			if len(costs.RAMReq) > 0 {
				ramReq = costs.RAMReq[0]
			}
			if len(costs.RAMUsed) > 0 {
				ramUse = costs.RAMUsed[0]
			}
			costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}
	// Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
	// to pass along the cost data
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		containerNameCost[k] = costs
	}
	err = findDeletedNodeInfo(cm.DataSource, missingNodes, start, end)
	if err != nil {
		log.Errorf("Error fetching historical node data: %s", err.Error())
	}
	err = findDeletedPodInfo(cm.DataSource, missingContainers, start, end)
	if err != nil {
		log.Errorf("Error fetching historical pod data: %s", err.Error())
	}
	// NOTE(review): the err returned here is only findDeletedPodInfo's result;
	// a findDeletedNodeInfo failure is logged and then overwritten — confirm
	// this is intentional.
	return containerNameCost, err
}
  496. func queryMetrics(mq source.MetricsQuerier, start, end time.Time) ([]*source.ContainerMetricResult, []*source.ContainerMetricResult, []*source.NetZoneGiBResult, []*source.NetRegionGiBResult, []*source.NetInternetGiBResult, []*source.NetNatGatewayGiBResult, []*source.NetNatGatewayIngressGiBResult, error) {
  497. grp := source.NewQueryGroup()
  498. resChRAMUsage := source.WithGroup(grp, mq.QueryRAMUsageAvg(start, end))
  499. resChCPUUsage := source.WithGroup(grp, mq.QueryCPUUsageAvg(start, end))
  500. resChNetZoneRequests := source.WithGroup(grp, mq.QueryNetZoneGiB(start, end))
  501. resChNetRegionRequests := source.WithGroup(grp, mq.QueryNetRegionGiB(start, end))
  502. resChNetInternetRequests := source.WithGroup(grp, mq.QueryNetInternetGiB(start, end))
  503. resChNetNatGatewayEgressRequests := source.WithGroup(grp, mq.QueryNetNatGatewayGiB(start, end))
  504. resChNetNatGatewayIngressRequests := source.WithGroup(grp, mq.QueryNetNatGatewayIngressGiB(start, end))
  505. // Process metrics query results. Handle errors using ctx.Errors.
  506. resRAMUsage, _ := resChRAMUsage.Await()
  507. resCPUUsage, _ := resChCPUUsage.Await()
  508. resNetZoneRequests, _ := resChNetZoneRequests.Await()
  509. resNetRegionRequests, _ := resChNetRegionRequests.Await()
  510. resNetInternetRequests, _ := resChNetInternetRequests.Await()
  511. resNetNatGatewayEgressRequests, _ := resChNetNatGatewayEgressRequests.Await()
  512. resNetNatGatewayIngressRequests, _ := resChNetNatGatewayIngressRequests.Await()
  513. // NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
  514. // NOTE: will not propagate unless coupled with errors.
  515. if grp.HasErrors() {
  516. // To keep the context of where the errors are occurring, we log the errors here and pass them the error
  517. // back to the caller. The caller should handle the specific case where error is an ErrorCollection
  518. for _, queryErr := range grp.Errors() {
  519. if queryErr.Error != nil {
  520. log.Errorf("ComputeCostData: Request Error: %s", queryErr.Error)
  521. }
  522. if queryErr.ParseError != nil {
  523. log.Errorf("ComputeCostData: Parsing Error: %s", queryErr.ParseError)
  524. }
  525. }
  526. // ErrorCollection is an collection of errors wrapped in a single error implementation
  527. // We opt to not return an error for the sake of running as a pure exporter.
  528. return resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayEgressRequests, resNetNatGatewayIngressRequests, grp.Error()
  529. }
  530. return resRAMUsage, resCPUUsage, resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, resNetNatGatewayEgressRequests, resNetNatGatewayIngressRequests, nil
  531. }
  532. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  533. costs := make(map[string]*CostData)
  534. if len(unmountedPVs) == 0 {
  535. return costs
  536. }
  537. for k, pv := range unmountedPVs {
  538. keyParts := strings.Split(k, ",")
  539. if len(keyParts) != 3 {
  540. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  541. continue
  542. }
  543. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  544. namespacelabels := namespaceLabelsMapping[ns+","+clusterID]
  545. namespaceAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  546. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  547. key := metric.Key()
  548. if costData, ok := costs[key]; !ok {
  549. costs[key] = &CostData{
  550. Name: unmountedPVsContainer,
  551. PodName: unmountedPVsContainer,
  552. NodeName: "",
  553. Annotations: namespaceAnnotations,
  554. Namespace: ns,
  555. NamespaceLabels: namespacelabels,
  556. Labels: namespacelabels,
  557. ClusterID: clusterID,
  558. ClusterName: clusterMap.NameFor(clusterID),
  559. PVCData: pv,
  560. }
  561. } else {
  562. costData.PVCData = append(costData.PVCData, pv...)
  563. }
  564. }
  565. return costs
  566. }
  567. func findDeletedPodInfo(dataSource source.OpenCostDataSource, missingContainers map[string]*CostData, start, end time.Time) error {
  568. if len(missingContainers) > 0 {
  569. mq := dataSource.Metrics()
  570. podLabelsResCh := mq.QueryPodLabels(start, end)
  571. podLabelsResult, err := podLabelsResCh.Await()
  572. if err != nil {
  573. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  574. }
  575. podLabels := make(map[string]map[string]string)
  576. if podLabelsResult != nil {
  577. podLabels, err = parsePodLabels(podLabelsResult)
  578. if err != nil {
  579. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  580. }
  581. }
  582. for key, costData := range missingContainers {
  583. cm, _ := NewContainerMetricFromKey(key)
  584. labels, ok := podLabels[cm.PodName]
  585. if !ok {
  586. labels = make(map[string]string)
  587. }
  588. for k, v := range costData.NamespaceLabels {
  589. labels[k] = v
  590. }
  591. costData.Labels = labels
  592. }
  593. }
  594. return nil
  595. }
  596. func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[string]*costAnalyzerCloud.Node, start, end time.Time) error {
  597. if len(missingNodes) > 0 {
  598. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  599. grp := source.NewQueryGroup()
  600. mq := dataSource.Metrics()
  601. cpuCostResCh := source.WithGroup(grp, mq.QueryNodeCPUPricePerHr(start, end))
  602. ramCostResCh := source.WithGroup(grp, mq.QueryNodeRAMPricePerGiBHr(start, end))
  603. gpuCostResCh := source.WithGroup(grp, mq.QueryNodeGPUPricePerHr(start, end))
  604. cpuCostRes, _ := cpuCostResCh.Await()
  605. ramCostRes, _ := ramCostResCh.Await()
  606. gpuCostRes, _ := gpuCostResCh.Await()
  607. if grp.HasErrors() {
  608. return grp.Error()
  609. }
  610. cpuCosts, err := getCost(cpuCostRes, cpuCostNode, cpuCostData)
  611. if err != nil {
  612. return err
  613. }
  614. ramCosts, err := getCost(ramCostRes, ramCostNode, ramCostData)
  615. if err != nil {
  616. return err
  617. }
  618. gpuCosts, err := getCost(gpuCostRes, gpuCostNode, gpuCostData)
  619. if err != nil {
  620. return err
  621. }
  622. if len(cpuCosts) == 0 {
  623. log.Infof("Opencost metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  624. }
  625. for node, costv := range cpuCosts {
  626. if _, ok := missingNodes[node]; ok {
  627. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  628. } else {
  629. log.DedupedWarningf(5, "Node `%s` in metrics but not k8s api", node)
  630. }
  631. }
  632. for node, costv := range ramCosts {
  633. if _, ok := missingNodes[node]; ok {
  634. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  635. }
  636. }
  637. for node, costv := range gpuCosts {
  638. if _, ok := missingNodes[node]; ok {
  639. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  640. }
  641. }
  642. }
  643. return nil
  644. }
  645. // getContainerAllocation takes the max between request and usage. This function
  646. // returns a slice containing a single element describing the container's
  647. // allocation.
  648. //
  649. // Additionally, the timestamp of the allocation will be the highest value
  650. // timestamp between the two vectors. This mitigates situations where
  651. // Timestamp=0. This should have no effect on the metrics emitted by the
  652. // CostModelMetricsEmitter
  653. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  654. var result []*util.Vector
  655. if req != nil && used != nil {
  656. x1 := req.Value
  657. if math.IsNaN(x1) {
  658. log.Debugf("NaN value found during %s allocation calculation for requests.", allocationType)
  659. x1 = 0.0
  660. }
  661. y1 := used.Value
  662. if math.IsNaN(y1) {
  663. log.Debugf("NaN value found during %s allocation calculation for used.", allocationType)
  664. y1 = 0.0
  665. }
  666. result = []*util.Vector{
  667. {
  668. Value: math.Max(x1, y1),
  669. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  670. },
  671. }
  672. if result[0].Value == 0 && result[0].Timestamp == 0 {
  673. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  674. }
  675. } else if req != nil {
  676. result = []*util.Vector{
  677. {
  678. Value: req.Value,
  679. Timestamp: req.Timestamp,
  680. },
  681. }
  682. } else if used != nil {
  683. result = []*util.Vector{
  684. {
  685. Value: used.Value,
  686. Timestamp: used.Timestamp,
  687. },
  688. }
  689. } else {
  690. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  691. result = []*util.Vector{
  692. {
  693. Value: 0,
  694. Timestamp: float64(time.Now().UTC().Unix()),
  695. },
  696. }
  697. }
  698. return result
  699. }
// addPVData attaches a priced PV to every PersistentVolumeClaimData in
// pvClaimMapping. It prices each PV in the cluster cache via the provider
// (see GetPVCost), then binds each claim to its volume by name; claims whose
// volume is unknown fall back to a PV carrying the configured default storage
// cost. Mutates the mapped values in place. Returns an error if provider
// config or PV pricing fails.
func (cm *CostModel) addPVData(pvClaimMapping map[string]*PersistentVolumeClaimData) error {
	cache := cm.Cache
	cloud := cm.Provider
	cfg, err := cloud.GetConfig()
	if err != nil {
		return err
	}
	// Pull a region from the first node to use as a fallback for PVs that
	// have no region label of their own.
	var defaultRegion string
	nodeList := cache.GetAllNodes()
	if len(nodeList) > 0 {
		defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
	}
	// Index storage-class parameters by class name. A class marked default
	// (either annotation spelling) also serves PVs whose class is "default"
	// or empty.
	storageClasses := cache.GetAllStorageClasses()
	storageClassMap := make(map[string]map[string]string)
	for _, storageClass := range storageClasses {
		params := storageClass.Parameters
		storageClassMap[storageClass.Name] = params
		if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
			storageClassMap["default"] = params
			storageClassMap[""] = params
		}
	}
	// Price every PV in the cluster, keyed by PV name.
	pvs := cache.GetAllPersistentVolumes()
	pvMap := make(map[string]*costAnalyzerCloud.PV)
	for _, pv := range pvs {
		parameters, ok := storageClassMap[pv.Spec.StorageClassName]
		if !ok {
			log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
		}
		var region string
		if r, ok := util.GetRegion(pv.Labels); ok {
			region = r
		} else {
			region = defaultRegion
		}
		cacPv := &costAnalyzerCloud.PV{
			Class:      pv.Spec.StorageClassName,
			Region:     region,
			Parameters: parameters,
		}
		// NOTE(review): a single pricing failure aborts the whole pass —
		// presumably intentional, but confirm this is the desired behavior.
		err := cm.GetPVCost(cacPv, pv, region)
		if err != nil {
			return err
		}
		pvMap[pv.Name] = cacPv
	}
	// Bind each claim to its priced volume, defaulting when not found.
	for _, pvc := range pvClaimMapping {
		if vol, ok := pvMap[pvc.VolumeName]; ok {
			pvc.Volume = vol
		} else {
			log.Debugf("PV not found, using default")
			pvc.Volume = &costAnalyzerCloud.PV{
				Cost: cfg.Storage,
			}
		}
	}
	return nil
}
  759. func (cm *CostModel) GetPVCost(pv *costAnalyzerCloud.PV, kpv *clustercache.PersistentVolume, defaultRegion string) error {
  760. cp := cm.Provider
  761. cfg, err := cp.GetConfig()
  762. if err != nil {
  763. return err
  764. }
  765. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  766. pv.ProviderID = key.ID()
  767. pvWithCost, err := cp.PVPricing(key)
  768. if err != nil {
  769. pv.Cost = cfg.Storage
  770. return err
  771. }
  772. if pvWithCost == nil || pvWithCost.Cost == "" {
  773. pv.Cost = cfg.Storage
  774. return nil // set default cost
  775. }
  776. pv.Cost = pvWithCost.Cost
  777. return nil
  778. }
  779. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  780. if cm.pricingMetadata != nil {
  781. return cm.pricingMetadata, nil
  782. } else {
  783. return nil, fmt.Errorf("Node costs not yet calculated")
  784. }
  785. }
// GetNodeCost computes a priced Node entry for every node in the cluster
// cache, keyed by node name. Base pricing comes from the cloud provider
// (cp.NodePricing); missing fields (instance type, region, arch, CPU/RAM/GPU
// counts) are filled from k8s node labels and capacity, and missing per-resource
// prices are derived from the total node price via CPU/RAM/GPU ratios. Also
// records pricing-match metadata on cm.pricingMetadata and applies reserved-
// instance pricing before returning.
func (cm *CostModel) GetNodeCost() (map[string]*costAnalyzerCloud.Node, error) {
	cp := cm.Provider
	cfg, err := cp.GetConfig()
	if err != nil {
		return nil, err
	}
	nodeList := cm.Cache.GetAllNodes()
	nodes := make(map[string]*costAnalyzerCloud.Node)
	pmd := &costAnalyzerCloud.PricingMatchMetadata{
		TotalNodes:        0,
		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
	}
	for _, n := range nodeList {
		name := n.Name
		nodeLabels := n.Labels
		// Provider ID is exposed as a pseudo-label for key construction.
		nodeLabels["providerID"] = n.SpecProviderID
		pmd.TotalNodes++
		cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
		if err != nil {
			log.Infof("Could not get node pricing for node %s. Falling back to default pricing", name)
			log.Debugf("Error getting node pricing: %s", err.Error())
			// A partial result is used as-is; otherwise fall back to the
			// configured default CPU/RAM prices.
			if cnode != nil {
				nodes[name] = cnode
				continue
			} else {
				cnode = &costAnalyzerCloud.Node{
					VCPUCost: cfg.CPU,
					RAMCost:  cfg.RAM,
				}
			}
		}
		pmd.PricingTypeCounts[cnode.PricingType]++
		// newCnode builds upon cnode but populates/overrides certain fields.
		// cnode was populated leveraging cloud provider public pricing APIs.
		newCnode := *cnode
		if newCnode.InstanceType == "" {
			it, _ := util.GetInstanceType(n.Labels)
			newCnode.InstanceType = it
		}
		if newCnode.Region == "" {
			region, _ := util.GetRegion(n.Labels)
			newCnode.Region = region
		}
		if newCnode.ArchType == "" {
			arch, _ := util.GetArchType(n.Labels)
			newCnode.ArchType = arch
		}
		newCnode.ProviderID = n.SpecProviderID
		// CPU count: prefer the provider's value, else the k8s capacity.
		var cpu float64
		if newCnode.VCPU == "" {
			cpu = float64(n.Status.Capacity.Cpu().Value())
			newCnode.VCPU = n.Status.Capacity.Cpu().String()
		} else {
			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
			if err != nil {
				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
			}
		}
		if math.IsNaN(cpu) {
			log.Warnf("cpu parsed as NaN. Setting to 0.")
			cpu = 0
		}
		if newCnode.RAM == "" {
			newCnode.RAM = n.Status.Capacity.Memory().String()
		}
		if newCnode.RAMBytes == "" {
			newCnode.RAMBytes = fmt.Sprintf("%v", n.Status.Capacity.Memory().Value())
		}
		ram, _ := strconv.ParseFloat(newCnode.RAMBytes, 64)
		if math.IsNaN(ram) {
			log.Warnf("ram parsed as NaN. Setting to 0.")
			ram = 0
		}
		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
		if err != nil {
			gpuc = 0.0
		}
		// The k8s API will often report more accurate results for GPU count
		// than cloud provider public pricing APIs. If found, override the
		// original value.
		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
		if err != nil {
			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
		}
		if gpuOverride > 0 {
			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
			gpuc = gpuOverride
		}
		if vgpuOverride > 0 {
			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
		}
		// Special case for SUSE rancher, since it won't behave with normal
		// calculations, courtesy of the instance type not being "real" (a
		// recognizable AWS instance type.)
		if newCnode.InstanceType == "rke2" {
			log.Infof(
				"Found a SUSE Rancher node %s, defaulting and skipping math",
				cp.GetKey(nodeLabels, n).Features(),
			)
			defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPUCorePrice = 0
			}
			if math.IsNaN(defaultCPUCorePrice) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPUCorePrice = 0
			}
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			var gpuPrice float64
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
				gpuPrice = 0
			} else if len(gpuPricing) > 0 {
				gpuPrice, err = strconv.ParseFloat(gpuPricing, 64)
				if err != nil {
					log.Errorf("Could not parse custom GPU pricing: %s", err)
					gpuPrice = 0
				} else if math.IsNaN(gpuPrice) {
					log.Warnf("Custom GPU pricing parsed as NaN. Setting to 0.")
					gpuPrice = 0
				} else {
					log.Infof("Using custom GPU pricing for node \"%s\": %f", name, gpuPrice)
				}
			} else {
				gpuPrice, err = strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					gpuPrice = 0
				}
				if math.IsNaN(gpuPrice) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					gpuPrice = 0
				}
			}
			defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAMPrice = 0
			}
			if math.IsNaN(defaultRAMPrice) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAMPrice = 0
			}
			// NOTE(review): defaultGPUPrice is parsed and validated but never
			// used below (gpuPrice is used instead) — confirm whether this is
			// dead code.
			defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPUPrice = 0
			}
			if math.IsNaN(defaultGPUPrice) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPUPrice = 0
			}
			// Just say no to doing the ratios! Price each resource directly
			// from the defaults and sum.
			cpuCost := defaultCPUCorePrice * cpu
			gpuCost := gpuPrice * gpuc
			ramCost := defaultRAMPrice * ram
			nodeCost := cpuCost + gpuCost + ramCost
			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
			// We reach this when a GPU is detected on a node, but no cost for
			// the GPU is defined in the OnDemand pricing. Calculate ratios of
			// CPU to RAM and GPU to RAM costs, then distribute the total node
			// cost among the CPU, RAM, and GPU.
			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
			} else if len(gpuPricing) > 0 {
				newCnode.GPUCost = gpuPricing
				log.Infof("Using custom GPU pricing for node \"%s\": %s", name, gpuPricing)
			}
			if newCnode.GPUCost == "" {
				defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
				if err != nil {
					log.Errorf("Could not parse default cpu price")
					defaultCPU = 0
				}
				if math.IsNaN(defaultCPU) {
					log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
					defaultCPU = 0
				}
				defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
				if err != nil {
					log.Errorf("Could not parse default ram price")
					defaultRAM = 0
				}
				if math.IsNaN(defaultRAM) {
					log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
					defaultRAM = 0
				}
				defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					defaultGPU = 0
				}
				if math.IsNaN(defaultGPU) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					defaultGPU = 0
				}
				// Ratios express how expensive a CPU core / GPU is relative
				// to a GiB of RAM, per the configured default prices.
				cpuToRAMRatio := defaultCPU / defaultRAM
				if math.IsNaN(cpuToRAMRatio) {
					log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
					cpuToRAMRatio = 10
				}
				gpuToRAMRatio := defaultGPU / defaultRAM
				if math.IsNaN(gpuToRAMRatio) {
					log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
					gpuToRAMRatio = 100
				}
				ramGB := ram / 1024 / 1024 / 1024
				if math.IsNaN(ramGB) {
					log.Warnf("ramGB is NaN. Setting to 0.")
					ramGB = 0
				}
				// Node capacity expressed in "RAM-GiB equivalents".
				ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
				if math.IsNaN(ramMultiple) {
					log.Warnf("ramMultiple is NaN. Setting to 0.")
					ramMultiple = 0
				}
				var nodePrice float64
				if newCnode.Cost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
					if err != nil {
						log.Errorf("Could not parse total node price")
						return nil, err
					}
				} else if newCnode.VCPUCost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
					if err != nil {
						log.Errorf("Could not parse node vcpu price")
						return nil, err
					}
				} else { // add case to use default pricing model when API data fails.
					log.Debugf("No node price or CPUprice found, falling back to default")
					nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
				}
				if math.IsNaN(nodePrice) {
					log.Warnf("nodePrice parsed as NaN. Setting to 0.")
					nodePrice = 0
				}
				// Distribute the total node price across RAM, CPU, and GPU
				// according to the ratios above.
				ramPrice := (nodePrice / ramMultiple)
				if math.IsNaN(ramPrice) {
					log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
					ramPrice = 0
				}
				cpuPrice := ramPrice * cpuToRAMRatio
				gpuPrice := ramPrice * gpuToRAMRatio
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
				newCnode.RAMBytes = fmt.Sprintf("%f", ram)
				newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			}
		} else if newCnode.RAMCost == "" {
			// We reach this when no RAM cost is defined in the OnDemand
			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
			// distrubte the total node cost among CPU and RAM costs.
			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}
  1145. // TODO: drop some logs
  1146. func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1147. // for fetching prices from cloud provider
  1148. // cfg, err := cp.GetConfig()
  1149. // if err != nil {
  1150. // return nil, err
  1151. // }
  1152. cp := cm.Provider
  1153. servicesList := cm.Cache.GetAllServices()
  1154. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1155. for _, service := range servicesList {
  1156. namespace := service.Namespace
  1157. name := service.Name
  1158. key := serviceKey{
  1159. Cluster: coreenv.GetClusterID(),
  1160. Namespace: namespace,
  1161. Service: name,
  1162. }
  1163. if service.Type == "LoadBalancer" {
  1164. loadBalancer, err := cp.LoadBalancerPricing()
  1165. if err != nil {
  1166. return nil, err
  1167. }
  1168. newLoadBalancer := *loadBalancer
  1169. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1170. address := loadBalancerIngress.IP
  1171. // Some cloud providers use hostname rather than IP
  1172. if address == "" {
  1173. address = loadBalancerIngress.Hostname
  1174. }
  1175. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1176. }
  1177. loadBalancerMap[key] = &newLoadBalancer
  1178. }
  1179. }
  1180. return loadBalancerMap, nil
  1181. }
  1182. func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1183. servicesList := cache.GetAllServices()
  1184. podServicesMapping := make(map[string]map[string][]string)
  1185. for _, service := range servicesList {
  1186. namespace := service.Namespace
  1187. name := service.Name
  1188. key := namespace + "," + clusterID
  1189. if _, ok := podServicesMapping[key]; !ok {
  1190. podServicesMapping[key] = make(map[string][]string)
  1191. }
  1192. s := labels.Nothing()
  1193. if len(service.SpecSelector) > 0 {
  1194. s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
  1195. }
  1196. for _, pod := range podList {
  1197. labelSet := labels.Set(pod.Labels)
  1198. if s.Matches(labelSet) && pod.Namespace == namespace {
  1199. services, ok := podServicesMapping[key][pod.Name]
  1200. if ok {
  1201. podServicesMapping[key][pod.Name] = append(services, name)
  1202. } else {
  1203. podServicesMapping[key][pod.Name] = []string{name}
  1204. }
  1205. }
  1206. }
  1207. }
  1208. return podServicesMapping, nil
  1209. }
  1210. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1211. ssList := cache.GetAllStatefulSets()
  1212. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1213. for _, ss := range ssList {
  1214. namespace := ss.Namespace
  1215. name := ss.Name
  1216. key := namespace + "," + clusterID
  1217. if _, ok := podSSMapping[key]; !ok {
  1218. podSSMapping[key] = make(map[string][]string)
  1219. }
  1220. s, err := metav1.LabelSelectorAsSelector(ss.SpecSelector)
  1221. if err != nil {
  1222. log.Errorf("Error doing deployment label conversion: %s", err.Error())
  1223. }
  1224. for _, pod := range podList {
  1225. labelSet := labels.Set(pod.Labels)
  1226. if s.Matches(labelSet) && pod.Namespace == namespace {
  1227. sss, ok := podSSMapping[key][pod.Name]
  1228. if ok {
  1229. podSSMapping[key][pod.Name] = append(sss, name)
  1230. } else {
  1231. podSSMapping[key][pod.Name] = []string{name}
  1232. }
  1233. }
  1234. }
  1235. }
  1236. return podSSMapping, nil
  1237. }
  1238. func getPodDeployments(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1239. deploymentsList := cache.GetAllDeployments()
  1240. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1241. for _, deployment := range deploymentsList {
  1242. namespace := deployment.Namespace
  1243. name := deployment.Name
  1244. key := namespace + "," + clusterID
  1245. if _, ok := podDeploymentsMapping[key]; !ok {
  1246. podDeploymentsMapping[key] = make(map[string][]string)
  1247. }
  1248. s, err := metav1.LabelSelectorAsSelector(deployment.SpecSelector)
  1249. if err != nil {
  1250. log.Errorf("Error doing deployment label conversion: %s", err)
  1251. }
  1252. for _, pod := range podList {
  1253. labelSet := labels.Set(pod.Labels)
  1254. if s.Matches(labelSet) && pod.Namespace == namespace {
  1255. deployments, ok := podDeploymentsMapping[key][pod.Name]
  1256. if ok {
  1257. podDeploymentsMapping[key][pod.Name] = append(deployments, name)
  1258. } else {
  1259. podDeploymentsMapping[key][pod.Name] = []string{name}
  1260. }
  1261. }
  1262. }
  1263. }
  1264. return podDeploymentsMapping, nil
  1265. }
  1266. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1267. nsToLabels := make(map[string]map[string]string)
  1268. nss := cache.GetAllNamespaces()
  1269. for _, ns := range nss {
  1270. labels := make(map[string]string)
  1271. for k, v := range ns.Labels {
  1272. labels[promutil.SanitizeLabelName(k)] = v
  1273. }
  1274. nsToLabels[ns.Name+","+clusterID] = labels
  1275. }
  1276. return nsToLabels, nil
  1277. }
  1278. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1279. nsToAnnotations := make(map[string]map[string]string)
  1280. nss := cache.GetAllNamespaces()
  1281. for _, ns := range nss {
  1282. annotations := make(map[string]string)
  1283. for k, v := range ns.Annotations {
  1284. annotations[promutil.SanitizeLabelName(k)] = v
  1285. }
  1286. nsToAnnotations[ns.Name+","+clusterID] = annotations
  1287. }
  1288. return nsToAnnotations, nil
  1289. }
  1290. func getDaemonsetsOfPod(pod clustercache.Pod) []string {
  1291. for _, ownerReference := range pod.OwnerReferences {
  1292. if ownerReference.Kind == "DaemonSet" {
  1293. return []string{ownerReference.Name}
  1294. }
  1295. }
  1296. return []string{}
  1297. }
  1298. func getJobsOfPod(pod clustercache.Pod) []string {
  1299. for _, ownerReference := range pod.OwnerReferences {
  1300. if ownerReference.Kind == "Job" {
  1301. return []string{ownerReference.Name}
  1302. }
  1303. }
  1304. return []string{}
  1305. }
  1306. func getStatefulSetsOfPod(pod clustercache.Pod) []string {
  1307. for _, ownerReference := range pod.OwnerReferences {
  1308. if ownerReference.Kind == "StatefulSet" {
  1309. return []string{ownerReference.Name}
  1310. }
  1311. }
  1312. return []string{}
  1313. }
// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
// the number of GPUs and vGPUs are equipped on the node. Returns
// (gpuCount, vgpuCount, error); if unable to identify a GPU count, it will
// return (-1, -1, nil), and (-1, -1, err) on parse/lookup failures.
func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
	// Case 1: Standard NVIDIA GPU — capacity present, no time-slicing
	// replicas label, so physical and virtual counts are identical.
	if hasGpu && g.Value() != 0 && !hasReplicas {
		return float64(g.Value()), float64(g.Value()), nil
	}
	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
	// Physical count comes from the "nvidia.com/gpu.count" label; the virtual
	// (time-sliced) count comes from capacity, whose key depends on the GFD
	// renameByDefault setting.
	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
	if hasReplicas {
		resultGPU := 0.0
		resultVGPU := 0.0
		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
			var err error
			resultGPU, err = strconv.ParseFloat(c, 64)
			if err != nil {
				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
			}
		}
		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
			resultVGPU = float64(s.Value())
		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
			resultVGPU = float64(g.Value())
		} else {
			// No shared capacity published; assume one vGPU per physical GPU.
			resultVGPU = resultGPU
		}
		return resultGPU, resultVGPU, nil
	}
	// Case 3: AWS vGPU — physical count is derived by dividing the advertised
	// vGPU capacity by the per-GPU vGPU coefficient (from the device-plugin
	// daemonset args, defaulting to 10).
	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
		vgpuCount, err := getAllocatableVGPUs(cache)
		if err != nil {
			return -1, -1, err
		}
		vgpuCoeff := 10.0
		if vgpuCount > 0.0 {
			vgpuCoeff = vgpuCount
		}
		if vgpu.Value() != 0 {
			resultGPU := float64(vgpu.Value()) / vgpuCoeff
			resultVGPU := float64(vgpu.Value())
			return resultGPU, resultVGPU, nil
		}
	}
	// No GPU found
	return -1, -1, nil
}
  1366. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  1367. daemonsets := cache.GetAllDaemonSets()
  1368. vgpuCount := 0.0
  1369. for _, ds := range daemonsets {
  1370. dsContainerList := &ds.SpecContainers
  1371. for _, ctnr := range *dsContainerList {
  1372. if ctnr.Args != nil {
  1373. for _, arg := range ctnr.Args {
  1374. if strings.Contains(arg, "--vgpu=") {
  1375. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  1376. if err != nil {
  1377. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  1378. continue
  1379. }
  1380. vgpuCount = vgpus
  1381. return vgpuCount, nil
  1382. }
  1383. }
  1384. }
  1385. }
  1386. }
  1387. return vgpuCount, nil
  1388. }
// PersistentVolumeClaimData describes a persistent volume claim, the priced
// volume bound to it, and the time series of values recorded against it,
// scoped to a namespace and cluster.
type PersistentVolumeClaimData struct {
	Class        string                 `json:"class"`        // storage class name of the claim
	Claim        string                 `json:"claim"`        // PVC name
	Namespace    string                 `json:"namespace"`    // namespace the claim lives in
	ClusterID    string                 `json:"clusterId"`    // cluster the claim belongs to
	TimesClaimed int                    `json:"timesClaimed"` // number of pods mounting this claim
	VolumeName   string                 `json:"volumeName"`   // name of the bound PersistentVolume
	Volume       *costAnalyzerCloud.PV  `json:"persistentVolume"`
	Values       []*util.Vector         `json:"values"`
}
  1399. func measureTime(start time.Time, threshold time.Duration, name string) {
  1400. elapsed := time.Since(start)
  1401. if elapsed > threshold {
  1402. log.Infof("[Profiler] %s: %s", elapsed, name)
  1403. }
  1404. }
// QueryAllocation computes an AllocationSetRange for the given window by
// stepping through it in increments of step and appending one AllocationSet
// per step. It optionally inserts idle allocations, applies a pre-aggregation
// filter, aggregates by the given properties, accumulates the range, and
// attaches proportional asset resource cost (PARC) totals.
//
// Notes:
//   - includeIdle must be true when includeProportionalAssetResourceCosts is
//     true, because PARC totals are gathered while computing idle.
//   - filterString, when non-empty, is applied BEFORE aggregation.
//   - accumulateBy, when not AccumulateOptionNone, collapses the stepped
//     range into larger windows after aggregation.
func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool, filterString string) (*opencost.AllocationSetRange, error) {
	// Validate window is legal
	if window.IsOpen() || window.IsNegative() {
		return nil, fmt.Errorf("illegal window: %s", window)
	}

	var totalsStore opencost.TotalsStore

	// Idle is required for proportional asset costs
	if includeProportionalAssetResourceCosts {
		if !includeIdle {
			return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
		}
		totalsStore = opencost.NewMemoryTotalsStore()
	}

	// Begin with empty response
	asr := opencost.NewAllocationSetRange()

	// Query for AllocationSets in increments of the given step duration,
	// appending each to the response.
	stepStart := *window.Start()
	stepEnd := stepStart.Add(step)

	var isAKS bool

	for window.End().After(stepStart) {
		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd)
		if err != nil {
			return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
		}

		if includeIdle {
			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}

			if includeProportionalAssetResourceCosts {
				// AKS is a special case - there can be a maximum of 2
				// load balancers (1 public and 1 private) in an AKS cluster
				// therefore, when calculating PARCs for load balancers,
				// we must know if this is an AKS cluster
				for _, node := range assetSet.Nodes {
					if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
						isAKS = true
						break
					}
				}

				_, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
				if err != nil {
					log.Errorf("Allocation: error updating asset resource totals for %s: %s", assetSet.Window, err)
				}
			}

			idleSet, err := computeIdleAllocations(allocSet, assetSet, idleByNode)
			if err != nil {
				return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
			}

			// Merge the step's idle allocations into its allocation set.
			for _, idleAlloc := range idleSet.Allocations {
				allocSet.Insert(idleAlloc)
			}
		}

		asr.Append(allocSet)

		// Advance to the next step window.
		stepStart = stepEnd
		stepEnd = stepStart.Add(step)
	}

	// Apply allocation filter BEFORE aggregation if provided
	if filterString != "" {
		parser := allocation.NewAllocationFilterParser()
		filterNode, err := parser.Parse(filterString)
		if err != nil {
			return nil, fmt.Errorf("invalid filter: %w", err)
		}

		compiler := opencost.NewAllocationMatchCompiler(nil)
		matcher, err := compiler.Compile(filterNode)
		if err != nil {
			return nil, fmt.Errorf("failed to compile filter: %w", err)
		}

		// Rebuild the range keeping only matching allocations; sets left
		// empty after filtering are dropped entirely.
		filteredASR := opencost.NewAllocationSetRange()
		for _, as := range asr.Slice() {
			filteredAS := opencost.NewAllocationSet(as.Start(), as.End())
			for _, alloc := range as.Allocations {
				if matcher.Matches(alloc) {
					filteredAS.Set(alloc)
				}
			}
			if filteredAS.Length() > 0 {
				filteredASR.Append(filteredAS)
			}
		}
		asr = filteredASR
	}

	// Set aggregation options and aggregate
	var shareIdleOpt string
	if shareIdle {
		shareIdleOpt = opencost.ShareWeighted
	} else {
		shareIdleOpt = opencost.ShareNone
	}

	opts := &opencost.AllocationAggregationOptions{
		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
		IdleByNode:                            idleByNode,
		IncludeAggregatedMetadata:             includeAggregatedMetadata,
		ShareIdle:                             shareIdleOpt,
	}

	// Aggregate
	err := asr.AggregateBy(aggregate, opts)
	if err != nil {
		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
	}

	// Accumulate, if requested
	if accumulateBy != opencost.AccumulateOptionNone {
		asr, err = asr.Accumulate(accumulateBy)
		if err != nil {
			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
			return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
		}

		// when accumulating and returning PARCs, we need the totals for the
		// accumulated windows to accurately compute a fraction
		if includeProportionalAssetResourceCosts {
			assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
			if err != nil {
				return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
			_, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
			if err != nil {
				log.Errorf("Allocation: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
			}
		}
	}

	if includeProportionalAssetResourceCosts {
		for _, as := range asr.Allocations {
			totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
			}

			totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
			if !ok {
				log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
				return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
			}

			var totalPublicLbCost, totalPrivateLbCost float64
			if isAKS && sharedLoadBalancer {
				// loop through all assetTotals, adding all load balancer costs by public and private
				for _, tot := range totalStoreByNode {
					if tot.PrivateLoadBalancer {
						totalPrivateLbCost += tot.LoadBalancerCost
					} else {
						totalPublicLbCost += tot.LoadBalancerCost
					}
				}
			}

			// loop through each allocation set, using total cost from totals store
			for _, alloc := range as.Allocations {
				for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
					key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")

					// for each parc , check the totals store for each
					// on a totals hit, set the corresponding total and calculate percentage
					// (node-level totals take precedence over cluster-level)
					var totals *opencost.AssetTotals
					if totalsLoc, found := totalStoreByCluster[key]; found {
						totals = totalsLoc
					}
					if totalsLoc, found := totalStoreByNode[key]; found {
						totals = totalsLoc
					}
					if totals == nil {
						log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
						continue
					}

					parc.CPUTotalCost = totals.CPUCost
					parc.GPUTotalCost = totals.GPUCost
					parc.RAMTotalCost = totals.RAMCost
					parc.PVTotalCost = totals.PersistentVolumeCost

					if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
						// Azure is a special case - use computed totals above
						// use the lbAllocations in the object to determine if
						// this PARC is a public or private load balancer
						// then set the total accordingly
						// AKS only has 1 public and 1 private load balancer
						lbAlloc, found := alloc.LoadBalancers[key]
						if found {
							if lbAlloc.Private {
								parc.LoadBalancerTotalCost = totalPrivateLbCost
							} else {
								parc.LoadBalancerTotalCost = totalPublicLbCost
							}
						}
					} else {
						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
					}

					opencost.ComputePercentages(&parc)

					// parc is a map-value copy; write the updated value back.
					alloc.ProportionalAssetResourceCosts[rawKey] = parc
				}
			}
		}
	}

	return asr, nil
}
  1596. // debugAssetAllocationMismatch analyzes and logs discrepancies between asset and allocation data
  1597. // This helps diagnose pricing issues and negative idle costs
  1598. func debugAssetAllocationMismatch(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet) {
  1599. log.Debugf("=== Asset-Allocation Debug Analysis for window %s ===", allocSet.Window)
  1600. // Build maps for efficient lookup
  1601. assetsByProviderID := make(map[string]*opencost.Node)
  1602. assetsByNode := make(map[string]*opencost.Node)
  1603. for _, asset := range assetSet.Nodes {
  1604. if asset.Properties != nil && asset.Properties.ProviderID != "" {
  1605. assetsByProviderID[asset.Properties.ProviderID] = asset
  1606. }
  1607. if asset.Properties != nil && asset.Properties.Name != "" {
  1608. assetsByNode[asset.Properties.Name] = asset
  1609. }
  1610. }
  1611. // 1) Find allocations without matching assets (by ProviderID)
  1612. allocsWithoutAssets := make([]*opencost.Allocation, 0)
  1613. for _, alloc := range allocSet.Allocations {
  1614. if alloc.Properties == nil {
  1615. continue
  1616. }
  1617. providerID := alloc.Properties.ProviderID
  1618. if providerID == "" {
  1619. continue
  1620. }
  1621. if _, found := assetsByProviderID[providerID]; !found {
  1622. allocsWithoutAssets = append(allocsWithoutAssets, alloc)
  1623. }
  1624. }
  1625. if len(allocsWithoutAssets) > 0 {
  1626. log.Debugf("Found %d allocations without matching assets:", len(allocsWithoutAssets))
  1627. for _, alloc := range allocsWithoutAssets {
  1628. log.Debugf(" - Allocation: %s, Node: %s, ProviderID: %s, TotalCost: %.4f",
  1629. alloc.Name,
  1630. alloc.Properties.Node,
  1631. alloc.Properties.ProviderID,
  1632. alloc.TotalCost())
  1633. }
  1634. }
  1635. // 2) Sum allocations per node and compare to node asset costs
  1636. allocTotalsByNode := make(map[string]*struct {
  1637. CPUCost float64
  1638. GPUCost float64
  1639. RAMCost float64
  1640. TotalCost float64
  1641. CPUCoreHours float64
  1642. GPUHours float64
  1643. RAMByteHours float64
  1644. Count int
  1645. })
  1646. for _, alloc := range allocSet.Allocations {
  1647. if alloc.Properties == nil || alloc.Properties.Node == "" {
  1648. continue
  1649. }
  1650. node := alloc.Properties.Node
  1651. if _, exists := allocTotalsByNode[node]; !exists {
  1652. allocTotalsByNode[node] = &struct {
  1653. CPUCost float64
  1654. GPUCost float64
  1655. RAMCost float64
  1656. TotalCost float64
  1657. CPUCoreHours float64
  1658. GPUHours float64
  1659. RAMByteHours float64
  1660. Count int
  1661. }{}
  1662. }
  1663. allocTotalsByNode[node].CPUCost += alloc.CPUCost
  1664. allocTotalsByNode[node].GPUCost += alloc.GPUCost
  1665. allocTotalsByNode[node].RAMCost += alloc.RAMCost
  1666. allocTotalsByNode[node].TotalCost += alloc.TotalCost()
  1667. allocTotalsByNode[node].CPUCoreHours += alloc.CPUCoreHours
  1668. allocTotalsByNode[node].GPUHours += alloc.GPUHours
  1669. allocTotalsByNode[node].RAMByteHours += alloc.RAMByteHours
  1670. allocTotalsByNode[node].Count++
  1671. }
  1672. log.Debugf("Per-Node Asset vs Allocation Comparison:")
  1673. for node, allocTotals := range allocTotalsByNode {
  1674. asset, hasAsset := assetsByNode[node]
  1675. if !hasAsset {
  1676. log.Debugf(" Node %s: Has allocations but NO ASSET (allocations: %d, total cost: %.4f)",
  1677. node, allocTotals.Count, allocTotals.TotalCost)
  1678. continue
  1679. }
  1680. assetCPU := asset.CPUCost
  1681. assetGPU := asset.GPUCost
  1682. assetRAM := asset.RAMCost
  1683. assetTotal := asset.TotalCost()
  1684. cpuDiff := assetCPU - allocTotals.CPUCost
  1685. gpuDiff := assetGPU - allocTotals.GPUCost
  1686. ramDiff := assetRAM - allocTotals.RAMCost
  1687. totalDiff := assetTotal - allocTotals.TotalCost
  1688. status := "OK"
  1689. if cpuDiff < 0 || gpuDiff < 0 || ramDiff < 0 {
  1690. status = "NEGATIVE_IDLE"
  1691. }
  1692. log.Debugf(" Node %s [%s]:", node, status)
  1693. log.Debugf(" Asset: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f",
  1694. assetCPU, assetGPU, assetRAM, assetTotal)
  1695. log.Debugf(" Allocation: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f (%d allocs)",
  1696. allocTotals.CPUCost, allocTotals.GPUCost, allocTotals.RAMCost, allocTotals.TotalCost, allocTotals.Count)
  1697. log.Debugf(" Difference: CPU=%.4f, GPU=%.4f, RAM=%.4f, Total=%.4f",
  1698. cpuDiff, gpuDiff, ramDiff, totalDiff)
  1699. if asset.Adjustment != 0 {
  1700. log.Debugf(" Adjustment: %.4f", asset.Adjustment)
  1701. }
  1702. // Compare resource amounts vs costs: higher resources should have higher costs
  1703. assetCPUHours := asset.CPUCoreHours
  1704. assetGPUHours := asset.GPUHours
  1705. assetRAMBytes := asset.RAMByteHours
  1706. allocCPUHours := allocTotals.CPUCoreHours
  1707. allocGPUHours := allocTotals.GPUHours
  1708. allocRAMBytes := allocTotals.RAMByteHours
  1709. // Warn if resource amounts and costs are inverted (higher resources but lower costs)
  1710. if assetCPUHours > 0 && allocCPUHours > 0 {
  1711. if assetCPUHours > allocCPUHours && assetCPU < allocTotals.CPUCost {
  1712. log.Warnf("Resource-cost inversion for %s CPU: asset has MORE hours (%.2f) but LESS cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1713. node, assetCPUHours, assetCPU, allocCPUHours, allocTotals.CPUCost)
  1714. } else if assetCPUHours < allocCPUHours && assetCPU > allocTotals.CPUCost {
  1715. log.Warnf("Resource-cost inversion for %s CPU: asset has LESS hours (%.2f) but MORE cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1716. node, assetCPUHours, assetCPU, allocCPUHours, allocTotals.CPUCost)
  1717. }
  1718. }
  1719. if assetGPUHours > 0 && allocGPUHours > 0 {
  1720. if assetGPUHours > allocGPUHours && assetGPU < allocTotals.GPUCost {
  1721. log.Warnf("Resource-cost inversion for %s GPU: asset has MORE hours (%.2f) but LESS cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1722. node, assetGPUHours, assetGPU, allocGPUHours, allocTotals.GPUCost)
  1723. } else if assetGPUHours < allocGPUHours && assetGPU > allocTotals.GPUCost {
  1724. log.Warnf("Resource-cost inversion for %s GPU: asset has LESS hours (%.2f) but MORE cost (%.4f) than allocations (hours: %.2f, cost: %.4f)",
  1725. node, assetGPUHours, assetGPU, allocGPUHours, allocTotals.GPUCost)
  1726. }
  1727. }
  1728. if assetRAMBytes > 0 && allocRAMBytes > 0 {
  1729. if assetRAMBytes > allocRAMBytes && assetRAM < allocTotals.RAMCost {
  1730. log.Warnf("Resource-cost inversion for %s RAM: asset has MORE byte-hours (%.2f) but LESS cost (%.4f) than allocations (byte-hours: %.2f, cost: %.4f)",
  1731. node, assetRAMBytes, assetRAM, allocRAMBytes, allocTotals.RAMCost)
  1732. } else if assetRAMBytes < allocRAMBytes && assetRAM > allocTotals.RAMCost {
  1733. log.Warnf("Resource-cost inversion for %s RAM: asset has LESS byte-hours (%.2f) but MORE cost (%.4f) than allocations (byte-hours: %.2f, cost: %.4f)",
  1734. node, assetRAMBytes, assetRAM, allocRAMBytes, allocTotals.RAMCost)
  1735. }
  1736. }
  1737. // Log resource amounts for debugging
  1738. log.Debugf(" Resource Hours:")
  1739. log.Debugf(" Asset: CPU=%.2f hours, GPU=%.2f hours, RAM=%.2f byte-hours",
  1740. assetCPUHours, assetGPUHours, assetRAMBytes)
  1741. log.Debugf(" Allocation: CPU=%.2f hours, GPU=%.2f hours, RAM=%.2f byte-hours",
  1742. allocCPUHours, allocGPUHours, allocRAMBytes)
  1743. }
  1744. // 3) Sum total of all node costs
  1745. totalNodeCPU := 0.0
  1746. totalNodeGPU := 0.0
  1747. totalNodeRAM := 0.0
  1748. totalNodeCost := 0.0
  1749. nodeCount := 0
  1750. for _, asset := range assetSet.Nodes {
  1751. totalNodeCPU += asset.CPUCost
  1752. totalNodeGPU += asset.GPUCost
  1753. totalNodeRAM += asset.RAMCost
  1754. totalNodeCost += asset.TotalCost()
  1755. nodeCount++
  1756. }
  1757. log.Debugf("Total Node Asset Costs:")
  1758. log.Debugf(" Nodes: %d", nodeCount)
  1759. log.Debugf(" CPU: %.4f", totalNodeCPU)
  1760. log.Debugf(" GPU: %.4f", totalNodeGPU)
  1761. log.Debugf(" RAM: %.4f", totalNodeRAM)
  1762. log.Debugf(" Total: %.4f", totalNodeCost)
  1763. // 4) Sum total of all allocation costs
  1764. totalAllocCPU := 0.0
  1765. totalAllocGPU := 0.0
  1766. totalAllocRAM := 0.0
  1767. totalAllocCost := 0.0
  1768. allocCount := 0
  1769. for _, alloc := range allocSet.Allocations {
  1770. totalAllocCPU += alloc.CPUCost
  1771. totalAllocGPU += alloc.GPUCost
  1772. totalAllocRAM += alloc.RAMCost
  1773. totalAllocCost += alloc.TotalCost()
  1774. allocCount++
  1775. }
  1776. log.Debugf("Total Allocation Costs:")
  1777. log.Debugf(" Allocations: %d", allocCount)
  1778. log.Debugf(" CPU: %.4f", totalAllocCPU)
  1779. log.Debugf(" GPU: %.4f", totalAllocGPU)
  1780. log.Debugf(" RAM: %.4f", totalAllocRAM)
  1781. log.Debugf(" Total: %.4f", totalAllocCost)
  1782. // Overall comparison
  1783. log.Debugf("Overall Asset vs Allocation:")
  1784. log.Debugf(" CPU Difference: %.4f (Asset - Allocation)", totalNodeCPU-totalAllocCPU)
  1785. log.Debugf(" GPU Difference: %.4f (Asset - Allocation)", totalNodeGPU-totalAllocGPU)
  1786. log.Debugf(" RAM Difference: %.4f (Asset - Allocation)", totalNodeRAM-totalAllocRAM)
  1787. log.Debugf(" Total Difference: %.4f (Asset - Allocation)", totalNodeCost-totalAllocCost)
  1788. log.Debugf("=== End Asset-Allocation Debug Analysis ===")
  1789. }
  1790. func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
  1791. if !allocSet.Window.Equal(assetSet.Window) {
  1792. return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
  1793. }
  1794. // Run debug analysis when log level is debug
  1795. debugAssetAllocationMismatch(allocSet, assetSet)
  1796. var allocTotals map[string]*opencost.AllocationTotals
  1797. var assetTotals map[string]*opencost.AssetTotals
  1798. if idleByNode {
  1799. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
  1800. assetTotals = opencost.ComputeAssetTotals(assetSet, true)
  1801. } else {
  1802. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
  1803. assetTotals = opencost.ComputeAssetTotals(assetSet, false)
  1804. }
  1805. start, end := *allocSet.Window.Start(), *allocSet.Window.End()
  1806. idleSet := opencost.NewAllocationSet(start, end)
  1807. for key, assetTotal := range assetTotals {
  1808. allocTotal, ok := allocTotals[key]
  1809. if !ok {
  1810. log.Warnf("Allocation: did not find allocations for asset key: %s", key)
  1811. // Use a zero-value set of totals. This indicates either (1) an
  1812. // error computing totals, or (2) that no allocations ran on the
  1813. // given node for the given window.
  1814. allocTotal = &opencost.AllocationTotals{
  1815. Cluster: assetTotal.Cluster,
  1816. Node: assetTotal.Node,
  1817. Start: assetTotal.Start,
  1818. End: assetTotal.End,
  1819. }
  1820. }
  1821. // Insert one idle allocation for each key (whether by node or
  1822. // by cluster), defined as the difference between the total
  1823. // asset cost and the allocated cost per-resource.
  1824. // Idle costs are clamped to zero to prevent negative values that can occur
  1825. // when asset total costs are less than allocated costs. This can happen when:
  1826. // - Pricing data is unavailable (promless mode, API failures, missing price data)
  1827. // - Custom pricing is misconfigured or returns zero values
  1828. // - Cloud billing adjustments reduce asset costs below allocation costs
  1829. // - Allocation calculations exceed asset costs due to timing or rounding
  1830. name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
  1831. cpuIdleCost := assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost()
  1832. gpuIdleCost := assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost()
  1833. ramIdleCost := assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost()
  1834. // Clamp idle costs to zero to prevent negative idle allocations
  1835. if cpuIdleCost < 0 {
  1836. log.Warnf("Negative CPU idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
  1837. key, assetTotal.TotalCPUCost(), allocTotal.TotalCPUCost())
  1838. cpuIdleCost = 0
  1839. }
  1840. if gpuIdleCost < 0 {
  1841. log.Warnf("Negative GPU idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
  1842. key, assetTotal.TotalGPUCost(), allocTotal.TotalGPUCost())
  1843. gpuIdleCost = 0
  1844. }
  1845. if ramIdleCost < 0 {
  1846. log.Warnf("Negative RAM idle cost detected for %s: asset total (%.4f) < allocation total (%.4f), clamping to 0",
  1847. key, assetTotal.TotalRAMCost(), allocTotal.TotalRAMCost())
  1848. ramIdleCost = 0
  1849. }
  1850. err := idleSet.Insert(&opencost.Allocation{
  1851. Name: name,
  1852. Window: idleSet.Window.Clone(),
  1853. Properties: &opencost.AllocationProperties{
  1854. Cluster: assetTotal.Cluster,
  1855. Node: assetTotal.Node,
  1856. ProviderID: assetTotal.ProviderID,
  1857. },
  1858. Start: assetTotal.Start,
  1859. End: assetTotal.End,
  1860. CPUCost: cpuIdleCost,
  1861. GPUCost: gpuIdleCost,
  1862. RAMCost: ramIdleCost,
  1863. })
  1864. if err != nil {
  1865. return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
  1866. }
  1867. }
  1868. return idleSet, nil
  1869. }
// GetDataSource returns the OpenCost data source backing this CostModel.
func (cm *CostModel) GetDataSource() source.OpenCostDataSource {
	return cm.DataSource
}