// costmodel.go
  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "math"
  6. "regexp"
  7. "strconv"
  8. "strings"
  9. "time"
  10. "github.com/opencost/opencost/core/pkg/clustercache"
  11. "github.com/opencost/opencost/core/pkg/clusters"
  12. "github.com/opencost/opencost/core/pkg/log"
  13. "github.com/opencost/opencost/core/pkg/opencost"
  14. "github.com/opencost/opencost/core/pkg/source"
  15. "github.com/opencost/opencost/core/pkg/util"
  16. "github.com/opencost/opencost/core/pkg/util/promutil"
  17. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  18. "github.com/opencost/opencost/pkg/env"
  19. v1 "k8s.io/api/core/v1"
  20. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  21. "k8s.io/apimachinery/pkg/labels"
  22. "golang.org/x/sync/singleflight"
  23. )
const (
	// profileThreshold is the duration (in nanoseconds) above which a
	// measured operation is logged as slow by measureTime; equals 1s.
	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

	// unmountedPVsContainer is the synthetic container (and pod) name used
	// to carry cost data for PersistentVolumes not mounted by any pod.
	unmountedPVsContainer = "unmounted-pvs"
)

// isCron matches a CronJob name and captures the non-timestamp name
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel computes cost allocation data by joining metrics from an
// OpenCost data source with Kubernetes cluster state (via the cache) and
// cloud-provider pricing.
type CostModel struct {
	Cache         clustercache.ClusterCache // cached view of cluster objects (pods, nodes, PVs, ...)
	ClusterMap    clusters.ClusterMap       // maps cluster IDs to human-readable cluster names
	BatchDuration time.Duration
	// RequestGroup collapses identical concurrent requests into a single
	// in-flight call prior to caching (see NewCostModel).
	RequestGroup *singleflight.Group
	DataSource   source.OpenCostDataSource
	Provider     costAnalyzerCloud.Provider
	// pricingMetadata records pricing-match results; unexported, managed internally.
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  43. func NewCostModel(
  44. dataSource source.OpenCostDataSource,
  45. provider costAnalyzerCloud.Provider,
  46. cache clustercache.ClusterCache,
  47. clusterMap clusters.ClusterMap,
  48. batchDuration time.Duration,
  49. ) *CostModel {
  50. // request grouping to prevent over-requesting the same data prior to caching
  51. requestGroup := new(singleflight.Group)
  52. return &CostModel{
  53. Cache: cache,
  54. ClusterMap: clusterMap,
  55. BatchDuration: batchDuration,
  56. DataSource: dataSource,
  57. Provider: provider,
  58. RequestGroup: requestGroup,
  59. }
  60. }
// CostData holds the assembled cost information for a single container:
// request/usage/allocation vectors for RAM, CPU and GPU, controller
// ownership, PV claims, network cost, and identifying metadata.
type CostData struct {
	Name         string                  `json:"name,omitempty"`    // container name
	PodName      string                  `json:"podName,omitempty"` // owning pod
	NodeName     string                  `json:"nodeName,omitempty"`
	NodeData     *costAnalyzerCloud.Node `json:"node,omitempty"` // pricing data for the node hosting this container
	Namespace    string                  `json:"namespace,omitempty"`
	Deployments  []string                `json:"deployments,omitempty"`
	Services     []string                `json:"services,omitempty"`
	Daemonsets   []string                `json:"daemonsets,omitempty"`
	Statefulsets []string                `json:"statefulsets,omitempty"`
	Jobs         []string                `json:"jobs,omitempty"`
	// Request/usage/allocation time series; allocation is derived as the
	// max of request and usage (see getContainerAllocation).
	RAMReq          []*util.Vector               `json:"ramreq,omitempty"`
	RAMUsed         []*util.Vector               `json:"ramused,omitempty"`
	RAMAllocation   []*util.Vector               `json:"ramallocated,omitempty"`
	CPUReq          []*util.Vector               `json:"cpureq,omitempty"`
	CPUUsed         []*util.Vector               `json:"cpuused,omitempty"`
	CPUAllocation   []*util.Vector               `json:"cpuallocated,omitempty"`
	GPUReq          []*util.Vector               `json:"gpureq,omitempty"`
	PVCData         []*PersistentVolumeClaimData `json:"pvcData,omitempty"` // claims attributed to this container (first container of a pod only)
	NetworkData     []*util.Vector               `json:"network,omitempty"`
	Annotations     map[string]string            `json:"annotations,omitempty"`
	Labels          map[string]string            `json:"labels,omitempty"` // pod labels merged over namespace labels
	NamespaceLabels map[string]string            `json:"namespaceLabels,omitempty"`
	ClusterID       string                       `json:"clusterId"`
	ClusterName     string                       `json:"clusterName"`
}
  87. func (cd *CostData) String() string {
  88. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  89. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  90. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  91. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  92. }
  93. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  94. hasController = false
  95. if len(cd.Deployments) > 0 {
  96. name = cd.Deployments[0]
  97. kind = "deployment"
  98. hasController = true
  99. } else if len(cd.Statefulsets) > 0 {
  100. name = cd.Statefulsets[0]
  101. kind = "statefulset"
  102. hasController = true
  103. } else if len(cd.Daemonsets) > 0 {
  104. name = cd.Daemonsets[0]
  105. kind = "daemonset"
  106. hasController = true
  107. } else if len(cd.Jobs) > 0 {
  108. name = cd.Jobs[0]
  109. kind = "job"
  110. hasController = true
  111. match := isCron.FindStringSubmatch(name)
  112. if match != nil {
  113. name = match[1]
  114. }
  115. }
  116. return name, kind, hasController
  117. }
// ComputeCostData assembles per-container cost data for the window [start, end].
// It combines:
//   - usage metrics (RAM/CPU averages, network egress by zone/region/internet)
//     queried concurrently from the data source,
//   - pod, controller, service, and namespace metadata from the Kubernetes
//     API cache,
//   - node pricing from GetNodeCost, and
//   - PV/PVC data, including synthetic "unmounted-pvs" entries for volumes
//     no running container claims.
//
// The returned map is keyed by container metric key. Query errors are logged
// and tolerated where possible so the model can run as a pure exporter;
// containers and nodes that no longer exist in the cluster are backfilled
// from recorded metrics via findDeletedPodInfo/findDeletedNodeInfo.
func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
	// Cluster ID is specific to the source cluster
	clusterID := env.GetClusterID()
	cp := cm.Provider
	ds := cm.DataSource
	mq := ds.Metrics()

	// Issue all usage queries as one concurrent group; results are awaited below.
	grp := source.NewQueryGroup()
	resChRAMUsage := source.WithGroup(grp, mq.QueryRAMUsageAvg(start, end))
	resChCPUUsage := source.WithGroup(grp, mq.QueryCPUUsageAvg(start, end))
	resChNetZoneRequests := source.WithGroup(grp, mq.QueryNetZoneGiB(start, end))
	resChNetRegionRequests := source.WithGroup(grp, mq.QueryNetRegionGiB(start, end))
	resChNetInternetRequests := source.WithGroup(grp, mq.QueryNetInternetGiB(start, end))

	// Pull pod information from k8s API
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}

	// Process Prometheus query results. Handle errors using ctx.Errors.
	resRAMUsage, _ := resChRAMUsage.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()

	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if grp.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass them the error
		// back to the caller. The caller should handle the specific case where error is an ErrorCollection
		for _, queryErr := range grp.Errors() {
			if queryErr.Error != nil {
				log.Errorf("ComputeCostData: Request Error: %s", queryErr.Error)
			}
			if queryErr.ParseError != nil {
				log.Errorf("ComputeCostData: Parsing Error: %s", queryErr.ParseError)
			}
		}
		// ErrorCollection is an collection of errors wrapped in a single error implementation
		// We opt to not return an error for the sake of running as a pure exporter.
		log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", grp.Error())
	}

	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")

	nodes, err := cm.GetNodeCost()
	if err != nil {
		log.Warnf("GetNodeCost: no node cost model available: %s", err)
		return nil, err
	}

	// Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
	if err != nil {
		log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = cm.addPVData(pvClaimMapping)
		if err != nil {
			return nil, err
		}
		// copy claim mappings into zombies, then remove as they're discovered
		for k, v := range pvClaimMapping {
			unmountedPVs[k] = []*PersistentVolumeClaimData{v}
		}
	}

	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Warnf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}

	containerNameCost := make(map[string]*CostData)
	// containers is the union of every container key seen in usage metrics
	// or in the current pod list; each gets a CostData entry below.
	containers := make(map[string]bool)
	RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, clusterID)
	if err != nil {
		return nil, err
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, clusterID) // No need to normalize here, as this comes from a counter
	if err != nil {
		return nil, err
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}

	// Record which containers belong to currently running pods so we can
	// distinguish live containers from deleted ones below.
	currentContainers := make(map[string]clustercache.Pod)
	for _, pod := range podlist {
		if pod.Status.Phase != v1.PodRunning {
			continue
		}
		cs, err := NewContainerMetricsFromPod(pod, clusterID)
		if err != nil {
			return nil, err
		}
		for _, c := range cs {
			containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
			currentContainers[c.Key()] = *pod
		}
	}

	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		// The _else_ case for this statement is the case in which the container has been
		// deleted so we have usage information but not request information. In that case,
		// we return partial data for CPU and RAM: only usage and not requests.
		if pod, ok := currentContainers[key]; ok {
			podName := pod.Name
			ns := pod.Namespace

			// Merge namespace labels/annotations into the pod's own, with
			// the pod's values taking precedence on conflicts.
			nsLabels := namespaceLabelsMapping[ns+","+clusterID]
			podLabels := pod.Labels
			if podLabels == nil {
				podLabels = make(map[string]string)
			}
			for k, v := range nsLabels {
				if _, ok := podLabels[k]; !ok {
					podLabels[k] = v
				}
			}
			nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
			podAnnotations := pod.Annotations
			if podAnnotations == nil {
				podAnnotations = make(map[string]string)
			}
			for k, v := range nsAnnotations {
				if _, ok := podAnnotations[k]; !ok {
					podAnnotations[k] = v
				}
			}

			nodeName := pod.Spec.NodeName
			var nodeData *costAnalyzerCloud.Node
			if _, ok := nodes[nodeName]; ok {
				nodeData = nodes[nodeName]
			}

			nsKey := ns + "," + clusterID
			var podDeployments []string
			if _, ok := podDeploymentsMapping[nsKey]; ok {
				if ds, ok := podDeploymentsMapping[nsKey][pod.Name]; ok {
					podDeployments = ds
				} else {
					podDeployments = []string{}
				}
			}

			// Collect this pod's PV claims; any claim found here is, by
			// definition, mounted, so drop it from the unmounted set.
			var podPVs []*PersistentVolumeClaimData
			podClaims := pod.Spec.Volumes
			for _, vol := range podClaims {
				if vol.PersistentVolumeClaim != nil {
					name := vol.PersistentVolumeClaim.ClaimName
					key := ns + "," + name + "," + clusterID
					if pvClaim, ok := pvClaimMapping[key]; ok {
						pvClaim.TimesClaimed++
						podPVs = append(podPVs, pvClaim)
						// Remove entry from potential unmounted pvs
						delete(unmountedPVs, key)
					}
				}
			}

			var podNetCosts []*util.Vector
			if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
				netCosts, err := GetNetworkCost(usage, cp)
				if err != nil {
					log.Debugf("Error pulling network costs: %s", err.Error())
				} else {
					podNetCosts = netCosts
				}
			}

			var podServices []string
			if _, ok := podServicesMapping[nsKey]; ok {
				if svcs, ok := podServicesMapping[nsKey][pod.Name]; ok {
					podServices = svcs
				} else {
					podServices = []string{}
				}
			}

			for i, container := range pod.Spec.Containers {
				containerName := container.Name
				// recreate the key and look up data for this container
				newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()
				// k8s.io/apimachinery/pkg/api/resource/amount.go and
				// k8s.io/apimachinery/pkg/api/resource/quantity.go for
				// details on the "amount" API. See
				// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
				// for the units of memory and CPU.
				ramRequestBytes := container.Resources.Requests.Memory().Value()
				// Because information on container RAM & CPU requests isn't
				// coming from Prometheus, it won't have a timestamp associated
				// with it. We need to provide a timestamp.
				RAMReqV := []*util.Vector{
					{
						Value:     float64(ramRequestBytes),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				// use millicores so we can convert to cores in a float64 format
				cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
				CPUReqV := []*util.Vector{
					{
						Value:     float64(cpuRequestMilliCores) / 1000,
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				// GPU count: NVIDIA request, then NVIDIA limit, then AWS
				// vGPU request/limit — first match wins.
				gpuReqCount := 0.0
				if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				}
				GPUReqV := []*util.Vector{
					{
						Value:     float64(gpuReqCount),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}
				RAMUsedV, ok := RAMUsedMap[newKey]
				if !ok {
					log.Debug("no RAM usage for " + newKey)
					RAMUsedV = []*util.Vector{{}}
				}
				CPUUsedV, ok := CPUUsedMap[newKey]
				if !ok {
					log.Debug("no CPU usage for " + newKey)
					CPUUsedV = []*util.Vector{{}}
				}
				var pvReq []*PersistentVolumeClaimData
				var netReq []*util.Vector
				if i == 0 { // avoid duplicating by just assigning all claims to the first container.
					pvReq = podPVs
					netReq = podNetCosts
				}
				costs := &CostData{
					Name:            containerName,
					PodName:         podName,
					NodeName:        nodeName,
					Namespace:       ns,
					Deployments:     podDeployments,
					Services:        podServices,
					Daemonsets:      getDaemonsetsOfPod(pod),
					Jobs:            getJobsOfPod(pod),
					Statefulsets:    getStatefulSetsOfPod(pod),
					NodeData:        nodeData,
					RAMReq:          RAMReqV,
					RAMUsed:         RAMUsedV,
					CPUReq:          CPUReqV,
					CPUUsed:         CPUUsedV,
					GPUReq:          GPUReqV,
					PVCData:         pvReq,
					NetworkData:     netReq,
					Annotations:     podAnnotations,
					Labels:          podLabels,
					NamespaceLabels: nsLabels,
					ClusterID:       clusterID,
					ClusterName:     cm.ClusterMap.NameFor(clusterID),
				}
				// Allocation is derived from the first element of each
				// request/usage series (see getContainerAllocation).
				var cpuReq, cpuUse *util.Vector
				if len(costs.CPUReq) > 0 {
					cpuReq = costs.CPUReq[0]
				}
				if len(costs.CPUUsed) > 0 {
					cpuUse = costs.CPUUsed[0]
				}
				costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
				var ramReq, ramUse *util.Vector
				if len(costs.RAMReq) > 0 {
					ramReq = costs.RAMReq[0]
				}
				if len(costs.RAMUsed) > 0 {
					ramUse = costs.RAMUsed[0]
				}
				costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
				containerNameCost[newKey] = costs
			}
		} else {
			// The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
			log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
			c, err := NewContainerMetricFromKey(key)
			if err != nil {
				return nil, err
			}
			// CPU and RAM requests are obtained from the Kubernetes API.
			// If this case has been reached, the Kubernetes API will not
			// have information about the pod because it no longer exists.
			//
			// The case where this matters is minimal, mainly in environments
			// with very short-lived pods that over-request resources.
			RAMReqV := []*util.Vector{{}}
			CPUReqV := []*util.Vector{{}}
			GPUReqV := []*util.Vector{{}}
			RAMUsedV, ok := RAMUsedMap[key]
			if !ok {
				log.Debug("no RAM usage for " + key)
				RAMUsedV = []*util.Vector{{}}
			}
			CPUUsedV, ok := CPUUsedMap[key]
			if !ok {
				log.Debug("no CPU usage for " + key)
				CPUUsedV = []*util.Vector{{}}
			}
			// The node may also be gone; stage a placeholder in missingNodes
			// to be backfilled by findDeletedNodeInfo after this loop.
			node, ok := nodes[c.NodeName]
			if !ok {
				log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
				if n, ok := missingNodes[c.NodeName]; ok {
					node = n
				} else {
					node = &costAnalyzerCloud.Node{}
					missingNodes[c.NodeName] = node
				}
			}
			namespacelabels := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
			namespaceAnnotations := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]
			costs := &CostData{
				Name:            c.ContainerName,
				PodName:         c.PodName,
				NodeName:        c.NodeName,
				NodeData:        node,
				Namespace:       c.Namespace,
				RAMReq:          RAMReqV,
				RAMUsed:         RAMUsedV,
				CPUReq:          CPUReqV,
				CPUUsed:         CPUUsedV,
				GPUReq:          GPUReqV,
				Annotations:     namespaceAnnotations,
				NamespaceLabels: namespacelabels,
				ClusterID:       c.ClusterID,
				ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
			}
			var cpuReq, cpuUse *util.Vector
			if len(costs.CPUReq) > 0 {
				cpuReq = costs.CPUReq[0]
			}
			if len(costs.CPUUsed) > 0 {
				cpuUse = costs.CPUUsed[0]
			}
			costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")
			var ramReq, ramUse *util.Vector
			if len(costs.RAMReq) > 0 {
				ramReq = costs.RAMReq[0]
			}
			if len(costs.RAMUsed) > 0 {
				ramUse = costs.RAMUsed[0]
			}
			costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")
			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}

	// Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
	// to pass along the cost data
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		containerNameCost[k] = costs
	}

	// Backfill data for deleted nodes/pods; failures are logged, and the
	// last error (if any) is returned alongside the computed data.
	err = findDeletedNodeInfo(cm.DataSource, missingNodes, start, end)
	if err != nil {
		log.Errorf("Error fetching historical node data: %s", err.Error())
	}
	err = findDeletedPodInfo(cm.DataSource, missingContainers, start, end)
	if err != nil {
		log.Errorf("Error fetching historical pod data: %s", err.Error())
	}
	return containerNameCost, err
}
  497. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  498. costs := make(map[string]*CostData)
  499. if len(unmountedPVs) == 0 {
  500. return costs
  501. }
  502. for k, pv := range unmountedPVs {
  503. keyParts := strings.Split(k, ",")
  504. if len(keyParts) != 3 {
  505. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  506. continue
  507. }
  508. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  509. namespacelabels := namespaceLabelsMapping[ns+","+clusterID]
  510. namespaceAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  511. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  512. key := metric.Key()
  513. if costData, ok := costs[key]; !ok {
  514. costs[key] = &CostData{
  515. Name: unmountedPVsContainer,
  516. PodName: unmountedPVsContainer,
  517. NodeName: "",
  518. Annotations: namespaceAnnotations,
  519. Namespace: ns,
  520. NamespaceLabels: namespacelabels,
  521. Labels: namespacelabels,
  522. ClusterID: clusterID,
  523. ClusterName: clusterMap.NameFor(clusterID),
  524. PVCData: pv,
  525. }
  526. } else {
  527. costData.PVCData = append(costData.PVCData, pv...)
  528. }
  529. }
  530. return costs
  531. }
  532. func findDeletedPodInfo(dataSource source.OpenCostDataSource, missingContainers map[string]*CostData, start, end time.Time) error {
  533. if len(missingContainers) > 0 {
  534. mq := dataSource.Metrics()
  535. podLabelsResCh := mq.QueryPodLabels(start, end)
  536. podLabelsResult, err := podLabelsResCh.Await()
  537. if err != nil {
  538. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  539. }
  540. podLabels := make(map[string]map[string]string)
  541. if podLabelsResult != nil {
  542. podLabels, err = parsePodLabels(podLabelsResult)
  543. if err != nil {
  544. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  545. }
  546. }
  547. for key, costData := range missingContainers {
  548. cm, _ := NewContainerMetricFromKey(key)
  549. labels, ok := podLabels[cm.PodName]
  550. if !ok {
  551. labels = make(map[string]string)
  552. }
  553. for k, v := range costData.NamespaceLabels {
  554. labels[k] = v
  555. }
  556. costData.Labels = labels
  557. }
  558. }
  559. return nil
  560. }
  561. func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[string]*costAnalyzerCloud.Node, start, end time.Time) error {
  562. if len(missingNodes) > 0 {
  563. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  564. grp := source.NewQueryGroup()
  565. mq := dataSource.Metrics()
  566. cpuCostResCh := source.WithGroup(grp, mq.QueryNodeCPUPricePerHr(start, end))
  567. ramCostResCh := source.WithGroup(grp, mq.QueryNodeRAMPricePerGiBHr(start, end))
  568. gpuCostResCh := source.WithGroup(grp, mq.QueryNodeGPUPricePerHr(start, end))
  569. cpuCostRes, _ := cpuCostResCh.Await()
  570. ramCostRes, _ := ramCostResCh.Await()
  571. gpuCostRes, _ := gpuCostResCh.Await()
  572. if grp.HasErrors() {
  573. return grp.Error()
  574. }
  575. cpuCosts, err := getCost(cpuCostRes, cpuCostNode, cpuCostData)
  576. if err != nil {
  577. return err
  578. }
  579. ramCosts, err := getCost(ramCostRes, ramCostNode, ramCostData)
  580. if err != nil {
  581. return err
  582. }
  583. gpuCosts, err := getCost(gpuCostRes, gpuCostNode, gpuCostData)
  584. if err != nil {
  585. return err
  586. }
  587. if len(cpuCosts) == 0 {
  588. log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  589. }
  590. for node, costv := range cpuCosts {
  591. if _, ok := missingNodes[node]; ok {
  592. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  593. } else {
  594. log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
  595. }
  596. }
  597. for node, costv := range ramCosts {
  598. if _, ok := missingNodes[node]; ok {
  599. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  600. }
  601. }
  602. for node, costv := range gpuCosts {
  603. if _, ok := missingNodes[node]; ok {
  604. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  605. }
  606. }
  607. }
  608. return nil
  609. }
  610. // getContainerAllocation takes the max between request and usage. This function
  611. // returns a slice containing a single element describing the container's
  612. // allocation.
  613. //
  614. // Additionally, the timestamp of the allocation will be the highest value
  615. // timestamp between the two vectors. This mitigates situations where
  616. // Timestamp=0. This should have no effect on the metrics emitted by the
  617. // CostModelMetricsEmitter
  618. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  619. var result []*util.Vector
  620. if req != nil && used != nil {
  621. x1 := req.Value
  622. if math.IsNaN(x1) {
  623. log.Debugf("NaN value found during %s allocation calculation for requests.", allocationType)
  624. x1 = 0.0
  625. }
  626. y1 := used.Value
  627. if math.IsNaN(y1) {
  628. log.Debugf("NaN value found during %s allocation calculation for used.", allocationType)
  629. y1 = 0.0
  630. }
  631. result = []*util.Vector{
  632. {
  633. Value: math.Max(x1, y1),
  634. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  635. },
  636. }
  637. if result[0].Value == 0 && result[0].Timestamp == 0 {
  638. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  639. }
  640. } else if req != nil {
  641. result = []*util.Vector{
  642. {
  643. Value: req.Value,
  644. Timestamp: req.Timestamp,
  645. },
  646. }
  647. } else if used != nil {
  648. result = []*util.Vector{
  649. {
  650. Value: used.Value,
  651. Timestamp: used.Timestamp,
  652. },
  653. }
  654. } else {
  655. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  656. result = []*util.Vector{
  657. {
  658. Value: 0,
  659. Timestamp: float64(time.Now().UTC().Unix()),
  660. },
  661. }
  662. }
  663. return result
  664. }
  665. func (cm *CostModel) addPVData(pvClaimMapping map[string]*PersistentVolumeClaimData) error {
  666. cache := cm.Cache
  667. cloud := cm.Provider
  668. cfg, err := cloud.GetConfig()
  669. if err != nil {
  670. return err
  671. }
  672. // Pull a region from the first node
  673. var defaultRegion string
  674. nodeList := cache.GetAllNodes()
  675. if len(nodeList) > 0 {
  676. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  677. }
  678. storageClasses := cache.GetAllStorageClasses()
  679. storageClassMap := make(map[string]map[string]string)
  680. for _, storageClass := range storageClasses {
  681. params := storageClass.Parameters
  682. storageClassMap[storageClass.Name] = params
  683. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  684. storageClassMap["default"] = params
  685. storageClassMap[""] = params
  686. }
  687. }
  688. pvs := cache.GetAllPersistentVolumes()
  689. pvMap := make(map[string]*costAnalyzerCloud.PV)
  690. for _, pv := range pvs {
  691. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  692. if !ok {
  693. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  694. }
  695. var region string
  696. if r, ok := util.GetRegion(pv.Labels); ok {
  697. region = r
  698. } else {
  699. region = defaultRegion
  700. }
  701. cacPv := &costAnalyzerCloud.PV{
  702. Class: pv.Spec.StorageClassName,
  703. Region: region,
  704. Parameters: parameters,
  705. }
  706. err := cm.GetPVCost(cacPv, pv, region)
  707. if err != nil {
  708. return err
  709. }
  710. pvMap[pv.Name] = cacPv
  711. }
  712. for _, pvc := range pvClaimMapping {
  713. if vol, ok := pvMap[pvc.VolumeName]; ok {
  714. pvc.Volume = vol
  715. } else {
  716. log.Debugf("PV not found, using default")
  717. pvc.Volume = &costAnalyzerCloud.PV{
  718. Cost: cfg.Storage,
  719. }
  720. }
  721. }
  722. return nil
  723. }
  724. func (cm *CostModel) GetPVCost(pv *costAnalyzerCloud.PV, kpv *clustercache.PersistentVolume, defaultRegion string) error {
  725. cp := cm.Provider
  726. cfg, err := cp.GetConfig()
  727. if err != nil {
  728. return err
  729. }
  730. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  731. pv.ProviderID = key.ID()
  732. pvWithCost, err := cp.PVPricing(key)
  733. if err != nil {
  734. pv.Cost = cfg.Storage
  735. return err
  736. }
  737. if pvWithCost == nil || pvWithCost.Cost == "" {
  738. pv.Cost = cfg.Storage
  739. return nil // set default cost
  740. }
  741. pv.Cost = pvWithCost.Cost
  742. return nil
  743. }
  744. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  745. if cm.pricingMetadata != nil {
  746. return cm.pricingMetadata, nil
  747. } else {
  748. return nil, fmt.Errorf("Node costs not yet calculated")
  749. }
  750. }
// GetNodeCost builds a cost record for every node in the cluster cache.
// Base pricing comes from the cloud provider's NodePricing API; fields are
// then filled in (instance type, region, arch, CPU/RAM/GPU capacity) from
// the node's labels and k8s capacity, and three fallback branches distribute
// the node price across CPU/RAM/GPU when the provider left costs blank.
// It also records pricing-match metadata (served by GetPricingSourceCounts)
// and applies reserved-instance pricing before returning.
func (cm *CostModel) GetNodeCost() (map[string]*costAnalyzerCloud.Node, error) {
	cp := cm.Provider
	cfg, err := cp.GetConfig()
	if err != nil {
		return nil, err
	}
	nodeList := cm.Cache.GetAllNodes()
	nodes := make(map[string]*costAnalyzerCloud.Node)
	pmd := &costAnalyzerCloud.PricingMatchMetadata{
		TotalNodes:        0,
		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
	}
	for _, n := range nodeList {
		name := n.Name
		nodeLabels := n.Labels
		// The provider key builder reads the provider ID out of the label set.
		nodeLabels["providerID"] = n.SpecProviderID
		pmd.TotalNodes++
		cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
		if err != nil {
			log.Infof("Error getting node pricing. Error: %s", err.Error())
			// A partial result is still usable as-is; otherwise fall back to
			// the configured default CPU/RAM prices.
			if cnode != nil {
				nodes[name] = cnode
				continue
			} else {
				cnode = &costAnalyzerCloud.Node{
					VCPUCost: cfg.CPU,
					RAMCost:  cfg.RAM,
				}
			}
		}
		pmd.PricingTypeCounts[cnode.PricingType]++
		// newCnode builds upon cnode but populates/overrides certain fields.
		// cnode was populated leveraging cloud provider public pricing APIs.
		newCnode := *cnode
		if newCnode.InstanceType == "" {
			it, _ := util.GetInstanceType(n.Labels)
			newCnode.InstanceType = it
		}
		if newCnode.Region == "" {
			region, _ := util.GetRegion(n.Labels)
			newCnode.Region = region
		}
		if newCnode.ArchType == "" {
			arch, _ := util.GetArchType(n.Labels)
			newCnode.ArchType = arch
		}
		newCnode.ProviderID = n.SpecProviderID
		// CPU core count: from provider pricing if present, else k8s capacity.
		var cpu float64
		if newCnode.VCPU == "" {
			cpu = float64(n.Status.Capacity.Cpu().Value())
			newCnode.VCPU = n.Status.Capacity.Cpu().String()
		} else {
			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
			if err != nil {
				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
			}
		}
		if math.IsNaN(cpu) {
			log.Warnf("cpu parsed as NaN. Setting to 0.")
			cpu = 0
		}
		// RAM: the display string may come from pricing, but the byte count
		// used for math always comes from k8s capacity.
		var ram float64
		if newCnode.RAM == "" {
			newCnode.RAM = n.Status.Capacity.Memory().String()
		}
		ram = float64(n.Status.Capacity.Memory().Value())
		if math.IsNaN(ram) {
			log.Warnf("ram parsed as NaN. Setting to 0.")
			ram = 0
		}
		newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		// GPU count from provider pricing; unparseable/empty means zero.
		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
		if err != nil {
			gpuc = 0.0
		}
		// The k8s API will often report more accurate results for GPU count
		// than cloud provider public pricing APIs. If found, override the
		// original value.
		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
		if err != nil {
			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
		}
		if gpuOverride > 0 {
			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
			gpuc = gpuOverride
		}
		if vgpuOverride > 0 {
			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
		}
		// Special case for SUSE rancher, since it won't behave with normal
		// calculations, courtesy of the instance type not being "real" (a
		// recognizable AWS instance type.)
		if newCnode.InstanceType == "rke2" {
			log.Infof(
				"Found a SUSE Rancher node %s, defaulting and skipping math",
				cp.GetKey(nodeLabels, n).Features(),
			)
			defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPUCorePrice = 0
			}
			if math.IsNaN(defaultCPUCorePrice) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPUCorePrice = 0
			}
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			var gpuPrice float64
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
				gpuPrice = 0
			} else if len(gpuPricing) > 0 {
				gpuPrice, err = strconv.ParseFloat(gpuPricing, 64)
				if err != nil {
					log.Errorf("Could not parse custom GPU pricing: %s", err)
					gpuPrice = 0
				} else if math.IsNaN(gpuPrice) {
					log.Warnf("Custom GPU pricing parsed as NaN. Setting to 0.")
					gpuPrice = 0
				} else {
					log.Infof("Using custom GPU pricing for node \"%s\": %f", name, gpuPrice)
				}
			} else {
				// No label-based price: fall back to the configured default.
				gpuPrice, err = strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					gpuPrice = 0
				}
				if math.IsNaN(gpuPrice) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					gpuPrice = 0
				}
			}
			defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAMPrice = 0
			}
			if math.IsNaN(defaultRAMPrice) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAMPrice = 0
			}
			defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPUPrice = 0
			}
			if math.IsNaN(defaultGPUPrice) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPUPrice = 0
			}
			// Just say no to doing the ratios! Price each resource directly
			// from the configured defaults. NOTE(review): ramCost multiplies
			// a per-unit price by raw bytes (no GB conversion here) — confirm
			// cfg.RAM's expected unit against the config documentation.
			cpuCost := defaultCPUCorePrice * cpu
			gpuCost := gpuPrice * gpuc
			ramCost := defaultRAMPrice * ram
			nodeCost := cpuCost + gpuCost + ramCost
			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
			// We reach this when a GPU is detected on a node, but no cost for
			// the GPU is defined in the OnDemand pricing. Calculate ratios of
			// CPU to RAM and GPU to RAM costs, then distribute the total node
			// cost among the CPU, RAM, and GPU. (The rke2 branch above exists
			// precisely because this ratio math misbehaved on SUSE Rancher's
			// synthetic instance types.)
			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
			} else if len(gpuPricing) > 0 {
				newCnode.GPUCost = gpuPricing
				log.Infof("Using custom GPU pricing for node \"%s\": %s", name, gpuPricing)
			}
			// Only run the ratio distribution if no custom price was found.
			if newCnode.GPUCost == "" {
				defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
				if err != nil {
					log.Errorf("Could not parse default cpu price")
					defaultCPU = 0
				}
				if math.IsNaN(defaultCPU) {
					log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
					defaultCPU = 0
				}
				defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
				if err != nil {
					log.Errorf("Could not parse default ram price")
					defaultRAM = 0
				}
				if math.IsNaN(defaultRAM) {
					log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
					defaultRAM = 0
				}
				defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					defaultGPU = 0
				}
				if math.IsNaN(defaultGPU) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					defaultGPU = 0
				}
				// Ratios express how much costlier a core/GPU is than a GB of
				// RAM; 0/0 yields NaN, hence the fallback constants.
				cpuToRAMRatio := defaultCPU / defaultRAM
				if math.IsNaN(cpuToRAMRatio) {
					log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
					cpuToRAMRatio = 10
				}
				gpuToRAMRatio := defaultGPU / defaultRAM
				if math.IsNaN(gpuToRAMRatio) {
					log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
					gpuToRAMRatio = 100
				}
				ramGB := ram / 1024 / 1024 / 1024
				if math.IsNaN(ramGB) {
					log.Warnf("ramGB is NaN. Setting to 0.")
					ramGB = 0
				}
				// ramMultiple is the node's capacity expressed in "GB of RAM
				// equivalents"; dividing nodePrice by it yields a per-GB price.
				ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
				if math.IsNaN(ramMultiple) {
					log.Warnf("ramMultiple is NaN. Setting to 0.")
					ramMultiple = 0
				}
				var nodePrice float64
				if newCnode.Cost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
					if err != nil {
						log.Errorf("Could not parse total node price")
						return nil, err
					}
				} else if newCnode.VCPUCost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
					if err != nil {
						log.Errorf("Could not parse node vcpu price")
						return nil, err
					}
				} else { // add case to use default pricing model when API data fails.
					log.Debugf("No node price or CPUprice found, falling back to default")
					nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
				}
				if math.IsNaN(nodePrice) {
					log.Warnf("nodePrice parsed as NaN. Setting to 0.")
					nodePrice = 0
				}
				ramPrice := (nodePrice / ramMultiple)
				if math.IsNaN(ramPrice) {
					log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
					ramPrice = 0
				}
				cpuPrice := ramPrice * cpuToRAMRatio
				gpuPrice := ramPrice * gpuToRAMRatio
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
				newCnode.RAMBytes = fmt.Sprintf("%f", ram)
				newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			}
		} else if newCnode.RAMCost == "" {
			// We reach this when no RAM cost is defined in the OnDemand
			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
			// distrubte the total node cost among CPU and RAM costs.
			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}
  1108. // TODO: drop some logs
  1109. func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1110. // for fetching prices from cloud provider
  1111. // cfg, err := cp.GetConfig()
  1112. // if err != nil {
  1113. // return nil, err
  1114. // }
  1115. cp := cm.Provider
  1116. servicesList := cm.Cache.GetAllServices()
  1117. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1118. for _, service := range servicesList {
  1119. namespace := service.Namespace
  1120. name := service.Name
  1121. key := serviceKey{
  1122. Cluster: env.GetClusterID(),
  1123. Namespace: namespace,
  1124. Service: name,
  1125. }
  1126. if service.Type == "LoadBalancer" {
  1127. loadBalancer, err := cp.LoadBalancerPricing()
  1128. if err != nil {
  1129. return nil, err
  1130. }
  1131. newLoadBalancer := *loadBalancer
  1132. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1133. address := loadBalancerIngress.IP
  1134. // Some cloud providers use hostname rather than IP
  1135. if address == "" {
  1136. address = loadBalancerIngress.Hostname
  1137. }
  1138. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1139. }
  1140. loadBalancerMap[key] = &newLoadBalancer
  1141. }
  1142. }
  1143. return loadBalancerMap, nil
  1144. }
  1145. func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1146. servicesList := cache.GetAllServices()
  1147. podServicesMapping := make(map[string]map[string][]string)
  1148. for _, service := range servicesList {
  1149. namespace := service.Namespace
  1150. name := service.Name
  1151. key := namespace + "," + clusterID
  1152. if _, ok := podServicesMapping[key]; !ok {
  1153. podServicesMapping[key] = make(map[string][]string)
  1154. }
  1155. s := labels.Nothing()
  1156. if len(service.SpecSelector) > 0 {
  1157. s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
  1158. }
  1159. for _, pod := range podList {
  1160. labelSet := labels.Set(pod.Labels)
  1161. if s.Matches(labelSet) && pod.Namespace == namespace {
  1162. services, ok := podServicesMapping[key][pod.Name]
  1163. if ok {
  1164. podServicesMapping[key][pod.Name] = append(services, name)
  1165. } else {
  1166. podServicesMapping[key][pod.Name] = []string{name}
  1167. }
  1168. }
  1169. }
  1170. }
  1171. return podServicesMapping, nil
  1172. }
  1173. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1174. ssList := cache.GetAllStatefulSets()
  1175. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1176. for _, ss := range ssList {
  1177. namespace := ss.Namespace
  1178. name := ss.Name
  1179. key := namespace + "," + clusterID
  1180. if _, ok := podSSMapping[key]; !ok {
  1181. podSSMapping[key] = make(map[string][]string)
  1182. }
  1183. s, err := metav1.LabelSelectorAsSelector(ss.SpecSelector)
  1184. if err != nil {
  1185. log.Errorf("Error doing deployment label conversion: %s", err.Error())
  1186. }
  1187. for _, pod := range podList {
  1188. labelSet := labels.Set(pod.Labels)
  1189. if s.Matches(labelSet) && pod.Namespace == namespace {
  1190. sss, ok := podSSMapping[key][pod.Name]
  1191. if ok {
  1192. podSSMapping[key][pod.Name] = append(sss, name)
  1193. } else {
  1194. podSSMapping[key][pod.Name] = []string{name}
  1195. }
  1196. }
  1197. }
  1198. }
  1199. return podSSMapping, nil
  1200. }
  1201. func getPodDeployments(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1202. deploymentsList := cache.GetAllDeployments()
  1203. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1204. for _, deployment := range deploymentsList {
  1205. namespace := deployment.Namespace
  1206. name := deployment.Name
  1207. key := namespace + "," + clusterID
  1208. if _, ok := podDeploymentsMapping[key]; !ok {
  1209. podDeploymentsMapping[key] = make(map[string][]string)
  1210. }
  1211. s, err := metav1.LabelSelectorAsSelector(deployment.SpecSelector)
  1212. if err != nil {
  1213. log.Errorf("Error doing deployment label conversion: %s", err)
  1214. }
  1215. for _, pod := range podList {
  1216. labelSet := labels.Set(pod.Labels)
  1217. if s.Matches(labelSet) && pod.Namespace == namespace {
  1218. deployments, ok := podDeploymentsMapping[key][pod.Name]
  1219. if ok {
  1220. podDeploymentsMapping[key][pod.Name] = append(deployments, name)
  1221. } else {
  1222. podDeploymentsMapping[key][pod.Name] = []string{name}
  1223. }
  1224. }
  1225. }
  1226. }
  1227. return podDeploymentsMapping, nil
  1228. }
  1229. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1230. nsToLabels := make(map[string]map[string]string)
  1231. nss := cache.GetAllNamespaces()
  1232. for _, ns := range nss {
  1233. labels := make(map[string]string)
  1234. for k, v := range ns.Labels {
  1235. labels[promutil.SanitizeLabelName(k)] = v
  1236. }
  1237. nsToLabels[ns.Name+","+clusterID] = labels
  1238. }
  1239. return nsToLabels, nil
  1240. }
  1241. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1242. nsToAnnotations := make(map[string]map[string]string)
  1243. nss := cache.GetAllNamespaces()
  1244. for _, ns := range nss {
  1245. annotations := make(map[string]string)
  1246. for k, v := range ns.Annotations {
  1247. annotations[promutil.SanitizeLabelName(k)] = v
  1248. }
  1249. nsToAnnotations[ns.Name+","+clusterID] = annotations
  1250. }
  1251. return nsToAnnotations, nil
  1252. }
  1253. func getDaemonsetsOfPod(pod clustercache.Pod) []string {
  1254. for _, ownerReference := range pod.OwnerReferences {
  1255. if ownerReference.Kind == "DaemonSet" {
  1256. return []string{ownerReference.Name}
  1257. }
  1258. }
  1259. return []string{}
  1260. }
  1261. func getJobsOfPod(pod clustercache.Pod) []string {
  1262. for _, ownerReference := range pod.OwnerReferences {
  1263. if ownerReference.Kind == "Job" {
  1264. return []string{ownerReference.Name}
  1265. }
  1266. }
  1267. return []string{}
  1268. }
  1269. func getStatefulSetsOfPod(pod clustercache.Pod) []string {
  1270. for _, ownerReference := range pod.OwnerReferences {
  1271. if ownerReference.Kind == "StatefulSet" {
  1272. return []string{ownerReference.Name}
  1273. }
  1274. }
  1275. return []string{}
  1276. }
// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
// the number of GPUs and vGPUs are equipped on the node. If unable to identify
// a GPU count, it will return -1.
//
// Returns (physical GPU count, virtual GPU count, error). Three cases are
// handled in order: plain NVIDIA GPUs, NVIDIA time-slicing via GPU Feature
// Discovery (GFD), and AWS vGPUs; (-1, -1) means no GPU was identified.
func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
	// The presence of the replicas label signals GFD time-slicing (Case 2).
	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
	// Case 1: Standard NVIDIA GPU
	// Without time-slicing, physical and virtual counts are the same value.
	if hasGpu && g.Value() != 0 && !hasReplicas {
		return float64(g.Value()), float64(g.Value()), nil
	}
	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
	if hasReplicas {
		resultGPU := 0.0
		resultVGPU := 0.0
		// Physical count comes from the GFD count label (0 if absent).
		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
			var err error
			resultGPU, err = strconv.ParseFloat(c, 64)
			if err != nil {
				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
			}
		}
		// Virtual count: the capacity resource name depends on how GFD was
		// configured; fall back to the physical count if neither is present.
		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
			resultVGPU = float64(s.Value())
		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
			resultVGPU = float64(g.Value())
		} else {
			resultVGPU = resultGPU
		}
		return resultGPU, resultVGPU, nil
	}
	// Case 3: AWS vGPU
	// Physical count is derived by dividing the advertised vGPU capacity by
	// the DaemonSet-configured vGPUs-per-GPU coefficient (default 10).
	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
		vgpuCount, err := getAllocatableVGPUs(cache)
		if err != nil {
			return -1, -1, err
		}
		vgpuCoeff := 10.0
		if vgpuCount > 0.0 {
			vgpuCoeff = vgpuCount
		}
		if vgpu.Value() != 0 {
			resultGPU := float64(vgpu.Value()) / vgpuCoeff
			resultVGPU := float64(vgpu.Value())
			return resultGPU, resultVGPU, nil
		}
	}
	// No GPU found
	return -1, -1, nil
}
  1329. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  1330. daemonsets := cache.GetAllDaemonSets()
  1331. vgpuCount := 0.0
  1332. for _, ds := range daemonsets {
  1333. dsContainerList := &ds.SpecContainers
  1334. for _, ctnr := range *dsContainerList {
  1335. if ctnr.Args != nil {
  1336. for _, arg := range ctnr.Args {
  1337. if strings.Contains(arg, "--vgpu=") {
  1338. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  1339. if err != nil {
  1340. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  1341. continue
  1342. }
  1343. vgpuCount = vgpus
  1344. return vgpuCount, nil
  1345. }
  1346. }
  1347. }
  1348. }
  1349. }
  1350. return vgpuCount, nil
  1351. }
// PersistentVolumeClaimData pairs a PersistentVolumeClaim with its priced
// volume and usage vectors, as assembled by addPVData and related code.
type PersistentVolumeClaimData struct {
	Class        string                 `json:"class"`     // storage class name
	Claim        string                 `json:"claim"`     // PVC name
	Namespace    string                 `json:"namespace"` // PVC namespace
	ClusterID    string                 `json:"clusterId"`
	TimesClaimed int                    `json:"timesClaimed"` // number of pods mounting this claim — TODO confirm against caller
	VolumeName   string                 `json:"volumeName"`   // name of the bound PersistentVolume
	Volume       *costAnalyzerCloud.PV  `json:"persistentVolume"` // priced volume; default-cost stub when PV is unknown
	Values       []*util.Vector         `json:"values"`
}
  1362. func measureTime(start time.Time, threshold time.Duration, name string) {
  1363. elapsed := time.Since(start)
  1364. if elapsed > threshold {
  1365. log.Infof("[Profiler] %s: %s", elapsed, name)
  1366. }
  1367. }
  1368. func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool) (*opencost.AllocationSetRange, error) {
  1369. // Validate window is legal
  1370. if window.IsOpen() || window.IsNegative() {
  1371. return nil, fmt.Errorf("illegal window: %s", window)
  1372. }
  1373. var totalsStore opencost.TotalsStore
  1374. // Idle is required for proportional asset costs
  1375. if includeProportionalAssetResourceCosts {
  1376. if !includeIdle {
  1377. return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
  1378. }
  1379. totalsStore = opencost.NewMemoryTotalsStore()
  1380. }
  1381. // Begin with empty response
  1382. asr := opencost.NewAllocationSetRange()
  1383. // Query for AllocationSets in increments of the given step duration,
  1384. // appending each to the response.
  1385. stepStart := *window.Start()
  1386. stepEnd := stepStart.Add(step)
  1387. var isAKS bool
  1388. for window.End().After(stepStart) {
  1389. allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
  1390. if err != nil {
  1391. return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1392. }
  1393. if includeIdle {
  1394. assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
  1395. if err != nil {
  1396. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1397. }
  1398. if includeProportionalAssetResourceCosts {
  1399. // AKS is a special case - there can be a maximum of 2
  1400. // load balancers (1 public and 1 private) in an AKS cluster
  1401. // therefore, when calculating PARCs for load balancers,
  1402. // we must know if this is an AKS cluster
  1403. for _, node := range assetSet.Nodes {
  1404. if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
  1405. isAKS = true
  1406. break
  1407. }
  1408. }
  1409. _, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  1410. if err != nil {
  1411. log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
  1412. }
  1413. }
  1414. idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
  1415. if err != nil {
  1416. return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1417. }
  1418. for _, idleAlloc := range idleSet.Allocations {
  1419. allocSet.Insert(idleAlloc)
  1420. }
  1421. }
  1422. asr.Append(allocSet)
  1423. stepStart = stepEnd
  1424. stepEnd = stepStart.Add(step)
  1425. }
  1426. // Set aggregation options and aggregate
  1427. var shareIdleOpt string
  1428. if shareIdle {
  1429. shareIdleOpt = opencost.ShareWeighted
  1430. } else {
  1431. shareIdleOpt = opencost.ShareNone
  1432. }
  1433. opts := &opencost.AllocationAggregationOptions{
  1434. IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
  1435. IdleByNode: idleByNode,
  1436. IncludeAggregatedMetadata: includeAggregatedMetadata,
  1437. ShareIdle: shareIdleOpt,
  1438. }
  1439. // Aggregate
  1440. err := asr.AggregateBy(aggregate, opts)
  1441. if err != nil {
  1442. return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
  1443. }
  1444. // Accumulate, if requested
  1445. if accumulateBy != opencost.AccumulateOptionNone {
  1446. asr, err = asr.Accumulate(accumulateBy)
  1447. if err != nil {
  1448. log.Errorf("error accumulating by %v: %s", accumulateBy, err)
  1449. return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
  1450. }
  1451. // when accumulating and returning PARCs, we need the totals for the
  1452. // accumulated windows to accurately compute a fraction
  1453. if includeProportionalAssetResourceCosts {
  1454. assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
  1455. if err != nil {
  1456. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  1457. }
  1458. _, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  1459. if err != nil {
  1460. log.Errorf("ETL: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  1461. }
  1462. }
  1463. }
  1464. if includeProportionalAssetResourceCosts {
  1465. for _, as := range asr.Allocations {
  1466. totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
  1467. if !ok {
  1468. log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  1469. return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  1470. }
  1471. totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
  1472. if !ok {
  1473. log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  1474. return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  1475. }
  1476. var totalPublicLbCost, totalPrivateLbCost float64
  1477. if isAKS && sharedLoadBalancer {
  1478. // loop through all assetTotals, adding all load balancer costs by public and private
  1479. for _, tot := range totalStoreByNode {
  1480. if tot.PrivateLoadBalancer {
  1481. totalPrivateLbCost += tot.LoadBalancerCost
  1482. } else {
  1483. totalPublicLbCost += tot.LoadBalancerCost
  1484. }
  1485. }
  1486. }
  1487. // loop through each allocation set, using total cost from totals store
  1488. for _, alloc := range as.Allocations {
  1489. for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
  1490. key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
  1491. // for each parc , check the totals store for each
  1492. // on a totals hit, set the corresponding total and calculate percentage
  1493. var totals *opencost.AssetTotals
  1494. if totalsLoc, found := totalStoreByCluster[key]; found {
  1495. totals = totalsLoc
  1496. }
  1497. if totalsLoc, found := totalStoreByNode[key]; found {
  1498. totals = totalsLoc
  1499. }
  1500. if totals == nil {
  1501. log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
  1502. continue
  1503. }
  1504. parc.CPUTotalCost = totals.CPUCost
  1505. parc.GPUTotalCost = totals.GPUCost
  1506. parc.RAMTotalCost = totals.RAMCost
  1507. parc.PVTotalCost = totals.PersistentVolumeCost
  1508. if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
  1509. // Azure is a special case - use computed totals above
  1510. // use the lbAllocations in the object to determine if
  1511. // this PARC is a public or private load balancer
  1512. // then set the total accordingly
  1513. // AKS only has 1 public and 1 private load balancer
  1514. lbAlloc, found := alloc.LoadBalancers[key]
  1515. if found {
  1516. if lbAlloc.Private {
  1517. parc.LoadBalancerTotalCost = totalPrivateLbCost
  1518. } else {
  1519. parc.LoadBalancerTotalCost = totalPublicLbCost
  1520. }
  1521. }
  1522. } else {
  1523. parc.LoadBalancerTotalCost = totals.LoadBalancerCost
  1524. }
  1525. opencost.ComputePercentages(&parc)
  1526. alloc.ProportionalAssetResourceCosts[rawKey] = parc
  1527. }
  1528. }
  1529. }
  1530. }
  1531. return asr, nil
  1532. }
  1533. func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
  1534. if !allocSet.Window.Equal(assetSet.Window) {
  1535. return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
  1536. }
  1537. var allocTotals map[string]*opencost.AllocationTotals
  1538. var assetTotals map[string]*opencost.AssetTotals
  1539. if idleByNode {
  1540. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
  1541. assetTotals = opencost.ComputeAssetTotals(assetSet, true)
  1542. } else {
  1543. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
  1544. assetTotals = opencost.ComputeAssetTotals(assetSet, false)
  1545. }
  1546. start, end := *allocSet.Window.Start(), *allocSet.Window.End()
  1547. idleSet := opencost.NewAllocationSet(start, end)
  1548. for key, assetTotal := range assetTotals {
  1549. allocTotal, ok := allocTotals[key]
  1550. if !ok {
  1551. log.Warnf("ETL: did not find allocations for asset key: %s", key)
  1552. // Use a zero-value set of totals. This indicates either (1) an
  1553. // error computing totals, or (2) that no allocations ran on the
  1554. // given node for the given window.
  1555. allocTotal = &opencost.AllocationTotals{
  1556. Cluster: assetTotal.Cluster,
  1557. Node: assetTotal.Node,
  1558. Start: assetTotal.Start,
  1559. End: assetTotal.End,
  1560. }
  1561. }
  1562. // Insert one idle allocation for each key (whether by node or
  1563. // by cluster), defined as the difference between the total
  1564. // asset cost and the allocated cost per-resource.
  1565. name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
  1566. err := idleSet.Insert(&opencost.Allocation{
  1567. Name: name,
  1568. Window: idleSet.Window.Clone(),
  1569. Properties: &opencost.AllocationProperties{
  1570. Cluster: assetTotal.Cluster,
  1571. Node: assetTotal.Node,
  1572. ProviderID: assetTotal.Node,
  1573. },
  1574. Start: assetTotal.Start,
  1575. End: assetTotal.End,
  1576. CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
  1577. GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
  1578. RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
  1579. })
  1580. if err != nil {
  1581. return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
  1582. }
  1583. }
  1584. return idleSet, nil
  1585. }
// GetDataSource returns the OpenCost data source this CostModel reads from.
func (cm *CostModel) GetDataSource() source.OpenCostDataSource {
return cm.DataSource
}