costmodel.go 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803
  1. package costmodel
  2. import (
  3. "errors"
  4. "fmt"
  5. "maps"
  6. "math"
  7. "regexp"
  8. "strconv"
  9. "strings"
  10. "time"
  11. "github.com/opencost/opencost/core/pkg/clustercache"
  12. "github.com/opencost/opencost/core/pkg/clusters"
  13. coreenv "github.com/opencost/opencost/core/pkg/env"
  14. "github.com/opencost/opencost/core/pkg/log"
  15. "github.com/opencost/opencost/core/pkg/opencost"
  16. "github.com/opencost/opencost/core/pkg/source"
  17. "github.com/opencost/opencost/core/pkg/util"
  18. "github.com/opencost/opencost/core/pkg/util/promutil"
  19. costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
  20. v1 "k8s.io/api/core/v1"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/labels"
  23. "golang.org/x/sync/singleflight"
  24. )
const (
	// profileThreshold is the duration threshold, in nanoseconds, above
	// which measureTime reports an operation as slow.
	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)

	// unmountedPVsContainer is the synthetic container (and pod) name used
	// to attribute the cost of PersistentVolumes not mounted by any pod.
	unmountedPVsContainer = "unmounted-pvs"
)

// isCron matches a CronJob name and captures the non-timestamp name
//
// We support either a 10 character timestamp OR an 8 character timestamp
// because batch/v1beta1 CronJobs creates Jobs with 10 character timestamps
// and batch/v1 CronJobs create Jobs with 8 character timestamps.
var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
// CostModel bundles the dependencies required to compute per-container
// cost data: Kubernetes cluster state, cluster naming, a metrics data
// source, and a cloud provider for pricing.
type CostModel struct {
	// Cache provides cached Kubernetes API objects (pods, nodes, PVs, ...).
	Cache clustercache.ClusterCache
	// ClusterMap resolves cluster IDs to human-readable cluster names.
	ClusterMap clusters.ClusterMap
	// BatchDuration controls how queries are batched by consumers of the model.
	BatchDuration time.Duration
	// RequestGroup deduplicates concurrent identical requests prior to caching.
	RequestGroup *singleflight.Group
	// DataSource is the metrics backend (e.g. Prometheus) queried for usage data.
	DataSource source.OpenCostDataSource
	// Provider supplies cloud pricing information.
	Provider costAnalyzerCloud.Provider
	// pricingMetadata records the outcome of pricing matches; internal use only.
	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
}
  44. func NewCostModel(
  45. dataSource source.OpenCostDataSource,
  46. provider costAnalyzerCloud.Provider,
  47. cache clustercache.ClusterCache,
  48. clusterMap clusters.ClusterMap,
  49. batchDuration time.Duration,
  50. ) *CostModel {
  51. // request grouping to prevent over-requesting the same data prior to caching
  52. requestGroup := new(singleflight.Group)
  53. return &CostModel{
  54. Cache: cache,
  55. ClusterMap: clusterMap,
  56. BatchDuration: batchDuration,
  57. DataSource: dataSource,
  58. Provider: provider,
  59. RequestGroup: requestGroup,
  60. }
  61. }
// CostData holds the cost and usage information for a single container,
// together with the Kubernetes metadata (owning controllers, services,
// labels, annotations) needed to attribute that cost.
type CostData struct {
	Name      string                  `json:"name,omitempty"`      // container name
	PodName   string                  `json:"podName,omitempty"`   // owning pod
	NodeName  string                  `json:"nodeName,omitempty"`  // node the pod is scheduled on
	NodeData  *costAnalyzerCloud.Node `json:"node,omitempty"`      // pricing data for that node
	Namespace string                  `json:"namespace,omitempty"`

	// Owning workloads; populated from the Kubernetes API where available.
	Deployments  []string `json:"deployments,omitempty"`
	Services     []string `json:"services,omitempty"`
	Daemonsets   []string `json:"daemonsets,omitempty"`
	Statefulsets []string `json:"statefulsets,omitempty"`
	Jobs         []string `json:"jobs,omitempty"`

	// Request/usage/allocation vectors. Allocation is derived as the max of
	// request and usage (see getContainerAllocation).
	RAMReq        []*util.Vector `json:"ramreq,omitempty"`
	RAMUsed       []*util.Vector `json:"ramused,omitempty"`
	RAMAllocation []*util.Vector `json:"ramallocated,omitempty"`
	CPUReq        []*util.Vector `json:"cpureq,omitempty"`
	CPUUsed       []*util.Vector `json:"cpuused,omitempty"`
	CPUAllocation []*util.Vector `json:"cpuallocated,omitempty"`
	GPUReq        []*util.Vector `json:"gpureq,omitempty"`

	// PVCData is assigned to the first container of a pod only, to avoid
	// double-counting claims; NetworkData likewise.
	PVCData     []*PersistentVolumeClaimData `json:"pvcData,omitempty"`
	NetworkData []*util.Vector               `json:"network,omitempty"`

	Annotations     map[string]string `json:"annotations,omitempty"`
	Labels          map[string]string `json:"labels,omitempty"`
	NamespaceLabels map[string]string `json:"namespaceLabels,omitempty"`
	ClusterID       string            `json:"clusterId"`
	ClusterName     string            `json:"clusterName"`
}
  88. func (cd *CostData) String() string {
  89. return fmt.Sprintf("\n\tName: %s; PodName: %s, NodeName: %s\n\tNamespace: %s\n\tDeployments: %s\n\tServices: %s\n\tCPU (req, used, alloc): %d, %d, %d\n\tRAM (req, used, alloc): %d, %d, %d",
  90. cd.Name, cd.PodName, cd.NodeName, cd.Namespace, strings.Join(cd.Deployments, ", "), strings.Join(cd.Services, ", "),
  91. len(cd.CPUReq), len(cd.CPUUsed), len(cd.CPUAllocation),
  92. len(cd.RAMReq), len(cd.RAMUsed), len(cd.RAMAllocation))
  93. }
  94. func (cd *CostData) GetController() (name string, kind string, hasController bool) {
  95. hasController = false
  96. if len(cd.Deployments) > 0 {
  97. name = cd.Deployments[0]
  98. kind = "deployment"
  99. hasController = true
  100. } else if len(cd.Statefulsets) > 0 {
  101. name = cd.Statefulsets[0]
  102. kind = "statefulset"
  103. hasController = true
  104. } else if len(cd.Daemonsets) > 0 {
  105. name = cd.Daemonsets[0]
  106. kind = "daemonset"
  107. hasController = true
  108. } else if len(cd.Jobs) > 0 {
  109. name = cd.Jobs[0]
  110. kind = "job"
  111. hasController = true
  112. match := isCron.FindStringSubmatch(name)
  113. if match != nil {
  114. name = match[1]
  115. }
  116. }
  117. return name, kind, hasController
  118. }
// ComputeCostData builds a map of container key -> *CostData for the window
// [start, end], combining usage metrics from the data source with Kubernetes
// API state (pods, services, namespaces, PVs) and provider pricing. Metric
// query errors are logged and tolerated so the model can run as a pure
// exporter; Kubernetes API errors abort the computation.
func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
	// Cluster ID is specific to the source cluster
	clusterID := coreenv.GetClusterID()
	cp := cm.Provider
	ds := cm.DataSource
	mq := ds.Metrics()

	// Kick off all metric queries concurrently; results are awaited below.
	grp := source.NewQueryGroup()
	resChRAMUsage := source.WithGroup(grp, mq.QueryRAMUsageAvg(start, end))
	resChCPUUsage := source.WithGroup(grp, mq.QueryCPUUsageAvg(start, end))
	resChNetZoneRequests := source.WithGroup(grp, mq.QueryNetZoneGiB(start, end))
	resChNetRegionRequests := source.WithGroup(grp, mq.QueryNetRegionGiB(start, end))
	resChNetInternetRequests := source.WithGroup(grp, mq.QueryNetInternetGiB(start, end))

	// Pull pod information from k8s API
	podlist := cm.Cache.GetAllPods()
	podDeploymentsMapping, err := getPodDeployments(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	podServicesMapping, err := getPodServices(cm.Cache, podlist, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceLabelsMapping, err := getNamespaceLabels(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}
	namespaceAnnotationsMapping, err := getNamespaceAnnotations(cm.Cache, clusterID)
	if err != nil {
		return nil, err
	}

	// Process Prometheus query results. Handle errors using ctx.Errors.
	resRAMUsage, _ := resChRAMUsage.Await()
	resCPUUsage, _ := resChCPUUsage.Await()
	resNetZoneRequests, _ := resChNetZoneRequests.Await()
	resNetRegionRequests, _ := resChNetRegionRequests.Await()
	resNetInternetRequests, _ := resChNetInternetRequests.Await()

	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
	// NOTE: will not propagate unless coupled with errors.
	if grp.HasErrors() {
		// To keep the context of where the errors are occurring, we log the errors here and pass them the error
		// back to the caller. The caller should handle the specific case where error is an ErrorCollection
		for _, queryErr := range grp.Errors() {
			if queryErr.Error != nil {
				log.Errorf("ComputeCostData: Request Error: %s", queryErr.Error)
			}
			if queryErr.ParseError != nil {
				log.Errorf("ComputeCostData: Parsing Error: %s", queryErr.ParseError)
			}
		}
		// ErrorCollection is an collection of errors wrapped in a single error implementation
		// We opt to not return an error for the sake of running as a pure exporter.
		log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", grp.Error())
	}

	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")

	// Per-node pricing, needed to attach node data to each container.
	nodes, err := cm.GetNodeCost()
	if err != nil {
		log.Warnf("GetNodeCost: no node cost model available: %s", err)
		return nil, err
	}

	// Unmounted PVs represent the PVs that are not mounted or tied to a volume on a container
	unmountedPVs := make(map[string][]*PersistentVolumeClaimData)
	pvClaimMapping, err := GetPVInfoLocal(cm.Cache, clusterID)
	if err != nil {
		log.Warnf("GetPVInfo: unable to get PV data: %s", err.Error())
	}
	if pvClaimMapping != nil {
		err = cm.addPVData(pvClaimMapping)
		if err != nil {
			return nil, err
		}
		// copy claim mappings into zombies, then remove as they're discovered
		for k, v := range pvClaimMapping {
			unmountedPVs[k] = []*PersistentVolumeClaimData{v}
		}
	}

	// Network usage is best-effort: fall back to an empty map on error.
	networkUsageMap, err := GetNetworkUsageData(resNetZoneRequests, resNetRegionRequests, resNetInternetRequests, clusterID)
	if err != nil {
		log.Warnf("Unable to get Network Cost Data: %s", err.Error())
		networkUsageMap = make(map[string]*NetworkUsageData)
	}

	containerNameCost := make(map[string]*CostData)
	containers := make(map[string]bool)

	// Seed the container key set from both RAM and CPU usage metrics.
	RAMUsedMap, err := GetContainerMetricVector(resRAMUsage, clusterID)
	if err != nil {
		return nil, err
	}
	for key := range RAMUsedMap {
		containers[key] = true
	}
	CPUUsedMap, err := GetContainerMetricVector(resCPUUsage, clusterID) // No need to normalize here, as this comes from a counter
	if err != nil {
		return nil, err
	}
	for key := range CPUUsedMap {
		containers[key] = true
	}

	// Add running containers known to the k8s API; these may not have
	// appeared in metrics yet.
	currentContainers := make(map[string]clustercache.Pod)
	for _, pod := range podlist {
		if pod.Status.Phase != v1.PodRunning {
			continue
		}
		cs, err := NewContainerMetricsFromPod(pod, clusterID)
		if err != nil {
			return nil, err
		}
		for _, c := range cs {
			containers[c.Key()] = true // captures any containers that existed for a time < a prometheus scrape interval. We currently charge 0 for this but should charge something.
			currentContainers[c.Key()] = *pod
		}
	}

	// Nodes/containers seen only in metrics (deleted from the API) are
	// collected here and backfilled from historical data after the loop.
	missingNodes := make(map[string]*costAnalyzerCloud.Node)
	missingContainers := make(map[string]*CostData)
	for key := range containers {
		if _, ok := containerNameCost[key]; ok {
			continue // because ordering is important for the allocation model (all PV's applied to the first), just dedupe if it's already been added.
		}
		// The _else_ case for this statement is the case in which the container has been
		// deleted so we have usage information but not request information. In that case,
		// we return partial data for CPU and RAM: only usage and not requests.
		if pod, ok := currentContainers[key]; ok {
			podName := pod.Name
			ns := pod.Namespace

			// Merge namespace labels into pod labels; pod labels win on conflict.
			nsLabels := namespaceLabelsMapping[ns+","+clusterID]
			podLabels := maps.Clone(pod.Labels)
			if podLabels == nil {
				podLabels = make(map[string]string)
			}
			for k, v := range nsLabels {
				if _, ok := podLabels[k]; !ok {
					podLabels[k] = v
				}
			}

			// Merge namespace annotations into pod annotations; pod wins on conflict.
			// NOTE(review): unlike labels, pod.Annotations is not cloned here, so
			// the merge mutates the cached pod's annotation map — confirm intended.
			nsAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
			podAnnotations := pod.Annotations
			if podAnnotations == nil {
				podAnnotations = make(map[string]string)
			}
			for k, v := range nsAnnotations {
				if _, ok := podAnnotations[k]; !ok {
					podAnnotations[k] = v
				}
			}

			nodeName := pod.Spec.NodeName
			var nodeData *costAnalyzerCloud.Node
			if _, ok := nodes[nodeName]; ok {
				nodeData = nodes[nodeName]
			}

			nsKey := ns + "," + clusterID

			var podDeployments []string
			if _, ok := podDeploymentsMapping[nsKey]; ok {
				if ds, ok := podDeploymentsMapping[nsKey][pod.Name]; ok {
					podDeployments = ds
				} else {
					podDeployments = []string{}
				}
			}

			// Collect PVCs mounted by this pod and drop them from the
			// unmounted-PV candidate set.
			var podPVs []*PersistentVolumeClaimData
			podClaims := pod.Spec.Volumes
			for _, vol := range podClaims {
				if vol.PersistentVolumeClaim != nil {
					name := vol.PersistentVolumeClaim.ClaimName
					key := ns + "," + name + "," + clusterID
					if pvClaim, ok := pvClaimMapping[key]; ok {
						pvClaim.TimesClaimed++
						podPVs = append(podPVs, pvClaim)
						// Remove entry from potential unmounted pvs
						delete(unmountedPVs, key)
					}
				}
			}

			// Network cost for this pod, if usage metrics exist; best-effort.
			var podNetCosts []*util.Vector
			if usage, ok := networkUsageMap[ns+","+podName+","+clusterID]; ok {
				netCosts, err := GetNetworkCost(usage, cp)
				if err != nil {
					log.Debugf("Error pulling network costs: %s", err.Error())
				} else {
					podNetCosts = netCosts
				}
			}

			var podServices []string
			if _, ok := podServicesMapping[nsKey]; ok {
				if svcs, ok := podServicesMapping[nsKey][pod.Name]; ok {
					podServices = svcs
				} else {
					podServices = []string{}
				}
			}

			for i, container := range pod.Spec.Containers {
				containerName := container.Name

				// recreate the key and look up data for this container
				newKey := NewContainerMetricFromValues(ns, podName, containerName, pod.Spec.NodeName, clusterID).Key()

				// k8s.io/apimachinery/pkg/api/resource/amount.go and
				// k8s.io/apimachinery/pkg/api/resource/quantity.go for
				// details on the "amount" API. See
				// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-types
				// for the units of memory and CPU.
				ramRequestBytes := container.Resources.Requests.Memory().Value()

				// Because information on container RAM & CPU requests isn't
				// coming from Prometheus, it won't have a timestamp associated
				// with it. We need to provide a timestamp.
				RAMReqV := []*util.Vector{
					{
						Value:     float64(ramRequestBytes),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				// use millicores so we can convert to cores in a float64 format
				cpuRequestMilliCores := container.Resources.Requests.Cpu().MilliValue()
				CPUReqV := []*util.Vector{
					{
						Value:     float64(cpuRequestMilliCores) / 1000,
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				// GPU requests may be declared under the NVIDIA or AWS vGPU
				// resource names, in either requests or limits; first match wins.
				gpuReqCount := 0.0
				if g, ok := container.Resources.Requests["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["nvidia.com/gpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Requests["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				} else if g, ok := container.Resources.Limits["k8s.amazonaws.com/vgpu"]; ok {
					gpuReqCount = g.AsApproximateFloat64()
				}
				GPUReqV := []*util.Vector{
					{
						Value:     float64(gpuReqCount),
						Timestamp: float64(time.Now().UTC().Unix()),
					},
				}

				// Fall back to a single zero vector when no usage was observed.
				RAMUsedV, ok := RAMUsedMap[newKey]
				if !ok {
					log.Debug("no RAM usage for " + newKey)
					RAMUsedV = []*util.Vector{{}}
				}
				CPUUsedV, ok := CPUUsedMap[newKey]
				if !ok {
					log.Debug("no CPU usage for " + newKey)
					CPUUsedV = []*util.Vector{{}}
				}

				var pvReq []*PersistentVolumeClaimData
				var netReq []*util.Vector
				if i == 0 { // avoid duplicating by just assigning all claims to the first container.
					pvReq = podPVs
					netReq = podNetCosts
				}

				costs := &CostData{
					Name:            containerName,
					PodName:         podName,
					NodeName:        nodeName,
					Namespace:       ns,
					Deployments:     podDeployments,
					Services:        podServices,
					Daemonsets:      getDaemonsetsOfPod(pod),
					Jobs:            getJobsOfPod(pod),
					Statefulsets:    getStatefulSetsOfPod(pod),
					NodeData:        nodeData,
					RAMReq:          RAMReqV,
					RAMUsed:         RAMUsedV,
					CPUReq:          CPUReqV,
					CPUUsed:         CPUUsedV,
					GPUReq:          GPUReqV,
					PVCData:         pvReq,
					NetworkData:     netReq,
					Annotations:     podAnnotations,
					Labels:          podLabels,
					NamespaceLabels: nsLabels,
					ClusterID:       clusterID,
					ClusterName:     cm.ClusterMap.NameFor(clusterID),
				}

				// Allocation = max(request, usage) of the first vector element.
				var cpuReq, cpuUse *util.Vector
				if len(costs.CPUReq) > 0 {
					cpuReq = costs.CPUReq[0]
				}
				if len(costs.CPUUsed) > 0 {
					cpuUse = costs.CPUUsed[0]
				}
				costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

				var ramReq, ramUse *util.Vector
				if len(costs.RAMReq) > 0 {
					ramReq = costs.RAMReq[0]
				}
				if len(costs.RAMUsed) > 0 {
					ramUse = costs.RAMUsed[0]
				}
				costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

				containerNameCost[newKey] = costs
			}
		} else {
			// The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
			log.Debug("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
			c, err := NewContainerMetricFromKey(key)
			if err != nil {
				return nil, err
			}

			// CPU and RAM requests are obtained from the Kubernetes API.
			// If this case has been reached, the Kubernetes API will not
			// have information about the pod because it no longer exists.
			//
			// The case where this matters is minimal, mainly in environments
			// with very short-lived pods that over-request resources.
			RAMReqV := []*util.Vector{{}}
			CPUReqV := []*util.Vector{{}}
			GPUReqV := []*util.Vector{{}}

			RAMUsedV, ok := RAMUsedMap[key]
			if !ok {
				log.Debug("no RAM usage for " + key)
				RAMUsedV = []*util.Vector{{}}
			}
			CPUUsedV, ok := CPUUsedMap[key]
			if !ok {
				log.Debug("no CPU usage for " + key)
				CPUUsedV = []*util.Vector{{}}
			}

			// The node may be gone too; record it so findDeletedNodeInfo can
			// backfill its pricing after the loop. The placeholder Node is
			// shared by reference with the CostData created below.
			node, ok := nodes[c.NodeName]
			if !ok {
				log.Debugf("Node \"%s\" has been deleted from Kubernetes. Query historical data to get it.", c.NodeName)
				if n, ok := missingNodes[c.NodeName]; ok {
					node = n
				} else {
					node = &costAnalyzerCloud.Node{}
					missingNodes[c.NodeName] = node
				}
			}
			namespacelabels := namespaceLabelsMapping[c.Namespace+","+c.ClusterID]
			namespaceAnnotations := namespaceAnnotationsMapping[c.Namespace+","+c.ClusterID]

			costs := &CostData{
				Name:            c.ContainerName,
				PodName:         c.PodName,
				NodeName:        c.NodeName,
				NodeData:        node,
				Namespace:       c.Namespace,
				RAMReq:          RAMReqV,
				RAMUsed:         RAMUsedV,
				CPUReq:          CPUReqV,
				CPUUsed:         CPUUsedV,
				GPUReq:          GPUReqV,
				Annotations:     namespaceAnnotations,
				NamespaceLabels: namespacelabels,
				ClusterID:       c.ClusterID,
				ClusterName:     cm.ClusterMap.NameFor(c.ClusterID),
			}

			var cpuReq, cpuUse *util.Vector
			if len(costs.CPUReq) > 0 {
				cpuReq = costs.CPUReq[0]
			}
			if len(costs.CPUUsed) > 0 {
				cpuUse = costs.CPUUsed[0]
			}
			costs.CPUAllocation = getContainerAllocation(cpuReq, cpuUse, "CPU")

			var ramReq, ramUse *util.Vector
			if len(costs.RAMReq) > 0 {
				ramReq = costs.RAMReq[0]
			}
			if len(costs.RAMUsed) > 0 {
				ramUse = costs.RAMUsed[0]
			}
			costs.RAMAllocation = getContainerAllocation(ramReq, ramUse, "RAM")

			containerNameCost[key] = costs
			missingContainers[key] = costs
		}
	}

	// Use unmounted pvs to create a mapping of "Unmounted-<Namespace>" containers
	// to pass along the cost data
	unmounted := findUnmountedPVCostData(cm.ClusterMap, unmountedPVs, namespaceLabelsMapping, namespaceAnnotationsMapping)
	for k, costs := range unmounted {
		log.Debugf("Unmounted PVs in Namespace/ClusterID: %s/%s", costs.Namespace, costs.ClusterID)
		containerNameCost[k] = costs
	}

	// Backfill data for deleted nodes/containers from historical metrics.
	// Errors here are logged; note the final err is still returned to the
	// caller alongside the (complete) cost map.
	err = findDeletedNodeInfo(cm.DataSource, missingNodes, start, end)
	if err != nil {
		log.Errorf("Error fetching historical node data: %s", err.Error())
	}
	err = findDeletedPodInfo(cm.DataSource, missingContainers, start, end)
	if err != nil {
		log.Errorf("Error fetching historical pod data: %s", err.Error())
	}
	return containerNameCost, err
}
  498. func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[string][]*PersistentVolumeClaimData, namespaceLabelsMapping map[string]map[string]string, namespaceAnnotationsMapping map[string]map[string]string) map[string]*CostData {
  499. costs := make(map[string]*CostData)
  500. if len(unmountedPVs) == 0 {
  501. return costs
  502. }
  503. for k, pv := range unmountedPVs {
  504. keyParts := strings.Split(k, ",")
  505. if len(keyParts) != 3 {
  506. log.Warnf("Unmounted PV used key with incorrect parts: %s", k)
  507. continue
  508. }
  509. ns, _, clusterID := keyParts[0], keyParts[1], keyParts[2]
  510. namespacelabels := namespaceLabelsMapping[ns+","+clusterID]
  511. namespaceAnnotations := namespaceAnnotationsMapping[ns+","+clusterID]
  512. metric := NewContainerMetricFromValues(ns, unmountedPVsContainer, unmountedPVsContainer, "", clusterID)
  513. key := metric.Key()
  514. if costData, ok := costs[key]; !ok {
  515. costs[key] = &CostData{
  516. Name: unmountedPVsContainer,
  517. PodName: unmountedPVsContainer,
  518. NodeName: "",
  519. Annotations: namespaceAnnotations,
  520. Namespace: ns,
  521. NamespaceLabels: namespacelabels,
  522. Labels: namespacelabels,
  523. ClusterID: clusterID,
  524. ClusterName: clusterMap.NameFor(clusterID),
  525. PVCData: pv,
  526. }
  527. } else {
  528. costData.PVCData = append(costData.PVCData, pv...)
  529. }
  530. }
  531. return costs
  532. }
  533. func findDeletedPodInfo(dataSource source.OpenCostDataSource, missingContainers map[string]*CostData, start, end time.Time) error {
  534. if len(missingContainers) > 0 {
  535. mq := dataSource.Metrics()
  536. podLabelsResCh := mq.QueryPodLabels(start, end)
  537. podLabelsResult, err := podLabelsResCh.Await()
  538. if err != nil {
  539. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  540. }
  541. podLabels := make(map[string]map[string]string)
  542. if podLabelsResult != nil {
  543. podLabels, err = parsePodLabels(podLabelsResult)
  544. if err != nil {
  545. log.Errorf("failed to parse historical pod labels: %s", err.Error())
  546. }
  547. }
  548. for key, costData := range missingContainers {
  549. cm, _ := NewContainerMetricFromKey(key)
  550. labels, ok := podLabels[cm.PodName]
  551. if !ok {
  552. labels = make(map[string]string)
  553. }
  554. for k, v := range costData.NamespaceLabels {
  555. labels[k] = v
  556. }
  557. costData.Labels = labels
  558. }
  559. }
  560. return nil
  561. }
  562. func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[string]*costAnalyzerCloud.Node, start, end time.Time) error {
  563. if len(missingNodes) > 0 {
  564. defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
  565. grp := source.NewQueryGroup()
  566. mq := dataSource.Metrics()
  567. cpuCostResCh := source.WithGroup(grp, mq.QueryNodeCPUPricePerHr(start, end))
  568. ramCostResCh := source.WithGroup(grp, mq.QueryNodeRAMPricePerGiBHr(start, end))
  569. gpuCostResCh := source.WithGroup(grp, mq.QueryNodeGPUPricePerHr(start, end))
  570. cpuCostRes, _ := cpuCostResCh.Await()
  571. ramCostRes, _ := ramCostResCh.Await()
  572. gpuCostRes, _ := gpuCostResCh.Await()
  573. if grp.HasErrors() {
  574. return grp.Error()
  575. }
  576. cpuCosts, err := getCost(cpuCostRes, cpuCostNode, cpuCostData)
  577. if err != nil {
  578. return err
  579. }
  580. ramCosts, err := getCost(ramCostRes, ramCostNode, ramCostData)
  581. if err != nil {
  582. return err
  583. }
  584. gpuCosts, err := getCost(gpuCostRes, gpuCostNode, gpuCostData)
  585. if err != nil {
  586. return err
  587. }
  588. if len(cpuCosts) == 0 {
  589. log.Infof("Kubecost prometheus metrics not currently available. Ingest this server's /metrics endpoint to get that data.")
  590. }
  591. for node, costv := range cpuCosts {
  592. if _, ok := missingNodes[node]; ok {
  593. missingNodes[node].VCPUCost = fmt.Sprintf("%f", costv[0].Value)
  594. } else {
  595. log.DedupedWarningf(5, "Node `%s` in prometheus but not k8s api", node)
  596. }
  597. }
  598. for node, costv := range ramCosts {
  599. if _, ok := missingNodes[node]; ok {
  600. missingNodes[node].RAMCost = fmt.Sprintf("%f", costv[0].Value)
  601. }
  602. }
  603. for node, costv := range gpuCosts {
  604. if _, ok := missingNodes[node]; ok {
  605. missingNodes[node].GPUCost = fmt.Sprintf("%f", costv[0].Value)
  606. }
  607. }
  608. }
  609. return nil
  610. }
  611. // getContainerAllocation takes the max between request and usage. This function
  612. // returns a slice containing a single element describing the container's
  613. // allocation.
  614. //
  615. // Additionally, the timestamp of the allocation will be the highest value
  616. // timestamp between the two vectors. This mitigates situations where
  617. // Timestamp=0. This should have no effect on the metrics emitted by the
  618. // CostModelMetricsEmitter
  619. func getContainerAllocation(req *util.Vector, used *util.Vector, allocationType string) []*util.Vector {
  620. var result []*util.Vector
  621. if req != nil && used != nil {
  622. x1 := req.Value
  623. if math.IsNaN(x1) {
  624. log.Debugf("NaN value found during %s allocation calculation for requests.", allocationType)
  625. x1 = 0.0
  626. }
  627. y1 := used.Value
  628. if math.IsNaN(y1) {
  629. log.Debugf("NaN value found during %s allocation calculation for used.", allocationType)
  630. y1 = 0.0
  631. }
  632. result = []*util.Vector{
  633. {
  634. Value: math.Max(x1, y1),
  635. Timestamp: math.Max(req.Timestamp, used.Timestamp),
  636. },
  637. }
  638. if result[0].Value == 0 && result[0].Timestamp == 0 {
  639. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  640. }
  641. } else if req != nil {
  642. result = []*util.Vector{
  643. {
  644. Value: req.Value,
  645. Timestamp: req.Timestamp,
  646. },
  647. }
  648. } else if used != nil {
  649. result = []*util.Vector{
  650. {
  651. Value: used.Value,
  652. Timestamp: used.Timestamp,
  653. },
  654. }
  655. } else {
  656. log.Debugf("No request or usage data found during %s allocation calculation. Setting allocation to 0.", allocationType)
  657. result = []*util.Vector{
  658. {
  659. Value: 0,
  660. Timestamp: float64(time.Now().UTC().Unix()),
  661. },
  662. }
  663. }
  664. return result
  665. }
  666. func (cm *CostModel) addPVData(pvClaimMapping map[string]*PersistentVolumeClaimData) error {
  667. cache := cm.Cache
  668. cloud := cm.Provider
  669. cfg, err := cloud.GetConfig()
  670. if err != nil {
  671. return err
  672. }
  673. // Pull a region from the first node
  674. var defaultRegion string
  675. nodeList := cache.GetAllNodes()
  676. if len(nodeList) > 0 {
  677. defaultRegion, _ = util.GetRegion(nodeList[0].Labels)
  678. }
  679. storageClasses := cache.GetAllStorageClasses()
  680. storageClassMap := make(map[string]map[string]string)
  681. for _, storageClass := range storageClasses {
  682. params := storageClass.Parameters
  683. storageClassMap[storageClass.Name] = params
  684. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  685. storageClassMap["default"] = params
  686. storageClassMap[""] = params
  687. }
  688. }
  689. pvs := cache.GetAllPersistentVolumes()
  690. pvMap := make(map[string]*costAnalyzerCloud.PV)
  691. for _, pv := range pvs {
  692. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  693. if !ok {
  694. log.Debugf("Unable to find parameters for storage class \"%s\". Does pv \"%s\" have a storageClassName?", pv.Spec.StorageClassName, pv.Name)
  695. }
  696. var region string
  697. if r, ok := util.GetRegion(pv.Labels); ok {
  698. region = r
  699. } else {
  700. region = defaultRegion
  701. }
  702. cacPv := &costAnalyzerCloud.PV{
  703. Class: pv.Spec.StorageClassName,
  704. Region: region,
  705. Parameters: parameters,
  706. }
  707. err := cm.GetPVCost(cacPv, pv, region)
  708. if err != nil {
  709. return err
  710. }
  711. pvMap[pv.Name] = cacPv
  712. }
  713. for _, pvc := range pvClaimMapping {
  714. if vol, ok := pvMap[pvc.VolumeName]; ok {
  715. pvc.Volume = vol
  716. } else {
  717. log.Debugf("PV not found, using default")
  718. pvc.Volume = &costAnalyzerCloud.PV{
  719. Cost: cfg.Storage,
  720. }
  721. }
  722. }
  723. return nil
  724. }
  725. func (cm *CostModel) GetPVCost(pv *costAnalyzerCloud.PV, kpv *clustercache.PersistentVolume, defaultRegion string) error {
  726. cp := cm.Provider
  727. cfg, err := cp.GetConfig()
  728. if err != nil {
  729. return err
  730. }
  731. key := cp.GetPVKey(kpv, pv.Parameters, defaultRegion)
  732. pv.ProviderID = key.ID()
  733. pvWithCost, err := cp.PVPricing(key)
  734. if err != nil {
  735. pv.Cost = cfg.Storage
  736. return err
  737. }
  738. if pvWithCost == nil || pvWithCost.Cost == "" {
  739. pv.Cost = cfg.Storage
  740. return nil // set default cost
  741. }
  742. pv.Cost = pvWithCost.Cost
  743. return nil
  744. }
  745. func (cm *CostModel) GetPricingSourceCounts() (*costAnalyzerCloud.PricingMatchMetadata, error) {
  746. if cm.pricingMetadata != nil {
  747. return cm.pricingMetadata, nil
  748. } else {
  749. return nil, fmt.Errorf("Node costs not yet calculated")
  750. }
  751. }
// GetNodeCost builds a cost-annotated Node entry for every node in the
// cluster cache. Provider pricing (NodePricing) is the primary source; gaps
// are filled from the k8s API (CPU/RAM/GPU capacity) and from the configured
// default prices in cfg. It also records pricing-match metadata into
// cm.pricingMetadata for later retrieval by GetPricingSourceCounts.
func (cm *CostModel) GetNodeCost() (map[string]*costAnalyzerCloud.Node, error) {
	cp := cm.Provider
	cfg, err := cp.GetConfig()
	if err != nil {
		return nil, err
	}
	nodeList := cm.Cache.GetAllNodes()
	nodes := make(map[string]*costAnalyzerCloud.Node)
	pmd := &costAnalyzerCloud.PricingMatchMetadata{
		TotalNodes:        0,
		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
	}
	for _, n := range nodeList {
		name := n.Name
		nodeLabels := n.Labels
		// NOTE(review): this writes into the cached node's label map so the
		// provider key can see the provider ID.
		nodeLabels["providerID"] = n.SpecProviderID
		pmd.TotalNodes++
		cnode, _, err := cp.NodePricing(cp.GetKey(nodeLabels, n))
		if err != nil {
			log.Infof("Error getting node pricing. Error: %s", err.Error())
			// A partial node from the provider is used as-is; otherwise fall
			// back to the configured default CPU/RAM prices.
			if cnode != nil {
				nodes[name] = cnode
				continue
			} else {
				cnode = &costAnalyzerCloud.Node{
					VCPUCost: cfg.CPU,
					RAMCost:  cfg.RAM,
				}
			}
		}
		pmd.PricingTypeCounts[cnode.PricingType]++
		// newCnode builds upon cnode but populates/overrides certain fields.
		// cnode was populated leveraging cloud provider public pricing APIs.
		newCnode := *cnode
		if newCnode.InstanceType == "" {
			it, _ := util.GetInstanceType(n.Labels)
			newCnode.InstanceType = it
		}
		if newCnode.Region == "" {
			region, _ := util.GetRegion(n.Labels)
			newCnode.Region = region
		}
		if newCnode.ArchType == "" {
			arch, _ := util.GetArchType(n.Labels)
			newCnode.ArchType = arch
		}
		newCnode.ProviderID = n.SpecProviderID
		// CPU core count: prefer the provider's value; fall back to the k8s
		// capacity reported by the API server.
		var cpu float64
		if newCnode.VCPU == "" {
			cpu = float64(n.Status.Capacity.Cpu().Value())
			newCnode.VCPU = n.Status.Capacity.Cpu().String()
		} else {
			cpu, err = strconv.ParseFloat(newCnode.VCPU, 64)
			if err != nil {
				log.Warnf("parsing VCPU value: \"%s\" as float64", newCnode.VCPU)
			}
		}
		if math.IsNaN(cpu) {
			log.Warnf("cpu parsed as NaN. Setting to 0.")
			cpu = 0
		}
		// RAM is always taken from the k8s capacity value (bytes); the
		// provider string is only used to fill a missing RAM field.
		var ram float64
		if newCnode.RAM == "" {
			newCnode.RAM = n.Status.Capacity.Memory().String()
		}
		ram = float64(n.Status.Capacity.Memory().Value())
		if math.IsNaN(ram) {
			log.Warnf("ram parsed as NaN. Setting to 0.")
			ram = 0
		}
		newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		// GPU count from the provider; 0 when absent or unparseable.
		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
		if err != nil {
			gpuc = 0.0
		}
		// The k8s API will often report more accurate results for GPU count
		// than cloud provider public pricing APIs. If found, override the
		// original value.
		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
		if err != nil {
			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
		}
		if gpuOverride > 0 {
			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
			gpuc = gpuOverride
		}
		if vgpuOverride > 0 {
			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
		}
		// Special case for SUSE rancher, since it won't behave with normal
		// calculations, courtesy of the instance type not being "real" (a
		// recognizable AWS instance type.)
		if newCnode.InstanceType == "rke2" {
			log.Infof(
				"Found a SUSE Rancher node %s, defaulting and skipping math",
				cp.GetKey(nodeLabels, n).Features(),
			)
			defaultCPUCorePrice, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Errorf("Could not parse default cpu price")
				defaultCPUCorePrice = 0
			}
			if math.IsNaN(defaultCPUCorePrice) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPUCorePrice = 0
			}
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			var gpuPrice float64
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
				gpuPrice = 0
			} else if len(gpuPricing) > 0 {
				gpuPrice, err = strconv.ParseFloat(gpuPricing, 64)
				if err != nil {
					log.Errorf("Could not parse custom GPU pricing: %s", err)
					gpuPrice = 0
				} else if math.IsNaN(gpuPrice) {
					log.Warnf("Custom GPU pricing parsed as NaN. Setting to 0.")
					gpuPrice = 0
				} else {
					log.Infof("Using custom GPU pricing for node \"%s\": %f", name, gpuPrice)
				}
			} else {
				gpuPrice, err = strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					gpuPrice = 0
				}
				if math.IsNaN(gpuPrice) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					gpuPrice = 0
				}
			}
			defaultRAMPrice, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Errorf("Could not parse default ram price")
				defaultRAMPrice = 0
			}
			if math.IsNaN(defaultRAMPrice) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAMPrice = 0
			}
			defaultGPUPrice, err := strconv.ParseFloat(cfg.GPU, 64)
			if err != nil {
				log.Errorf("Could not parse default gpu price")
				defaultGPUPrice = 0
			}
			if math.IsNaN(defaultGPUPrice) {
				log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
				defaultGPUPrice = 0
			}
			// Just say no to doing the ratios!
			// NOTE(review): ramCost multiplies the per-unit RAM price by raw
			// bytes here — confirm defaultRAMPrice is a per-byte price for
			// this path, since other paths use GB.
			cpuCost := defaultCPUCorePrice * cpu
			gpuCost := gpuPrice * gpuc
			ramCost := defaultRAMPrice * ram
			nodeCost := cpuCost + gpuCost + ramCost
			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
			// We reach this when a GPU is detected on a node, but no cost for
			// the GPU is defined in the OnDemand pricing. Calculate ratios of
			// CPU to RAM and GPU to RAM costs, then distribute the total node
			// cost among the CPU, RAM, and GPU.
			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			// Some customers may want GPU pricing to be determined by the labels affixed to their nodes. GpuPricing
			// passes the node's labels to the provider, which then cross-references them with the labels that the
			// provider knows to have label-specific costs associated with them, and returns that cost. See CSVProvider
			// for an example implementation.
			gpuPricing, err := cp.GpuPricing(nodeLabels)
			if err != nil {
				log.Errorf("Could not determine custom GPU pricing: %s", err)
			} else if len(gpuPricing) > 0 {
				newCnode.GPUCost = gpuPricing
				log.Infof("Using custom GPU pricing for node \"%s\": %s", name, gpuPricing)
			}
			// Only run the ratio math when no custom GPU price was found.
			if newCnode.GPUCost == "" {
				defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
				if err != nil {
					log.Errorf("Could not parse default cpu price")
					defaultCPU = 0
				}
				if math.IsNaN(defaultCPU) {
					log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
					defaultCPU = 0
				}
				defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
				if err != nil {
					log.Errorf("Could not parse default ram price")
					defaultRAM = 0
				}
				if math.IsNaN(defaultRAM) {
					log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
					defaultRAM = 0
				}
				defaultGPU, err := strconv.ParseFloat(cfg.GPU, 64)
				if err != nil {
					log.Errorf("Could not parse default gpu price")
					defaultGPU = 0
				}
				if math.IsNaN(defaultGPU) {
					log.Warnf("defaultGPU parsed as NaN. Setting to 0.")
					defaultGPU = 0
				}
				cpuToRAMRatio := defaultCPU / defaultRAM
				if math.IsNaN(cpuToRAMRatio) {
					log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
					cpuToRAMRatio = 10
				}
				gpuToRAMRatio := defaultGPU / defaultRAM
				if math.IsNaN(gpuToRAMRatio) {
					log.Warnf("gpuToRAMRatio is NaN. Setting to 100.")
					gpuToRAMRatio = 100
				}
				ramGB := ram / 1024 / 1024 / 1024
				if math.IsNaN(ramGB) {
					log.Warnf("ramGB is NaN. Setting to 0.")
					ramGB = 0
				}
				// ramMultiple expresses the node's resources in "RAM-GB
				// equivalents" so the total node price can be apportioned.
				ramMultiple := gpuc*gpuToRAMRatio + cpu*cpuToRAMRatio + ramGB
				if math.IsNaN(ramMultiple) {
					log.Warnf("ramMultiple is NaN. Setting to 0.")
					ramMultiple = 0
				}
				var nodePrice float64
				if newCnode.Cost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
					if err != nil {
						log.Errorf("Could not parse total node price")
						return nil, err
					}
				} else if newCnode.VCPUCost != "" {
					nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
					if err != nil {
						log.Errorf("Could not parse node vcpu price")
						return nil, err
					}
				} else { // add case to use default pricing model when API data fails.
					log.Debugf("No node price or CPUprice found, falling back to default")
					// NOTE(review): this fallback multiplies defaultRAM by raw
					// `ram` (bytes), while the analogous fallback in the
					// RAMCost branch below uses ramGB — confirm the intended
					// unit; the two should likely agree.
					nodePrice = defaultCPU*cpu + defaultRAM*ram + gpuc*defaultGPU
				}
				if math.IsNaN(nodePrice) {
					log.Warnf("nodePrice parsed as NaN. Setting to 0.")
					nodePrice = 0
				}
				ramPrice := (nodePrice / ramMultiple)
				if math.IsNaN(ramPrice) {
					log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
					ramPrice = 0
				}
				cpuPrice := ramPrice * cpuToRAMRatio
				gpuPrice := ramPrice * gpuToRAMRatio
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
				newCnode.RAMBytes = fmt.Sprintf("%f", ram)
				newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
			}
		} else if newCnode.RAMCost == "" {
			// We reach this when no RAM cost is defined in the OnDemand
			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
			// distribute the total node cost among CPU and RAM costs.
			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
			if err != nil {
				log.Warnf("Could not parse default cpu price")
				defaultCPU = 0
			}
			if math.IsNaN(defaultCPU) {
				log.Warnf("defaultCPU parsed as NaN. Setting to 0.")
				defaultCPU = 0
			}
			defaultRAM, err := strconv.ParseFloat(cfg.RAM, 64)
			if err != nil {
				log.Warnf("Could not parse default ram price")
				defaultRAM = 0
			}
			if math.IsNaN(defaultRAM) {
				log.Warnf("defaultRAM parsed as NaN. Setting to 0.")
				defaultRAM = 0
			}
			cpuToRAMRatio := defaultCPU / defaultRAM
			if math.IsNaN(cpuToRAMRatio) {
				log.Warnf("cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 10.", defaultCPU, defaultRAM)
				cpuToRAMRatio = 10
			}
			ramGB := ram / 1024 / 1024 / 1024
			if math.IsNaN(ramGB) {
				log.Warnf("ramGB is NaN. Setting to 0.")
				ramGB = 0
			}
			ramMultiple := cpu*cpuToRAMRatio + ramGB
			if math.IsNaN(ramMultiple) {
				log.Warnf("ramMultiple is NaN. Setting to 0.")
				ramMultiple = 0
			}
			var nodePrice float64
			if newCnode.Cost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.Cost, 64)
				if err != nil {
					log.Warnf("Could not parse total node price")
					return nil, err
				}
				if newCnode.GPUCost != "" {
					gpuPrice, err := strconv.ParseFloat(newCnode.GPUCost, 64)
					if err != nil {
						log.Warnf("Could not parse node gpu price")
						return nil, err
					}
					nodePrice = nodePrice - gpuPrice // remove the gpuPrice from the total, we're just costing out RAM and CPU.
				}
			} else if newCnode.VCPUCost != "" {
				nodePrice, err = strconv.ParseFloat(newCnode.VCPUCost, 64) // all the price was allocated to the CPU
				if err != nil {
					log.Warnf("Could not parse node vcpu price")
					return nil, err
				}
			} else { // add case to use default pricing model when API data fails.
				log.Debugf("No node price or CPUprice found, falling back to default")
				nodePrice = defaultCPU*cpu + defaultRAM*ramGB
			}
			if math.IsNaN(nodePrice) {
				log.Warnf("nodePrice parsed as NaN. Setting to 0.")
				nodePrice = 0
			}
			ramPrice := (nodePrice / ramMultiple)
			if math.IsNaN(ramPrice) {
				log.Warnf("ramPrice[nodePrice: %f / ramMultiple: %f] parsed as NaN. Setting to 0.", nodePrice, ramMultiple)
				ramPrice = 0
			}
			cpuPrice := ramPrice * cpuToRAMRatio
			if defaultRAM != 0 {
				newCnode.VCPUCost = fmt.Sprintf("%f", cpuPrice)
				newCnode.RAMCost = fmt.Sprintf("%f", ramPrice)
			} else { // just assign the full price to CPU
				if cpu != 0 {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice/cpu)
				} else {
					newCnode.VCPUCost = fmt.Sprintf("%f", nodePrice)
				}
			}
			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
		}
		nodes[name] = &newCnode
	}
	cm.pricingMetadata = pmd
	cp.ApplyReservedInstancePricing(nodes)
	return nodes, nil
}
  1109. // TODO: drop some logs
  1110. func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer, error) {
  1111. // for fetching prices from cloud provider
  1112. // cfg, err := cp.GetConfig()
  1113. // if err != nil {
  1114. // return nil, err
  1115. // }
  1116. cp := cm.Provider
  1117. servicesList := cm.Cache.GetAllServices()
  1118. loadBalancerMap := make(map[serviceKey]*costAnalyzerCloud.LoadBalancer)
  1119. for _, service := range servicesList {
  1120. namespace := service.Namespace
  1121. name := service.Name
  1122. key := serviceKey{
  1123. Cluster: coreenv.GetClusterID(),
  1124. Namespace: namespace,
  1125. Service: name,
  1126. }
  1127. if service.Type == "LoadBalancer" {
  1128. loadBalancer, err := cp.LoadBalancerPricing()
  1129. if err != nil {
  1130. return nil, err
  1131. }
  1132. newLoadBalancer := *loadBalancer
  1133. for _, loadBalancerIngress := range service.Status.LoadBalancer.Ingress {
  1134. address := loadBalancerIngress.IP
  1135. // Some cloud providers use hostname rather than IP
  1136. if address == "" {
  1137. address = loadBalancerIngress.Hostname
  1138. }
  1139. newLoadBalancer.IngressIPAddresses = append(newLoadBalancer.IngressIPAddresses, address)
  1140. }
  1141. loadBalancerMap[key] = &newLoadBalancer
  1142. }
  1143. }
  1144. return loadBalancerMap, nil
  1145. }
  1146. func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1147. servicesList := cache.GetAllServices()
  1148. podServicesMapping := make(map[string]map[string][]string)
  1149. for _, service := range servicesList {
  1150. namespace := service.Namespace
  1151. name := service.Name
  1152. key := namespace + "," + clusterID
  1153. if _, ok := podServicesMapping[key]; !ok {
  1154. podServicesMapping[key] = make(map[string][]string)
  1155. }
  1156. s := labels.Nothing()
  1157. if len(service.SpecSelector) > 0 {
  1158. s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
  1159. }
  1160. for _, pod := range podList {
  1161. labelSet := labels.Set(pod.Labels)
  1162. if s.Matches(labelSet) && pod.Namespace == namespace {
  1163. services, ok := podServicesMapping[key][pod.Name]
  1164. if ok {
  1165. podServicesMapping[key][pod.Name] = append(services, name)
  1166. } else {
  1167. podServicesMapping[key][pod.Name] = []string{name}
  1168. }
  1169. }
  1170. }
  1171. }
  1172. return podServicesMapping, nil
  1173. }
  1174. func getPodStatefulsets(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1175. ssList := cache.GetAllStatefulSets()
  1176. podSSMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1177. for _, ss := range ssList {
  1178. namespace := ss.Namespace
  1179. name := ss.Name
  1180. key := namespace + "," + clusterID
  1181. if _, ok := podSSMapping[key]; !ok {
  1182. podSSMapping[key] = make(map[string][]string)
  1183. }
  1184. s, err := metav1.LabelSelectorAsSelector(ss.SpecSelector)
  1185. if err != nil {
  1186. log.Errorf("Error doing deployment label conversion: %s", err.Error())
  1187. }
  1188. for _, pod := range podList {
  1189. labelSet := labels.Set(pod.Labels)
  1190. if s.Matches(labelSet) && pod.Namespace == namespace {
  1191. sss, ok := podSSMapping[key][pod.Name]
  1192. if ok {
  1193. podSSMapping[key][pod.Name] = append(sss, name)
  1194. } else {
  1195. podSSMapping[key][pod.Name] = []string{name}
  1196. }
  1197. }
  1198. }
  1199. }
  1200. return podSSMapping, nil
  1201. }
  1202. func getPodDeployments(cache clustercache.ClusterCache, podList []*clustercache.Pod, clusterID string) (map[string]map[string][]string, error) {
  1203. deploymentsList := cache.GetAllDeployments()
  1204. podDeploymentsMapping := make(map[string]map[string][]string) // namespace: podName: [deploymentNames]
  1205. for _, deployment := range deploymentsList {
  1206. namespace := deployment.Namespace
  1207. name := deployment.Name
  1208. key := namespace + "," + clusterID
  1209. if _, ok := podDeploymentsMapping[key]; !ok {
  1210. podDeploymentsMapping[key] = make(map[string][]string)
  1211. }
  1212. s, err := metav1.LabelSelectorAsSelector(deployment.SpecSelector)
  1213. if err != nil {
  1214. log.Errorf("Error doing deployment label conversion: %s", err)
  1215. }
  1216. for _, pod := range podList {
  1217. labelSet := labels.Set(pod.Labels)
  1218. if s.Matches(labelSet) && pod.Namespace == namespace {
  1219. deployments, ok := podDeploymentsMapping[key][pod.Name]
  1220. if ok {
  1221. podDeploymentsMapping[key][pod.Name] = append(deployments, name)
  1222. } else {
  1223. podDeploymentsMapping[key][pod.Name] = []string{name}
  1224. }
  1225. }
  1226. }
  1227. }
  1228. return podDeploymentsMapping, nil
  1229. }
  1230. func getNamespaceLabels(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1231. nsToLabels := make(map[string]map[string]string)
  1232. nss := cache.GetAllNamespaces()
  1233. for _, ns := range nss {
  1234. labels := make(map[string]string)
  1235. for k, v := range ns.Labels {
  1236. labels[promutil.SanitizeLabelName(k)] = v
  1237. }
  1238. nsToLabels[ns.Name+","+clusterID] = labels
  1239. }
  1240. return nsToLabels, nil
  1241. }
  1242. func getNamespaceAnnotations(cache clustercache.ClusterCache, clusterID string) (map[string]map[string]string, error) {
  1243. nsToAnnotations := make(map[string]map[string]string)
  1244. nss := cache.GetAllNamespaces()
  1245. for _, ns := range nss {
  1246. annotations := make(map[string]string)
  1247. for k, v := range ns.Annotations {
  1248. annotations[promutil.SanitizeLabelName(k)] = v
  1249. }
  1250. nsToAnnotations[ns.Name+","+clusterID] = annotations
  1251. }
  1252. return nsToAnnotations, nil
  1253. }
  1254. func getDaemonsetsOfPod(pod clustercache.Pod) []string {
  1255. for _, ownerReference := range pod.OwnerReferences {
  1256. if ownerReference.Kind == "DaemonSet" {
  1257. return []string{ownerReference.Name}
  1258. }
  1259. }
  1260. return []string{}
  1261. }
  1262. func getJobsOfPod(pod clustercache.Pod) []string {
  1263. for _, ownerReference := range pod.OwnerReferences {
  1264. if ownerReference.Kind == "Job" {
  1265. return []string{ownerReference.Name}
  1266. }
  1267. }
  1268. return []string{}
  1269. }
  1270. func getStatefulSetsOfPod(pod clustercache.Pod) []string {
  1271. for _, ownerReference := range pod.OwnerReferences {
  1272. if ownerReference.Kind == "StatefulSet" {
  1273. return []string{ownerReference.Name}
  1274. }
  1275. }
  1276. return []string{}
  1277. }
// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
// the number of GPUs and vGPUs are equipped on the node. If unable to identify
// a GPU count, it will return -1.
//
// Returns (gpuCount, vgpuCount, err). The cases below are order-dependent:
// the GFD/replicas path must be checked before trusting the plain
// "nvidia.com/gpu" capacity value.
func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
	// Case 1: Standard NVIDIA GPU
	if hasGpu && g.Value() != 0 && !hasReplicas {
		return float64(g.Value()), float64(g.Value()), nil
	}
	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
	if hasReplicas {
		resultGPU := 0.0
		resultVGPU := 0.0
		// Physical GPU count comes from the GFD "gpu.count" label when present.
		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
			var err error
			resultGPU, err = strconv.ParseFloat(c, 64)
			if err != nil {
				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
			}
		}
		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
			resultVGPU = float64(s.Value())
		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
			resultVGPU = float64(g.Value())
		} else {
			// No shared or plain capacity entry: fall back to the physical count.
			resultVGPU = resultGPU
		}
		return resultGPU, resultVGPU, nil
	}
	// Case 3: AWS vGPU
	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
		vgpuCount, err := getAllocatableVGPUs(cache)
		if err != nil {
			return -1, -1, err
		}
		// Default coefficient of 10 vGPUs per physical GPU unless the device
		// plugin daemonset advertises a different "--vgpu=" allocation.
		vgpuCoeff := 10.0
		if vgpuCount > 0.0 {
			vgpuCoeff = vgpuCount
		}
		if vgpu.Value() != 0 {
			resultGPU := float64(vgpu.Value()) / vgpuCoeff
			resultVGPU := float64(vgpu.Value())
			return resultGPU, resultVGPU, nil
		}
	}
	// No GPU found
	return -1, -1, nil
}
  1330. func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
  1331. daemonsets := cache.GetAllDaemonSets()
  1332. vgpuCount := 0.0
  1333. for _, ds := range daemonsets {
  1334. dsContainerList := &ds.SpecContainers
  1335. for _, ctnr := range *dsContainerList {
  1336. if ctnr.Args != nil {
  1337. for _, arg := range ctnr.Args {
  1338. if strings.Contains(arg, "--vgpu=") {
  1339. vgpus, err := strconv.ParseFloat(arg[strings.IndexByte(arg, '=')+1:], 64)
  1340. if err != nil {
  1341. log.Errorf("failed to parse vgpu allocation string %s: %v", arg, err)
  1342. continue
  1343. }
  1344. vgpuCount = vgpus
  1345. return vgpuCount, nil
  1346. }
  1347. }
  1348. }
  1349. }
  1350. }
  1351. return vgpuCount, nil
  1352. }
// PersistentVolumeClaimData describes a persistent volume claim together with
// the priced PersistentVolume backing it and the time series of values
// observed for it.
type PersistentVolumeClaimData struct {
	Class        string                `json:"class"`     // storage class of the claim
	Claim        string                `json:"claim"`     // PVC name
	Namespace    string                `json:"namespace"` // namespace the PVC lives in
	ClusterID    string                `json:"clusterId"` // cluster the PVC belongs to
	TimesClaimed int                   `json:"timesClaimed"`
	VolumeName   string                `json:"volumeName"`       // name of the bound PersistentVolume
	Volume       *costAnalyzerCloud.PV `json:"persistentVolume"` // priced PV; populated by addPVData
	Values       []*util.Vector        `json:"values"`
}
  1363. func measureTime(start time.Time, threshold time.Duration, name string) {
  1364. elapsed := time.Since(start)
  1365. if elapsed > threshold {
  1366. log.Infof("[Profiler] %s: %s", elapsed, name)
  1367. }
  1368. }
  1369. func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool) (*opencost.AllocationSetRange, error) {
  1370. // Validate window is legal
  1371. if window.IsOpen() || window.IsNegative() {
  1372. return nil, fmt.Errorf("illegal window: %s", window)
  1373. }
  1374. var totalsStore opencost.TotalsStore
  1375. // Idle is required for proportional asset costs
  1376. if includeProportionalAssetResourceCosts {
  1377. if !includeIdle {
  1378. return nil, errors.New("bad request - includeIdle must be set true if includeProportionalAssetResourceCosts is true")
  1379. }
  1380. totalsStore = opencost.NewMemoryTotalsStore()
  1381. }
  1382. // Begin with empty response
  1383. asr := opencost.NewAllocationSetRange()
  1384. // Query for AllocationSets in increments of the given step duration,
  1385. // appending each to the response.
  1386. stepStart := *window.Start()
  1387. stepEnd := stepStart.Add(step)
  1388. var isAKS bool
  1389. for window.End().After(stepStart) {
  1390. allocSet, err := cm.ComputeAllocation(stepStart, stepEnd)
  1391. if err != nil {
  1392. return nil, fmt.Errorf("error computing allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1393. }
  1394. if includeIdle {
  1395. assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
  1396. if err != nil {
  1397. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1398. }
  1399. if includeProportionalAssetResourceCosts {
  1400. // AKS is a special case - there can be a maximum of 2
  1401. // load balancers (1 public and 1 private) in an AKS cluster
  1402. // therefore, when calculating PARCs for load balancers,
  1403. // we must know if this is an AKS cluster
  1404. for _, node := range assetSet.Nodes {
  1405. if _, found := node.Labels["label_kubernetes_azure_com_cluster"]; found {
  1406. isAKS = true
  1407. break
  1408. }
  1409. }
  1410. _, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  1411. if err != nil {
  1412. log.Errorf("Allocation: error updating asset resource totals for %s: %s", assetSet.Window, err)
  1413. }
  1414. }
  1415. idleSet, err := computeIdleAllocations(allocSet, assetSet, true)
  1416. if err != nil {
  1417. return nil, fmt.Errorf("error computing idle allocations for %s: %w", opencost.NewClosedWindow(stepStart, stepEnd), err)
  1418. }
  1419. for _, idleAlloc := range idleSet.Allocations {
  1420. allocSet.Insert(idleAlloc)
  1421. }
  1422. }
  1423. asr.Append(allocSet)
  1424. stepStart = stepEnd
  1425. stepEnd = stepStart.Add(step)
  1426. }
  1427. // Set aggregation options and aggregate
  1428. var shareIdleOpt string
  1429. if shareIdle {
  1430. shareIdleOpt = opencost.ShareWeighted
  1431. } else {
  1432. shareIdleOpt = opencost.ShareNone
  1433. }
  1434. opts := &opencost.AllocationAggregationOptions{
  1435. IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
  1436. IdleByNode: idleByNode,
  1437. IncludeAggregatedMetadata: includeAggregatedMetadata,
  1438. ShareIdle: shareIdleOpt,
  1439. }
  1440. // Aggregate
  1441. err := asr.AggregateBy(aggregate, opts)
  1442. if err != nil {
  1443. return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
  1444. }
  1445. // Accumulate, if requested
  1446. if accumulateBy != opencost.AccumulateOptionNone {
  1447. asr, err = asr.Accumulate(accumulateBy)
  1448. if err != nil {
  1449. log.Errorf("error accumulating by %v: %s", accumulateBy, err)
  1450. return nil, fmt.Errorf("error accumulating by %v: %s", accumulateBy, err)
  1451. }
  1452. // when accumulating and returning PARCs, we need the totals for the
  1453. // accumulated windows to accurately compute a fraction
  1454. if includeProportionalAssetResourceCosts {
  1455. assetSet, err := cm.ComputeAssets(*asr.Window().Start(), *asr.Window().End())
  1456. if err != nil {
  1457. return nil, fmt.Errorf("error computing assets for %s: %w", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  1458. }
  1459. _, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
  1460. if err != nil {
  1461. log.Errorf("Allocation: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
  1462. }
  1463. }
  1464. }
  1465. if includeProportionalAssetResourceCosts {
  1466. for _, as := range asr.Allocations {
  1467. totalStoreByNode, ok := totalsStore.GetAssetTotalsByNode(as.Start(), as.End())
  1468. if !ok {
  1469. log.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  1470. return nil, fmt.Errorf("unable to locate allocation totals for node for window %v - %v", as.Start(), as.End())
  1471. }
  1472. totalStoreByCluster, ok := totalsStore.GetAssetTotalsByCluster(as.Start(), as.End())
  1473. if !ok {
  1474. log.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  1475. return nil, fmt.Errorf("unable to locate allocation totals for cluster for window %v - %v", as.Start(), as.End())
  1476. }
  1477. var totalPublicLbCost, totalPrivateLbCost float64
  1478. if isAKS && sharedLoadBalancer {
  1479. // loop through all assetTotals, adding all load balancer costs by public and private
  1480. for _, tot := range totalStoreByNode {
  1481. if tot.PrivateLoadBalancer {
  1482. totalPrivateLbCost += tot.LoadBalancerCost
  1483. } else {
  1484. totalPublicLbCost += tot.LoadBalancerCost
  1485. }
  1486. }
  1487. }
  1488. // loop through each allocation set, using total cost from totals store
  1489. for _, alloc := range as.Allocations {
  1490. for rawKey, parc := range alloc.ProportionalAssetResourceCosts {
  1491. key := strings.TrimSuffix(strings.ReplaceAll(rawKey, ",", "/"), "/")
  1492. // for each parc , check the totals store for each
  1493. // on a totals hit, set the corresponding total and calculate percentage
  1494. var totals *opencost.AssetTotals
  1495. if totalsLoc, found := totalStoreByCluster[key]; found {
  1496. totals = totalsLoc
  1497. }
  1498. if totalsLoc, found := totalStoreByNode[key]; found {
  1499. totals = totalsLoc
  1500. }
  1501. if totals == nil {
  1502. log.Errorf("unable to locate asset totals for allocation %s, corresponding PARC is being skipped", key)
  1503. continue
  1504. }
  1505. parc.CPUTotalCost = totals.CPUCost
  1506. parc.GPUTotalCost = totals.GPUCost
  1507. parc.RAMTotalCost = totals.RAMCost
  1508. parc.PVTotalCost = totals.PersistentVolumeCost
  1509. if isAKS && sharedLoadBalancer && len(alloc.LoadBalancers) > 0 {
  1510. // Azure is a special case - use computed totals above
  1511. // use the lbAllocations in the object to determine if
  1512. // this PARC is a public or private load balancer
  1513. // then set the total accordingly
  1514. // AKS only has 1 public and 1 private load balancer
  1515. lbAlloc, found := alloc.LoadBalancers[key]
  1516. if found {
  1517. if lbAlloc.Private {
  1518. parc.LoadBalancerTotalCost = totalPrivateLbCost
  1519. } else {
  1520. parc.LoadBalancerTotalCost = totalPublicLbCost
  1521. }
  1522. }
  1523. } else {
  1524. parc.LoadBalancerTotalCost = totals.LoadBalancerCost
  1525. }
  1526. opencost.ComputePercentages(&parc)
  1527. alloc.ProportionalAssetResourceCosts[rawKey] = parc
  1528. }
  1529. }
  1530. }
  1531. }
  1532. return asr, nil
  1533. }
  1534. func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost.AssetSet, idleByNode bool) (*opencost.AllocationSet, error) {
  1535. if !allocSet.Window.Equal(assetSet.Window) {
  1536. return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
  1537. }
  1538. var allocTotals map[string]*opencost.AllocationTotals
  1539. var assetTotals map[string]*opencost.AssetTotals
  1540. if idleByNode {
  1541. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationNodeProp)
  1542. assetTotals = opencost.ComputeAssetTotals(assetSet, true)
  1543. } else {
  1544. allocTotals = opencost.ComputeAllocationTotals(allocSet, opencost.AllocationClusterProp)
  1545. assetTotals = opencost.ComputeAssetTotals(assetSet, false)
  1546. }
  1547. start, end := *allocSet.Window.Start(), *allocSet.Window.End()
  1548. idleSet := opencost.NewAllocationSet(start, end)
  1549. for key, assetTotal := range assetTotals {
  1550. allocTotal, ok := allocTotals[key]
  1551. if !ok {
  1552. log.Warnf("Allocation: did not find allocations for asset key: %s", key)
  1553. // Use a zero-value set of totals. This indicates either (1) an
  1554. // error computing totals, or (2) that no allocations ran on the
  1555. // given node for the given window.
  1556. allocTotal = &opencost.AllocationTotals{
  1557. Cluster: assetTotal.Cluster,
  1558. Node: assetTotal.Node,
  1559. Start: assetTotal.Start,
  1560. End: assetTotal.End,
  1561. }
  1562. }
  1563. // Insert one idle allocation for each key (whether by node or
  1564. // by cluster), defined as the difference between the total
  1565. // asset cost and the allocated cost per-resource.
  1566. name := fmt.Sprintf("%s/%s", key, opencost.IdleSuffix)
  1567. err := idleSet.Insert(&opencost.Allocation{
  1568. Name: name,
  1569. Window: idleSet.Window.Clone(),
  1570. Properties: &opencost.AllocationProperties{
  1571. Cluster: assetTotal.Cluster,
  1572. Node: assetTotal.Node,
  1573. ProviderID: assetTotal.Node,
  1574. },
  1575. Start: assetTotal.Start,
  1576. End: assetTotal.End,
  1577. CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
  1578. GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
  1579. RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
  1580. })
  1581. if err != nil {
  1582. return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
  1583. }
  1584. }
  1585. return idleSet, nil
  1586. }
// GetDataSource returns the OpenCost data source backing this CostModel.
func (cm *CostModel) GetDataSource() source.OpenCostDataSource {
return cm.DataSource
}