metrics.go 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817
  1. package costmodel
  2. import (
  3. "math"
  4. "strconv"
  5. "strings"
  6. "sync"
  7. "time"
  8. "github.com/opencost/opencost/core/pkg/clusters"
  9. "github.com/opencost/opencost/core/pkg/log"
  10. "github.com/opencost/opencost/core/pkg/util"
  11. "github.com/opencost/opencost/core/pkg/util/atomic"
  12. "github.com/opencost/opencost/core/pkg/util/promutil"
  13. "github.com/opencost/opencost/pkg/cloud/models"
  14. "github.com/opencost/opencost/pkg/clustercache"
  15. "github.com/opencost/opencost/pkg/env"
  16. "github.com/opencost/opencost/pkg/errors"
  17. "github.com/opencost/opencost/pkg/metrics"
  18. "github.com/opencost/opencost/pkg/prom"
  19. promclient "github.com/prometheus/client_golang/api"
  20. "github.com/prometheus/client_golang/prometheus"
  21. dto "github.com/prometheus/client_model/go"
  22. v1 "k8s.io/api/core/v1"
  23. )
//--------------------------------------------------------------------------
// ClusterInfoCollector
//--------------------------------------------------------------------------

// ClusterInfoCollector is a prometheus collector that generates ClusterInfoMetrics.
type ClusterInfoCollector struct {
	// ClusterInfo supplies the cluster metadata that is emitted as labels
	// on the kubecost_cluster_info metric.
	ClusterInfo clusters.ClusterInfoProvider
	// metricsConfig is consulted for the disabled-metrics map; when
	// kubecost_cluster_info is disabled, Describe/Collect emit nothing.
	metricsConfig metrics.MetricsConfig
}
  32. // Describe sends the super-set of all possible descriptors of metrics
  33. // collected by this Collector.
  34. func (cic ClusterInfoCollector) Describe(ch chan<- *prometheus.Desc) {
  35. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  36. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  37. return
  38. }
  39. ch <- prometheus.NewDesc("kubecost_cluster_info", "Kubecost Cluster Info", []string{}, nil)
  40. }
  41. // Collect is called by the Prometheus registry when collecting metrics.
  42. func (cic ClusterInfoCollector) Collect(ch chan<- prometheus.Metric) {
  43. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  44. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  45. return
  46. }
  47. clusterInfo := cic.ClusterInfo.GetClusterInfo()
  48. labels := promutil.MapToLabels(clusterInfo)
  49. m := newClusterInfoMetric("kubecost_cluster_info", labels)
  50. ch <- m
  51. }
//--------------------------------------------------------------------------
// ClusterInfoMetric
//--------------------------------------------------------------------------

// ClusterInfoMetric is a prometheus.Metric used to encode the local cluster info.
type ClusterInfoMetric struct {
	// fqName is the fully-qualified metric name (e.g. kubecost_cluster_info).
	fqName string
	// help is the metric help text reported in the descriptor.
	help string
	// labels holds the label name -> value pairs emitted with the metric.
	labels map[string]string
}
  61. // Creates a new ClusterInfoMetric, implementation of prometheus.Metric
  62. func newClusterInfoMetric(fqName string, labels map[string]string) ClusterInfoMetric {
  63. return ClusterInfoMetric{
  64. fqName: fqName,
  65. labels: labels,
  66. help: "kubecost_cluster_info ClusterInfo",
  67. }
  68. }
  69. // Desc returns the descriptor for the Metric. This method idempotently
  70. // returns the same descriptor throughout the lifetime of the Metric.
  71. func (cim ClusterInfoMetric) Desc() *prometheus.Desc {
  72. l := prometheus.Labels{}
  73. return prometheus.NewDesc(cim.fqName, cim.help, promutil.LabelNamesFrom(cim.labels), l)
  74. }
  75. // Write encodes the Metric into a "Metric" Protocol Buffer data
  76. // transmission object.
  77. func (cim ClusterInfoMetric) Write(m *dto.Metric) error {
  78. h := float64(1)
  79. m.Gauge = &dto.Gauge{
  80. Value: &h,
  81. }
  82. var labels []*dto.LabelPair
  83. for k, v := range cim.labels {
  84. labels = append(labels, &dto.LabelPair{
  85. Name: toStringPtr(k),
  86. Value: toStringPtr(v),
  87. })
  88. }
  89. m.Label = labels
  90. return nil
  91. }
  92. // returns a pointer to the string provided
  93. func toStringPtr(s string) *string { return &s }
//--------------------------------------------------------------------------
// Cost Model Metrics Initialization
//--------------------------------------------------------------------------

// Only allow the metrics to be instantiated and registered once.
var metricsInit sync.Once

// Package-level gauges created by initCostModelMetrics and later handed to
// the CostModelMetricsEmitter. They remain nil until initCostModelMetrics
// has run.
var (
	// Per-node hourly price gauges.
	cpuGv      *prometheus.GaugeVec
	ramGv      *prometheus.GaugeVec
	gpuGv      *prometheus.GaugeVec
	gpuCountGv *prometheus.GaugeVec
	// Persistent-volume hourly price gauge.
	pvGv *prometheus.GaugeVec
	// Spot/preemptible indicator and total node price.
	spotGv  *prometheus.GaugeVec
	totalGv *prometheus.GaugeVec
	// Per-container / per-pod allocation gauges.
	ramAllocGv *prometheus.GaugeVec
	cpuAllocGv *prometheus.GaugeVec
	gpuAllocGv *prometheus.GaugeVec
	pvAllocGv  *prometheus.GaugeVec
	// Cluster-scoped network egress cost gauges.
	networkZoneEgressCostG     prometheus.Gauge
	networkRegionEgressCostG   prometheus.Gauge
	networkInternetEgressCostG prometheus.Gauge
	// Cluster management fee and load balancer cost gauges.
	clusterManagementCostGv *prometheus.GaugeVec
	lbCostGv                *prometheus.GaugeVec
)
  117. // initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
  118. func initCostModelMetrics(clusterCache clustercache.ClusterCache, provider models.Provider, clusterInfo clusters.ClusterInfoProvider, metricsConfig *metrics.MetricsConfig) {
  119. disabledMetrics := metricsConfig.GetDisabledMetricsMap()
  120. var toRegisterGV []*prometheus.GaugeVec
  121. var toRegisterGauge []prometheus.Gauge
  122. metricsInit.Do(func() {
  123. cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  124. Name: "node_cpu_hourly_cost",
  125. Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
  126. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  127. if _, disabled := disabledMetrics["node_cpu_hourly_cost"]; !disabled {
  128. toRegisterGV = append(toRegisterGV, cpuGv)
  129. }
  130. ramGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  131. Name: "node_ram_hourly_cost",
  132. Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
  133. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  134. if _, disabled := disabledMetrics["node_ram_hourly_cost"]; !disabled {
  135. toRegisterGV = append(toRegisterGV, ramGv)
  136. }
  137. gpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  138. Name: "node_gpu_hourly_cost",
  139. Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
  140. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  141. if _, disabled := disabledMetrics["node_gpu_hourly_cost"]; !disabled {
  142. toRegisterGV = append(toRegisterGV, gpuGv)
  143. }
  144. gpuCountGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  145. Name: "node_gpu_count",
  146. Help: "node_gpu_count count of gpu on this node",
  147. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  148. if _, disabled := disabledMetrics["node_gpu_count"]; !disabled {
  149. toRegisterGV = append(toRegisterGV, gpuCountGv)
  150. }
  151. pvGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  152. Name: "pv_hourly_cost",
  153. Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
  154. }, []string{"volumename", "persistentvolume", "provider_id"})
  155. if _, disabled := disabledMetrics["pv_hourly_cost"]; !disabled {
  156. toRegisterGV = append(toRegisterGV, pvGv)
  157. }
  158. spotGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  159. Name: "kubecost_node_is_spot",
  160. Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
  161. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  162. if _, disabled := disabledMetrics["kubecost_node_is_spot"]; !disabled {
  163. toRegisterGV = append(toRegisterGV, spotGv)
  164. }
  165. totalGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  166. Name: "node_total_hourly_cost",
  167. Help: "node_total_hourly_cost Total node cost per hour",
  168. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  169. if _, disabled := disabledMetrics["node_total_hourly_cost"]; !disabled {
  170. toRegisterGV = append(toRegisterGV, totalGv)
  171. }
  172. ramAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  173. Name: "container_memory_allocation_bytes",
  174. Help: "container_memory_allocation_bytes Bytes of RAM used",
  175. }, []string{"namespace", "pod", "container", "instance", "node"})
  176. if _, disabled := disabledMetrics["container_memory_allocation_bytes"]; !disabled {
  177. toRegisterGV = append(toRegisterGV, ramAllocGv)
  178. }
  179. cpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  180. Name: "container_cpu_allocation",
  181. Help: "container_cpu_allocation Percent of a single CPU used in a minute",
  182. }, []string{"namespace", "pod", "container", "instance", "node"})
  183. if _, disabled := disabledMetrics["container_cpu_allocation"]; !disabled {
  184. toRegisterGV = append(toRegisterGV, cpuAllocGv)
  185. }
  186. gpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  187. Name: "container_gpu_allocation",
  188. Help: "container_gpu_allocation GPU used",
  189. }, []string{"namespace", "pod", "container", "instance", "node"})
  190. if _, disabled := disabledMetrics["container_gpu_allocation"]; !disabled {
  191. toRegisterGV = append(toRegisterGV, gpuAllocGv)
  192. }
  193. pvAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  194. Name: "pod_pvc_allocation",
  195. Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
  196. }, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume"})
  197. if _, disabled := disabledMetrics["pod_pvc_allocation"]; !disabled {
  198. toRegisterGV = append(toRegisterGV, pvAllocGv)
  199. }
  200. networkZoneEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  201. Name: "kubecost_network_zone_egress_cost",
  202. Help: "kubecost_network_zone_egress_cost Total cost per GB egress across zones",
  203. })
  204. if _, disabled := disabledMetrics["kubecost_network_zone_egress_cost"]; !disabled {
  205. toRegisterGauge = append(toRegisterGauge, networkZoneEgressCostG)
  206. }
  207. networkRegionEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  208. Name: "kubecost_network_region_egress_cost",
  209. Help: "kubecost_network_region_egress_cost Total cost per GB egress across regions",
  210. })
  211. if _, disabled := disabledMetrics["kubecost_network_region_egress_cost"]; !disabled {
  212. toRegisterGauge = append(toRegisterGauge, networkRegionEgressCostG)
  213. }
  214. networkInternetEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  215. Name: "kubecost_network_internet_egress_cost",
  216. Help: "kubecost_network_internet_egress_cost Total cost per GB of internet egress.",
  217. })
  218. if _, disabled := disabledMetrics["kubecost_network_internet_egress_cost"]; !disabled {
  219. toRegisterGauge = append(toRegisterGauge, networkInternetEgressCostG)
  220. }
  221. clusterManagementCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  222. Name: "kubecost_cluster_management_cost",
  223. Help: "kubecost_cluster_management_cost Hourly cost paid as a cluster management fee.",
  224. }, []string{"provisioner_name"})
  225. if _, disabled := disabledMetrics["kubecost_cluster_management_cost"]; !disabled {
  226. toRegisterGV = append(toRegisterGV, clusterManagementCostGv)
  227. }
  228. lbCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
  229. Name: "kubecost_load_balancer_cost",
  230. Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
  231. }, []string{"ingress_ip", "namespace", "service_name"}) // assumes one ingress IP per load balancer
  232. if _, disabled := disabledMetrics["kubecost_load_balancer_cost"]; !disabled {
  233. toRegisterGV = append(toRegisterGV, lbCostGv)
  234. }
  235. // Register cost-model metrics for emission
  236. for _, gv := range toRegisterGV {
  237. prometheus.MustRegister(gv)
  238. }
  239. for _, g := range toRegisterGauge {
  240. prometheus.MustRegister(g)
  241. }
  242. // General Metric Collectors
  243. prometheus.MustRegister(ClusterInfoCollector{
  244. ClusterInfo: clusterInfo,
  245. metricsConfig: *metricsConfig,
  246. })
  247. })
  248. }
//--------------------------------------------------------------------------
// CostModelMetricsEmitter
//--------------------------------------------------------------------------

// CostModelMetricsEmitter emits all cost-model specific metrics calculated by
// the CostModel.ComputeCostData() method.
type CostModelMetricsEmitter struct {
	// PrometheusClient is used to query existing metric data.
	PrometheusClient promclient.Client
	// KubeClusterCache provides cached access to cluster objects (nodes,
	// pods, storage classes, persistent volumes).
	KubeClusterCache clustercache.ClusterCache
	// CloudProvider supplies pricing data (network, cluster management, LB).
	CloudProvider models.Provider
	// Model computes the cost data that is emitted.
	Model *CostModel

	// Metrics — these are the package-level gauges created by
	// initCostModelMetrics, captured at construction time.
	CPUPriceRecorder              *prometheus.GaugeVec
	RAMPriceRecorder              *prometheus.GaugeVec
	PersistentVolumePriceRecorder *prometheus.GaugeVec
	GPUPriceRecorder              *prometheus.GaugeVec
	GPUCountRecorder              *prometheus.GaugeVec
	PVAllocationRecorder          *prometheus.GaugeVec
	NodeSpotRecorder              *prometheus.GaugeVec
	NodeTotalPriceRecorder        *prometheus.GaugeVec
	RAMAllocationRecorder         *prometheus.GaugeVec
	CPUAllocationRecorder         *prometheus.GaugeVec
	GPUAllocationRecorder         *prometheus.GaugeVec
	ClusterManagementCostRecorder *prometheus.GaugeVec
	LBCostRecorder                *prometheus.GaugeVec
	NetworkZoneEgressRecorder     prometheus.Gauge
	NetworkRegionEgressRecorder   prometheus.Gauge
	NetworkInternetEgressRecorder prometheus.Gauge

	// Concurrent Flow Control - Manages the run state of the metric emitter.
	runState atomic.AtomicRunState
}
  279. // NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
  280. func NewCostModelMetricsEmitter(promClient promclient.Client, clusterCache clustercache.ClusterCache, provider models.Provider, clusterInfo clusters.ClusterInfoProvider, model *CostModel) *CostModelMetricsEmitter {
  281. // Get metric configurations, if any
  282. metricsConfig, err := metrics.GetMetricsConfig()
  283. if err != nil {
  284. log.Infof("Failed to get metrics config before init: %s", err)
  285. }
  286. if len(metricsConfig.DisabledMetrics) > 0 {
  287. log.Infof("Starting metrics init with disabled metrics: %v", metricsConfig.DisabledMetrics)
  288. }
  289. // init will only actually execute once to register the custom gauges
  290. initCostModelMetrics(clusterCache, provider, clusterInfo, metricsConfig)
  291. metrics.InitKubeMetrics(clusterCache, metricsConfig, &metrics.KubeMetricsOpts{
  292. EmitKubecostControllerMetrics: true,
  293. EmitNamespaceAnnotations: env.IsEmitNamespaceAnnotationsMetric(),
  294. EmitPodAnnotations: env.IsEmitPodAnnotationsMetric(),
  295. EmitKubeStateMetrics: env.IsEmitKsmV1Metrics(),
  296. EmitKubeStateMetricsV1Only: env.IsEmitKsmV1MetricsOnly(),
  297. EmitDeprecatedMetrics: env.IsEmitDeprecatedMetrics(),
  298. })
  299. metrics.InitOpencostTelemetry(metricsConfig)
  300. return &CostModelMetricsEmitter{
  301. PrometheusClient: promClient,
  302. KubeClusterCache: clusterCache,
  303. CloudProvider: provider,
  304. Model: model,
  305. CPUPriceRecorder: cpuGv,
  306. RAMPriceRecorder: ramGv,
  307. GPUPriceRecorder: gpuGv,
  308. GPUCountRecorder: gpuCountGv,
  309. PersistentVolumePriceRecorder: pvGv,
  310. NodeSpotRecorder: spotGv,
  311. NodeTotalPriceRecorder: totalGv,
  312. RAMAllocationRecorder: ramAllocGv,
  313. CPUAllocationRecorder: cpuAllocGv,
  314. GPUAllocationRecorder: gpuAllocGv,
  315. PVAllocationRecorder: pvAllocGv,
  316. NetworkZoneEgressRecorder: networkZoneEgressCostG,
  317. NetworkRegionEgressRecorder: networkRegionEgressCostG,
  318. NetworkInternetEgressRecorder: networkInternetEgressCostG,
  319. ClusterManagementCostRecorder: clusterManagementCostGv,
  320. LBCostRecorder: lbCostGv,
  321. }
  322. }
// IsRunning returns true if metric recording is running (i.e. the emitter's
// run state has been started and not yet stopped).
func (cmme *CostModelMetricsEmitter) IsRunning() bool {
	return cmme.runState.IsRunning()
}
// NodeCostAverages tracks a running average of a node's cost attributes.
// The averages are used to detect and discard spurious outliers.
type NodeCostAverages struct {
	// CpuCostAverage is the running mean of observed hourly CPU cost.
	CpuCostAverage float64
	// RamCostAverage is the running mean of observed hourly RAM cost.
	RamCostAverage float64
	// NumCpuDataPoints counts samples folded into CpuCostAverage.
	NumCpuDataPoints float64
	// NumRamDataPoints counts samples folded into RamCostAverage.
	NumRamDataPoints float64
}
  335. // StartCostModelMetricRecording starts the go routine that emits metrics used to determine
  336. // cluster costs.
  337. func (cmme *CostModelMetricsEmitter) Start() bool {
  338. // wait for a reset to prevent a race between start and stop calls
  339. cmme.runState.WaitForReset()
  340. // Check to see if we're already recording, and atomically advance the run state to start if we're not
  341. if !cmme.runState.Start() {
  342. log.Errorf("Attempted to start cost model metric recording when it's already running.")
  343. return false
  344. }
  345. go func() {
  346. defer errors.HandlePanic()
  347. containerSeen := make(map[string]bool)
  348. nodeSeen := make(map[string]bool)
  349. loadBalancerSeen := make(map[string]bool)
  350. pvSeen := make(map[string]bool)
  351. pvcSeen := make(map[string]bool)
  352. nodeCostAverages := make(map[string]NodeCostAverages)
  353. getKeyFromLabelStrings := func(labels ...string) string {
  354. return strings.Join(labels, ",")
  355. }
  356. getLabelStringsFromKey := func(key string) []string {
  357. return strings.Split(key, ",")
  358. }
  359. var defaultRegion string = ""
  360. nodeList := cmme.KubeClusterCache.GetAllNodes()
  361. if len(nodeList) > 0 {
  362. var ok bool
  363. defaultRegion, ok = util.GetRegion(nodeList[0].Labels)
  364. if !ok {
  365. log.DedupedWarningf(5, "Failed to read default region from labels on node %s", nodeList[0].Name)
  366. }
  367. }
  368. for {
  369. log.Debugf("Recording prices...")
  370. podlist := cmme.KubeClusterCache.GetAllPods()
  371. podStatus := make(map[string]v1.PodPhase)
  372. for _, pod := range podlist {
  373. podStatus[pod.Name] = pod.Status.Phase
  374. }
  375. cfg, _ := cmme.CloudProvider.GetConfig()
  376. provisioner, clusterManagementCost, err := cmme.CloudProvider.ClusterManagementPricing()
  377. if err != nil {
  378. log.Errorf("Error getting cluster management cost %s", err.Error())
  379. }
  380. cmme.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
  381. // Record network pricing at global scope
  382. networkCosts, err := cmme.CloudProvider.NetworkPricing()
  383. if err != nil {
  384. log.Debugf("Failed to retrieve network costs: %s", err.Error())
  385. } else {
  386. cmme.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
  387. cmme.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
  388. cmme.NetworkInternetEgressRecorder.Set(networkCosts.InternetNetworkEgressCost)
  389. }
  390. // TODO: Pass PrometheusClient and CloudProvider into CostModel on instantiation so this isn't so awkward
  391. data, err := cmme.Model.ComputeCostData(cmme.PrometheusClient, cmme.CloudProvider, "2m", "", "")
  392. if err != nil {
  393. // For an error collection, we'll just log the length of the errors (ComputeCostData already logs the
  394. // actual errors)
  395. if prom.IsErrorCollection(err) {
  396. if ec, ok := err.(prom.QueryErrorCollection); ok {
  397. log.Errorf("Error in price recording: %d errors occurred", len(ec.Errors()))
  398. }
  399. } else {
  400. log.Errorf("Error in price recording: " + err.Error())
  401. }
  402. // zero the for loop so the time.Sleep will still work
  403. data = map[string]*CostData{}
  404. }
  405. // TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
  406. nodes, err := cmme.Model.GetNodeCost(cmme.CloudProvider)
  407. if err != nil {
  408. log.Warnf("Error getting Node cost: %s", err)
  409. }
  410. for nodeName, node := range nodes {
  411. // Emit costs, guarding against NaN inputs for custom pricing.
  412. cpuCost, _ := strconv.ParseFloat(node.VCPUCost, 64)
  413. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  414. cpuCost, _ = strconv.ParseFloat(cfg.CPU, 64)
  415. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  416. cpuCost = 0
  417. }
  418. }
  419. cpu, _ := strconv.ParseFloat(node.VCPU, 64)
  420. if math.IsNaN(cpu) || math.IsInf(cpu, 0) {
  421. cpu = 1 // Assume 1 CPU
  422. }
  423. ramCost, _ := strconv.ParseFloat(node.RAMCost, 64)
  424. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  425. ramCost, _ = strconv.ParseFloat(cfg.RAM, 64)
  426. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  427. ramCost = 0
  428. }
  429. }
  430. ram, _ := strconv.ParseFloat(node.RAMBytes, 64)
  431. if math.IsNaN(ram) || math.IsInf(ram, 0) {
  432. ram = 0
  433. }
  434. gpu, _ := strconv.ParseFloat(node.GPU, 64)
  435. if math.IsNaN(gpu) || math.IsInf(gpu, 0) {
  436. gpu = 0
  437. }
  438. gpuCost, _ := strconv.ParseFloat(node.GPUCost, 64)
  439. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  440. gpuCost, _ = strconv.ParseFloat(cfg.GPU, 64)
  441. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  442. gpuCost = 0
  443. }
  444. }
  445. nodeType := node.InstanceType
  446. nodeRegion := node.Region
  447. totalCost := cpu*cpuCost + ramCost*(ram/1024/1024/1024) + gpu*gpuCost
  448. labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType)
  449. avgCosts, ok := nodeCostAverages[labelKey]
  450. // initialize average cost tracking for this node if there is none
  451. if !ok {
  452. avgCosts = NodeCostAverages{
  453. CpuCostAverage: cpuCost,
  454. RamCostAverage: ramCost,
  455. NumCpuDataPoints: 1,
  456. NumRamDataPoints: 1,
  457. }
  458. nodeCostAverages[labelKey] = avgCosts
  459. }
  460. cmme.GPUCountRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpu)
  461. cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpuCost)
  462. const outlierFactor float64 = 30
  463. // don't record cpuCost, ramCost, or gpuCost in the case of wild outliers
  464. // k8s api sometimes causes cost spikes as described here:
  465. // https://github.com/opencost/opencost/issues/927
  466. cpuOutlierCutoff := outlierFactor * avgCosts.CpuCostAverage
  467. if cpuCost < cpuOutlierCutoff {
  468. cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(cpuCost)
  469. avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
  470. avgCosts.NumCpuDataPoints += 1
  471. } else {
  472. log.Debugf("CPU cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, cpuCost, cpuOutlierCutoff)
  473. }
  474. ramOutlierCutoff := outlierFactor * avgCosts.RamCostAverage
  475. if ramCost < ramOutlierCutoff {
  476. cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(ramCost)
  477. avgCosts.RamCostAverage = (avgCosts.RamCostAverage*avgCosts.NumRamDataPoints + ramCost) / (avgCosts.NumRamDataPoints + 1)
  478. avgCosts.NumRamDataPoints += 1
  479. } else {
  480. log.Debugf("RAM cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, ramCost, ramOutlierCutoff)
  481. }
  482. // skip redording totalCost if any constituent costs were outliers
  483. if cpuCost < cpuOutlierCutoff && ramCost < ramOutlierCutoff {
  484. cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(totalCost)
  485. } else {
  486. log.Debugf("CPU and RAM outlier detected, not recording node %s total cost %f", nodeName, totalCost)
  487. }
  488. nodeCostAverages[labelKey] = avgCosts
  489. if node.IsSpot() {
  490. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(1.0)
  491. } else {
  492. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(0.0)
  493. }
  494. nodeSeen[labelKey] = true
  495. }
  496. // TODO: Pass CloudProvider into CostModel on instantiation so this isn't so awkward
  497. loadBalancers, err := cmme.Model.GetLBCost(cmme.CloudProvider)
  498. if err != nil {
  499. log.Warnf("Error getting LoadBalancer cost: %s", err)
  500. }
  501. for lbKey, lb := range loadBalancers {
  502. // TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
  503. namespace := lbKey.Namespace
  504. serviceName := lbKey.Service
  505. ingressIP := ""
  506. if len(lb.IngressIPAddresses) > 0 {
  507. ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer
  508. }
  509. cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName).Set(lb.Cost)
  510. labelKey := getKeyFromLabelStrings(ingressIP, namespace, serviceName)
  511. loadBalancerSeen[labelKey] = true
  512. }
  513. for _, costs := range data {
  514. nodeName := costs.NodeName
  515. namespace := costs.Namespace
  516. podName := costs.PodName
  517. containerName := costs.Name
  518. if costs.PVCData != nil {
  519. for _, pvc := range costs.PVCData {
  520. if pvc.Volume != nil {
  521. timesClaimed := pvc.TimesClaimed
  522. if timesClaimed == 0 {
  523. timesClaimed = 1 // unallocated PVs are unclaimed but have a full allocation
  524. }
  525. cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName).Set(pvc.Values[0].Value / float64(timesClaimed))
  526. labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName)
  527. pvcSeen[labelKey] = true
  528. }
  529. }
  530. }
  531. if len(costs.RAMAllocation) > 0 {
  532. cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.RAMAllocation[0].Value)
  533. }
  534. if len(costs.CPUAllocation) > 0 {
  535. cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.CPUAllocation[0].Value)
  536. }
  537. if len(costs.GPUReq) > 0 {
  538. // allocation here is set to the request because shared GPU usage not yet supported.
  539. // if VPGUs, request x (actual/virtual)
  540. vgpu := 0.0
  541. gpu := 0.0
  542. var err, verr error
  543. if matchedNode, found := nodes[nodeName]; found {
  544. vgpu, verr = strconv.ParseFloat(matchedNode.VGPU, 64)
  545. gpu, err = strconv.ParseFloat(matchedNode.GPU, 64)
  546. } else {
  547. log.Tracef("cost data for node %s had GPUReq, but there was no cost data available for the node", nodeName)
  548. log.Trace("defaulting GPU to 0 cost")
  549. }
  550. gpualloc := costs.GPUReq[0].Value
  551. if verr != nil && err != nil && vgpu != 0 {
  552. gpualloc = gpualloc * (gpu / vgpu)
  553. }
  554. cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(gpualloc)
  555. }
  556. labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName)
  557. if podStatus[podName] == v1.PodRunning { // Only report data for current pods
  558. containerSeen[labelKey] = true
  559. } else {
  560. containerSeen[labelKey] = false
  561. }
  562. }
  563. storageClasses := cmme.KubeClusterCache.GetAllStorageClasses()
  564. storageClassMap := make(map[string]map[string]string)
  565. for _, storageClass := range storageClasses {
  566. params := storageClass.Parameters
  567. storageClassMap[storageClass.Name] = params
  568. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  569. storageClassMap["default"] = params
  570. storageClassMap[""] = params
  571. }
  572. }
  573. pvs := cmme.KubeClusterCache.GetAllPersistentVolumes()
  574. for _, pv := range pvs {
  575. // Omit pv_hourly_cost if the volume status is failed
  576. if pv.Status.Phase == v1.VolumeFailed {
  577. continue
  578. }
  579. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  580. if !ok {
  581. log.Debugf("Unable to find parameters for storage class \"%s\". Pv \"%s\" might have an empty or invalid storageClassName.", pv.Spec.StorageClassName, pv.Name)
  582. }
  583. var region string
  584. if r, ok := util.GetRegion(pv.Labels); ok {
  585. region = r
  586. } else {
  587. region = defaultRegion
  588. }
  589. cacPv := &models.PV{
  590. Class: pv.Spec.StorageClassName,
  591. Region: region,
  592. Parameters: parameters,
  593. }
  594. // TODO: GetPVCost should be a method in CostModel?
  595. GetPVCost(cacPv, pv, cmme.CloudProvider, region)
  596. c, _ := strconv.ParseFloat(cacPv.Cost, 64)
  597. cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID).Set(c)
  598. labelKey := getKeyFromLabelStrings(pv.Name, pv.Name, cacPv.ProviderID)
  599. pvSeen[labelKey] = true
  600. }
  601. // Remove metrics on Nodes/LoadBalancers/Containers/PVs that no
  602. // longer exist
  603. for labelString, seen := range nodeSeen {
  604. if !seen {
  605. log.Debugf("Removing metrics for %s, no data observed recently", labelString)
  606. labels := getLabelStringsFromKey(labelString)
  607. ok := cmme.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
  608. if ok {
  609. log.Debugf("No data observed for node with labels %v, removed from totalprice", labels)
  610. } else {
  611. log.Warnf("Failed to remove label set %v from metric node_total_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  612. }
  613. ok = cmme.NodeSpotRecorder.DeleteLabelValues(labels...)
  614. if ok {
  615. log.Debugf("No data observed for node with labels %v, removed from spot records", labels)
  616. } else {
  617. log.Warnf("Failed to remove label set %v from metric kubecost_node_is_spot. Failure to remove stale metrics may result in inaccurate data.", labels)
  618. }
  619. ok = cmme.CPUPriceRecorder.DeleteLabelValues(labels...)
  620. if ok {
  621. log.Debugf("No data observed for node with labels %v, removed from cpuprice", labels)
  622. } else {
  623. log.Warnf("Failed to remove label set %v from metric node_cpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  624. }
  625. ok = cmme.GPUPriceRecorder.DeleteLabelValues(labels...)
  626. if ok {
  627. log.Debugf("No data observed for node with labels %v, removed from gpuprice", labels)
  628. } else {
  629. log.Warnf("Failed to remove label set %v from metric node_gpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  630. }
  631. ok = cmme.GPUCountRecorder.DeleteLabelValues(labels...)
  632. if ok {
  633. log.Debugf("No data observed for node with labels %v, removed from gpucount", labels)
  634. } else {
  635. log.Warnf("Failed to remove label set %v from metric node_gpu_count. Failure to remove stale metrics may result in inaccurate data.", labels)
  636. }
  637. ok = cmme.RAMPriceRecorder.DeleteLabelValues(labels...)
  638. if ok {
  639. log.Debugf("No data observed for node with labels %v, removed from ramprice", labels)
  640. } else {
  641. log.Warnf("Failed to remove label set %v from metric node_ram_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  642. }
  643. delete(nodeSeen, labelString)
  644. delete(nodeCostAverages, labelString)
  645. } else {
  646. nodeSeen[labelString] = false
  647. }
  648. }
  649. for labelString, seen := range loadBalancerSeen {
  650. if !seen {
  651. labels := getLabelStringsFromKey(labelString)
  652. ok := cmme.LBCostRecorder.DeleteLabelValues(labels...)
  653. if !ok {
  654. log.Warnf("Failed to remove label set %v from metric kubecost_load_balancer_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  655. }
  656. delete(loadBalancerSeen, labelString)
  657. } else {
  658. loadBalancerSeen[labelString] = false
  659. }
  660. }
  661. for labelString, seen := range containerSeen {
  662. if !seen {
  663. labels := getLabelStringsFromKey(labelString)
  664. if len(labels) >= 2 && labels[1] != unmountedPVsContainer { // special "pod" to contain the unmounted PVs - does not have RAM/CPU/...
  665. ok := cmme.RAMAllocationRecorder.DeleteLabelValues(labels...)
  666. if !ok {
  667. log.Warnf("Failed to remove label set %v from metric container_memory_allocation_bytes. Failure to remove stale metrics may result in inaccurate data.", labels)
  668. }
  669. ok = cmme.CPUAllocationRecorder.DeleteLabelValues(labels...)
  670. if !ok {
  671. log.Warnf("Failed to remove label set %v from metric container_cpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  672. }
  673. ok = cmme.GPUAllocationRecorder.DeleteLabelValues(labels...)
  674. if !ok {
  675. log.Warnf("Failed to remove label set %v from metric container_gpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  676. }
  677. } else {
  678. log.Debugf("Did not try to delete RAM/CPU/GPU for fake '%s' container: %v", unmountedPVsContainer, labels)
  679. }
  680. delete(containerSeen, labelString)
  681. } else {
  682. containerSeen[labelString] = false
  683. }
  684. }
  685. for labelString, seen := range pvSeen {
  686. if !seen {
  687. labels := getLabelStringsFromKey(labelString)
  688. ok := cmme.PersistentVolumePriceRecorder.DeleteLabelValues(labels...)
  689. if !ok {
  690. log.Warnf("Failed to remove label set %v from metric pv_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  691. }
  692. delete(pvSeen, labelString)
  693. } else {
  694. pvSeen[labelString] = false
  695. }
  696. }
  697. for labelString, seen := range pvcSeen {
  698. if !seen {
  699. labels := getLabelStringsFromKey(labelString)
  700. ok := cmme.PVAllocationRecorder.DeleteLabelValues(labels...)
  701. if !ok {
  702. log.Warnf("Failed to remove label set %v from metric pod_pvc_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  703. }
  704. delete(pvcSeen, labelString)
  705. } else {
  706. pvcSeen[labelString] = false
  707. }
  708. }
  709. select {
  710. case <-time.After(time.Minute):
  711. case <-cmme.runState.OnStop():
  712. cmme.runState.Reset()
  713. return
  714. }
  715. }
  716. }()
  717. return true
  718. }
  719. // Stop halts the metrics emission loop after the current emission is completed
  720. // or if the emission is paused.
  721. func (cmme *CostModelMetricsEmitter) Stop() {
  722. cmme.runState.Stop()
  723. }