metrics.go 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815
  1. package costmodel
  2. import (
  3. "math"
  4. "strconv"
  5. "strings"
  6. "sync"
  7. "time"
  8. "github.com/opencost/opencost/core/pkg/clustercache"
  9. "github.com/opencost/opencost/core/pkg/clusters"
  10. "github.com/opencost/opencost/core/pkg/errors"
  11. "github.com/opencost/opencost/core/pkg/log"
  12. "github.com/opencost/opencost/core/pkg/source"
  13. "github.com/opencost/opencost/core/pkg/util"
  14. "github.com/opencost/opencost/core/pkg/util/atomic"
  15. "github.com/opencost/opencost/core/pkg/util/promutil"
  16. "github.com/opencost/opencost/pkg/cloud/models"
  17. "github.com/opencost/opencost/pkg/env"
  18. "github.com/opencost/opencost/pkg/metrics"
  19. promclient "github.com/prometheus/client_golang/api"
  20. "github.com/prometheus/client_golang/prometheus"
  21. dto "github.com/prometheus/client_model/go"
  22. v1 "k8s.io/api/core/v1"
  23. )
//--------------------------------------------------------------------------
// ClusterInfoCollector
//--------------------------------------------------------------------------

// ClusterInfoCollector is a prometheus collector that generates ClusterInfoMetrics
type ClusterInfoCollector struct {
	ClusterInfo   clusters.ClusterInfoProvider // source of the cluster info key/value pairs emitted as labels
	metricsConfig metrics.MetricsConfig        // consulted to check whether kubecost_cluster_info is disabled
}
  32. // Describe sends the super-set of all possible descriptors of metrics
  33. // collected by this Collector.
  34. func (cic ClusterInfoCollector) Describe(ch chan<- *prometheus.Desc) {
  35. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  36. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  37. return
  38. }
  39. ch <- prometheus.NewDesc("kubecost_cluster_info", "Kubecost Cluster Info", []string{}, nil)
  40. }
  41. // Collect is called by the Prometheus registry when collecting metrics.
  42. func (cic ClusterInfoCollector) Collect(ch chan<- prometheus.Metric) {
  43. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  44. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  45. return
  46. }
  47. clusterInfo := cic.ClusterInfo.GetClusterInfo()
  48. labels := promutil.MapToLabels(clusterInfo)
  49. m := newClusterInfoMetric("kubecost_cluster_info", labels)
  50. ch <- m
  51. }
//--------------------------------------------------------------------------
// ClusterInfoMetric
//--------------------------------------------------------------------------

// ClusterInfoMetric is a prometheus.Metric used to encode the local cluster info
type ClusterInfoMetric struct {
	fqName string            // fully-qualified metric name (e.g. "kubecost_cluster_info")
	help   string            // help text reported via the metric descriptor
	labels map[string]string // cluster info key/value pairs emitted as label pairs
}
  61. // Creates a new ClusterInfoMetric, implementation of prometheus.Metric
  62. func newClusterInfoMetric(fqName string, labels map[string]string) ClusterInfoMetric {
  63. return ClusterInfoMetric{
  64. fqName: fqName,
  65. labels: labels,
  66. help: "kubecost_cluster_info ClusterInfo",
  67. }
  68. }
  69. // Desc returns the descriptor for the Metric. This method idempotently
  70. // returns the same descriptor throughout the lifetime of the Metric.
  71. func (cim ClusterInfoMetric) Desc() *prometheus.Desc {
  72. l := prometheus.Labels{}
  73. return prometheus.NewDesc(cim.fqName, cim.help, promutil.LabelNamesFrom(cim.labels), l)
  74. }
  75. // Write encodes the Metric into a "Metric" Protocol Buffer data
  76. // transmission object.
  77. func (cim ClusterInfoMetric) Write(m *dto.Metric) error {
  78. h := float64(1)
  79. m.Gauge = &dto.Gauge{
  80. Value: &h,
  81. }
  82. var labels []*dto.LabelPair
  83. for k, v := range cim.labels {
  84. labels = append(labels, &dto.LabelPair{
  85. Name: toStringPtr(k),
  86. Value: toStringPtr(v),
  87. })
  88. }
  89. m.Label = labels
  90. return nil
  91. }
  92. // returns a pointer to the string provided
  93. func toStringPtr(s string) *string { return &s }
//--------------------------------------------------------------------------
// Cost Model Metrics Initialization
//--------------------------------------------------------------------------

// Only allow the metrics to be instantiated and registered once
var metricsInit sync.Once

// Package-level gauges created by initCostModelMetrics and later handed to
// the CostModelMetricsEmitter. They remain nil until initCostModelMetrics
// has run. The trailing comment on each names the metric it records.
var (
	cpuGv                      *prometheus.GaugeVec // node_cpu_hourly_cost
	ramGv                      *prometheus.GaugeVec // node_ram_hourly_cost
	gpuGv                      *prometheus.GaugeVec // node_gpu_hourly_cost
	gpuCountGv                 *prometheus.GaugeVec // node_gpu_count
	pvGv                       *prometheus.GaugeVec // pv_hourly_cost
	spotGv                     *prometheus.GaugeVec // kubecost_node_is_spot
	totalGv                    *prometheus.GaugeVec // node_total_hourly_cost
	ramAllocGv                 *prometheus.GaugeVec // container_memory_allocation_bytes
	cpuAllocGv                 *prometheus.GaugeVec // container_cpu_allocation
	gpuAllocGv                 *prometheus.GaugeVec // container_gpu_allocation
	pvAllocGv                  *prometheus.GaugeVec // pod_pvc_allocation
	networkZoneEgressCostG     prometheus.Gauge     // kubecost_network_zone_egress_cost
	networkRegionEgressCostG   prometheus.Gauge     // kubecost_network_region_egress_cost
	networkInternetEgressCostG prometheus.Gauge     // kubecost_network_internet_egress_cost
	clusterManagementCostGv    *prometheus.GaugeVec // kubecost_cluster_management_cost
	lbCostGv                   *prometheus.GaugeVec // kubecost_load_balancer_cost
)
  117. // initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
  118. func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfig *metrics.MetricsConfig) {
  119. disabledMetrics := metricsConfig.GetDisabledMetricsMap()
  120. var toRegisterGV []*prometheus.GaugeVec
  121. var toRegisterGauge []prometheus.Gauge
  122. metricsInit.Do(func() {
  123. cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  124. Name: "node_cpu_hourly_cost",
  125. Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
  126. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  127. if _, disabled := disabledMetrics["node_cpu_hourly_cost"]; !disabled {
  128. toRegisterGV = append(toRegisterGV, cpuGv)
  129. }
  130. ramGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  131. Name: "node_ram_hourly_cost",
  132. Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
  133. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  134. if _, disabled := disabledMetrics["node_ram_hourly_cost"]; !disabled {
  135. toRegisterGV = append(toRegisterGV, ramGv)
  136. }
  137. gpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  138. Name: "node_gpu_hourly_cost",
  139. Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
  140. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  141. if _, disabled := disabledMetrics["node_gpu_hourly_cost"]; !disabled {
  142. toRegisterGV = append(toRegisterGV, gpuGv)
  143. }
  144. gpuCountGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  145. Name: "node_gpu_count",
  146. Help: "node_gpu_count count of gpu on this node",
  147. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  148. if _, disabled := disabledMetrics["node_gpu_count"]; !disabled {
  149. toRegisterGV = append(toRegisterGV, gpuCountGv)
  150. }
  151. pvGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  152. Name: "pv_hourly_cost",
  153. Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
  154. }, []string{"volumename", "persistentvolume", "provider_id"})
  155. if _, disabled := disabledMetrics["pv_hourly_cost"]; !disabled {
  156. toRegisterGV = append(toRegisterGV, pvGv)
  157. }
  158. spotGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  159. Name: "kubecost_node_is_spot",
  160. Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
  161. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  162. if _, disabled := disabledMetrics["kubecost_node_is_spot"]; !disabled {
  163. toRegisterGV = append(toRegisterGV, spotGv)
  164. }
  165. totalGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  166. Name: "node_total_hourly_cost",
  167. Help: "node_total_hourly_cost Total node cost per hour",
  168. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch"})
  169. if _, disabled := disabledMetrics["node_total_hourly_cost"]; !disabled {
  170. toRegisterGV = append(toRegisterGV, totalGv)
  171. }
  172. ramAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  173. Name: "container_memory_allocation_bytes",
  174. Help: "container_memory_allocation_bytes Bytes of RAM used",
  175. }, []string{"namespace", "pod", "container", "instance", "node"})
  176. if _, disabled := disabledMetrics["container_memory_allocation_bytes"]; !disabled {
  177. toRegisterGV = append(toRegisterGV, ramAllocGv)
  178. }
  179. cpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  180. Name: "container_cpu_allocation",
  181. Help: "container_cpu_allocation Percent of a single CPU used in a minute",
  182. }, []string{"namespace", "pod", "container", "instance", "node"})
  183. if _, disabled := disabledMetrics["container_cpu_allocation"]; !disabled {
  184. toRegisterGV = append(toRegisterGV, cpuAllocGv)
  185. }
  186. gpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  187. Name: "container_gpu_allocation",
  188. Help: "container_gpu_allocation GPU used",
  189. }, []string{"namespace", "pod", "container", "instance", "node"})
  190. if _, disabled := disabledMetrics["container_gpu_allocation"]; !disabled {
  191. toRegisterGV = append(toRegisterGV, gpuAllocGv)
  192. }
  193. pvAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  194. Name: "pod_pvc_allocation",
  195. Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
  196. }, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume"})
  197. if _, disabled := disabledMetrics["pod_pvc_allocation"]; !disabled {
  198. toRegisterGV = append(toRegisterGV, pvAllocGv)
  199. }
  200. networkZoneEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  201. Name: "kubecost_network_zone_egress_cost",
  202. Help: "kubecost_network_zone_egress_cost Total cost per GB egress across zones",
  203. })
  204. if _, disabled := disabledMetrics["kubecost_network_zone_egress_cost"]; !disabled {
  205. toRegisterGauge = append(toRegisterGauge, networkZoneEgressCostG)
  206. }
  207. networkRegionEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  208. Name: "kubecost_network_region_egress_cost",
  209. Help: "kubecost_network_region_egress_cost Total cost per GB egress across regions",
  210. })
  211. if _, disabled := disabledMetrics["kubecost_network_region_egress_cost"]; !disabled {
  212. toRegisterGauge = append(toRegisterGauge, networkRegionEgressCostG)
  213. }
  214. networkInternetEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  215. Name: "kubecost_network_internet_egress_cost",
  216. Help: "kubecost_network_internet_egress_cost Total cost per GB of internet egress.",
  217. })
  218. if _, disabled := disabledMetrics["kubecost_network_internet_egress_cost"]; !disabled {
  219. toRegisterGauge = append(toRegisterGauge, networkInternetEgressCostG)
  220. }
  221. clusterManagementCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  222. Name: "kubecost_cluster_management_cost",
  223. Help: "kubecost_cluster_management_cost Hourly cost paid as a cluster management fee.",
  224. }, []string{"provisioner_name"})
  225. if _, disabled := disabledMetrics["kubecost_cluster_management_cost"]; !disabled {
  226. toRegisterGV = append(toRegisterGV, clusterManagementCostGv)
  227. }
  228. lbCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
  229. Name: "kubecost_load_balancer_cost",
  230. Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
  231. }, []string{"ingress_ip", "namespace", "service_name"}) // assumes one ingress IP per load balancer
  232. if _, disabled := disabledMetrics["kubecost_load_balancer_cost"]; !disabled {
  233. toRegisterGV = append(toRegisterGV, lbCostGv)
  234. }
  235. // Register cost-model metrics for emission
  236. for _, gv := range toRegisterGV {
  237. prometheus.MustRegister(gv)
  238. }
  239. for _, g := range toRegisterGauge {
  240. prometheus.MustRegister(g)
  241. }
  242. // General Metric Collectors
  243. prometheus.MustRegister(ClusterInfoCollector{
  244. ClusterInfo: clusterInfo,
  245. metricsConfig: *metricsConfig,
  246. })
  247. })
  248. }
//--------------------------------------------------------------------------
// CostModelMetricsEmitter
//--------------------------------------------------------------------------

// CostModelMetricsEmitter emits all cost-model specific metrics calculated by
// the CostModel.ComputeCostData() method.
type CostModelMetricsEmitter struct {
	// PrometheusClient is a prometheus API client.
	// NOTE(review): not populated by NewCostModelMetricsEmitter — confirm
	// whether callers are expected to set it directly.
	PrometheusClient promclient.Client
	// KubeClusterCache provides cached cluster state (pods, nodes, PVs,
	// storage classes) read during each emission cycle.
	KubeClusterCache clustercache.ClusterCache
	// CloudProvider supplies pricing data (network egress, cluster
	// management fee, PV cost, provider config).
	CloudProvider models.Provider
	// Model computes per-container cost data and node/LB costs.
	Model *CostModel

	// Metrics — these are the package-level gauges created by
	// initCostModelMetrics; see that function for metric names and labels.
	CPUPriceRecorder              *prometheus.GaugeVec
	RAMPriceRecorder              *prometheus.GaugeVec
	PersistentVolumePriceRecorder *prometheus.GaugeVec
	GPUPriceRecorder              *prometheus.GaugeVec
	GPUCountRecorder              *prometheus.GaugeVec
	PVAllocationRecorder          *prometheus.GaugeVec
	NodeSpotRecorder              *prometheus.GaugeVec
	NodeTotalPriceRecorder        *prometheus.GaugeVec
	RAMAllocationRecorder         *prometheus.GaugeVec
	CPUAllocationRecorder         *prometheus.GaugeVec
	GPUAllocationRecorder         *prometheus.GaugeVec
	ClusterManagementCostRecorder *prometheus.GaugeVec
	LBCostRecorder                *prometheus.GaugeVec
	NetworkZoneEgressRecorder     prometheus.Gauge
	NetworkRegionEgressRecorder   prometheus.Gauge
	NetworkInternetEgressRecorder prometheus.Gauge

	// Concurrent Flow Control - Manages the run state of the metric emitter
	runState atomic.AtomicRunState
}
  279. // NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
  280. func NewCostModelMetricsEmitter(clusterCache clustercache.ClusterCache, provider models.Provider, clusterInfo clusters.ClusterInfoProvider, model *CostModel) *CostModelMetricsEmitter {
  281. // Get metric configurations, if any
  282. metricsConfig, err := metrics.GetMetricsConfig()
  283. if err != nil {
  284. log.Infof("Failed to get metrics config before init: %s", err)
  285. }
  286. if len(metricsConfig.DisabledMetrics) > 0 {
  287. log.Infof("Starting metrics init with disabled metrics: %v", metricsConfig.DisabledMetrics)
  288. }
  289. // init will only actually execute once to register the custom gauges
  290. initCostModelMetrics(clusterInfo, metricsConfig)
  291. metrics.InitKubeMetrics(clusterCache, metricsConfig, &metrics.KubeMetricsOpts{
  292. EmitKubecostControllerMetrics: true,
  293. EmitNamespaceAnnotations: env.IsEmitNamespaceAnnotationsMetric(),
  294. EmitPodAnnotations: env.IsEmitPodAnnotationsMetric(),
  295. EmitKubeStateMetrics: env.IsEmitKsmV1Metrics(),
  296. EmitKubeStateMetricsV1Only: env.IsEmitKsmV1MetricsOnly(),
  297. EmitDeprecatedMetrics: env.IsEmitDeprecatedMetrics(),
  298. })
  299. metrics.InitOpencostTelemetry(metricsConfig)
  300. return &CostModelMetricsEmitter{
  301. KubeClusterCache: clusterCache,
  302. CloudProvider: provider,
  303. Model: model,
  304. CPUPriceRecorder: cpuGv,
  305. RAMPriceRecorder: ramGv,
  306. GPUPriceRecorder: gpuGv,
  307. GPUCountRecorder: gpuCountGv,
  308. PersistentVolumePriceRecorder: pvGv,
  309. NodeSpotRecorder: spotGv,
  310. NodeTotalPriceRecorder: totalGv,
  311. RAMAllocationRecorder: ramAllocGv,
  312. CPUAllocationRecorder: cpuAllocGv,
  313. GPUAllocationRecorder: gpuAllocGv,
  314. PVAllocationRecorder: pvAllocGv,
  315. NetworkZoneEgressRecorder: networkZoneEgressCostG,
  316. NetworkRegionEgressRecorder: networkRegionEgressCostG,
  317. NetworkInternetEgressRecorder: networkInternetEgressCostG,
  318. ClusterManagementCostRecorder: clusterManagementCostGv,
  319. LBCostRecorder: lbCostGv,
  320. }
  321. }
// IsRunning returns true if metric recording is running.
// It simply delegates to the emitter's atomic run state.
func (cmme *CostModelMetricsEmitter) IsRunning() bool {
	return cmme.runState.IsRunning()
}
// NodeCostAverages tracks a running average of a node's cost attributes.
// The averages are used to detect and discard spurious outliers.
type NodeCostAverages struct {
	CpuCostAverage   float64 // running average hourly cost per CPU for the node
	RamCostAverage   float64 // running average hourly RAM cost for the node
	NumCpuDataPoints float64 // number of samples folded into CpuCostAverage
	NumRamDataPoints float64 // number of samples folded into RamCostAverage
}
  334. // StartCostModelMetricRecording starts the go routine that emits metrics used to determine
  335. // cluster costs.
  336. func (cmme *CostModelMetricsEmitter) Start() bool {
  337. // wait for a reset to prevent a race between start and stop calls
  338. cmme.runState.WaitForReset()
  339. // Check to see if we're already recording, and atomically advance the run state to start if we're not
  340. if !cmme.runState.Start() {
  341. log.Errorf("Attempted to start cost model metric recording when it's already running.")
  342. return false
  343. }
  344. go func() {
  345. defer errors.HandlePanic()
  346. containerSeen := make(map[string]bool)
  347. nodeSeen := make(map[string]bool)
  348. loadBalancerSeen := make(map[string]bool)
  349. pvSeen := make(map[string]bool)
  350. pvcSeen := make(map[string]bool)
  351. nodeCostAverages := make(map[string]NodeCostAverages)
  352. getKeyFromLabelStrings := func(labels ...string) string {
  353. return strings.Join(labels, ",")
  354. }
  355. getLabelStringsFromKey := func(key string) []string {
  356. return strings.Split(key, ",")
  357. }
  358. var defaultRegion string = ""
  359. nodeList := cmme.KubeClusterCache.GetAllNodes()
  360. if len(nodeList) > 0 {
  361. var ok bool
  362. defaultRegion, ok = util.GetRegion(nodeList[0].Labels)
  363. if !ok {
  364. log.DedupedWarningf(5, "Failed to read default region from labels on node %s", nodeList[0].Name)
  365. }
  366. }
  367. for {
  368. log.Debugf("Recording prices...")
  369. podlist := cmme.KubeClusterCache.GetAllPods()
  370. podStatus := make(map[string]v1.PodPhase)
  371. for _, pod := range podlist {
  372. podStatus[pod.Name] = pod.Status.Phase
  373. }
  374. cfg, _ := cmme.CloudProvider.GetConfig()
  375. provisioner, clusterManagementCost, err := cmme.CloudProvider.ClusterManagementPricing()
  376. if err != nil {
  377. log.Errorf("Error getting cluster management cost %s", err.Error())
  378. }
  379. cmme.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
  380. // Record network pricing at global scope
  381. networkCosts, err := cmme.CloudProvider.NetworkPricing()
  382. if err != nil {
  383. log.Debugf("Failed to retrieve network costs: %s", err.Error())
  384. } else {
  385. cmme.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
  386. cmme.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
  387. cmme.NetworkInternetEgressRecorder.Set(networkCosts.InternetNetworkEgressCost)
  388. }
  389. end := time.Now()
  390. start := end.Add(-time.Minute * 2)
  391. data, err := cmme.Model.ComputeCostData(start, end)
  392. if err != nil {
  393. // For an error collection, we'll just log the length of the errors (ComputeCostData already logs the
  394. // actual errors)
  395. if source.IsErrorCollection(err) {
  396. if ec, ok := err.(source.QueryErrorCollection); ok {
  397. log.Errorf("Error in price recording: %d errors occurred", len(ec.Errors()))
  398. }
  399. } else {
  400. log.Errorf("Error in price recording: %s", err)
  401. }
  402. // zero the for loop so the time.Sleep will still work
  403. data = map[string]*CostData{}
  404. }
  405. nodes, err := cmme.Model.GetNodeCost()
  406. if err != nil {
  407. log.Warnf("Error getting Node cost: %s", err)
  408. }
  409. for nodeName, node := range nodes {
  410. // Emit costs, guarding against NaN inputs for custom pricing.
  411. cpuCost, _ := strconv.ParseFloat(node.VCPUCost, 64)
  412. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  413. cpuCost, _ = strconv.ParseFloat(cfg.CPU, 64)
  414. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  415. cpuCost = 0
  416. }
  417. }
  418. cpu, _ := strconv.ParseFloat(node.VCPU, 64)
  419. if math.IsNaN(cpu) || math.IsInf(cpu, 0) {
  420. cpu = 1 // Assume 1 CPU
  421. }
  422. ramCost, _ := strconv.ParseFloat(node.RAMCost, 64)
  423. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  424. ramCost, _ = strconv.ParseFloat(cfg.RAM, 64)
  425. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  426. ramCost = 0
  427. }
  428. }
  429. ram, _ := strconv.ParseFloat(node.RAMBytes, 64)
  430. if math.IsNaN(ram) || math.IsInf(ram, 0) {
  431. ram = 0
  432. }
  433. gpu, _ := strconv.ParseFloat(node.GPU, 64)
  434. if math.IsNaN(gpu) || math.IsInf(gpu, 0) {
  435. gpu = 0
  436. }
  437. gpuCost, _ := strconv.ParseFloat(node.GPUCost, 64)
  438. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  439. gpuCost, _ = strconv.ParseFloat(cfg.GPU, 64)
  440. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  441. gpuCost = 0
  442. }
  443. }
  444. nodeType := node.InstanceType
  445. nodeRegion := node.Region
  446. totalCost := cpu*cpuCost + ramCost*(ram/1024/1024/1024) + gpu*gpuCost
  447. labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType)
  448. avgCosts, ok := nodeCostAverages[labelKey]
  449. // initialize average cost tracking for this node if there is none
  450. if !ok {
  451. avgCosts = NodeCostAverages{
  452. CpuCostAverage: cpuCost,
  453. RamCostAverage: ramCost,
  454. NumCpuDataPoints: 1,
  455. NumRamDataPoints: 1,
  456. }
  457. nodeCostAverages[labelKey] = avgCosts
  458. }
  459. cmme.GPUCountRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpu)
  460. cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(gpuCost)
  461. const outlierFactor float64 = 30
  462. // don't record cpuCost, ramCost, or gpuCost in the case of wild outliers
  463. // k8s api sometimes causes cost spikes as described here:
  464. // https://github.com/opencost/opencost/issues/927
  465. cpuOutlierCutoff := outlierFactor * avgCosts.CpuCostAverage
  466. if cpuCost < cpuOutlierCutoff {
  467. cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(cpuCost)
  468. avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
  469. avgCosts.NumCpuDataPoints += 1
  470. } else {
  471. log.Debugf("CPU cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, cpuCost, cpuOutlierCutoff)
  472. }
  473. ramOutlierCutoff := outlierFactor * avgCosts.RamCostAverage
  474. if ramCost < ramOutlierCutoff {
  475. cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(ramCost)
  476. avgCosts.RamCostAverage = (avgCosts.RamCostAverage*avgCosts.NumRamDataPoints + ramCost) / (avgCosts.NumRamDataPoints + 1)
  477. avgCosts.NumRamDataPoints += 1
  478. } else {
  479. log.Debugf("RAM cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, ramCost, ramOutlierCutoff)
  480. }
  481. // skip redording totalCost if any constituent costs were outliers
  482. if cpuCost < cpuOutlierCutoff && ramCost < ramOutlierCutoff {
  483. cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(totalCost)
  484. } else {
  485. log.Debugf("CPU and RAM outlier detected, not recording node %s total cost %f", nodeName, totalCost)
  486. }
  487. nodeCostAverages[labelKey] = avgCosts
  488. if node.IsSpot() {
  489. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(1.0)
  490. } else {
  491. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType).Set(0.0)
  492. }
  493. nodeSeen[labelKey] = true
  494. }
  495. loadBalancers, err := cmme.Model.GetLBCost()
  496. if err != nil {
  497. log.Warnf("Error getting LoadBalancer cost: %s", err)
  498. }
  499. for lbKey, lb := range loadBalancers {
  500. // TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
  501. namespace := lbKey.Namespace
  502. serviceName := lbKey.Service
  503. ingressIP := ""
  504. if len(lb.IngressIPAddresses) > 0 {
  505. ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer
  506. }
  507. cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName).Set(lb.Cost)
  508. labelKey := getKeyFromLabelStrings(ingressIP, namespace, serviceName)
  509. loadBalancerSeen[labelKey] = true
  510. }
  511. for _, costs := range data {
  512. nodeName := costs.NodeName
  513. namespace := costs.Namespace
  514. podName := costs.PodName
  515. containerName := costs.Name
  516. if costs.PVCData != nil {
  517. for _, pvc := range costs.PVCData {
  518. if pvc.Volume != nil {
  519. timesClaimed := pvc.TimesClaimed
  520. if timesClaimed == 0 {
  521. timesClaimed = 1 // unallocated PVs are unclaimed but have a full allocation
  522. }
  523. cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName).Set(pvc.Values[0].Value / float64(timesClaimed))
  524. labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName)
  525. pvcSeen[labelKey] = true
  526. }
  527. }
  528. }
  529. if len(costs.RAMAllocation) > 0 {
  530. cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.RAMAllocation[0].Value)
  531. }
  532. if len(costs.CPUAllocation) > 0 {
  533. cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(costs.CPUAllocation[0].Value)
  534. }
  535. if len(costs.GPUReq) > 0 {
  536. // allocation here is set to the request because shared GPU usage not yet supported.
  537. // if VPGUs, request x (actual/virtual)
  538. vgpu := 0.0
  539. gpu := 0.0
  540. var err, verr error
  541. if matchedNode, found := nodes[nodeName]; found {
  542. vgpu, verr = strconv.ParseFloat(matchedNode.VGPU, 64)
  543. gpu, err = strconv.ParseFloat(matchedNode.GPU, 64)
  544. } else {
  545. log.Tracef("cost data for node %s had GPUReq, but there was no cost data available for the node", nodeName)
  546. log.Trace("defaulting GPU to 0 cost")
  547. }
  548. gpualloc := costs.GPUReq[0].Value
  549. if verr != nil && err != nil && vgpu != 0 {
  550. gpualloc = gpualloc * (gpu / vgpu)
  551. }
  552. cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName).Set(gpualloc)
  553. }
  554. labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName)
  555. if podStatus[podName] == v1.PodRunning { // Only report data for current pods
  556. containerSeen[labelKey] = true
  557. } else {
  558. containerSeen[labelKey] = false
  559. }
  560. }
  561. storageClasses := cmme.KubeClusterCache.GetAllStorageClasses()
  562. storageClassMap := make(map[string]map[string]string)
  563. for _, storageClass := range storageClasses {
  564. params := storageClass.Parameters
  565. storageClassMap[storageClass.Name] = params
  566. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  567. storageClassMap["default"] = params
  568. storageClassMap[""] = params
  569. }
  570. }
  571. pvs := cmme.KubeClusterCache.GetAllPersistentVolumes()
  572. for _, pv := range pvs {
  573. // Omit pv_hourly_cost if the volume status is failed
  574. if pv.Status.Phase == v1.VolumeFailed {
  575. continue
  576. }
  577. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  578. if !ok {
  579. log.Debugf("Unable to find parameters for storage class \"%s\". Pv \"%s\" might have an empty or invalid storageClassName.", pv.Spec.StorageClassName, pv.Name)
  580. }
  581. var region string
  582. if r, ok := util.GetRegion(pv.Labels); ok {
  583. region = r
  584. } else {
  585. region = defaultRegion
  586. }
  587. cacPv := &models.PV{
  588. Class: pv.Spec.StorageClassName,
  589. Region: region,
  590. Parameters: parameters,
  591. }
  592. cmme.Model.GetPVCost(cacPv, pv, region)
  593. c, _ := strconv.ParseFloat(cacPv.Cost, 64)
  594. cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID).Set(c)
  595. labelKey := getKeyFromLabelStrings(pv.Name, pv.Name, cacPv.ProviderID)
  596. pvSeen[labelKey] = true
  597. }
  598. // Remove metrics on Nodes/LoadBalancers/Containers/PVs that no
  599. // longer exist
  600. for labelString, seen := range nodeSeen {
  601. if !seen {
  602. log.Debugf("Removing metrics for %s, no data observed recently", labelString)
  603. labels := getLabelStringsFromKey(labelString)
  604. ok := cmme.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
  605. if ok {
  606. log.Debugf("No data observed for node with labels %v, removed from totalprice", labels)
  607. } else {
  608. log.Warnf("Failed to remove label set %v from metric node_total_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  609. }
  610. ok = cmme.NodeSpotRecorder.DeleteLabelValues(labels...)
  611. if ok {
  612. log.Debugf("No data observed for node with labels %v, removed from spot records", labels)
  613. } else {
  614. log.Warnf("Failed to remove label set %v from metric kubecost_node_is_spot. Failure to remove stale metrics may result in inaccurate data.", labels)
  615. }
  616. ok = cmme.CPUPriceRecorder.DeleteLabelValues(labels...)
  617. if ok {
  618. log.Debugf("No data observed for node with labels %v, removed from cpuprice", labels)
  619. } else {
  620. log.Warnf("Failed to remove label set %v from metric node_cpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  621. }
  622. ok = cmme.GPUPriceRecorder.DeleteLabelValues(labels...)
  623. if ok {
  624. log.Debugf("No data observed for node with labels %v, removed from gpuprice", labels)
  625. } else {
  626. log.Warnf("Failed to remove label set %v from metric node_gpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  627. }
  628. ok = cmme.GPUCountRecorder.DeleteLabelValues(labels...)
  629. if ok {
  630. log.Debugf("No data observed for node with labels %v, removed from gpucount", labels)
  631. } else {
  632. log.Warnf("Failed to remove label set %v from metric node_gpu_count. Failure to remove stale metrics may result in inaccurate data.", labels)
  633. }
  634. ok = cmme.RAMPriceRecorder.DeleteLabelValues(labels...)
  635. if ok {
  636. log.Debugf("No data observed for node with labels %v, removed from ramprice", labels)
  637. } else {
  638. log.Warnf("Failed to remove label set %v from metric node_ram_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  639. }
  640. delete(nodeSeen, labelString)
  641. delete(nodeCostAverages, labelString)
  642. } else {
  643. nodeSeen[labelString] = false
  644. }
  645. }
  646. for labelString, seen := range loadBalancerSeen {
  647. if !seen {
  648. labels := getLabelStringsFromKey(labelString)
  649. ok := cmme.LBCostRecorder.DeleteLabelValues(labels...)
  650. if !ok {
  651. log.Warnf("Failed to remove label set %v from metric kubecost_load_balancer_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  652. }
  653. delete(loadBalancerSeen, labelString)
  654. } else {
  655. loadBalancerSeen[labelString] = false
  656. }
  657. }
  658. for labelString, seen := range containerSeen {
  659. if !seen {
  660. labels := getLabelStringsFromKey(labelString)
  661. if len(labels) >= 2 && labels[1] != unmountedPVsContainer { // special "pod" to contain the unmounted PVs - does not have RAM/CPU/...
  662. ok := cmme.RAMAllocationRecorder.DeleteLabelValues(labels...)
  663. if !ok {
  664. log.Warnf("Failed to remove label set %v from metric container_memory_allocation_bytes. Failure to remove stale metrics may result in inaccurate data.", labels)
  665. }
  666. ok = cmme.CPUAllocationRecorder.DeleteLabelValues(labels...)
  667. if !ok {
  668. log.Warnf("Failed to remove label set %v from metric container_cpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  669. }
  670. ok = cmme.GPUAllocationRecorder.DeleteLabelValues(labels...)
  671. if !ok {
  672. log.Warnf("Failed to remove label set %v from metric container_gpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  673. }
  674. } else {
  675. log.Debugf("Did not try to delete RAM/CPU/GPU for fake '%s' container: %v", unmountedPVsContainer, labels)
  676. }
  677. delete(containerSeen, labelString)
  678. } else {
  679. containerSeen[labelString] = false
  680. }
  681. }
  682. for labelString, seen := range pvSeen {
  683. if !seen {
  684. labels := getLabelStringsFromKey(labelString)
  685. ok := cmme.PersistentVolumePriceRecorder.DeleteLabelValues(labels...)
  686. if !ok {
  687. log.Warnf("Failed to remove label set %v from metric pv_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  688. }
  689. delete(pvSeen, labelString)
  690. } else {
  691. pvSeen[labelString] = false
  692. }
  693. }
  694. for labelString, seen := range pvcSeen {
  695. if !seen {
  696. labels := getLabelStringsFromKey(labelString)
  697. ok := cmme.PVAllocationRecorder.DeleteLabelValues(labels...)
  698. if !ok {
  699. log.Warnf("Failed to remove label set %v from metric pod_pvc_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  700. }
  701. delete(pvcSeen, labelString)
  702. } else {
  703. pvcSeen[labelString] = false
  704. }
  705. }
  706. select {
  707. case <-time.After(time.Minute):
  708. case <-cmme.runState.OnStop():
  709. cmme.runState.Reset()
  710. return
  711. }
  712. }
  713. }()
  714. return true
  715. }
// Stop halts the metrics emission loop after the current emission is completed
// or if the emission is paused. It only signals the run state: the emission
// goroutine observes the stop signal in its per-iteration select, calls
// runState.Reset(), and returns, so an in-progress emission pass always runs
// to completion before the loop exits.
func (cmme *CostModelMetricsEmitter) Stop() {
	cmme.runState.Stop()
}