metrics.go 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851
  1. package costmodel
  2. import (
  3. "math"
  4. "strconv"
  5. "strings"
  6. "sync"
  7. "time"
  8. "github.com/opencost/opencost/core/pkg/clustercache"
  9. "github.com/opencost/opencost/core/pkg/clusters"
  10. "github.com/opencost/opencost/core/pkg/errors"
  11. "github.com/opencost/opencost/core/pkg/log"
  12. "github.com/opencost/opencost/core/pkg/source"
  13. "github.com/opencost/opencost/core/pkg/util"
  14. "github.com/opencost/opencost/core/pkg/util/atomic"
  15. "github.com/opencost/opencost/core/pkg/util/promutil"
  16. "github.com/opencost/opencost/pkg/cloud/models"
  17. "github.com/opencost/opencost/pkg/env"
  18. "github.com/opencost/opencost/pkg/metrics"
  19. promclient "github.com/prometheus/client_golang/api"
  20. "github.com/prometheus/client_golang/prometheus"
  21. dto "github.com/prometheus/client_model/go"
  22. v1 "k8s.io/api/core/v1"
  23. )
//--------------------------------------------------------------------------
// ClusterInfoCollector
//--------------------------------------------------------------------------

// ClusterInfoCollector is a prometheus collector that generates ClusterInfoMetrics
type ClusterInfoCollector struct {
	// ClusterInfo supplies the cluster info map that is emitted as the
	// labels of the kubecost_cluster_info metric.
	ClusterInfo clusters.ClusterInfoProvider
	// metricsConfig is consulted on every Describe/Collect call to decide
	// whether kubecost_cluster_info has been disabled.
	metricsConfig metrics.MetricsConfig
}
  32. // Describe sends the super-set of all possible descriptors of metrics
  33. // collected by this Collector.
  34. func (cic ClusterInfoCollector) Describe(ch chan<- *prometheus.Desc) {
  35. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  36. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  37. return
  38. }
  39. ch <- prometheus.NewDesc("kubecost_cluster_info", "Kubecost Cluster Info", []string{}, nil)
  40. }
  41. // Collect is called by the Prometheus registry when collecting metrics.
  42. func (cic ClusterInfoCollector) Collect(ch chan<- prometheus.Metric) {
  43. disabledMetrics := cic.metricsConfig.GetDisabledMetricsMap()
  44. if _, disabled := disabledMetrics["kubecost_cluster_info"]; disabled {
  45. return
  46. }
  47. clusterInfo := cic.ClusterInfo.GetClusterInfo()
  48. labels := promutil.MapToLabels(clusterInfo)
  49. m := newClusterInfoMetric("kubecost_cluster_info", labels)
  50. ch <- m
  51. }
//--------------------------------------------------------------------------
// ClusterInfoMetric
//--------------------------------------------------------------------------

// ClusterInfoMetric is a prometheus.Metric used to encode the local cluster info
type ClusterInfoMetric struct {
	fqName string            // fully qualified metric name
	help   string            // help text reported via the metric descriptor
	labels map[string]string // cluster info encoded as label name -> value
}
  61. // Creates a new ClusterInfoMetric, implementation of prometheus.Metric
  62. func newClusterInfoMetric(fqName string, labels map[string]string) ClusterInfoMetric {
  63. return ClusterInfoMetric{
  64. fqName: fqName,
  65. labels: labels,
  66. help: "kubecost_cluster_info ClusterInfo",
  67. }
  68. }
  69. // Desc returns the descriptor for the Metric. This method idempotently
  70. // returns the same descriptor throughout the lifetime of the Metric.
  71. func (cim ClusterInfoMetric) Desc() *prometheus.Desc {
  72. l := prometheus.Labels{}
  73. return prometheus.NewDesc(cim.fqName, cim.help, promutil.LabelNamesFrom(cim.labels), l)
  74. }
  75. // Write encodes the Metric into a "Metric" Protocol Buffer data
  76. // transmission object.
  77. func (cim ClusterInfoMetric) Write(m *dto.Metric) error {
  78. h := float64(1)
  79. m.Gauge = &dto.Gauge{
  80. Value: &h,
  81. }
  82. var labels []*dto.LabelPair
  83. for k, v := range cim.labels {
  84. labels = append(labels, &dto.LabelPair{
  85. Name: toStringPtr(k),
  86. Value: toStringPtr(v),
  87. })
  88. }
  89. m.Label = labels
  90. return nil
  91. }
  92. // returns a pointer to the string provided
  93. func toStringPtr(s string) *string { return &s }
//--------------------------------------------------------------------------
// Cost Model Metrics Initialization
//--------------------------------------------------------------------------

// Only allow the metrics to be instantiated and registered once
var metricsInit sync.Once

// Package-level gauges created by initCostModelMetrics and later wired into
// CostModelMetricsEmitter. They live at package scope because registration
// with the default Prometheus registry may only happen once per process.
var (
	// Per-node hourly price gauges (labeled by instance/node/type/region/provider/arch/uid).
	cpuGv      *prometheus.GaugeVec
	ramGv      *prometheus.GaugeVec
	gpuGv      *prometheus.GaugeVec
	gpuCountGv *prometheus.GaugeVec
	// Per-persistent-volume hourly cost.
	pvGv *prometheus.GaugeVec
	// Spot/preemptible flag per node.
	spotGv *prometheus.GaugeVec
	// Total hourly cost per node.
	totalGv *prometheus.GaugeVec
	// Per-container resource allocation gauges.
	ramAllocGv *prometheus.GaugeVec
	cpuAllocGv *prometheus.GaugeVec
	gpuAllocGv *prometheus.GaugeVec
	// Per-pod PVC allocation.
	pvAllocGv *prometheus.GaugeVec
	// Cluster-wide network egress prices (no labels).
	networkZoneEgressCostG     prometheus.Gauge
	networkRegionEgressCostG   prometheus.Gauge
	networkInternetEgressCostG prometheus.Gauge
	// Hourly cluster management fee, labeled by provisioner.
	clusterManagementCostGv *prometheus.GaugeVec
	// Hourly load balancer cost, labeled by ingress IP/namespace/service/uid.
	lbCostGv *prometheus.GaugeVec
)
  117. // initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
  118. func initCostModelMetrics(clusterInfo clusters.ClusterInfoProvider, metricsConfig *metrics.MetricsConfig) {
  119. disabledMetrics := metricsConfig.GetDisabledMetricsMap()
  120. var toRegisterGV []*prometheus.GaugeVec
  121. var toRegisterGauge []prometheus.Gauge
  122. metricsInit.Do(func() {
  123. cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  124. Name: "node_cpu_hourly_cost",
  125. Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
  126. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  127. if _, disabled := disabledMetrics["node_cpu_hourly_cost"]; !disabled {
  128. toRegisterGV = append(toRegisterGV, cpuGv)
  129. }
  130. ramGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  131. Name: "node_ram_hourly_cost",
  132. Help: "node_ram_hourly_cost hourly cost for each gb of ram on this node",
  133. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  134. if _, disabled := disabledMetrics["node_ram_hourly_cost"]; !disabled {
  135. toRegisterGV = append(toRegisterGV, ramGv)
  136. }
  137. gpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  138. Name: "node_gpu_hourly_cost",
  139. Help: "node_gpu_hourly_cost hourly cost for each gpu on this node",
  140. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  141. if _, disabled := disabledMetrics["node_gpu_hourly_cost"]; !disabled {
  142. toRegisterGV = append(toRegisterGV, gpuGv)
  143. }
  144. gpuCountGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  145. Name: "node_gpu_count",
  146. Help: "node_gpu_count count of gpu on this node",
  147. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  148. if _, disabled := disabledMetrics["node_gpu_count"]; !disabled {
  149. toRegisterGV = append(toRegisterGV, gpuCountGv)
  150. }
  151. pvGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  152. Name: "pv_hourly_cost",
  153. Help: "pv_hourly_cost Cost per GB per hour on a persistent disk",
  154. }, []string{"volumename", "persistentvolume", "provider_id", "uid"})
  155. if _, disabled := disabledMetrics["pv_hourly_cost"]; !disabled {
  156. toRegisterGV = append(toRegisterGV, pvGv)
  157. }
  158. spotGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  159. Name: "kubecost_node_is_spot",
  160. Help: "kubecost_node_is_spot Cloud provider info about node preemptibility",
  161. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  162. if _, disabled := disabledMetrics["kubecost_node_is_spot"]; !disabled {
  163. toRegisterGV = append(toRegisterGV, spotGv)
  164. }
  165. totalGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  166. Name: "node_total_hourly_cost",
  167. Help: "node_total_hourly_cost Total node cost per hour",
  168. }, []string{"instance", "node", "instance_type", "region", "provider_id", "arch", "uid"})
  169. if _, disabled := disabledMetrics["node_total_hourly_cost"]; !disabled {
  170. toRegisterGV = append(toRegisterGV, totalGv)
  171. }
  172. ramAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  173. Name: "container_memory_allocation_bytes",
  174. Help: "container_memory_allocation_bytes Bytes of RAM used",
  175. }, []string{"namespace", "pod", "container", "instance", "node", "uid"})
  176. if _, disabled := disabledMetrics["container_memory_allocation_bytes"]; !disabled {
  177. toRegisterGV = append(toRegisterGV, ramAllocGv)
  178. }
  179. cpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  180. Name: "container_cpu_allocation",
  181. Help: "container_cpu_allocation Percent of a single CPU used in a minute",
  182. }, []string{"namespace", "pod", "container", "instance", "node", "uid"})
  183. if _, disabled := disabledMetrics["container_cpu_allocation"]; !disabled {
  184. toRegisterGV = append(toRegisterGV, cpuAllocGv)
  185. }
  186. gpuAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  187. Name: "container_gpu_allocation",
  188. Help: "container_gpu_allocation GPU used",
  189. }, []string{"namespace", "pod", "container", "instance", "node", "uid"})
  190. if _, disabled := disabledMetrics["container_gpu_allocation"]; !disabled {
  191. toRegisterGV = append(toRegisterGV, gpuAllocGv)
  192. }
  193. pvAllocGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  194. Name: "pod_pvc_allocation",
  195. Help: "pod_pvc_allocation Bytes used by a PVC attached to a pod",
  196. }, []string{"namespace", "pod", "persistentvolumeclaim", "persistentvolume", "uid"})
  197. if _, disabled := disabledMetrics["pod_pvc_allocation"]; !disabled {
  198. toRegisterGV = append(toRegisterGV, pvAllocGv)
  199. }
  200. networkZoneEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  201. Name: "kubecost_network_zone_egress_cost",
  202. Help: "kubecost_network_zone_egress_cost Total cost per GB egress across zones",
  203. })
  204. if _, disabled := disabledMetrics["kubecost_network_zone_egress_cost"]; !disabled {
  205. toRegisterGauge = append(toRegisterGauge, networkZoneEgressCostG)
  206. }
  207. networkRegionEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  208. Name: "kubecost_network_region_egress_cost",
  209. Help: "kubecost_network_region_egress_cost Total cost per GB egress across regions",
  210. })
  211. if _, disabled := disabledMetrics["kubecost_network_region_egress_cost"]; !disabled {
  212. toRegisterGauge = append(toRegisterGauge, networkRegionEgressCostG)
  213. }
  214. networkInternetEgressCostG = prometheus.NewGauge(prometheus.GaugeOpts{
  215. Name: "kubecost_network_internet_egress_cost",
  216. Help: "kubecost_network_internet_egress_cost Total cost per GB of internet egress.",
  217. })
  218. if _, disabled := disabledMetrics["kubecost_network_internet_egress_cost"]; !disabled {
  219. toRegisterGauge = append(toRegisterGauge, networkInternetEgressCostG)
  220. }
  221. clusterManagementCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
  222. Name: "kubecost_cluster_management_cost",
  223. Help: "kubecost_cluster_management_cost Hourly cost paid as a cluster management fee.",
  224. }, []string{"provisioner_name"})
  225. if _, disabled := disabledMetrics["kubecost_cluster_management_cost"]; !disabled {
  226. toRegisterGV = append(toRegisterGV, clusterManagementCostGv)
  227. }
  228. lbCostGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // no differentiation between ELB and ALB right now
  229. Name: "kubecost_load_balancer_cost",
  230. Help: "kubecost_load_balancer_cost Hourly cost of load balancer",
  231. }, []string{"ingress_ip", "namespace", "service_name", "uid"}) // assumes one ingress IP per load balancer
  232. if _, disabled := disabledMetrics["kubecost_load_balancer_cost"]; !disabled {
  233. toRegisterGV = append(toRegisterGV, lbCostGv)
  234. }
  235. // Register cost-model metrics for emission
  236. for _, gv := range toRegisterGV {
  237. prometheus.MustRegister(gv)
  238. }
  239. for _, g := range toRegisterGauge {
  240. prometheus.MustRegister(g)
  241. }
  242. // General Metric Collectors
  243. prometheus.MustRegister(ClusterInfoCollector{
  244. ClusterInfo: clusterInfo,
  245. metricsConfig: *metricsConfig,
  246. })
  247. })
  248. }
//--------------------------------------------------------------------------
// CostModelMetricsEmitter
//--------------------------------------------------------------------------

// CostModelMetricsEmitter emits all cost-model specific metrics calculated by
// the CostModel.ComputeCostData() method.
type CostModelMetricsEmitter struct {
	PrometheusClient promclient.Client
	KubeClusterCache clustercache.ClusterCache
	CloudProvider    models.Provider
	Model            *CostModel
	// Metrics
	// These recorder fields are wired to the package-level gauges created by
	// initCostModelMetrics, so every emitter instance shares the same series.
	CPUPriceRecorder              *prometheus.GaugeVec
	RAMPriceRecorder              *prometheus.GaugeVec
	PersistentVolumePriceRecorder *prometheus.GaugeVec
	GPUPriceRecorder              *prometheus.GaugeVec
	GPUCountRecorder              *prometheus.GaugeVec
	PVAllocationRecorder          *prometheus.GaugeVec
	NodeSpotRecorder              *prometheus.GaugeVec
	NodeTotalPriceRecorder        *prometheus.GaugeVec
	RAMAllocationRecorder         *prometheus.GaugeVec
	CPUAllocationRecorder         *prometheus.GaugeVec
	GPUAllocationRecorder         *prometheus.GaugeVec
	ClusterManagementCostRecorder *prometheus.GaugeVec
	LBCostRecorder                *prometheus.GaugeVec
	NetworkZoneEgressRecorder     prometheus.Gauge
	NetworkRegionEgressRecorder   prometheus.Gauge
	NetworkInternetEgressRecorder prometheus.Gauge
	// Concurrent Flow Control - Manages the run state of the metric emitter
	runState atomic.AtomicRunState
}
// NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
//
// Construction also performs one-time, process-wide metric setup: it loads
// the metrics configuration, registers the cost-model gauges via
// initCostModelMetrics, initializes kube metrics emission, and initializes
// OpenCost telemetry. The returned emitter's recorder fields alias the
// package-level gauges created by initCostModelMetrics.
func NewCostModelMetricsEmitter(clusterCache clustercache.ClusterCache, provider models.Provider, clusterInfo clusters.ClusterInfoProvider, model *CostModel) *CostModelMetricsEmitter {
	// Get metric configurations, if any
	metricsConfig, err := metrics.GetMetricsConfig()
	if err != nil {
		// NOTE(review): the error is only logged and metricsConfig is still
		// dereferenced below — presumably GetMetricsConfig returns a usable
		// (non-nil) default config even on error; confirm it is never nil.
		log.Infof("Failed to get metrics config before init: %s", err)
	}
	if len(metricsConfig.DisabledMetrics) > 0 {
		log.Infof("Starting metrics init with disabled metrics: %v", metricsConfig.DisabledMetrics)
	}
	// init will only actually execute once to register the custom gauges
	initCostModelMetrics(clusterInfo, metricsConfig)
	// Kube metrics emission behavior is driven by environment flags.
	metrics.InitKubeMetrics(clusterCache, metricsConfig, &metrics.KubeMetricsOpts{
		EmitKubecostControllerMetrics: true,
		EmitNamespaceAnnotations:      env.IsEmitNamespaceAnnotationsMetric(),
		EmitPodAnnotations:            env.IsEmitPodAnnotationsMetric(),
		EmitKubeStateMetrics:          env.IsEmitKsmV1Metrics(),
		EmitKubeStateMetricsV1Only:    env.IsEmitKsmV1MetricsOnly(),
		EmitDeprecatedMetrics:         env.IsEmitDeprecatedMetrics(),
	})
	metrics.InitOpencostTelemetry(metricsConfig)
	// Wire the shared package-level gauges into this emitter instance.
	return &CostModelMetricsEmitter{
		KubeClusterCache:              clusterCache,
		CloudProvider:                 provider,
		Model:                         model,
		CPUPriceRecorder:              cpuGv,
		RAMPriceRecorder:              ramGv,
		GPUPriceRecorder:              gpuGv,
		GPUCountRecorder:              gpuCountGv,
		PersistentVolumePriceRecorder: pvGv,
		NodeSpotRecorder:              spotGv,
		NodeTotalPriceRecorder:        totalGv,
		RAMAllocationRecorder:         ramAllocGv,
		CPUAllocationRecorder:         cpuAllocGv,
		GPUAllocationRecorder:         gpuAllocGv,
		PVAllocationRecorder:          pvAllocGv,
		NetworkZoneEgressRecorder:     networkZoneEgressCostG,
		NetworkRegionEgressRecorder:   networkRegionEgressCostG,
		NetworkInternetEgressRecorder: networkInternetEgressCostG,
		ClusterManagementCostRecorder: clusterManagementCostGv,
		LBCostRecorder:                lbCostGv,
	}
}
  322. // IsRunning returns true if metric recording is running.
  323. func (cmme *CostModelMetricsEmitter) IsRunning() bool {
  324. return cmme.runState.IsRunning()
  325. }
// NodeCostAverages tracks a running average of a node's cost attributes.
// The averages are used to detect and discard spurious outliers.
type NodeCostAverages struct {
	CpuCostAverage   float64 // running average of observed hourly CPU cost
	RamCostAverage   float64 // running average of observed hourly RAM cost
	NumCpuDataPoints float64 // number of samples folded into CpuCostAverage
	NumRamDataPoints float64 // number of samples folded into RamCostAverage
}
  334. // StartCostModelMetricRecording starts the go routine that emits metrics used to determine
  335. // cluster costs.
  336. func (cmme *CostModelMetricsEmitter) Start() bool {
  337. // wait for a reset to prevent a race between start and stop calls
  338. cmme.runState.WaitForReset()
  339. // Check to see if we're already recording, and atomically advance the run state to start if we're not
  340. if !cmme.runState.Start() {
  341. log.Errorf("Attempted to start cost model metric recording when it's already running.")
  342. return false
  343. }
  344. go func() {
  345. defer errors.HandlePanic()
  346. containerSeen := make(map[string]bool)
  347. nodeSeen := make(map[string]bool)
  348. loadBalancerSeen := make(map[string]bool)
  349. pvSeen := make(map[string]bool)
  350. pvcSeen := make(map[string]bool)
  351. nodeCostAverages := make(map[string]NodeCostAverages)
  352. getKeyFromLabelStrings := func(labels ...string) string {
  353. return strings.Join(labels, ",")
  354. }
  355. getLabelStringsFromKey := func(key string) []string {
  356. return strings.Split(key, ",")
  357. }
  358. var defaultRegion string = ""
  359. nodeList := cmme.KubeClusterCache.GetAllNodes()
  360. if len(nodeList) > 0 {
  361. var ok bool
  362. defaultRegion, ok = util.GetRegion(nodeList[0].Labels)
  363. if !ok {
  364. log.DedupedWarningf(5, "Failed to read default region from labels on node %s", nodeList[0].Name)
  365. }
  366. }
  367. for {
  368. log.Debugf("Recording prices...")
  369. podlist := cmme.KubeClusterCache.GetAllPods()
  370. podStatus := make(map[string]v1.PodPhase)
  371. podUIDs := make(map[string]string)
  372. for _, pod := range podlist {
  373. podStatus[pod.Name] = pod.Status.Phase
  374. podUIDs[pod.Name] = string(pod.UID)
  375. }
  376. // Create node UID lookup map
  377. nodeList := cmme.KubeClusterCache.GetAllNodes()
  378. nodeUIDs := make(map[string]string)
  379. for _, node := range nodeList {
  380. nodeUIDs[node.Name] = string(node.UID)
  381. }
  382. // Create PV UID lookup map
  383. pvList := cmme.KubeClusterCache.GetAllPersistentVolumes()
  384. pvUIDs := make(map[string]string)
  385. for _, pv := range pvList {
  386. pvUIDs[pv.Name] = string(pv.UID)
  387. }
  388. // Create service UID lookup map
  389. serviceList := cmme.KubeClusterCache.GetAllServices()
  390. serviceUIDs := make(map[string]string)
  391. for _, service := range serviceList {
  392. serviceKey := service.Namespace + "/" + service.Name
  393. serviceUIDs[serviceKey] = string(service.UID)
  394. }
  395. cfg, _ := cmme.CloudProvider.GetConfig()
  396. provisioner, clusterManagementCost, err := cmme.CloudProvider.ClusterManagementPricing()
  397. if err != nil {
  398. log.Errorf("Error getting cluster management cost %s", err.Error())
  399. }
  400. cmme.ClusterManagementCostRecorder.WithLabelValues(provisioner).Set(clusterManagementCost)
  401. // Record network pricing at global scope
  402. networkCosts, err := cmme.CloudProvider.NetworkPricing()
  403. if err != nil {
  404. log.Debugf("Failed to retrieve network costs: %s", err.Error())
  405. } else {
  406. cmme.NetworkZoneEgressRecorder.Set(networkCosts.ZoneNetworkEgressCost)
  407. cmme.NetworkRegionEgressRecorder.Set(networkCosts.RegionNetworkEgressCost)
  408. cmme.NetworkInternetEgressRecorder.Set(networkCosts.InternetNetworkEgressCost)
  409. }
  410. end := time.Now()
  411. queryWindow := env.GetMetricsEmitterQueryWindow()
  412. start := end.Add(-queryWindow)
  413. data, err := cmme.Model.ComputeCostData(start, end)
  414. if err != nil {
  415. // For an error collection, we'll just log the length of the errors (ComputeCostData already logs the
  416. // actual errors)
  417. if source.IsErrorCollection(err) {
  418. if ec, ok := err.(source.QueryErrorCollection); ok {
  419. log.Errorf("Error in price recording: %d errors occurred", len(ec.Errors()))
  420. }
  421. } else {
  422. log.Errorf("Error in price recording: %s", err)
  423. }
  424. // zero the for loop so the time.Sleep will still work
  425. data = map[string]*CostData{}
  426. }
  427. nodes, err := cmme.Model.GetNodeCost()
  428. if err != nil {
  429. log.Warnf("Error getting Node cost: %s", err)
  430. }
  431. for nodeName, node := range nodes {
  432. // Get node UID first
  433. nodeUID := nodeUIDs[nodeName]
  434. // Emit costs, guarding against NaN inputs for custom pricing.
  435. cpuCost, _ := strconv.ParseFloat(node.VCPUCost, 64)
  436. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  437. cpuCost, _ = strconv.ParseFloat(cfg.CPU, 64)
  438. if math.IsNaN(cpuCost) || math.IsInf(cpuCost, 0) {
  439. cpuCost = 0
  440. }
  441. }
  442. cpu, _ := strconv.ParseFloat(node.VCPU, 64)
  443. if math.IsNaN(cpu) || math.IsInf(cpu, 0) {
  444. cpu = 1 // Assume 1 CPU
  445. }
  446. ramCost, _ := strconv.ParseFloat(node.RAMCost, 64)
  447. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  448. ramCost, _ = strconv.ParseFloat(cfg.RAM, 64)
  449. if math.IsNaN(ramCost) || math.IsInf(ramCost, 0) {
  450. ramCost = 0
  451. }
  452. }
  453. ram, _ := strconv.ParseFloat(node.RAMBytes, 64)
  454. if math.IsNaN(ram) || math.IsInf(ram, 0) {
  455. ram = 0
  456. }
  457. gpu, _ := strconv.ParseFloat(node.GPU, 64)
  458. if math.IsNaN(gpu) || math.IsInf(gpu, 0) {
  459. gpu = 0
  460. }
  461. gpuCost, _ := strconv.ParseFloat(node.GPUCost, 64)
  462. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  463. gpuCost, _ = strconv.ParseFloat(cfg.GPU, 64)
  464. if math.IsNaN(gpuCost) || math.IsInf(gpuCost, 0) {
  465. gpuCost = 0
  466. }
  467. }
  468. nodeType := node.InstanceType
  469. nodeRegion := node.Region
  470. totalCost := cpu*cpuCost + ramCost*(ram/1024/1024/1024) + gpu*gpuCost
  471. labelKey := getKeyFromLabelStrings(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID)
  472. avgCosts, ok := nodeCostAverages[labelKey]
  473. // initialize average cost tracking for this node if there is none
  474. if !ok {
  475. avgCosts = NodeCostAverages{
  476. CpuCostAverage: cpuCost,
  477. RamCostAverage: ramCost,
  478. NumCpuDataPoints: 1,
  479. NumRamDataPoints: 1,
  480. }
  481. nodeCostAverages[labelKey] = avgCosts
  482. }
  483. cmme.GPUCountRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(gpu)
  484. cmme.GPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(gpuCost)
  485. const outlierFactor float64 = 30
  486. // don't record cpuCost, ramCost, or gpuCost in the case of wild outliers
  487. // k8s api sometimes causes cost spikes as described here:
  488. // https://github.com/opencost/opencost/issues/927
  489. cpuOutlierCutoff := outlierFactor * avgCosts.CpuCostAverage
  490. if cpuCost < cpuOutlierCutoff {
  491. cmme.CPUPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(cpuCost)
  492. avgCosts.CpuCostAverage = (avgCosts.CpuCostAverage*avgCosts.NumCpuDataPoints + cpuCost) / (avgCosts.NumCpuDataPoints + 1)
  493. avgCosts.NumCpuDataPoints += 1
  494. } else {
  495. log.Debugf("CPU cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, cpuCost, cpuOutlierCutoff)
  496. }
  497. ramOutlierCutoff := outlierFactor * avgCosts.RamCostAverage
  498. if ramCost < ramOutlierCutoff {
  499. cmme.RAMPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(ramCost)
  500. avgCosts.RamCostAverage = (avgCosts.RamCostAverage*avgCosts.NumRamDataPoints + ramCost) / (avgCosts.NumRamDataPoints + 1)
  501. avgCosts.NumRamDataPoints += 1
  502. } else {
  503. log.Debugf("RAM cost outlier detected; skipping data point: %s had %f as cost, which is above %f.", nodeName, ramCost, ramOutlierCutoff)
  504. }
  505. // skip redording totalCost if any constituent costs were outliers
  506. if cpuCost < cpuOutlierCutoff && ramCost < ramOutlierCutoff {
  507. cmme.NodeTotalPriceRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(totalCost)
  508. } else {
  509. log.Debugf("CPU and RAM outlier detected, not recording node %s total cost %f", nodeName, totalCost)
  510. }
  511. nodeCostAverages[labelKey] = avgCosts
  512. if node.IsSpot() {
  513. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(1.0)
  514. } else {
  515. cmme.NodeSpotRecorder.WithLabelValues(nodeName, nodeName, nodeType, nodeRegion, node.ProviderID, node.ArchType, nodeUID).Set(0.0)
  516. }
  517. nodeSeen[labelKey] = true
  518. }
  519. loadBalancers, err := cmme.Model.GetLBCost()
  520. if err != nil {
  521. log.Warnf("Error getting LoadBalancer cost: %s", err)
  522. }
  523. for lbKey, lb := range loadBalancers {
  524. // TODO: parse (if necessary) and calculate cost associated with loadBalancer based on dynamic cloud prices fetched into each lb struct on GetLBCost() call
  525. namespace := lbKey.Namespace
  526. serviceName := lbKey.Service
  527. ingressIP := ""
  528. if len(lb.IngressIPAddresses) > 0 {
  529. ingressIP = lb.IngressIPAddresses[0] // assumes one ingress IP per load balancer
  530. }
  531. serviceKey := namespace + "/" + serviceName
  532. serviceUID := serviceUIDs[serviceKey]
  533. cmme.LBCostRecorder.WithLabelValues(ingressIP, namespace, serviceName, serviceUID).Set(lb.Cost)
  534. labelKey := getKeyFromLabelStrings(ingressIP, namespace, serviceName, serviceUID)
  535. loadBalancerSeen[labelKey] = true
  536. }
  537. for _, costs := range data {
  538. nodeName := costs.NodeName
  539. namespace := costs.Namespace
  540. podName := costs.PodName
  541. containerName := costs.Name
  542. if costs.PVCData != nil {
  543. for _, pvc := range costs.PVCData {
  544. if pvc.Volume != nil {
  545. timesClaimed := pvc.TimesClaimed
  546. if timesClaimed == 0 {
  547. timesClaimed = 1 // unallocated PVs are unclaimed but have a full allocation
  548. }
  549. podUID := podUIDs[podName]
  550. cmme.PVAllocationRecorder.WithLabelValues(namespace, podName, pvc.Claim, pvc.VolumeName, podUID).Set(pvc.Values[0].Value / float64(timesClaimed))
  551. labelKey := getKeyFromLabelStrings(namespace, podName, pvc.Claim, pvc.VolumeName, podUID)
  552. pvcSeen[labelKey] = true
  553. }
  554. }
  555. }
  556. if len(costs.RAMAllocation) > 0 {
  557. podUID := podUIDs[podName]
  558. cmme.RAMAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(costs.RAMAllocation[0].Value)
  559. }
  560. if len(costs.CPUAllocation) > 0 {
  561. podUID := podUIDs[podName]
  562. cmme.CPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(costs.CPUAllocation[0].Value)
  563. }
  564. if len(costs.GPUReq) > 0 {
  565. // allocation here is set to the request because shared GPU usage not yet supported.
  566. // if VPGUs, request x (actual/virtual)
  567. vgpu := 0.0
  568. gpu := 0.0
  569. var err, verr error
  570. if matchedNode, found := nodes[nodeName]; found {
  571. vgpu, verr = strconv.ParseFloat(matchedNode.VGPU, 64)
  572. gpu, err = strconv.ParseFloat(matchedNode.GPU, 64)
  573. } else {
  574. log.Tracef("cost data for node %s had GPUReq, but there was no cost data available for the node", nodeName)
  575. log.Trace("defaulting GPU to 0 cost")
  576. }
  577. gpualloc := costs.GPUReq[0].Value
  578. if verr != nil && err != nil && vgpu != 0 {
  579. gpualloc = gpualloc * (gpu / vgpu)
  580. }
  581. podUID := podUIDs[podName]
  582. cmme.GPUAllocationRecorder.WithLabelValues(namespace, podName, containerName, nodeName, nodeName, podUID).Set(gpualloc)
  583. }
  584. podUID := podUIDs[podName]
  585. labelKey := getKeyFromLabelStrings(namespace, podName, containerName, nodeName, nodeName, podUID)
  586. if podStatus[podName] == v1.PodRunning { // Only report data for current pods
  587. containerSeen[labelKey] = true
  588. } else {
  589. containerSeen[labelKey] = false
  590. }
  591. }
  592. storageClasses := cmme.KubeClusterCache.GetAllStorageClasses()
  593. storageClassMap := make(map[string]map[string]string)
  594. for _, storageClass := range storageClasses {
  595. params := storageClass.Parameters
  596. storageClassMap[storageClass.Name] = params
  597. if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
  598. storageClassMap["default"] = params
  599. storageClassMap[""] = params
  600. }
  601. }
  602. pvs := cmme.KubeClusterCache.GetAllPersistentVolumes()
  603. for _, pv := range pvs {
  604. // Omit pv_hourly_cost if the volume status is failed
  605. if pv.Status.Phase == v1.VolumeFailed {
  606. continue
  607. }
  608. parameters, ok := storageClassMap[pv.Spec.StorageClassName]
  609. if !ok {
  610. log.Debugf("Unable to find parameters for storage class \"%s\". Pv \"%s\" might have an empty or invalid storageClassName.", pv.Spec.StorageClassName, pv.Name)
  611. }
  612. var region string
  613. if r, ok := util.GetRegion(pv.Labels); ok {
  614. region = r
  615. } else {
  616. region = defaultRegion
  617. }
  618. cacPv := &models.PV{
  619. Class: pv.Spec.StorageClassName,
  620. Region: region,
  621. Parameters: parameters,
  622. }
  623. cmme.Model.GetPVCost(cacPv, pv, region)
  624. c, _ := strconv.ParseFloat(cacPv.Cost, 64)
  625. pvUID := pvUIDs[pv.Name]
  626. cmme.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name, cacPv.ProviderID, pvUID).Set(c)
  627. labelKey := getKeyFromLabelStrings(pv.Name, pv.Name, cacPv.ProviderID, pvUID)
  628. pvSeen[labelKey] = true
  629. }
  630. // Remove metrics on Nodes/LoadBalancers/Containers/PVs that no
  631. // longer exist
  632. for labelString, seen := range nodeSeen {
  633. if !seen {
  634. log.Debugf("Removing metrics for %s, no data observed recently", labelString)
  635. labels := getLabelStringsFromKey(labelString)
  636. ok := cmme.NodeTotalPriceRecorder.DeleteLabelValues(labels...)
  637. if ok {
  638. log.Debugf("No data observed for node with labels %v, removed from totalprice", labels)
  639. } else {
  640. log.Warnf("Failed to remove label set %v from metric node_total_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  641. }
  642. ok = cmme.NodeSpotRecorder.DeleteLabelValues(labels...)
  643. if ok {
  644. log.Debugf("No data observed for node with labels %v, removed from spot records", labels)
  645. } else {
  646. log.Warnf("Failed to remove label set %v from metric kubecost_node_is_spot. Failure to remove stale metrics may result in inaccurate data.", labels)
  647. }
  648. ok = cmme.CPUPriceRecorder.DeleteLabelValues(labels...)
  649. if ok {
  650. log.Debugf("No data observed for node with labels %v, removed from cpuprice", labels)
  651. } else {
  652. log.Warnf("Failed to remove label set %v from metric node_cpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  653. }
  654. ok = cmme.GPUPriceRecorder.DeleteLabelValues(labels...)
  655. if ok {
  656. log.Debugf("No data observed for node with labels %v, removed from gpuprice", labels)
  657. } else {
  658. log.Warnf("Failed to remove label set %v from metric node_gpu_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  659. }
  660. ok = cmme.GPUCountRecorder.DeleteLabelValues(labels...)
  661. if ok {
  662. log.Debugf("No data observed for node with labels %v, removed from gpucount", labels)
  663. } else {
  664. log.Warnf("Failed to remove label set %v from metric node_gpu_count. Failure to remove stale metrics may result in inaccurate data.", labels)
  665. }
  666. ok = cmme.RAMPriceRecorder.DeleteLabelValues(labels...)
  667. if ok {
  668. log.Debugf("No data observed for node with labels %v, removed from ramprice", labels)
  669. } else {
  670. log.Warnf("Failed to remove label set %v from metric node_ram_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  671. }
  672. delete(nodeSeen, labelString)
  673. delete(nodeCostAverages, labelString)
  674. } else {
  675. nodeSeen[labelString] = false
  676. }
  677. }
  678. for labelString, seen := range loadBalancerSeen {
  679. if !seen {
  680. labels := getLabelStringsFromKey(labelString)
  681. ok := cmme.LBCostRecorder.DeleteLabelValues(labels...)
  682. if !ok {
  683. log.Warnf("Failed to remove label set %v from metric kubecost_load_balancer_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  684. }
  685. delete(loadBalancerSeen, labelString)
  686. } else {
  687. loadBalancerSeen[labelString] = false
  688. }
  689. }
  690. for labelString, seen := range containerSeen {
  691. if !seen {
  692. labels := getLabelStringsFromKey(labelString)
  693. if len(labels) >= 2 && labels[1] != unmountedPVsContainer { // special "pod" to contain the unmounted PVs - does not have RAM/CPU/...
  694. ok := cmme.RAMAllocationRecorder.DeleteLabelValues(labels...)
  695. if !ok {
  696. log.Warnf("Failed to remove label set %v from metric container_memory_allocation_bytes. Failure to remove stale metrics may result in inaccurate data.", labels)
  697. }
  698. ok = cmme.CPUAllocationRecorder.DeleteLabelValues(labels...)
  699. if !ok {
  700. log.Warnf("Failed to remove label set %v from metric container_cpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  701. }
  702. ok = cmme.GPUAllocationRecorder.DeleteLabelValues(labels...)
  703. if !ok {
  704. log.Warnf("Failed to remove label set %v from metric container_gpu_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  705. }
  706. } else {
  707. log.Debugf("Did not try to delete RAM/CPU/GPU for fake '%s' container: %v", unmountedPVsContainer, labels)
  708. }
  709. delete(containerSeen, labelString)
  710. } else {
  711. containerSeen[labelString] = false
  712. }
  713. }
  714. for labelString, seen := range pvSeen {
  715. if !seen {
  716. labels := getLabelStringsFromKey(labelString)
  717. ok := cmme.PersistentVolumePriceRecorder.DeleteLabelValues(labels...)
  718. if !ok {
  719. log.Warnf("Failed to remove label set %v from metric pv_hourly_cost. Failure to remove stale metrics may result in inaccurate data.", labels)
  720. }
  721. delete(pvSeen, labelString)
  722. } else {
  723. pvSeen[labelString] = false
  724. }
  725. }
  726. for labelString, seen := range pvcSeen {
  727. if !seen {
  728. labels := getLabelStringsFromKey(labelString)
  729. ok := cmme.PVAllocationRecorder.DeleteLabelValues(labels...)
  730. if !ok {
  731. log.Warnf("Failed to remove label set %v from metric pod_pvc_allocation. Failure to remove stale metrics may result in inaccurate data.", labels)
  732. }
  733. delete(pvcSeen, labelString)
  734. } else {
  735. pvcSeen[labelString] = false
  736. }
  737. }
  738. select {
  739. case <-time.After(time.Minute):
  740. case <-cmme.runState.OnStop():
  741. cmme.runState.Reset()
  742. return
  743. }
  744. }
  745. }()
  746. return true
  747. }
  748. // Stop halts the metrics emission loop after the current emission is completed
  749. // or if the emission is paused.
  750. func (cmme *CostModelMetricsEmitter) Stop() {
  751. cmme.runState.Stop()
  752. }