// diagnostics.go
  1. package prom
import (
	"fmt"
	"strings"

	"github.com/opencost/opencost/core/pkg/log"
	"github.com/opencost/opencost/core/pkg/source"

	prometheus "github.com/prometheus/client_golang/api"
)
// Prometheus Metric Diagnostic IDs. Each ID keys an entry in
// diagnosticDefinitions and identifies one diagnostic check in API responses.
const (
	// CAdvisorDiagnosticMetricID is the identifier of the metric used to determine if cAdvisor is being scraped.
	CAdvisorDiagnosticMetricID = "cadvisorMetric"
	// CAdvisorLabelDiagnosticMetricID is the identifier of the metric used to determine if cAdvisor labels are correct.
	CAdvisorLabelDiagnosticMetricID = "cadvisorLabel"
	// KSMDiagnosticMetricID is the identifier for the metric used to determine if KSM metrics are being scraped.
	KSMDiagnosticMetricID = "ksmMetric"
	// KSMVersionDiagnosticMetricID is the identifier for the metric used to determine if KSM version is correct.
	KSMVersionDiagnosticMetricID = "ksmVersion"
	// KubecostDiagnosticMetricID is the identifier for the metric used to determine if Kubecost metrics are being scraped.
	KubecostDiagnosticMetricID = "kubecostMetric"
	// ScrapeIntervalDiagnosticMetricID is the identifier for the metric used to determine if prometheus has its own self-scraped
	// metrics.
	ScrapeIntervalDiagnosticMetricID = "scrapeInterval"
	// CPUThrottlingDiagnosticMetricID is the identifier for the metric used to determine if CPU throttling is being applied to the
	// cost-model container.
	CPUThrottlingDiagnosticMetricID = "cpuThrottling"
	// KubecostRecordingRuleCPUUsageID is the identifier for the query used to
	// determine if the CPU usage recording rule is set up correctly.
	KubecostRecordingRuleCPUUsageID = "kubecostRecordingRuleCPUUsage"
	// CAdvisorWorkingSetBytesMetricID is the identifier for the query used to determine
	// if cAdvisor working set bytes data is being scraped
	CAdvisorWorkingSetBytesMetricID = "cadvisorWorkingSetBytesMetric"
	// KSMCPUCapacityMetricID is the identifier for the query used to determine if
	// KSM CPU capacity data is being scraped
	KSMCPUCapacityMetricID = "ksmCpuCapacityMetric"
	// KSMAllocatableCPUCoresMetricID is the identifier for the query used to determine
	// if KSM allocatable CPU core data is being scraped
	KSMAllocatableCPUCoresMetricID = "ksmAllocatableCpuCoresMetric"
)
// DocumentationBaseURL is the base URL of the OpenCost documentation, used to
// build the DocLink fields of the diagnostic definitions.
const DocumentationBaseURL = "https://www.opencost.io/docs/"
  40. // diagnostic definitions mapping holds all of the diagnostic definitions that can be used for prometheus metrics diagnostics
  41. var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnosticDefinition{
  42. CAdvisorDiagnosticMetricID: {
  43. ID: CAdvisorDiagnosticMetricID,
  44. QueryFmt: `absent_over_time(container_cpu_usage_seconds_total{%s}[5m] %s)`,
  45. Label: "cAdvisor metrics available",
  46. Description: "Determine if cAdvisor metrics are available during last 5 minutes.",
  47. DocLink: fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
  48. },
  49. KSMDiagnosticMetricID: {
  50. ID: KSMDiagnosticMetricID,
  51. QueryFmt: `absent_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[5m] %s)`,
  52. Label: "Kube-state-metrics available",
  53. Description: "Determine if metrics from kube-state-metrics are available during last 5 minutes.",
  54. DocLink: fmt.Sprintf("%s#kube-state-metrics-metrics-available", DocumentationBaseURL),
  55. },
  56. KubecostDiagnosticMetricID: {
  57. ID: KubecostDiagnosticMetricID,
  58. QueryFmt: `absent_over_time(node_cpu_hourly_cost{%s}[5m] %s)`,
  59. Label: "Kubecost metrics available",
  60. Description: "Determine if metrics from Kubecost are available during last 5 minutes.",
  61. },
  62. CAdvisorLabelDiagnosticMetricID: {
  63. ID: CAdvisorLabelDiagnosticMetricID,
  64. QueryFmt: `absent_over_time(container_cpu_usage_seconds_total{container!="",pod!="", %s}[5m] %s)`,
  65. Label: "Expected cAdvisor labels available",
  66. Description: "Determine if expected cAdvisor labels are present during last 5 minutes.",
  67. DocLink: fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
  68. },
  69. KSMVersionDiagnosticMetricID: {
  70. ID: KSMVersionDiagnosticMetricID,
  71. QueryFmt: `absent_over_time(kube_persistentvolume_capacity_bytes{%s}[5m] %s)`,
  72. Label: "Expected kube-state-metrics version found",
  73. Description: "Determine if metric in required kube-state-metrics version are present during last 5 minutes.",
  74. DocLink: fmt.Sprintf("%s#expected-kube-state-metrics-version-found", DocumentationBaseURL),
  75. },
  76. ScrapeIntervalDiagnosticMetricID: {
  77. ID: ScrapeIntervalDiagnosticMetricID,
  78. QueryFmt: `absent_over_time(prometheus_target_interval_length_seconds{%s}[5m] %s)`,
  79. Label: "Expected Prometheus self-scrape metrics available",
  80. Description: "Determine if prometheus has its own self-scraped metrics during the last 5 minutes.",
  81. },
  82. CPUThrottlingDiagnosticMetricID: {
  83. ID: CPUThrottlingDiagnosticMetricID,
  84. QueryFmt: `avg(increase(container_cpu_cfs_throttled_periods_total{container="cost-model", %s}[10m] %s)) by (container_name, pod_name, namespace)
  85. / avg(increase(container_cpu_cfs_periods_total{container="cost-model",%s}[10m] %s)) by (container_name, pod_name, namespace) > 0.2`,
  86. Label: "Kubecost is not CPU throttled",
  87. Description: "Kubecost loading slowly? A kubecost component might be CPU throttled",
  88. },
  89. KubecostRecordingRuleCPUUsageID: {
  90. ID: KubecostRecordingRuleCPUUsageID,
  91. QueryFmt: `absent_over_time(kubecost_container_cpu_usage_irate{%s}[5m] %s)`,
  92. Label: "Kubecost's CPU usage recording rule is set up",
  93. Description: "If the 'kubecost_container_cpu_usage_irate' recording rule is not set up, Allocation pipeline build may put pressure on your Prometheus due to the use of a subquery.",
  94. DocLink: "https://www.opencost.io/docs/installation/prometheus",
  95. },
  96. CAdvisorWorkingSetBytesMetricID: {
  97. ID: CAdvisorWorkingSetBytesMetricID,
  98. QueryFmt: `absent_over_time(container_memory_working_set_bytes{container="cost-model", container!="POD", instance!="", %s}[5m] %s)`,
  99. Label: "cAdvisor working set bytes metrics available",
  100. Description: "Determine if cAdvisor working set bytes metrics are available during last 5 minutes.",
  101. },
  102. KSMCPUCapacityMetricID: {
  103. ID: KSMCPUCapacityMetricID,
  104. QueryFmt: `absent_over_time(kube_node_status_capacity_cpu_cores{%s}[5m] %s)`,
  105. Label: "KSM had CPU capacity during the last 5 minutes",
  106. Description: "Determine if KSM had CPU capacity during the last 5 minutes",
  107. },
  108. KSMAllocatableCPUCoresMetricID: {
  109. ID: KSMAllocatableCPUCoresMetricID,
  110. QueryFmt: `absent_over_time(kube_node_status_allocatable_cpu_cores{%s}[5m] %s)`,
  111. Label: "KSM had allocatable CPU cores during the last 5 minutes",
  112. Description: "Determine if KSM had allocatable CPU cores during the last 5 minutes",
  113. },
  114. }
// RequestCounter is used to determine if the prometheus client keeps track of
// the concurrent outbound requests
type RequestCounter interface {
	// TotalQueuedRequests returns the number of requests waiting to be sent.
	TotalQueuedRequests() int
	// TotalOutboundRequests returns the number of requests currently outbound.
	TotalOutboundRequests() int
}
// QueuedPromRequest is a representation of a request waiting to be sent by the prometheus
// client.
type QueuedPromRequest struct {
	// Context is the name of the query context the request was issued under.
	Context string `json:"context"`
	// Query is the PromQL query string awaiting execution.
	Query string `json:"query"`
	// QueueTime is the time spent queued, in milliseconds (populated from the
	// queueTimeMs value reported by EachQueuedRequest).
	QueueTime int64 `json:"queueTime"`
}
// PrometheusQueueState contains diagnostic information concerning the state of the prometheus request
// queue
type PrometheusQueueState struct {
	// QueuedRequests lists the requests currently waiting in the client queue.
	QueuedRequests []*QueuedPromRequest `json:"queuedRequests"`
	// OutboundRequests is the number of requests currently in flight.
	OutboundRequests int `json:"outboundRequests"`
	// TotalRequests is OutboundRequests plus the number of queued requests.
	TotalRequests int `json:"totalRequests"`
	// MaxQueryConcurrency mirrors the client configuration's QueryConcurrency.
	MaxQueryConcurrency int `json:"maxQueryConcurrency"`
}
  136. // GetPrometheusQueueState is a diagnostic function that probes the prometheus request queue and gathers
  137. // query, context, and queue statistics.
  138. func GetPrometheusQueueState(client prometheus.Client, config *OpenCostPrometheusConfig) (*PrometheusQueueState, error) {
  139. rlpc, ok := client.(*RateLimitedPrometheusClient)
  140. if !ok {
  141. return nil, fmt.Errorf("Failed to get prometheus queue state for the provided client. Must be of type RateLimitedPrometheusClient.")
  142. }
  143. outbound := rlpc.TotalOutboundRequests()
  144. requests := []*QueuedPromRequest{}
  145. rlpc.EachQueuedRequest(func(ctx string, query string, queueTimeMs int64) {
  146. requests = append(requests, &QueuedPromRequest{
  147. Context: ctx,
  148. Query: query,
  149. QueueTime: queueTimeMs,
  150. })
  151. })
  152. return &PrometheusQueueState{
  153. QueuedRequests: requests,
  154. OutboundRequests: outbound,
  155. TotalRequests: outbound + len(requests),
  156. MaxQueryConcurrency: config.ClientConfig.QueryConcurrency,
  157. }, nil
  158. }
  159. // LogPrometheusClientState logs the current state, with respect to outbound requests, if that
  160. // information is available.
  161. func LogPrometheusClientState(client prometheus.Client) {
  162. if rc, ok := client.(RequestCounter); ok {
  163. queued := rc.TotalQueuedRequests()
  164. outbound := rc.TotalOutboundRequests()
  165. total := queued + outbound
  166. log.Infof("Outbound Requests: %d, Queued Requests: %d, Total Requests: %d", outbound, queued, total)
  167. }
  168. }
  169. // GetPrometheusMetrics returns a list of the state of Prometheus metric used by kubecost using the provided client
  170. func GetPrometheusMetrics(client prometheus.Client, config *OpenCostPrometheusConfig, offset string) PrometheusDiagnostics {
  171. ctx := NewNamedContext(client, config, DiagnosticContextName)
  172. var result []*PrometheusDiagnostic
  173. for _, definition := range diagnosticDefinitions {
  174. pd := definition.NewDiagnostic(config.ClusterFilter, offset)
  175. err := pd.executePrometheusDiagnosticQuery(ctx)
  176. // log the errror, append to results anyways, and continue
  177. if err != nil {
  178. log.Errorf("error: %s", err.Error())
  179. }
  180. result = append(result, pd)
  181. }
  182. return result
  183. }
  184. // GetPrometheusMetricsByID returns a list of the state of specific Prometheus metrics by identifier.
  185. func GetPrometheusMetricsByID(ids []string, client prometheus.Client, config *OpenCostPrometheusConfig, offset string) PrometheusDiagnostics {
  186. ctx := NewNamedContext(client, config, DiagnosticContextName)
  187. var result []*PrometheusDiagnostic
  188. for _, id := range ids {
  189. if definition, ok := diagnosticDefinitions[id]; ok {
  190. pd := definition.NewDiagnostic(config.ClusterFilter, offset)
  191. err := pd.executePrometheusDiagnosticQuery(ctx)
  192. // log the errror, append to results anyways, and continue
  193. if err != nil {
  194. log.Errorf("error: %s", err.Error())
  195. }
  196. result = append(result, pd)
  197. } else {
  198. log.Warnf("Failed to find diagnostic definition for id: %s", id)
  199. }
  200. }
  201. return result
  202. }
// PrometheusDiagnostics is a PrometheusDiagnostic container with helper methods.
type PrometheusDiagnostics []*PrometheusDiagnostic
  205. // HasFailure returns true if any of the diagnostic tests didn't pass.
  206. func (pd PrometheusDiagnostics) HasFailure() bool {
  207. for _, p := range pd {
  208. if !p.Passed {
  209. return true
  210. }
  211. }
  212. return false
  213. }
// diagnosticDefinition is a definition of a diagnostic that can be used to create new
// PrometheusDiagnostic instances using the definition's fields.
type diagnosticDefinition struct {
	// ID is the unique diagnostic identifier (one of the *MetricID constants).
	ID string
	// QueryFmt is a PromQL template whose %s verbs are filled with
	// (filter, offset) pairs by NewDiagnostic.
	QueryFmt string
	// Label is a short human-readable name for the diagnostic.
	Label string
	// Description explains what the diagnostic checks.
	Description string
	// DocLink optionally points at relevant documentation; may be empty.
	DocLink string
}
  223. // NewDiagnostic creates a new PrometheusDiagnostic instance using the provided definition data.
  224. func (pdd *diagnosticDefinition) NewDiagnostic(filter string, offset string) *PrometheusDiagnostic {
  225. // FIXME: Any reasonable way to get the total number of replacements required in the query?
  226. // FIXME: All of the other queries require a single offset replace, but CPUThrottle requires two.
  227. var query string
  228. if pdd.ID == CPUThrottlingDiagnosticMetricID {
  229. query = fmt.Sprintf(pdd.QueryFmt, filter, offset, filter, offset)
  230. } else {
  231. query = fmt.Sprintf(pdd.QueryFmt, filter, offset)
  232. }
  233. return &PrometheusDiagnostic{
  234. ID: pdd.ID,
  235. Query: query,
  236. Label: pdd.Label,
  237. Description: pdd.Description,
  238. DocLink: pdd.DocLink,
  239. }
  240. }
// PrometheusDiagnostic holds information about a metric and the query to ensure it is functional
type PrometheusDiagnostic struct {
	// ID is the diagnostic identifier (one of the *MetricID constants).
	ID string `json:"id"`
	// Query is the fully-rendered PromQL query executed for this diagnostic.
	Query string `json:"query"`
	// Label is a short human-readable name for the diagnostic.
	Label string `json:"label"`
	// Description explains what the diagnostic checks.
	Description string `json:"description"`
	// DocLink optionally points at relevant documentation; may be empty.
	DocLink string `json:"docLink"`
	// Result holds the raw query results; empty when the diagnostic passed.
	Result []*source.QueryResult `json:"result"`
	// Passed is true when the query returned no series — for the
	// absent_over_time-style queries used here, an empty result means the
	// underlying metric was present.
	Passed bool `json:"passed"`
}
  251. // executePrometheusDiagnosticQuery executes a PrometheusDiagnostic query using the given context
  252. func (pd *PrometheusDiagnostic) executePrometheusDiagnosticQuery(ctx *Context) error {
  253. resultCh := ctx.Query(pd.Query)
  254. result, err := resultCh.Await()
  255. if err != nil {
  256. return fmt.Errorf("prometheus diagnostic %s failed with error: %s", pd.ID, err)
  257. }
  258. if result == nil {
  259. result = []*source.QueryResult{}
  260. }
  261. pd.Result = result
  262. pd.Passed = len(result) == 0
  263. return nil
  264. }
  265. func (pd *PrometheusDiagnostic) AsMap() map[string]any {
  266. return map[string]any{
  267. "query": pd.Query,
  268. "label": pd.Label,
  269. "docLink": pd.DocLink,
  270. "result": pd.Result,
  271. "passed": pd.Passed,
  272. }
  273. }