diagnostics.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299
  1. package prom
import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/opencost/opencost/core/pkg/log"
	"github.com/opencost/opencost/pkg/env"
	prometheus "github.com/prometheus/client_golang/api"
)
  9. // Prometheus Metric Diagnostic IDs
  10. const (
  11. // CAdvisorDiagnosticMetricID is the identifier of the metric used to determine if cAdvisor is being scraped.
  12. CAdvisorDiagnosticMetricID = "cadvisorMetric"
  13. // CAdvisorLabelDiagnosticMetricID is the identifier of the metric used to determine if cAdvisor labels are correct.
  14. CAdvisorLabelDiagnosticMetricID = "cadvisorLabel"
  15. // KSMDiagnosticMetricID is the identifier for the metric used to determine if KSM metrics are being scraped.
  16. KSMDiagnosticMetricID = "ksmMetric"
  17. // KSMVersionDiagnosticMetricID is the identifier for the metric used to determine if KSM version is correct.
  18. KSMVersionDiagnosticMetricID = "ksmVersion"
  19. // KubecostDiagnosticMetricID is the identifier for the metric used to determine if Kubecost metrics are being scraped.
  20. KubecostDiagnosticMetricID = "kubecostMetric"
  21. // ScrapeIntervalDiagnosticMetricID is the identifier for the metric used to determine if prometheus has its own self-scraped
  22. // metrics.
  23. ScrapeIntervalDiagnosticMetricID = "scrapeInterval"
  24. // CPUThrottlingDiagnosticMetricID is the identifier for the metric used to determine if CPU throttling is being applied to the
  25. // cost-model container.
  26. CPUThrottlingDiagnosticMetricID = "cpuThrottling"
  27. // KubecostRecordingRuleCPUUsageID is the identifier for the query used to
  28. // determine of the CPU usage recording rule is set up correctly.
  29. KubecostRecordingRuleCPUUsageID = "kubecostRecordingRuleCPUUsage"
  30. // CAdvisorWorkingSetBytesMetricID is the identifier for the query used to determine
  31. // if cAdvisor working set bytes data is being scraped
  32. CAdvisorWorkingSetBytesMetricID = "cadvisorWorkingSetBytesMetric"
  33. // KSMCPUCapacityMetricID is the identifier for the query used to determine if
  34. // KSM CPU capacity data is being scraped
  35. KSMCPUCapacityMetricID = "ksmCpuCapacityMetric"
  36. // KSMAllocatableCPUCoresMetricID is the identifier for the query used to determine
  37. // if KSM allocatable CPU core data is being scraped
  38. KSMAllocatableCPUCoresMetricID = "ksmAllocatableCpuCoresMetric"
  39. )
  40. const DocumentationBaseURL = "https://www.opencost.io/docs/"
  41. // diagnostic definitions mapping holds all of the diagnostic definitions that can be used for prometheus metrics diagnostics
  42. var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnosticDefinition{
  43. CAdvisorDiagnosticMetricID: {
  44. ID: CAdvisorDiagnosticMetricID,
  45. QueryFmt: `absent_over_time(container_cpu_usage_seconds_total{%s}[5m] %s)`,
  46. Label: "cAdvisor metrics available",
  47. Description: "Determine if cAdvisor metrics are available during last 5 minutes.",
  48. DocLink: fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
  49. },
  50. KSMDiagnosticMetricID: {
  51. ID: KSMDiagnosticMetricID,
  52. QueryFmt: `absent_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[5m] %s)`,
  53. Label: "Kube-state-metrics available",
  54. Description: "Determine if metrics from kube-state-metrics are available during last 5 minutes.",
  55. DocLink: fmt.Sprintf("%s#kube-state-metrics-metrics-available", DocumentationBaseURL),
  56. },
  57. KubecostDiagnosticMetricID: {
  58. ID: KubecostDiagnosticMetricID,
  59. QueryFmt: `absent_over_time(node_cpu_hourly_cost{%s}[5m] %s)`,
  60. Label: "Kubecost metrics available",
  61. Description: "Determine if metrics from Kubecost are available during last 5 minutes.",
  62. },
  63. CAdvisorLabelDiagnosticMetricID: {
  64. ID: CAdvisorLabelDiagnosticMetricID,
  65. QueryFmt: `absent_over_time(container_cpu_usage_seconds_total{container!="",pod!="", %s}[5m] %s)`,
  66. Label: "Expected cAdvisor labels available",
  67. Description: "Determine if expected cAdvisor labels are present during last 5 minutes.",
  68. DocLink: fmt.Sprintf("%s#cadvisor-metrics-available", DocumentationBaseURL),
  69. },
  70. KSMVersionDiagnosticMetricID: {
  71. ID: KSMVersionDiagnosticMetricID,
  72. QueryFmt: `absent_over_time(kube_persistentvolume_capacity_bytes{%s}[5m] %s)`,
  73. Label: "Expected kube-state-metrics version found",
  74. Description: "Determine if metric in required kube-state-metrics version are present during last 5 minutes.",
  75. DocLink: fmt.Sprintf("%s#expected-kube-state-metrics-version-found", DocumentationBaseURL),
  76. },
  77. ScrapeIntervalDiagnosticMetricID: {
  78. ID: ScrapeIntervalDiagnosticMetricID,
  79. QueryFmt: `absent_over_time(prometheus_target_interval_length_seconds{%s}[5m] %s)`,
  80. Label: "Expected Prometheus self-scrape metrics available",
  81. Description: "Determine if prometheus has its own self-scraped metrics during the last 5 minutes.",
  82. },
  83. CPUThrottlingDiagnosticMetricID: {
  84. ID: CPUThrottlingDiagnosticMetricID,
  85. QueryFmt: `avg(increase(container_cpu_cfs_throttled_periods_total{container="cost-model", %s}[10m] %s)) by (container_name, pod_name, namespace)
  86. / avg(increase(container_cpu_cfs_periods_total{container="cost-model",%s}[10m] %s)) by (container_name, pod_name, namespace) > 0.2`,
  87. Label: "Kubecost is not CPU throttled",
  88. Description: "Kubecost loading slowly? A kubecost component might be CPU throttled",
  89. },
  90. KubecostRecordingRuleCPUUsageID: {
  91. ID: KubecostRecordingRuleCPUUsageID,
  92. QueryFmt: `absent_over_time(kubecost_container_cpu_usage_irate{%s}[5m] %s)`,
  93. Label: "Kubecost's CPU usage recording rule is set up",
  94. Description: "If the 'kubecost_container_cpu_usage_irate' recording rule is not set up, Allocation pipeline build may put pressure on your Prometheus due to the use of a subquery.",
  95. DocLink: "https://www.opencost.io/docs/installation/prometheus",
  96. },
  97. CAdvisorWorkingSetBytesMetricID: {
  98. ID: CAdvisorWorkingSetBytesMetricID,
  99. QueryFmt: `absent_over_time(container_memory_working_set_bytes{container="cost-model", container!="POD", instance!="", %s}[5m] %s)`,
  100. Label: "cAdvisor working set bytes metrics available",
  101. Description: "Determine if cAdvisor working set bytes metrics are available during last 5 minutes.",
  102. },
  103. KSMCPUCapacityMetricID: {
  104. ID: KSMCPUCapacityMetricID,
  105. QueryFmt: `absent_over_time(kube_node_status_capacity_cpu_cores{%s}[5m] %s)`,
  106. Label: "KSM had CPU capacity during the last 5 minutes",
  107. Description: "Determine if KSM had CPU capacity during the last 5 minutes",
  108. },
  109. KSMAllocatableCPUCoresMetricID: {
  110. ID: KSMAllocatableCPUCoresMetricID,
  111. QueryFmt: `absent_over_time(kube_node_status_allocatable_cpu_cores{%s}[5m] %s)`,
  112. Label: "KSM had allocatable CPU cores during the last 5 minutes",
  113. Description: "Determine if KSM had allocatable CPU cores during the last 5 minutes",
  114. },
  115. }
  116. // QueuedPromRequest is a representation of a request waiting to be sent by the prometheus
  117. // client.
  118. type QueuedPromRequest struct {
  119. Context string `json:"context"`
  120. Query string `json:"query"`
  121. QueueTime int64 `json:"queueTime"`
  122. }
  123. // PrometheusQueueState contains diagnostic information concerning the state of the prometheus request
  124. // queue
  125. type PrometheusQueueState struct {
  126. QueuedRequests []*QueuedPromRequest `json:"queuedRequests"`
  127. OutboundRequests int `json:"outboundRequests"`
  128. TotalRequests int `json:"totalRequests"`
  129. MaxQueryConcurrency int `json:"maxQueryConcurrency"`
  130. }
  131. // GetPrometheusQueueState is a diagnostic function that probes the prometheus request queue and gathers
  132. // query, context, and queue statistics.
  133. func GetPrometheusQueueState(client prometheus.Client) (*PrometheusQueueState, error) {
  134. rlpc, ok := client.(*RateLimitedPrometheusClient)
  135. if !ok {
  136. return nil, fmt.Errorf("Failed to get prometheus queue state for the provided client. Must be of type RateLimitedPrometheusClient.")
  137. }
  138. outbound := rlpc.TotalOutboundRequests()
  139. requests := []*QueuedPromRequest{}
  140. rlpc.queue.Each(func(_ int, req *workRequest) {
  141. requests = append(requests, &QueuedPromRequest{
  142. Context: req.contextName,
  143. Query: req.query,
  144. QueueTime: time.Since(req.start).Milliseconds(),
  145. })
  146. })
  147. return &PrometheusQueueState{
  148. QueuedRequests: requests,
  149. OutboundRequests: outbound,
  150. TotalRequests: outbound + len(requests),
  151. MaxQueryConcurrency: env.GetMaxQueryConcurrency(),
  152. }, nil
  153. }
  154. // LogPrometheusClientState logs the current state, with respect to outbound requests, if that
  155. // information is available.
  156. func LogPrometheusClientState(client prometheus.Client) {
  157. if rc, ok := client.(requestCounter); ok {
  158. queued := rc.TotalQueuedRequests()
  159. outbound := rc.TotalOutboundRequests()
  160. total := queued + outbound
  161. log.Infof("Outbound Requests: %d, Queued Requests: %d, Total Requests: %d", outbound, queued, total)
  162. }
  163. }
  164. // GetPrometheusMetrics returns a list of the state of Prometheus metric used by kubecost using the provided client
  165. func GetPrometheusMetrics(client prometheus.Client, offset string) PrometheusDiagnostics {
  166. ctx := NewNamedContext(client, DiagnosticContextName)
  167. var result []*PrometheusDiagnostic
  168. for _, definition := range diagnosticDefinitions {
  169. pd := definition.NewDiagnostic(offset)
  170. err := pd.executePrometheusDiagnosticQuery(ctx)
  171. // log the errror, append to results anyways, and continue
  172. if err != nil {
  173. log.Errorf(err.Error())
  174. }
  175. result = append(result, pd)
  176. }
  177. return result
  178. }
  179. // GetPrometheusMetricsByID returns a list of the state of specific Prometheus metrics by identifier.
  180. func GetPrometheusMetricsByID(ids []string, client prometheus.Client, offset string) PrometheusDiagnostics {
  181. ctx := NewNamedContext(client, DiagnosticContextName)
  182. var result []*PrometheusDiagnostic
  183. for _, id := range ids {
  184. if definition, ok := diagnosticDefinitions[id]; ok {
  185. pd := definition.NewDiagnostic(offset)
  186. err := pd.executePrometheusDiagnosticQuery(ctx)
  187. // log the errror, append to results anyways, and continue
  188. if err != nil {
  189. log.Errorf(err.Error())
  190. }
  191. result = append(result, pd)
  192. } else {
  193. log.Warnf("Failed to find diagnostic definition for id: %s", id)
  194. }
  195. }
  196. return result
  197. }
  198. // PrometheusDiagnostics is a PrometheusDiagnostic container with helper methods.
  199. type PrometheusDiagnostics []*PrometheusDiagnostic
  200. // HasFailure returns true if any of the diagnostic tests didn't pass.
  201. func (pd PrometheusDiagnostics) HasFailure() bool {
  202. for _, p := range pd {
  203. if !p.Passed {
  204. return true
  205. }
  206. }
  207. return false
  208. }
  209. // diagnosticDefinition is a definition of a diagnostic that can be used to create new
  210. // PrometheusDiagnostic instances using the definition's fields.
  211. type diagnosticDefinition struct {
  212. ID string
  213. QueryFmt string
  214. Label string
  215. Description string
  216. DocLink string
  217. }
  218. // NewDiagnostic creates a new PrometheusDiagnostic instance using the provided definition data.
  219. func (pdd *diagnosticDefinition) NewDiagnostic(offset string) *PrometheusDiagnostic {
  220. // FIXME: Any reasonable way to get the total number of replacements required in the query?
  221. // FIXME: All of the other queries require a single offset replace, but CPUThrottle requires two.
  222. var query string
  223. filter := env.GetPromClusterFilter()
  224. if pdd.ID == CPUThrottlingDiagnosticMetricID {
  225. query = fmt.Sprintf(pdd.QueryFmt, filter, offset, filter, offset)
  226. } else {
  227. query = fmt.Sprintf(pdd.QueryFmt, filter, offset)
  228. }
  229. return &PrometheusDiagnostic{
  230. ID: pdd.ID,
  231. Query: query,
  232. Label: pdd.Label,
  233. Description: pdd.Description,
  234. DocLink: pdd.DocLink,
  235. }
  236. }
  237. // PrometheusDiagnostic holds information about a metric and the query to ensure it is functional
  238. type PrometheusDiagnostic struct {
  239. ID string `json:"id"`
  240. Query string `json:"query"`
  241. Label string `json:"label"`
  242. Description string `json:"description"`
  243. DocLink string `json:"docLink"`
  244. Result []*QueryResult `json:"result"`
  245. Passed bool `json:"passed"`
  246. }
  247. // executePrometheusDiagnosticQuery executes a PrometheusDiagnostic query using the given context
  248. func (pd *PrometheusDiagnostic) executePrometheusDiagnosticQuery(ctx *Context) error {
  249. resultCh := ctx.Query(pd.Query)
  250. result, err := resultCh.Await()
  251. if err != nil {
  252. return fmt.Errorf("prometheus diagnostic %s failed with error: %s", pd.ID, err)
  253. }
  254. if result == nil {
  255. result = []*QueryResult{}
  256. }
  257. pd.Result = result
  258. pd.Passed = len(result) == 0
  259. return nil
  260. }