
Initial pass at a Prometheus extraction. This is very much in progress and will not compile as-is. The DataSource contract is also still very much up in the air at this point.

Signed-off-by: Matt Bolt <mbolt35@gmail.com>
Matt Bolt 1 year ago
parent
commit
ebb80d7c95
58 changed files with 4798 additions and 2219 deletions
  1. core/go.mod (+1 -0)
  2. core/go.sum (+2 -0)
  3. core/pkg/source/datasource.go (+129 -0)
  4. core/pkg/source/error.go (+46 -3)
  5. core/pkg/source/error_test.go (+1 -1)
  6. core/pkg/source/querygroup.go (+95 -0)
  7. core/pkg/source/queryresult.go (+225 -0)
  8. go.mod (+1 -0)
  9. modules/collector-source/README.md (+3 -0)
  10. modules/prometheus-source/README.md (+3 -0)
  11. modules/prometheus-source/go.mod (+61 -0)
  12. modules/prometheus-source/go.sum (+698 -0)
  13. modules/prometheus-source/pkg/env/promenv.go (+228 -0)
  14. modules/prometheus-source/pkg/prom/config.go (+205 -0)
  15. modules/prometheus-source/pkg/prom/contextnames.go (+0 -0)
  16. modules/prometheus-source/pkg/prom/datasource.go (+1992 -0)
  17. modules/prometheus-source/pkg/prom/diagnostics.go (+27 -20)
  18. modules/prometheus-source/pkg/prom/helpers.go (+0 -0)
  19. modules/prometheus-source/pkg/prom/ids.go (+0 -0)
  20. modules/prometheus-source/pkg/prom/prom.go (+10 -26)
  21. modules/prometheus-source/pkg/prom/query.go (+92 -42)
  22. modules/prometheus-source/pkg/prom/query_test.go (+5 -3)
  23. modules/prometheus-source/pkg/prom/ratelimitedclient_test.go (+0 -0)
  24. modules/prometheus-source/pkg/prom/result.go (+12 -119)
  25. modules/prometheus-source/pkg/prom/thanos.go (+60 -0)
  26. modules/prometheus-source/pkg/prom/validate.go (+15 -12)
  27. modules/prometheus-source/pkg/prom/warning.go (+0 -0)
  28. modules/prometheus-source/pkg/thanos/thanos.go (+56 -0)
  29. pkg/cloud/alibaba/provider.go (+0 -6)
  30. pkg/cloud/aws/provider.go (+0 -4)
  31. pkg/cloud/azure/provider.go (+0 -4)
  32. pkg/cloud/gcp/provider.go (+0 -31)
  33. pkg/cloud/models/models.go (+0 -2)
  34. pkg/cloud/oracle/provider.go (+0 -5)
  35. pkg/cloud/otc/provider.go (+0 -6)
  36. pkg/cloud/provider/customprovider.go (+0 -5)
  37. pkg/cloud/scaleway/provider.go (+0 -5)
  38. pkg/cmd/agent/agent.go (+1 -1)
  39. pkg/costmodel/aggregation.go (+49 -47)
  40. pkg/costmodel/allocation.go (+6 -5)
  41. pkg/costmodel/allocation_helpers.go (+60 -59)
  42. pkg/costmodel/allocation_helpers_test.go (+13 -13)
  43. pkg/costmodel/allocation_incubating.go (+13 -12)
  44. pkg/costmodel/assets.go (+3 -3)
  45. pkg/costmodel/cluster.go (+161 -390)
  46. pkg/costmodel/cluster_helpers.go (+42 -42)
  47. pkg/costmodel/cluster_helpers_test.go (+15 -15)
  48. pkg/costmodel/clusters/clustermap.go (+2 -1)
  49. pkg/costmodel/containerkeys.go (+19 -36)
  50. pkg/costmodel/costmodel.go (+181 -152)
  51. pkg/costmodel/key.go (+15 -15)
  52. pkg/costmodel/metrics.go (+3 -2)
  53. pkg/costmodel/networkcosts.go (+5 -6)
  54. pkg/costmodel/resultparsers.go (+52 -52)
  55. pkg/costmodel/router.go (+186 -400)
  56. pkg/costmodel/sql.go (+0 -375)
  57. pkg/env/costmodelenv.go (+5 -190)
  58. pkg/thanos/thanos.go (+0 -109)

+ 1 - 0
core/go.mod

@@ -9,6 +9,7 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/go-plugin v1.6.0
 	github.com/json-iterator/go v1.1.12
+	github.com/julienschmidt/httprouter v1.3.0
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/rs/zerolog v1.26.1
 	github.com/spf13/viper v1.8.1

+ 2 - 0
core/go.sum

@@ -196,6 +196,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=

+ 129 - 0
core/pkg/source/datasource.go

@@ -0,0 +1,129 @@
+package source
+
+import (
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+)
+
+type InstantMetricsQuerier interface {
+	QueryRAMUsage(window string, offset string) QueryResultsChan
+	QueryCPUUsage(window string, offset string) QueryResultsChan
+	QueryNetworkInZoneRequests(window string, offset string) QueryResultsChan
+	QueryNetworkInRegionRequests(window string, offset string) QueryResultsChan
+	QueryNetworkInternetRequests(window string, offset string) QueryResultsChan
+	QueryNormalization(window string, offset string) QueryResultsChan
+
+	QueryHistoricalCPUCost(window string, offset string) QueryResultsChan
+	QueryHistoricalRAMCost(window string, offset string) QueryResultsChan
+	QueryHistoricalGPUCost(window string, offset string) QueryResultsChan
+	QueryHistoricalPodLabels(window string, offset string) QueryResultsChan
+}
+
+type RangeMetricsQuerier interface {
+	QueryRAMRequestsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryRAMUsageOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryRAMAllocationOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryCPURequestsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryCPUUsageOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryCPUAllocationOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryGPURequestsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryPVRequestsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryPVCAllocationOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryPVHourlyCostOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryNetworkInZoneOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryNetworkInRegionOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryNetworkInternetOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryNamespaceLabelsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryNamespaceAnnotationsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryPodLabelsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryPodAnnotationsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryServiceLabelsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryDeploymentLabelsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryStatefulsetLabelsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryPodJobsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+	QueryPodDaemonsetsOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+
+	QueryNormalizationOverTime(start, end time.Time, resolution time.Duration) QueryResultsChan
+}
+
+type ClusterMetricsQuerier interface {
+	// Cluster Disks
+	QueryPVCost(start, end time.Time) QueryResultsChan
+	QueryPVSize(start, end time.Time) QueryResultsChan
+	QueryPVStorageClass(start, end time.Time) QueryResultsChan
+	QueryPVUsedAverage(start, end time.Time) QueryResultsChan
+	QueryPVUsedMax(start, end time.Time) QueryResultsChan
+	QueryPVCInfo(start, end time.Time) QueryResultsChan
+	QueryPVActiveMinutes(start, end time.Time) QueryResultsChan
+
+	// Local Cluster Disks
+	QueryLocalStorageCost(start, end time.Time) QueryResultsChan
+	QueryLocalStorageUsedCost(start, end time.Time) QueryResultsChan
+	QueryLocalStorageUsedAvg(start, end time.Time) QueryResultsChan
+	QueryLocalStorageUsedMax(start, end time.Time) QueryResultsChan
+	QueryLocalStorageBytes(start, end time.Time) QueryResultsChan
+	QueryLocalStorageActiveMinutes(start, end time.Time) QueryResultsChan
+	QueryLocalStorageBytesByProvider(provider string, start, end time.Time) QueryResultsChan
+	QueryLocalStorageUsedByProvider(provider string, start, end time.Time) QueryResultsChan
+
+	// Nodes
+	QueryNodeCPUHourlyCost(start, end time.Time) QueryResultsChan
+	QueryNodeCPUCoresCapacity(start, end time.Time) QueryResultsChan
+	QueryNodeCPUCoresAllocatable(start, end time.Time) QueryResultsChan
+	QueryNodeRAMHourlyCost(start, end time.Time) QueryResultsChan
+	QueryNodeRAMBytesCapacity(start, end time.Time) QueryResultsChan
+	QueryNodeRAMBytesAllocatable(start, end time.Time) QueryResultsChan
+	QueryNodeGPUCount(start, end time.Time) QueryResultsChan
+	QueryNodeGPUHourlyCost(start, end time.Time) QueryResultsChan
+	QueryNodeLabels(start, end time.Time) QueryResultsChan
+	QueryNodeActiveMinutes(start, end time.Time) QueryResultsChan
+	QueryNodeIsSpot(start, end time.Time) QueryResultsChan
+	QueryNodeCPUModeTotal(start, end time.Time) QueryResultsChan
+
+	QueryNodeCPUModePercent(start, end time.Time) QueryResultsChan
+	QueryNodeRAMSystemPercent(start, end time.Time) QueryResultsChan
+	QueryNodeRAMUserPercent(start, end time.Time) QueryResultsChan
+
+	QueryNodeTotalLocalStorage(start, end time.Time) QueryResultsChan
+	QueryNodeUsedLocalStorage(start, end time.Time) QueryResultsChan
+
+	// Load Balancers
+	QueryLBCost(start, end time.Time) QueryResultsChan
+	QueryLBActiveMinutes(start, end time.Time) QueryResultsChan
+
+	// Cluster Costs
+	QueryDataCount(start, end time.Time) QueryResultsChan
+	QueryTotalGPU(start, end time.Time) QueryResultsChan
+	QueryTotalCPU(start, end time.Time) QueryResultsChan
+	QueryTotalRAM(start, end time.Time) QueryResultsChan
+	QueryTotalStorage(start, end time.Time) QueryResultsChan
+
+	// Cluster Costs
+	QueryClusterCores(start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterRAM(start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterStorage(start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterStorageByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterTotal(start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterTotalByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterNodes(start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterNodesByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
+}
+
+type OpenCostDataSource interface {
+	InstantMetricsQuerier
+	RangeMetricsQuerier
+	ClusterMetricsQuerier
+
+	RegisterEndPoints(router *httprouter.Router)
+
+	BatchDuration() time.Duration
+}
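
As a rough illustration of how this contract is meant to be consumed, here is a minimal sketch that drives one of the channel-returning instant queries and folds the returned vectors. The data source value, the window/offset arguments, and the summing logic are illustrative assumptions; only OpenCostDataSource, QueryResultsChan.Await, and QueryResult come from this change, and util.Vector is the existing core/pkg/util type with Timestamp/Value fields.

package example

import (
	"github.com/opencost/opencost/core/pkg/source"
)

// sumRAMUsage is a usage sketch, not part of the commit: it issues one of the
// new instant queries and totals the returned vector values.
func sumRAMUsage(ds source.OpenCostDataSource) (float64, error) {
	// Each Query* method returns immediately with a QueryResultsChan; Await
	// blocks for the single QueryResults payload and closes the channel.
	results, err := ds.QueryRAMUsage("1h", "").Await()
	if err != nil {
		return 0, err
	}

	var total float64
	for _, res := range results {
		for _, v := range res.Values { // v is a *util.Vector with Timestamp/Value
			total += v.Value
		}
	}
	return total, nil
}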

+ 46 - 3
pkg/prom/error.go → core/pkg/source/error.go

@@ -1,6 +1,7 @@
-package prom
+package source
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -23,7 +24,33 @@ func IsNoStoreAPIWarning(warning string) bool {
 }
 
 //--------------------------------------------------------------------------
-//  Prometheus Error Collection
+//  Help Retry Error
+//--------------------------------------------------------------------------
+
+// HelpRetryError is a wrapper error type which indicates an error should induce a retry, and
+// is non-fatal
+type HelpRetryError struct {
+	wrapped error
+}
+
+func (h *HelpRetryError) Unwrap() error {
+	return h.wrapped
+}
+
+func (h *HelpRetryError) Error() string {
+	return h.wrapped.Error()
+}
+
+func NewHelpRetryError(cause error) error {
+	return &HelpRetryError{wrapped: cause}
+}
+
+func IsRetryable(err error) bool {
+	return errors.As(err, new(*HelpRetryError))
+}
+
+//--------------------------------------------------------------------------
+//  Error Collection
 //--------------------------------------------------------------------------
 
 type QueryError struct {
@@ -89,6 +116,22 @@ type QueryErrorCollector struct {
 	warnings []*QueryWarning
 }
 
+// Appends a QueryError to the errors list
+func (ec *QueryErrorCollector) AppendError(err *QueryError) {
+	ec.m.Lock()
+	defer ec.m.Unlock()
+
+	ec.errors = append(ec.errors, err)
+}
+
+// Appends a QueryWarning to the warnings list
+func (ec *QueryErrorCollector) AppendWarning(warn *QueryWarning) {
+	ec.m.Lock()
+	defer ec.m.Unlock()
+
+	ec.warnings = append(ec.warnings, warn)
+}
+
 // Reports an error to the collector. Ignores if the error is nil and the warnings
 // are empty
 func (ec *QueryErrorCollector) Report(query string, warnings []string, requestError error, parseError error) {
@@ -273,7 +316,7 @@ func IsCommError(err error) bool {
 
 // Error prints the error as a string
 func (pce CommError) Error() string {
-	return fmt.Sprintf("Prometheus communication error: %s", strings.Join(pce.messages, ": "))
+	return fmt.Sprintf("Communication error: %s", strings.Join(pce.messages, ": "))
 }
 
 // Wrap wraps the error with the given message, but persists the error type.
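
A hypothetical sketch of how HelpRetryError is intended to be used on both sides of a query: the producer wraps a transient failure, and the consumer checks IsRetryable before trying again (assuming IsRetryable matches the HelpRetryError type anywhere in the wrap chain). The fetch callback and attempt count are made up for illustration.

package example

import (
	"fmt"

	"github.com/opencost/opencost/core/pkg/source"
)

// markTransient wraps a transient failure so callers know it is safe to retry.
func markTransient(cause error) error {
	return source.NewHelpRetryError(fmt.Errorf("data source unavailable: %w", cause))
}

// fetchWithRetry retries only while the error is flagged as retryable.
func fetchWithRetry(fetch func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fetch(); err == nil {
			return nil
		}
		if !source.IsRetryable(err) {
			return err
		}
	}
	return err
}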

+ 1 - 1
pkg/prom/error_test.go → core/pkg/source/error_test.go

@@ -1,4 +1,4 @@
-package prom
+package source
 
 import (
 	"errors"

+ 95 - 0
core/pkg/source/querygroup.go

@@ -0,0 +1,95 @@
+package source
+
+type QueryGroup struct {
+	errorCollector *QueryErrorCollector
+}
+
+type QueryGroupAsyncResult struct {
+	errorCollector *QueryErrorCollector
+	resultsChan    QueryResultsChan
+}
+
+func NewQueryGroup() *QueryGroup {
+	var errorCollector QueryErrorCollector
+
+	return &QueryGroup{
+		errorCollector: &errorCollector,
+	}
+}
+
+func (qg *QueryGroup) With(resultsChan QueryResultsChan) *QueryGroupAsyncResult {
+	return newQueryGroupAsyncResult(qg.errorCollector, resultsChan)
+}
+
+func (qg *QueryGroup) HasErrors() bool {
+	return qg.errorCollector.IsError()
+}
+
+func (qg *QueryGroup) Error() error {
+	return qg.errorCollector
+}
+
+func (qg *QueryGroup) Errors() []*QueryError {
+	return qg.errorCollector.Errors()
+}
+
+func newQueryGroupAsyncResult(collector *QueryErrorCollector, resultsChan QueryResultsChan) *QueryGroupAsyncResult {
+	return &QueryGroupAsyncResult{
+		errorCollector: collector,
+		resultsChan:    resultsChan,
+	}
+}
+
+func (qgar *QueryGroupAsyncResult) Await() ([]*QueryResult, error) {
+	defer close(qgar.resultsChan)
+	result := <-qgar.resultsChan
+
+	q := result.Query
+	err := result.Error
+
+	if err != nil {
+		qgar.errorCollector.AppendError(&QueryError{Query: q, Error: err})
+		return nil, err
+	}
+
+	return result.Results, nil
+}
+
+type QueryResultCollection []*QueryResults
+
+func (qrc *QueryResultCollection) HasErrors() bool {
+	for _, qr := range *qrc {
+		if qr.Error != nil {
+			return true
+		}
+	}
+	return false
+}
+
+func (qrc *QueryResultCollection) Error() error {
+	var errCollection QueryErrorCollector
+
+	for _, qr := range *qrc {
+		q := qr.Query
+		e := qr.Error
+
+		if e != nil {
+			if IsErrorCollection(e) {
+				if errs, ok := e.(QueryErrorCollection); ok {
+					for _, qErr := range errs.Errors() {
+						errCollection.AppendError(qErr)
+					}
+					for _, qWarn := range errs.Warnings() {
+						errCollection.AppendWarning(qWarn)
+					}
+				} else {
+					errCollection.AppendError(&QueryError{Query: q, Error: e})
+				}
+			} else {
+				errCollection.AppendError(&QueryError{Query: q, Error: e})
+			}
+		}
+	}
+
+	return &errCollection
+}
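
For reference, a sketch of the fan-out pattern QueryGroup appears designed for: start several queries, await them through the group, and read any accumulated errors once at the end. The data source value and the specific queries are assumptions; the QueryGroup API itself is as introduced above.

package example

import (
	"github.com/opencost/opencost/core/pkg/source"
)

// cpuAndRAM issues two queries concurrently and funnels their failures into a
// single QueryErrorCollector via the QueryGroup.
func cpuAndRAM(ds source.OpenCostDataSource) (cpu, ram []*source.QueryResult, err error) {
	qg := source.NewQueryGroup()

	// Start both queries before awaiting either so they run in parallel.
	cpuCh := ds.QueryCPUUsage("1h", "")
	ramCh := ds.QueryRAMUsage("1h", "")

	// Await records any per-query error on the group's collector.
	cpu, _ = qg.With(cpuCh).Await()
	ram, _ = qg.With(ramCh).Await()

	if qg.HasErrors() {
		return nil, nil, qg.Error()
	}
	return cpu, ram, nil
}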

+ 225 - 0
core/pkg/source/queryresult.go

@@ -0,0 +1,225 @@
+package source
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util"
+)
+
+// QueryResultsChan is a channel of query results
+type QueryResultsChan chan *QueryResults
+
+// Await returns query results, blocking until they are made available, and
+// deferring the closure of the underlying channel
+func (qrc QueryResultsChan) Await() ([]*QueryResult, error) {
+	defer close(qrc)
+
+	results := <-qrc
+	if results.Error != nil {
+		return nil, results.Error
+	}
+
+	return results.Results, nil
+}
+
+type ResultKeys struct {
+	ClusterKey      string
+	NamespaceKey    string
+	NodeKey         string
+	InstanceKey     string
+	InstanceTypeKey string
+	ContainerKey    string
+	PodKey          string
+	ProviderIDKey   string
+	DeviceKey       string
+}
+
+func DefaultResultKeys() *ResultKeys {
+	return &ResultKeys{
+		ClusterKey:      "cluster_id",
+		NamespaceKey:    "namespace",
+		NodeKey:         "node",
+		InstanceKey:     "instance",
+		InstanceTypeKey: "instance_type",
+		ContainerKey:    "container",
+		PodKey:          "pod",
+		ProviderIDKey:   "provider_id",
+		DeviceKey:       "device",
+	}
+}
+
+func ClusterKeyWithDefaults(clusterKey string) *ResultKeys {
+	keys := DefaultResultKeys()
+	keys.ClusterKey = clusterKey
+	return keys
+}
+
+// QueryResults contains all of the query results and the source query string.
+type QueryResults struct {
+	Query   string
+	Error   error
+	Results []*QueryResult
+}
+
+func NewQueryResults(query string) *QueryResults {
+	return &QueryResults{
+		Query: query,
+	}
+}
+
+// QueryResult contains a single result from a prometheus query. It's common
+// to refer to query results as a slice of QueryResult
+type QueryResult struct {
+	Metric map[string]interface{} `json:"metric"`
+	Values []*util.Vector         `json:"values"`
+
+	keys *ResultKeys
+}
+
+func NewQueryResult(metrics map[string]any, values []*util.Vector, keys *ResultKeys) *QueryResult {
+	if keys == nil {
+		keys = DefaultResultKeys()
+	}
+
+	return &QueryResult{
+		Metric: metrics,
+		Values: values,
+		keys:   keys,
+	}
+}
+
+func (qr *QueryResult) GetCluster() (string, error) {
+	return qr.GetString(qr.keys.ClusterKey)
+}
+
+func (qr *QueryResult) GetNamespace() (string, error) {
+	return qr.GetString(qr.keys.NamespaceKey)
+}
+
+func (qr *QueryResult) GetNode() (string, error) {
+	return qr.GetString(qr.keys.NodeKey)
+}
+
+func (qr *QueryResult) GetInstance() (string, error) {
+	return qr.GetString(qr.keys.InstanceKey)
+}
+
+func (qr *QueryResult) GetInstanceType() (string, error) {
+	return qr.GetString(qr.keys.InstanceTypeKey)
+}
+
+func (qr *QueryResult) GetContainer() (string, error) {
+	value, err := qr.GetString(qr.keys.ContainerKey)
+	if value == "" || err != nil {
+		alternate, e := qr.GetString(qr.keys.ContainerKey + "_name")
+		if alternate == "" || e != nil {
+			return "", fmt.Errorf("'%s' and '%s' fields do not exist in data result vector", qr.keys.ContainerKey, qr.keys.ContainerKey+"_name")
+		}
+		return alternate, nil
+	}
+	return value, nil
+}
+
+func (qr *QueryResult) GetPod() (string, error) {
+	value, err := qr.GetString(qr.keys.PodKey)
+	if value == "" || err != nil {
+		alternate, e := qr.GetString(qr.keys.PodKey + "_name")
+		if alternate == "" || e != nil {
+			return "", fmt.Errorf("'%s' and '%s' fields do not exist in data result vector", qr.keys.PodKey, qr.keys.PodKey+"_name")
+		}
+		return alternate, nil
+	}
+	return value, nil
+}
+
+func (qr *QueryResult) GetProviderID() (string, error) {
+	return qr.GetString(qr.keys.ProviderIDKey)
+}
+
+func (qr *QueryResult) GetDevice() (string, error) {
+	return qr.GetString(qr.keys.DeviceKey)
+}
+
+// GetString returns the requested field, or an error if it does not exist
+func (qr *QueryResult) GetString(field string) (string, error) {
+	f, ok := qr.Metric[field]
+	if !ok {
+		return "", fmt.Errorf("'%s' field does not exist in data result vector", field)
+	}
+
+	strField, ok := f.(string)
+	if !ok {
+		return "", fmt.Errorf("'%s' field is improperly formatted and cannot be converted to string", field)
+	}
+
+	return strField, nil
+}
+
+// GetStrings returns the requested fields, or an error if it does not exist
+func (qr *QueryResult) GetStrings(fields ...string) (map[string]string, error) {
+	values := map[string]string{}
+
+	for _, field := range fields {
+		f, ok := qr.Metric[field]
+		if !ok {
+			return nil, fmt.Errorf("'%s' field does not exist in data result vector", field)
+		}
+
+		value, ok := f.(string)
+		if !ok {
+			return nil, fmt.Errorf("'%s' field is improperly formatted and cannot be converted to string", field)
+		}
+
+		values[field] = value
+	}
+
+	return values, nil
+}
+
+// GetLabels returns all labels and their values from the query result
+func (qr *QueryResult) GetLabels() map[string]string {
+	result := make(map[string]string)
+
+	// Find All keys with prefix label_, remove prefix, add to labels
+	for k, v := range qr.Metric {
+		if !strings.HasPrefix(k, "label_") {
+			continue
+		}
+
+		label := strings.TrimPrefix(k, "label_")
+		value, ok := v.(string)
+		if !ok {
+			log.Warnf("Failed to parse label value for label: '%s'", label)
+			continue
+		}
+
+		result[label] = value
+	}
+
+	return result
+}
+
+// GetAnnotations returns all annotations and their values from the query result
+func (qr *QueryResult) GetAnnotations() map[string]string {
+	result := make(map[string]string)
+
+	// Find All keys with prefix annotation_, remove prefix, add to annotations
+	for k, v := range qr.Metric {
+		if !strings.HasPrefix(k, "annotation_") {
+			continue
+		}
+
+		annotations := strings.TrimPrefix(k, "annotation_")
+		value, ok := v.(string)
+		if !ok {
+			log.Warnf("Failed to parse annotation value for annotation: '%s'", annotations)
+			continue
+		}
+
+		result[annotations] = value
+	}
+
+	return result
+}
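
A small sketch of the accessor helpers above, pulling the namespace and label set out of each result row; the map shape and the skip-on-error policy are assumptions for illustration, while GetNamespace and GetLabels are the helpers introduced in this file.

package example

import (
	"github.com/opencost/opencost/core/pkg/source"
)

// labelsByNamespace groups the label set of each result under its namespace.
func labelsByNamespace(results []*source.QueryResult) map[string]map[string]string {
	out := map[string]map[string]string{}
	for _, res := range results {
		ns, err := res.GetNamespace()
		if err != nil {
			// The metric did not carry the configured namespace key; skip it.
			continue
		}
		out[ns] = res.GetLabels() // all "label_*" series labels, prefix stripped
	}
	return out
}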

+ 1 - 0
go.mod

@@ -3,6 +3,7 @@ module github.com/opencost/opencost
 replace (
 	github.com/golang/lint => golang.org/x/lint v0.0.0-20180702182130-06c8688daad7
 	github.com/opencost/opencost/core => ./core
+	github.com/opencost/opencost/modules/prometheus-source => ./modules/prometheus-source
 )
 
 require (

+ 3 - 0
modules/collector-source/README.md

@@ -0,0 +1,3 @@
+# OpenCost Data Sources - Collector
+
+The OpenCost Collector is a data source implementation which provides OpenCost with the metrics and metadata required to calculate cost allocation. The collector is responsible for gathering data from various sources, such as Kubernetes, cloud providers, and other external systems, and transforming it into a format that can be consumed by the OpenCost API.

+ 3 - 0
modules/prometheus-source/README.md

@@ -0,0 +1,3 @@
+# OpenCost Data Sources - Prometheus
+
+The OpenCost Prometheus data source is an implementation which provides OpenCost with the metrics and metadata required to calculate cost allocation. Prometheus provides longer retention periods and more detailed metrics than the OpenCost Collector, which is useful for historical analysis and cost forecasting.

+ 61 - 0
modules/prometheus-source/go.mod

@@ -0,0 +1,61 @@
+module github.com/opencost/opencost/modules/prometheus-source
+
+go 1.23.0
+
+replace (
+	github.com/golang/lint => golang.org/x/lint v0.0.0-20180702182130-06c8688daad7
+	github.com/opencost/opencost/core => ./../../core
+)
+
+require (
+	github.com/opencost/opencost v1.113.0
+	github.com/opencost/opencost/core v0.0.0-20241211165149-ee44b80e2fd0
+	github.com/prometheus/client_golang v1.20.5
+	gopkg.in/yaml.v2 v2.4.0
+	k8s.io/client-go v0.32.0
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/julienschmidt/httprouter v1.3.0 // indirect
+	github.com/magiconair/properties v1.8.5 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/pelletier/go-toml v1.9.3 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.55.0 // indirect
+	github.com/rs/zerolog v1.26.1 // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/cast v1.3.1 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/spf13/viper v1.8.1 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/term v0.25.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/time v0.7.0 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	k8s.io/api v0.32.0 // indirect
+	k8s.io/apimachinery v0.32.0 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
+)

+ 698 - 0
modules/prometheus-source/go.sum

@@ -0,0 +1,698 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/opencost/opencost v1.113.0 h1:QciC1V1e/BmTvsei/MxvAnhiUNEuxCGkehx7+U2G0qQ=
+github.com/opencost/opencost v1.113.0/go.mod h1:o1F9J5JXdtGymtQzLnhiCA2AQCt5/Q5cL7NB6ApHgd8=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
+github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
+k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
+k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
+k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
+k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

+ 228 - 0
modules/prometheus-source/pkg/env/promenv.go

@@ -0,0 +1,228 @@
+package env
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+const (
+	PrometheusServerEndpointEnvVar = "PROMETHEUS_SERVER_ENDPOINT"
+
+	PrometheusRetryOnRateLimitResponseEnvVar    = "PROMETHEUS_RETRY_ON_RATE_LIMIT"
+	PrometheusRetryOnRateLimitMaxRetriesEnvVar  = "PROMETHEUS_RETRY_ON_RATE_LIMIT_MAX_RETRIES"
+	PrometheusRetryOnRateLimitDefaultWaitEnvVar = "PROMETHEUS_RETRY_ON_RATE_LIMIT_DEFAULT_WAIT"
+
+	PrometheusQueryTimeoutEnvVar        = "PROMETHEUS_QUERY_TIMEOUT"
+	PrometheusKeepAliveEnvVar           = "PROMETHEUS_KEEP_ALIVE"
+	PrometheusTLSHandshakeTimeoutEnvVar = "PROMETHEUS_TLS_HANDSHAKE_TIMEOUT"
+	ScrapeIntervalEnvVar                = "KUBECOST_SCRAPE_INTERVAL"
+
+	ETLMaxPrometheusQueryDurationMinutes = "ETL_MAX_PROMETHEUS_QUERY_DURATION_MINUTES"
+
+	MaxQueryConcurrencyEnvVar = "MAX_QUERY_CONCURRENCY"
+	QueryLoggingFileEnvVar    = "QUERY_LOGGING_FILE"
+	PromClusterIDLabelEnvVar  = "PROM_CLUSTER_ID_LABEL"
+
+	PrometheusHeaderXScopeOrgIdEnvVar = "PROMETHEUS_HEADER_X_SCOPE_ORGID"
+	InsecureSkipVerifyEnvVar          = "INSECURE_SKIP_VERIFY"
+	KubeRbacProxyEnabledEnvVar        = "KUBE_RBAC_PROXY_ENABLED"
+
+	ThanosEnabledEnvVar      = "THANOS_ENABLED"
+	ThanosQueryUrlEnvVar     = "THANOS_QUERY_URL"
+	ThanosOffsetEnvVar       = "THANOS_QUERY_OFFSET"
+	ThanosMaxSourceResEnvVar = "THANOS_MAX_SOURCE_RESOLUTION"
+
+	DBBasicAuthUsername = "DB_BASIC_AUTH_USERNAME"
+	DBBasicAuthPassword = "DB_BASIC_AUTH_PW"
+	DBBearerToken       = "DB_BEARER_TOKEN"
+
+	MultiClusterBasicAuthUsername = "MC_BASIC_AUTH_USERNAME"
+	MultiClusterBasicAuthPassword = "MC_BASIC_AUTH_PW"
+	MultiClusterBearerToken       = "MC_BEARER_TOKEN"
+
+	CurrentClusterIdFilterEnabledVar = "CURRENT_CLUSTER_ID_FILTER_ENABLED"
+	ClusterIDEnvVar                  = "CLUSTER_ID"
+
+	KubecostJobNameEnvVar      = "KUBECOST_JOB_NAME"
+	ETLResolutionSecondsEnvVar = "ETL_RESOLUTION_SECONDS"
+)
+
+// IsPrometheusRetryOnRateLimitResponse returns true if requests should be retried when a 429 response is received,
+// or when a 400 response is received with a body containing ThrottleException (common in AWS services like AMP).
+func IsPrometheusRetryOnRateLimitResponse() bool {
+	return env.GetBool(PrometheusRetryOnRateLimitResponseEnvVar, true)
+}
+
+// GetPrometheusRetryOnRateLimitMaxRetries returns the maximum number of retries that should be attempted prior to failing.
+// Only used if IsPrometheusRetryOnRateLimitResponse() is true.
+func GetPrometheusRetryOnRateLimitMaxRetries() int {
+	return env.GetInt(PrometheusRetryOnRateLimitMaxRetriesEnvVar, 5)
+}
+
+// GetPrometheusRetryOnRateLimitDefaultWait returns the default wait time for a retriable rate limit response without a
+// Retry-After header.
+func GetPrometheusRetryOnRateLimitDefaultWait() time.Duration {
+	return env.GetDuration(PrometheusRetryOnRateLimitDefaultWaitEnvVar, 100*time.Millisecond)
+}
+
+// GetPrometheusHeaderXScopeOrgId returns the value used for the X-Scope-OrgID header on requests to a Mimir/Cortex-Tenant API.
+// To use Mimir (or Cortex-Tenant) instead of Prometheus, add the variable to the cluster settings:
+// "PROMETHEUS_HEADER_X_SCOPE_ORGID": "my-cluster-name"
+// Then set Prometheus URL to prometheus API endpoint:
+// "PROMETHEUS_SERVER_ENDPOINT": "http://mimir-url/prometheus/"
+func GetPrometheusHeaderXScopeOrgId() string {
+	return env.Get(PrometheusHeaderXScopeOrgIdEnvVar, "")
+}
+
+// GetPrometheusServerEndpoint returns the environment variable value for PrometheusServerEndpointEnvVar which
+// represents the prometheus server endpoint used to execute prometheus queries.
+func GetPrometheusServerEndpoint() string {
+	return env.Get(PrometheusServerEndpointEnvVar, "")
+}
+
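+// GetScrapeInterval returns the configured Prometheus scrape interval; a zero duration indicates
+// that no interval was explicitly configured.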
+func GetScrapeInterval() time.Duration {
+	return env.GetDuration(ScrapeIntervalEnvVar, 0)
+}
+
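+// GetPrometheusQueryTimeout returns the timeout applied to individual Prometheus queries (default 120s).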
+func GetPrometheusQueryTimeout() time.Duration {
+	return env.GetDuration(PrometheusQueryTimeoutEnvVar, 120*time.Second)
+}
+
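+// GetPrometheusKeepAlive returns the HTTP keep-alive duration used by the Prometheus client (default 120s).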
+func GetPrometheusKeepAlive() time.Duration {
+	return env.GetDuration(PrometheusKeepAliveEnvVar, 120*time.Second)
+}
+
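+// GetPrometheusTLSHandshakeTimeout returns the TLS handshake timeout used by the Prometheus client (default 10s).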
+func GetPrometheusTLSHandshakeTimeout() time.Duration {
+	return env.GetDuration(PrometheusTLSHandshakeTimeoutEnvVar, 10*time.Second)
+}
+
+// GetJobName returns the environment variable value for KubecostJobNameEnvVar, specifying the job name
+// prometheus uses to scrape the provided metrics.
+func GetJobName() string {
+	return env.Get(KubecostJobNameEnvVar, "kubecost")
+}
+
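+// IsInsecureSkipVerify returns true if TLS certificate verification should be skipped when connecting to Prometheus.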
+func IsInsecureSkipVerify() bool {
+	return env.GetBool(InsecureSkipVerifyEnvVar, false)
+}
+
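+// IsKubeRbacProxyEnabled returns true when the Prometheus endpoint is fronted by kube-rbac-proxy, in which case
+// the in-cluster service account token and service CA are used for authentication.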
+func IsKubeRbacProxyEnabled() bool {
+	return env.GetBool(KubeRbacProxyEnabledEnvVar, false)
+}
+
+// GetETLResolution determines the resolution of ETL queries. The smaller the
+// duration, the higher the resolution; the higher the resolution, the more
+// accurate the query results, but the more computationally expensive.
+func GetETLResolution() time.Duration {
+	// Use the configured ETL resolution, or default to
+	// 5m (i.e. 300s)
+	secs := time.Duration(env.GetInt64(ETLResolutionSecondsEnvVar, 300))
+	return secs * time.Second
+}
+
+// IsThanosEnabled returns the environment variable value for ThanosEnabledEnvVar which represents whether
+// or not thanos is enabled.
+func IsThanosEnabled() bool {
+	return env.GetBool(ThanosEnabledEnvVar, false)
+}
+
+// GetThanosQueryUrl returns the environment variable value for ThanosQueryUrlEnvVar which represents the
+// target query endpoint for hitting thanos.
+func GetThanosQueryUrl() string {
+	return env.Get(ThanosQueryUrlEnvVar, "")
+}
+
+// GetThanosOffset returns the environment variable value for ThanosOffsetEnvVar which represents the total
+// amount of time to offset all queries made to thanos.
+func GetThanosOffset() string {
+	return env.Get(ThanosOffsetEnvVar, "3h")
+}
+
+// GetThanosMaxSourceResolution returns the environment variable value for ThanosMaxSourceResEnvVar which represents
+// the max source resolution to use when querying thanos.
+func GetThanosMaxSourceResolution() string {
+	res := env.Get(ThanosMaxSourceResEnvVar, "raw")
+
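+	// "raw" is an alias for "0s" (no downsampling); unrecognized values also fall back to "0s".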
+	switch res {
+	case "raw":
+		return "0s"
+	case "0s":
+		fallthrough
+	case "5m":
+		fallthrough
+	case "1h":
+		return res
+	default:
+		return "0s"
+	}
+}
+
+// GetMaxQueryConcurrency returns the environment variable value for MaxQueryConcurrencyEnvVar
+func GetMaxQueryConcurrency() int {
+	maxQueryConcurrency := env.GetInt(MaxQueryConcurrencyEnvVar, 5)
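+	// A non-positive value falls back to the number of available CPU threads (GOMAXPROCS).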
+	if maxQueryConcurrency <= 0 {
+		return runtime.GOMAXPROCS(0)
+	}
+	return maxQueryConcurrency
+}
+
+// GetQueryLoggingFile returns a file location if query logging is enabled. Otherwise, empty string
+func GetQueryLoggingFile() string {
+	return env.Get(QueryLoggingFileEnvVar, "")
+}
+
+func GetDBBasicAuthUsername() string {
+	return env.Get(DBBasicAuthUsername, "")
+}
+
+func GetDBBasicAuthUserPassword() string {
+	return env.Get(DBBasicAuthPassword, "")
+}
+
+func GetDBBearerToken() string {
+	return env.Get(DBBearerToken, "")
+}
+
+// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
+func GetMultiClusterBasicAuthUsername() string {
+	return env.Get(MultiClusterBasicAuthUsername, "")
+}
+
+// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
+func GetMultiClusterBasicAuthPassword() string {
+	return env.Get(MultiClusterBasicAuthPassword, "")
+}
+
+func GetMultiClusterBearerToken() string {
+	return env.Get(MultiClusterBearerToken, "")
+}
+
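+// GetETLMaxPrometheusQueryDuration returns the maximum window a single ETL Prometheus query may cover (default 1 day).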
+func GetETLMaxPrometheusQueryDuration() time.Duration {
+	dayMins := 60 * 24
+	mins := time.Duration(env.GetInt64(ETLMaxPrometheusQueryDurationMinutes, int64(dayMins)))
+	return mins * time.Minute
+}
+
+// GetPromClusterLabel returns the environment variable value for PromClusterIDLabelEnvVar, the label used to identify the cluster in metrics.
+func GetPromClusterLabel() string {
+	return env.Get(PromClusterIDLabelEnvVar, "cluster_id")
+}
+
+// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
+// configurable identifier used for multi-cluster metric emission.
+func GetClusterID() string {
+	return env.Get(ClusterIDEnvVar, "")
+}
+
+// GetPromClusterFilter returns an additional prometheus label filter for the current cluster ID when
+// CurrentClusterIdFilterEnabledVar is set; otherwise it returns an empty string.
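+// For example, with the default label and CLUSTER_ID="my-cluster", the returned filter is: cluster_id="my-cluster".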
+func GetPromClusterFilter() string {
+	if env.GetBool(CurrentClusterIdFilterEnabledVar, false) {
+		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), GetClusterID())
+	}
+	return ""
+}

+ 205 - 0
modules/prometheus-source/pkg/prom/config.go

@@ -0,0 +1,205 @@
+package prom
+
+import (
+	"crypto/x509"
+	"fmt"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
+	"github.com/opencost/opencost/modules/prometheus-source/pkg/env"
+
+	restclient "k8s.io/client-go/rest"
+	certutil "k8s.io/client-go/util/cert"
+)
+
+const (
+	ServiceCA = `/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt`
+)
+
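+// OpenCostPrometheusConfig holds the Prometheus-specific configuration used by the OpenCost prometheus data source.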
+type OpenCostPrometheusConfig struct {
+	ServerEndpoint        string
+	ClientConfig          *PrometheusClientConfig
+	ScrapeInterval        time.Duration
+	JobName               string
+	Offset                string
+	QueryOffset           time.Duration
+	MaxQueryDuration      time.Duration
+	ClusterLabel          string
+	ClusterID             string
+	ClusterFilter         string
+	DataResolution        time.Duration
+	DataResolutionMinutes int
+}
+
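+// OpenCostThanosConfig extends OpenCostPrometheusConfig with Thanos-specific settings.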
+type OpenCostThanosConfig struct {
+	*OpenCostPrometheusConfig
+
+	MaxSourceResolution string
+}
+
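+// IsRateLimitRetryEnabled returns true when rate-limit retry options have been configured for the client.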
+func (ocpc *OpenCostPrometheusConfig) IsRateLimitRetryEnabled() bool {
+	return ocpc.ClientConfig.RateLimitRetryOpts != nil
+}
+
+// NewOpenCostPrometheusConfigFromEnv creates a new OpenCostPrometheusConfig from environment variables.
+func NewOpenCostPrometheusConfigFromEnv() (*OpenCostPrometheusConfig, error) {
+	serverEndpoint := env.GetPrometheusServerEndpoint()
+	if serverEndpoint == "" {
+		return nil, fmt.Errorf("no address for prometheus set in $%s", env.PrometheusServerEndpointEnvVar)
+	}
+
+	queryConcurrency := env.GetMaxQueryConcurrency()
+	log.Infof("Prometheus Client Max Concurrency set to %d", queryConcurrency)
+
+	timeout := env.GetPrometheusQueryTimeout()
+	keepAlive := env.GetPrometheusKeepAlive()
+	tlsHandshakeTimeout := env.GetPrometheusTLSHandshakeTimeout()
+
+	jobName := env.GetJobName()
+	scrapeInterval := env.GetScrapeInterval()
+
+	maxQueryDuration := env.GetETLMaxPrometheusQueryDuration()
+
+	clusterId := env.GetClusterID()
+	clusterLabel := env.GetPromClusterLabel()
+	clusterFilter := env.GetPromClusterFilter()
+
+	var rateLimitRetryOpts *RateLimitRetryOpts = nil
+	if env.IsPrometheusRetryOnRateLimitResponse() {
+		rateLimitRetryOpts = &RateLimitRetryOpts{
+			MaxRetries:       env.GetPrometheusRetryOnRateLimitMaxRetries(),
+			DefaultRetryWait: env.GetPrometheusRetryOnRateLimitDefaultWait(),
+		}
+	}
+
+	auth := &ClientAuth{
+		Username:    env.GetDBBasicAuthUsername(),
+		Password:    env.GetDBBasicAuthUserPassword(),
+		BearerToken: env.GetDBBearerToken(),
+	}
+
+	// We will use the service account token and service-ca.crt to authenticate with the Prometheus server via kube-rbac-proxy.
+	// We need to ensure that the service account has the necessary permissions to access the Prometheus server by binding it to the appropriate role.
+	var tlsCaCert *x509.CertPool
+	if env.IsKubeRbacProxyEnabled() {
+		restConfig, err := restclient.InClusterConfig()
+		if err != nil {
+			log.Errorf("%s was set to true but failed to get in-cluster config: %s", env.KubeRbacProxyEnabledEnvVar, err)
+		} else {
+			auth.BearerToken = restConfig.BearerToken
+		}
+		tlsCaCert, err = certutil.NewPool(ServiceCA)
+		if err != nil {
+			log.Errorf("%s was set to true but failed to load service-ca.crt: %s", env.KubeRbacProxyEnabledEnvVar, err)
+		}
+	}
+
+	dataResolution := env.GetETLResolution()
+
+	// If the configured data resolution is less than one minute, default the resolution in minutes to 1.
+	resolutionMinutes := int(dataResolution.Minutes())
+	if resolutionMinutes == 0 {
+		resolutionMinutes = 1
+	}
+
+	clientConfig := &PrometheusClientConfig{
+		Timeout:               timeout,
+		KeepAlive:             keepAlive,
+		TLSHandshakeTimeout:   tlsHandshakeTimeout,
+		TLSInsecureSkipVerify: env.IsInsecureSkipVerify(),
+		RootCAs:               tlsCaCert,
+		RateLimitRetryOpts:    rateLimitRetryOpts,
+		Auth:                  auth,
+		QueryConcurrency:      queryConcurrency,
+		QueryLogFile:          "",
+		HeaderXScopeOrgId:     env.GetPrometheusHeaderXScopeOrgId(),
+	}
+
+	return &OpenCostPrometheusConfig{
+		ServerEndpoint:        serverEndpoint,
+		ClientConfig:          clientConfig,
+		ScrapeInterval:        scrapeInterval,
+		JobName:               jobName,
+		Offset:                "",
+		QueryOffset:           time.Duration(0),
+		MaxQueryDuration:      maxQueryDuration,
+		ClusterLabel:          clusterLabel,
+		ClusterID:             clusterId,
+		ClusterFilter:         clusterFilter,
+		DataResolution:        dataResolution,
+		DataResolutionMinutes: resolutionMinutes,
+	}, nil
+}
+
+// NewOpenCostThanosConfigFromEnv creates a new OpenCostThanosConfig from environment variables.
+func NewOpenCostThanosConfigFromEnv() (*OpenCostThanosConfig, error) {
+	serverEndpoint := env.GetThanosQueryUrl()
+	if serverEndpoint == "" {
+		return nil, fmt.Errorf("no address for thanos set in $%s", env.ThanosQueryUrlEnvVar)
+	}
+
+	queryConcurrency := env.GetMaxQueryConcurrency()
+	log.Infof("Thanos Client Max Concurrency set to %d", queryConcurrency)
+
+	timeout := env.GetPrometheusQueryTimeout()
+	keepAlive := env.GetPrometheusKeepAlive()
+	tlsHandshakeTimeout := env.GetPrometheusTLSHandshakeTimeout()
+
+	jobName := env.GetJobName()
+	scrapeInterval := env.GetScrapeInterval()
+
+	maxQueryDuration := env.GetETLMaxPrometheusQueryDuration()
+	clusterLabel := env.GetPromClusterLabel()
+
+	var rateLimitRetryOpts *RateLimitRetryOpts = nil
+	if env.IsPrometheusRetryOnRateLimitResponse() {
+		rateLimitRetryOpts = &RateLimitRetryOpts{
+			MaxRetries:       env.GetPrometheusRetryOnRateLimitMaxRetries(),
+			DefaultRetryWait: env.GetPrometheusRetryOnRateLimitDefaultWait(),
+		}
+	}
+
+	auth := &ClientAuth{
+		Username:    env.GetMultiClusterBasicAuthUsername(),
+		Password:    env.GetMultiClusterBasicAuthPassword(),
+		BearerToken: env.GetMultiClusterBearerToken(),
+	}
+
+	clientConfig := &PrometheusClientConfig{
+		Timeout:               timeout,
+		KeepAlive:             keepAlive,
+		TLSHandshakeTimeout:   tlsHandshakeTimeout,
+		TLSInsecureSkipVerify: env.IsInsecureSkipVerify(),
+		RateLimitRetryOpts:    rateLimitRetryOpts,
+		Auth:                  auth,
+		QueryConcurrency:      queryConcurrency,
+		QueryLogFile:          env.GetQueryLoggingFile(),
+		HeaderXScopeOrgId:     "",
+		RootCAs:               nil,
+	}
+
+	thanosQueryOffset := env.GetThanosOffset()
+	d, err := timeutil.ParseDuration(thanosQueryOffset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse thanos query offset: %w", err)
+	}
+
+	dataResolution := env.GetETLResolution()
+
+	return &OpenCostThanosConfig{
+		OpenCostPrometheusConfig: &OpenCostPrometheusConfig{
+			ServerEndpoint:   serverEndpoint,
+			ClientConfig:     clientConfig,
+			ScrapeInterval:   scrapeInterval,
+			JobName:          jobName,
+			Offset:           thanosQueryOffset,
+			QueryOffset:      d,
+			MaxQueryDuration: maxQueryDuration,
+			ClusterID:        "", // thanos is multi-cluster
+			ClusterFilter:    "", // thanos is multi-cluster
+			ClusterLabel:     clusterLabel,
+			DataResolution:   dataResolution,
+		},
+		MaxSourceResolution: env.GetThanosMaxSourceResolution(),
+	}, nil
+}

+ 0 - 0
pkg/prom/contextnames.go → modules/prometheus-source/pkg/prom/contextnames.go


+ 1992 - 0
modules/prometheus-source/pkg/prom/datasource.go

@@ -0,0 +1,1992 @@
+package prom
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/modules/prometheus-source/pkg/env"
+	"github.com/opencost/opencost/pkg/prom"
+
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/protocol"
+	"github.com/opencost/opencost/core/pkg/source"
+	"github.com/opencost/opencost/core/pkg/util/httputil"
+	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
+
+	prometheus "github.com/prometheus/client_golang/api"
+	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"
+)
+
+const (
+	apiPrefix         = "/api/v1"
+	epAlertManagers   = apiPrefix + "/alertmanagers"
+	epLabelValues     = apiPrefix + "/label/:name/values"
+	epSeries          = apiPrefix + "/series"
+	epTargets         = apiPrefix + "/targets"
+	epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
+	epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
+	epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
+	epConfig          = apiPrefix + "/status/config"
+	epFlags           = apiPrefix + "/status/flags"
+	epRules           = apiPrefix + "/rules"
+)
+
+// helper for query range proxy requests
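+// Expects "start" and "end" in the layout "2006-01-02T15:04:05.000Z" and "duration" in Go duration syntax (e.g. "5m").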
+func toStartEndStep(qp httputil.QueryParams) (start, end time.Time, step time.Duration, err error) {
+	var e error
+
+	ss := qp.Get("start", "")
+	es := qp.Get("end", "")
+	ds := qp.Get("duration", "")
+	layout := "2006-01-02T15:04:05.000Z"
+
+	start, e = time.Parse(layout, ss)
+	if e != nil {
+		err = fmt.Errorf("Error parsing time %s. Error: %s", ss, e)
+		return
+	}
+	end, e = time.Parse(layout, es)
+	if e != nil {
+		err = fmt.Errorf("Error parsing time %s. Error: %s", es, e)
+		return
+	}
+	step, e = time.ParseDuration(ds)
+	if e != nil {
+		err = fmt.Errorf("Error parsing duration %s. Error: %s", ds, e)
+		return
+	}
+	err = nil
+
+	return
+}
+
+// FIXME: Before merge, implement a more robust design. This is brittle and bug-prone,
+// FIXME: but decouples the prom requirements from the Provider implementations.
+var providerStorageQueries = map[string]func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string{
+	"aws": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"gcp": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		// TODO Set to the price for the appropriate storage class. It's not trivial to determine the local storage disk type
+		// See https://cloud.google.com/compute/disks-image-pricing#persistentdisk
+		localStorageCost := 0.04
+
+		baseMetric := "container_fs_limit_bytes"
+		if used {
+			baseMetric = "container_fs_usage_bytes"
+		}
+
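+		// The cumulative query sums per-minute byte samples and converts the byte-minutes to GiB-months
+		// (/60 to byte-hours, /730 to byte-months, /1024^3 to GiB-months) before applying the per-GiB-month price;
+		// the rate (monthly) query averages usage over the window and prices it as a monthly run rate.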
+		fmtCumulativeQuery := `sum(
+			sum_over_time(%s{device!="tmpfs", id="/", %s}[%s:1m])
+		) by (%s) / 60 / 730 / 1024 / 1024 / 1024 * %f`
+
+		fmtMonthlyQuery := `sum(
+			avg_over_time(%s{device!="tmpfs", id="/", %s}[%s:1m])
+		) by (%s) / 1024 / 1024 / 1024 * %f`
+
+		fmtQuery := fmtCumulativeQuery
+		if rate {
+			fmtQuery = fmtMonthlyQuery
+		}
+		fmtWindow := timeutil.DurationString(end.Sub(start))
+
+		return fmt.Sprintf(fmtQuery, baseMetric, config.ClusterFilter, fmtWindow, config.ClusterLabel, localStorageCost)
+	},
+	"azure": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"alibaba": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"scaleway": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"otc": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"oracle": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"csv": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+	"custom": func(config *OpenCostPrometheusConfig, start, end time.Time, rate bool, used bool) string {
+		return ""
+	},
+}
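+
+// Note (informational): returning an empty string from one of these builders means "no local
+// storage query for this provider"; QueryLocalStorageBytesByProvider and
+// QueryLocalStorageUsedByProvider short-circuit to an empty result in that case, so provider
+// support only requires registering a non-empty query builder under the lowercase provider name.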
+
+// creates a new help error which indicates the caller can retry and is non-fatal.
+func newHelpRetryError(format string, args ...any) error {
+	formatWithHelp := format + "\nTroubleshooting help available at: %s"
+	args = append(args, PrometheusTroubleshootingURL)
+
+	cause := fmt.Errorf(formatWithHelp, args...)
+	return source.NewHelpRetryError(cause)
+}
+
+// PrometheusDataSource is the OpenCost data source implementation leveraging Prometheus. Prometheus provides longer retention periods and
+// more detailed metrics than the OpenCost Collector, which is useful for historical analysis and cost forecasting.
+type PrometheusDataSource struct {
+	promConfig   *OpenCostPrometheusConfig
+	promClient   prometheus.Client
+	promContexts *ContextFactory
+
+	thanosConfig   *OpenCostThanosConfig
+	thanosClient   prometheus.Client
+	thanosContexts *ContextFactory
+}
+
+// NewDefaultPrometheusDataSource creates and initializes a new `PrometheusDataSource` with configuration
+// parsed from environment variables. This function blocks until a connection to prometheus is established
+// or the attempt fails, so it is recommended to run it in a goroutine on a retry cycle.
+func NewDefaultPrometheusDataSource() (*PrometheusDataSource, error) {
+	config, err := NewOpenCostPrometheusConfigFromEnv()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create prometheus config from env: %w", err)
+	}
+
+	var thanosConfig *OpenCostThanosConfig
+	if env.IsThanosEnabled() {
+		// thanos initialization is not fatal, so we log the error and continue
+		thanosConfig, err = NewOpenCostThanosConfigFromEnv()
+		if err != nil {
+			log.Warnf("Thanos was enabled, but failed to create thanos config from env: %s. Continuing...", err.Error())
+		}
+	}
+
+	return NewPrometheusDataSource(config, thanosConfig)
+}
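+
+// A minimal retry-loop sketch (illustrative only, not part of this package's API), assuming the
+// caller wants to block start-up until Prometheus is reachable:
+//
+//	var ds *PrometheusDataSource
+//	for {
+//		d, err := NewDefaultPrometheusDataSource()
+//		if err == nil {
+//			ds = d
+//			break
+//		}
+//		log.Warnf("prometheus is not ready, retrying: %s", err)
+//		time.Sleep(30 * time.Second)
+//	}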
+
+// NewPrometheusDataSource initializes clients for Prometheus and Thanos, and returns a new PrometheusDataSource.
+func NewPrometheusDataSource(promConfig *OpenCostPrometheusConfig, thanosConfig *OpenCostThanosConfig) (*PrometheusDataSource, error) {
+	promClient, err := NewPrometheusClient(promConfig.ServerEndpoint, promConfig.ClientConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build prometheus client: %w", err)
+	}
+
+	// validation of the prometheus client
+
+	m, err := Validate(promClient, promConfig)
+	if err != nil {
+		return nil, newHelpRetryError("failed to query prometheus at %s: %w", promConfig.ServerEndpoint, err)
+	}
+	if !m.Running {
+		return nil, newHelpRetryError("prometheus at %s is not running", promConfig.ServerEndpoint)
+	}
+	log.Infof("Success: retrieved the 'up' query against prometheus at: %s", promConfig.ServerEndpoint)
+
+	// we don't consider this a fatal error, but we log for visibility
+	api := prometheusAPI.NewAPI(promClient)
+	_, err = api.Buildinfo(context.Background())
+	if err != nil {
+		log.Infof("No valid prometheus config file at %s. Error: %s.\nTroubleshooting help available at: %s.\n**Ignore if using cortex/mimir/thanos here**", promConfig.ServerEndpoint, err.Error(), PrometheusTroubleshootingURL)
+	} else {
+		log.Infof("Retrieved a prometheus config file from: %s", promConfig.ServerEndpoint)
+	}
+
+	// Fix a zero scrape interval by attempting to look up the interval for the configured job
+	if promConfig.ScrapeInterval == 0 {
+		promConfig.ScrapeInterval = time.Minute
+
+		// Look up the scrape interval for the configured job and update if found
+		si, err := ScrapeIntervalFor(promClient, promConfig.JobName)
+		if err == nil {
+			promConfig.ScrapeInterval = si
+		}
+	}
+
+	log.Infof("Using scrape interval of %f", promConfig.ScrapeInterval.Seconds())
+
+	promContexts := NewContextFactory(promClient, promConfig)
+
+	var thanosClient prometheus.Client
+	var thanosContexts *ContextFactory
+
+	// if the thanos configuration is non-nil, we assume intent to use thanos. However, failure to
+	// initialize the thanos client is not fatal, and we will log the error and continue.
+	if thanosConfig != nil {
+		thanosHost := thanosConfig.ServerEndpoint
+		if thanosHost != "" {
+			thanosCli, err := NewThanosClient(thanosHost, thanosConfig)
+			if err != nil {
+				log.Warnf("Failed to create Thanos client for %s. Error: %s. Continuing without Thanos...", thanosHost, err.Error())
+			} else {
+				if _, err := Validate(thanosCli, thanosConfig.OpenCostPrometheusConfig); err != nil {
+					log.Warnf("Failed to query Thanos at %s. Error: %s.", thanosHost, err.Error())
+				} else {
+					log.Infof("Success: retrieved the 'up' query against Thanos at: %s", thanosHost)
+				}
+
+				thanosClient = thanosCli
+				thanosContexts = NewContextFactory(thanosClient, thanosConfig.OpenCostPrometheusConfig)
+			}
+		} else {
+			log.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
+		}
+	}
+
+	return &PrometheusDataSource{
+		promConfig:     promConfig,
+		promClient:     promClient,
+		promContexts:   promContexts,
+		thanosConfig:   thanosConfig,
+		thanosClient:   thanosClient,
+		thanosContexts: thanosContexts,
+	}, nil
+}
+
+var proto = protocol.HTTP()
+
+// prometheusMetadata returns the metadata for the prometheus server
+func (pds *PrometheusDataSource) prometheusMetadata(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	resp := proto.ToResponse(Validate(pds.promClient, pds.promConfig))
+	proto.WriteResponse(w, resp)
+}
+
+// prometheusRecordingRules is a proxy for /rules against prometheus
+func (pds *PrometheusDataSource) prometheusRecordingRules(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	u := pds.promClient.URL(epRules, nil)
+
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		fmt.Fprintf(w, "Error creating Prometheus rule request: %s", err)
+		return
+	}
+
+	_, body, err := pds.promClient.Do(r.Context(), req)
+	if err != nil {
+		fmt.Fprintf(w, "Error making Prometheus rule request: %s", err)
+		return
+	}
+
+	w.Write(body)
+}
+
+// prometheusConfig returns the current configuration of the prometheus server
+func (pds *PrometheusDataSource) prometheusConfig(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	pConfig := map[string]string{
+		"address": pds.promConfig.ServerEndpoint,
+	}
+
+	body, err := json.Marshal(pConfig)
+	if err != nil {
+		fmt.Fprintf(w, "Error marshalling prometheus config: %s", err)
+		return
+	}
+
+	w.Write(body)
+}
+
+// prometheusTargets is a proxy for /targets against prometheus
+func (pds *PrometheusDataSource) prometheusTargets(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	u := pds.promClient.URL(epTargets, nil)
+
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		fmt.Fprintf(w, "Error creating Prometheus targets request: %s", err)
+		return
+	}
+
+	_, body, err := pds.promClient.Do(r.Context(), req)
+	if err != nil {
+		fmt.Fprintf(w, "Error making Prometheus targets request: %s", err)
+		return
+	}
+
+	w.Write(body)
+}
+
+// status returns the status of the prometheus client
+func (pds *PrometheusDataSource) status(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	promServer := pds.promConfig.ServerEndpoint
+
+	api := prometheusAPI.NewAPI(pds.promClient)
+	result, err := api.Buildinfo(r.Context())
+	if err != nil {
+		fmt.Fprintf(w, "Using Prometheus at %s. Error: %s", promServer, err)
+	} else {
+		fmt.Fprintf(w, "Using Prometheus at %s. Version: %s", promServer, result.Version)
+	}
+}
+
+// prometheusQuery is a proxy for /query against prometheus
+func (pds *PrometheusDataSource) prometheusQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	qp := httputil.NewQueryParams(r.URL.Query())
+	query := qp.Get("query", "")
+	if query == "" {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("query parameter 'query' is unset")))
+		return
+	}
+
+	// Attempt to parse time as either a unix timestamp or as an RFC3339 value
+	var timeVal time.Time
+	timeStr := qp.Get("time", "")
+	if len(timeStr) > 0 {
+		if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
+			timeVal = time.Unix(t, 0)
+		} else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
+			timeVal = t
+		}
+
+		// If time is given, but not parse-able, return an error
+		if timeVal.IsZero() {
+			http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
+			return
+		}
+	}
+
+	ctx := pds.promContexts.NewNamedContext(FrontendContextName)
+	body, err := ctx.RawQuery(query, timeVal)
+	if err != nil {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
+		return
+	}
+
+	w.Write(body)
+}
+
+// prometheusQueryRange is a proxy for /query_range against prometheus
+func (pds *PrometheusDataSource) prometheusQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	qp := httputil.NewQueryParams(r.URL.Query())
+	query := qp.Get("query", "")
+	if query == "" {
+		fmt.Fprintf(w, "Error parsing query from request parameters.")
+		return
+	}
+
+	start, end, duration, err := toStartEndStep(qp)
+	if err != nil {
+		fmt.Fprint(w, err.Error())
+		return
+	}
+
+	ctx := pds.promContexts.NewNamedContext(FrontendContextName)
+	body, err := ctx.RawQueryRange(query, start, end, duration)
+	if err != nil {
+		fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
+		return
+	}
+
+	w.Write(body)
+}
+
+// thanosQuery is a proxy for /query against thanos
+func (pds *PrometheusDataSource) thanosQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	if pds.thanosClient == nil {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("ThanosDisabled")))
+		return
+	}
+
+	qp := httputil.NewQueryParams(r.URL.Query())
+	query := qp.Get("query", "")
+	if query == "" {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("query parameter 'query' is unset")))
+		return
+	}
+
+	// Attempt to parse time as either a unix timestamp or as an RFC3339 value
+	var timeVal time.Time
+	timeStr := qp.Get("time", "")
+	if len(timeStr) > 0 {
+		if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
+			timeVal = time.Unix(t, 0)
+		} else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
+			timeVal = t
+		}
+
+		// If time is given, but not parse-able, return an error
+		if timeVal.IsZero() {
+			http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
+			return
+		}
+	}
+
+	ctx := pds.thanosContexts.NewNamedContext(FrontendContextName)
+	body, err := ctx.RawQuery(query, timeVal)
+	if err != nil {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
+		return
+	}
+
+	w.Write(body)
+}
+
+// thanosQueryRange is a proxy for /query_range against thanos
+func (pds *PrometheusDataSource) thanosQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	if pds.thanosClient == nil {
+		proto.WriteResponse(w, proto.ToResponse(nil, fmt.Errorf("ThanosDisabled")))
+		return
+	}
+
+	qp := httputil.NewQueryParams(r.URL.Query())
+	query := qp.Get("query", "")
+	if query == "" {
+		fmt.Fprintf(w, "Error parsing query from request parameters.")
+		return
+	}
+
+	start, end, duration, err := toStartEndStep(qp)
+	if err != nil {
+		fmt.Fprint(w, err.Error())
+		return
+	}
+
+	ctx := pds.thanosContexts.NewNamedContext(FrontendContextName)
+	body, err := ctx.RawQueryRange(query, start, end, duration)
+	if err != nil {
+		fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
+		return
+	}
+
+	w.Write(body)
+}
+
+// prometheusQueueState returns the current state of the prometheus and thanos request queues
+func (pds *PrometheusDataSource) prometheusQueueState(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	promQueueState, err := GetPrometheusQueueState(pds.promClient, pds.promConfig)
+	if err != nil {
+		proto.WriteResponse(w, proto.ToResponse(nil, err))
+		return
+	}
+
+	result := map[string]*PrometheusQueueState{
+		"prometheus": promQueueState,
+	}
+
+	if pds.thanosClient != nil {
+		thanosQueueState, err := GetPrometheusQueueState(pds.thanosClient, pds.thanosConfig.OpenCostPrometheusConfig)
+		if err != nil {
+			log.Warnf("Error getting Thanos queue state: %s", err)
+		} else {
+			result["thanos"] = thanosQueueState
+		}
+	}
+
+	proto.WriteResponse(w, proto.ToResponse(result, nil))
+}
+
+// prometheusMetrics retrieves availability of Prometheus and Thanos metrics
+func (pds *PrometheusDataSource) prometheusMetrics(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	promMetrics := GetPrometheusMetrics(pds.promClient, pds.promConfig, "")
+
+	result := map[string][]*PrometheusDiagnostic{
+		"prometheus": promMetrics,
+	}
+
+	if pds.thanosClient != nil {
+		thanosMetrics := GetPrometheusMetrics(pds.thanosClient, pds.thanosConfig.OpenCostPrometheusConfig, pds.thanosConfig.Offset)
+		result["thanos"] = thanosMetrics
+	}
+
+	proto.WriteResponse(w, proto.ToResponse(result, nil))
+}
+
+func (pds *PrometheusDataSource) RegisterEndPoints(router *httprouter.Router) {
+	// endpoints migrated from server
+	router.GET("/validatePrometheus", pds.prometheusMetadata)
+	router.GET("/prometheusRecordingRules", pds.prometheusRecordingRules)
+	router.GET("/prometheusConfig", pds.prometheusConfig)
+	router.GET("/prometheusTargets", pds.prometheusTargets)
+	router.GET("/status", pds.status)
+
+	// prom query proxies
+	router.GET("/prometheusQuery", pds.prometheusQuery)
+	router.GET("/prometheusQueryRange", pds.prometheusQueryRange)
+	router.GET("/thanosQuery", pds.thanosQuery)
+	router.GET("/thanosQueryRange", pds.thanosQueryRange)
+
+	// diagnostics
+	router.GET("/diagnostics/requestQueue", pds.prometheusQueueState)
+	router.GET("/diagnostics/prometheusMetrics", pds.prometheusMetrics)
+}
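+
+// Illustrative wiring sketch (the ":9003" listen address is hypothetical; choose whatever the
+// host application uses):
+//
+//	ds, err := NewDefaultPrometheusDataSource()
+//	if err != nil {
+//		panic(err) // or retry, as recommended above
+//	}
+//	router := httprouter.New()
+//	ds.RegisterEndPoints(router)
+//	http.ListenAndServe(":9003", router)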
+
+func (pds *PrometheusDataSource) RefreshInterval() time.Duration {
+	return pds.promConfig.ScrapeInterval
+}
+
+func (pds *PrometheusDataSource) BatchDuration() time.Duration {
+	return pds.promConfig.MaxQueryDuration
+}
+
+func (pds *PrometheusDataSource) QueryRAMUsage(window string, offset string) source.QueryResultsChan {
+	const ramUsageQuery = `avg(
+		label_replace(
+			label_replace(
+				label_replace(
+					sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
+				), "container_name", "$1", "container", "(.+)"
+			), "pod_name", "$1", "pod", "(.+)"
+		)
+	) by (namespace, container_name, pod_name, node, %s)`
+	// env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
+
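+	// normalize a bare duration (e.g. "10m") into the "offset 10m" clause the query template expects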
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryRAMUsage := fmt.Sprintf(ramUsageQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryRAMUsage)
+}
+
+func (pds *PrometheusDataSource) QueryCPUUsage(window string, offset string) source.QueryResultsChan {
+	const cpuUsageQuery = `avg(
+		label_replace(
+			label_replace(
+				label_replace(
+					rate(
+						container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
+					), "node", "$1", "instance", "(.+)"
+				), "container_name", "$1", "container", "(.+)"
+			), "pod_name", "$1", "pod", "(.+)"
+		)
+	) by (namespace, container_name, pod_name, node, %s)`
+	// env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryCPUUsage := fmt.Sprintf(cpuUsageQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryCPUUsage)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInZoneRequests(window string, offset string) source.QueryResultsChan {
+	const zoneNetworkUsageQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryZoneNetworkUsage := fmt.Sprintf(zoneNetworkUsageQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryZoneNetworkUsage)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInRegionRequests(window string, offset string) source.QueryResultsChan {
+	const regionNetworkUsageQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryRegionNetworkUsage := fmt.Sprintf(regionNetworkUsageQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryRegionNetworkUsage)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInternetRequests(window string, offset string) source.QueryResultsChan {
+	const internetNetworkUsageQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryInternetNetworkUsage := fmt.Sprintf(internetNetworkUsageQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryInternetNetworkUsage)
+}
+
+func (pds *PrometheusDataSource) QueryNormalization(window string, offset string) source.QueryResultsChan {
+	const normalizationQuery = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
+	// env.GetPromClusterFilter(), window, offset)
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryNormalization := fmt.Sprintf(normalizationQuery, cfg.ClusterFilter, window, offset)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryNormalization)
+}
+
+func (pds *PrometheusDataSource) QueryHistoricalCPUCost(window string, offset string) source.QueryResultsChan {
+	const historicalCPUCostQuery = `avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`
+	// env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryHistoricalCPUCost := fmt.Sprintf(historicalCPUCostQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryHistoricalCPUCost)
+}
+
+func (pds *PrometheusDataSource) QueryHistoricalRAMCost(window string, offset string) source.QueryResultsChan {
+	const historicalRAMCostQuery = `avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`
+	// env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryHistoricalRAMCost := fmt.Sprintf(historicalRAMCostQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryHistoricalRAMCost)
+}
+
+func (pds *PrometheusDataSource) QueryHistoricalGPUCost(window string, offset string) source.QueryResultsChan {
+	const historicalGPUCostQuery = `avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`
+	// env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryHistoricalGPUCost := fmt.Sprintf(historicalGPUCostQuery, cfg.ClusterFilter, window, offset, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryHistoricalGPUCost)
+}
+
+func (pds *PrometheusDataSource) QueryHistoricalPodLabels(window string, offset string) source.QueryResultsChan {
+	const historicalPodLabelsQuery = `kube_pod_labels{%s}[%s] %s`
+	// env.GetPromClusterFilter(), window, offset
+
+	if offset != "" && !strings.Contains(offset, "offset") {
+		offset = fmt.Sprintf("offset %s", offset)
+	}
+
+	cfg := pds.promConfig
+
+	queryHistoricalPodLabels := fmt.Sprintf(historicalPodLabelsQuery, cfg.ClusterFilter, window, offset)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataContextName)
+	return ctx.Query(queryHistoricalPodLabels)
+}
+
+func (pds *PrometheusDataSource) QueryRAMRequestsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const ramRequestsQuery = `avg(
+		label_replace(
+			label_replace(
+				sum_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", container!="",container!="POD", node!="", %s}[%s] %s)
+				, "container_name","$1","container","(.+)"
+			), "pod_name","$1","pod","(.+)"
+		)
+	) by (namespace,container_name,pod_name,node,%s)`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryRAMRequests := fmt.Sprintf(ramRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryRAMRequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryRAMUsageOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const ramUsageQuery = `avg(
+		label_replace(
+			label_replace(
+				label_replace(
+					sum_over_time(container_memory_working_set_bytes{container!="", container!="POD", instance!="", %s}[%s] %s), "node", "$1", "instance", "(.+)"
+				), "container_name", "$1", "container", "(.+)"
+			), "pod_name", "$1", "pod", "(.+)"
+		)
+	) by (namespace, container_name, pod_name, node, %s)`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryRAMUsage := fmt.Sprintf(ramUsageQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryRAMUsage, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryRAMAllocationOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	// ramAllocationByteHoursQuery yields the total byte-hour RAM allocation over the given
+	// window, aggregated by container.
+	//  [line 3]  sum_over_time(each byte) = [byte*scrape] by metric
+	//  [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 =  [hours/scrape] by container
+	//  [lines 2,4]  sum(") by unique container key and multiply [byte*scrape] * [hours/scrape] for byte*hours
+	//  [lines 1,5]  relabeling
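+	//
+	// Worked example (illustrative, assuming a 60s scrape interval): the multiplier is
+	// scrapeIntervalSeconds / 60 / 60 = 60/3600 = 1/60 hour per sample, so a container holding
+	// 2GiB for a full hour (60 samples) contributes 60 * 2GiB * 1/60 = 2 GiB-hours.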
+	const ramAllocationByteHoursQuery = `
+		label_replace(label_replace(
+			sum(
+				sum_over_time(container_memory_allocation_bytes{container!="",container!="POD", node!="", %s}[%s])
+			) by (namespace,container,pod,node,%s) * %f / 60 / 60
+		, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
+	// env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	scrapeIntervalSeconds := cfg.ScrapeInterval.Seconds()
+
+	queryRAMAllocationByteHours := fmt.Sprintf(ramAllocationByteHoursQuery, cfg.ClusterFilter, resStr, cfg.ClusterLabel, scrapeIntervalSeconds)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryRAMAllocationByteHours, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryCPURequestsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const cpuRequestsQuery = `avg(
+		label_replace(
+			label_replace(
+				sum_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="",container!="POD", node!="", %s}[%s] %s)
+				, "container_name","$1","container","(.+)"
+			), "pod_name","$1","pod","(.+)"
+		)
+	) by (namespace,container_name,pod_name,node,%s)`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryCPURequests := fmt.Sprintf(cpuRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryCPURequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryCPUUsageOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const cpuUsageQuery = `avg(
+		label_replace(
+			label_replace(
+				label_replace(
+					rate(
+						container_cpu_usage_seconds_total{container!="", container!="POD", instance!="", %s}[%s] %s
+					), "node", "$1", "instance", "(.+)"
+				), "container_name", "$1", "container", "(.+)"
+			), "pod_name", "$1", "pod", "(.+)"
+		)
+	) by (namespace, container_name, pod_name, node, %s)`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryCPUUsage := fmt.Sprintf(cpuUsageQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryCPUUsage, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryCPUAllocationOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	// cpuAllocationQuery yields the total VCPU-hour CPU allocation over the given
+	// window, aggregated by container.
+	//  [line 3] sum_over_time(each VCPU*mins in window) = [VCPU*scrape] by metric
+	//  [line 4] (scalar(avg(prometheus_target_interval_length_seconds)) = [seconds/scrape] / 60 / 60 =  [hours/scrape] by container
+	//  [lines 2,4]  sum(") by unique container key and multiply [VCPU*scrape] * [hours/scrape] for VCPU*hours
+	//  [lines 1,5]  relabeling
+	const cpuAllocationQuery = `
+		label_replace(label_replace(
+			sum(
+				sum_over_time(container_cpu_allocation{container!="",container!="POD", node!="", %s}[%s])
+			) by (namespace,container,pod,node,%s) * %f / 60 / 60
+		, "container_name","$1","container","(.+)"), "pod_name","$1","pod","(.+)")`
+	// env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	scrapeIntervalSeconds := cfg.ScrapeInterval.Seconds()
+
+	queryCPUAllocation := fmt.Sprintf(cpuAllocationQuery, cfg.ClusterFilter, resStr, cfg.ClusterLabel, scrapeIntervalSeconds)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryCPUAllocation, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryGPURequestsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const gpuRequestsQuery = `avg(
+		label_replace(
+			label_replace(
+				sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s),
+				"container_name","$1","container","(.+)"
+			), "pod_name","$1","pod","(.+)"
+		)
+	) by (namespace,container_name,pod_name,node,%s)`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryGPURequests := fmt.Sprintf(gpuRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryGPURequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPVRequestsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const pvRequestsQuery = `avg(avg(kube_persistentvolumeclaim_info{volumename != "", %s}) by (persistentvolumeclaim, storageclass, namespace, volumename, %s, kubernetes_node)
+	*
+	on (persistentvolumeclaim, namespace, %s, kubernetes_node) group_right(storageclass, volumename)
+	sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{%s}) by (persistentvolumeclaim, namespace, %s, kubernetes_node, kubernetes_name)) by (persistentvolumeclaim, storageclass, namespace, %s, volumename, kubernetes_node)`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	resolution = snapResolutionMinute(resolution)
+
+	queryPVRequests := fmt.Sprintf(pvRequestsQuery, cfg.ClusterFilter, cfg.ClusterLabel, cfg.ClusterLabel, cfg.ClusterFilter, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPVRequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPVCAllocationOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	// pvcAllocationQuery yields the total byte-hour PVC allocation over the given window.
+	//   sum_over_time(each byte) = [byte*scrape] by metric
+	//   scrape interval [seconds/scrape] / 60 / 60 = [hours/scrape]; multiplying yields [byte*hours] by pod
+	const pvcAllocationQuery = `sum(sum_over_time(pod_pvc_allocation{%s}[%s])) by (%s, namespace, pod, persistentvolume, persistentvolumeclaim) * %f/60/60`
+	// env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	scrapeIntervalSeconds := cfg.ScrapeInterval.Seconds()
+
+	queryPVCAllocation := fmt.Sprintf(pvcAllocationQuery, cfg.ClusterFilter, resStr, cfg.ClusterLabel, scrapeIntervalSeconds)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPVCAllocation, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPVHourlyCostOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const pvHourlyCostQuery = `avg_over_time(pv_hourly_cost{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr)
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryPVHourlyCost := fmt.Sprintf(pvHourlyCostQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInZoneOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const netZoneRequestsQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNetZoneRequests := fmt.Sprintf(netZoneRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInRegionOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const netRegionRequestsQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="false", sameZone="false", sameRegion="false", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNetRegionRequests := fmt.Sprintf(netRegionRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNetworkInternetOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const netInternetRequestsQuery = `sum(increase(kubecost_pod_network_egress_bytes_total{internet="true", %s}[%s] %s)) by (namespace,pod_name,%s) / 1024 / 1024 / 1024`
+	// env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNetInternetRequests := fmt.Sprintf(netInternetRequestsQuery, cfg.ClusterFilter, resStr, "", cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNamespaceLabelsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const namespaceLabelsQuery = `avg_over_time(kube_namespace_labels{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNamespaceLabels := fmt.Sprintf(namespaceLabelsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNamespaceLabels, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNamespaceAnnotationsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const namespaceAnnotationsQuery = `avg_over_time(kube_namespace_annotations{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNamespaceAnnotations := fmt.Sprintf(namespaceAnnotationsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNamespaceAnnotations, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPodLabelsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const podLabelsQuery = `avg_over_time(kube_pod_labels{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryPodLabels := fmt.Sprintf(podLabelsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPodLabels, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPodAnnotationsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const podAnnotationsQuery = `avg_over_time(kube_pod_annotations{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryPodAnnotations := fmt.Sprintf(podAnnotationsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPodAnnotations, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryServiceLabelsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const serviceLabelsQuery = `avg_over_time(service_selector_labels{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryServiceLabels := fmt.Sprintf(serviceLabelsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryServiceLabels, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryDeploymentLabelsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const deploymentLabelsQuery = `avg_over_time(deployment_match_labels{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryDeploymentLabels := fmt.Sprintf(deploymentLabelsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryDeploymentLabels, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryStatefulsetLabelsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const statefulsetLabelsQuery = `avg_over_time(statefulSet_match_labels{%s}[%s])`
+	// env.GetPromClusterFilter(), resStr
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryStatefulsetLabels := fmt.Sprintf(statefulsetLabelsQuery, cfg.ClusterFilter, resStr)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryStatefulsetLabels, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPodJobsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const podJobsQuery = `sum(kube_pod_owner{owner_kind="Job", %s}) by (namespace,pod,owner_name,%s)`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel()
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+
+	queryPodJobs := fmt.Sprintf(podJobsQuery, cfg.ClusterFilter, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPodJobs, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPodDaemonsetsOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const podDaemonsetsQuery = `sum(kube_pod_owner{owner_kind="DaemonSet", %s}) by (namespace,pod,owner_name,%s)`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel()
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+
+	queryPodDaemonsets := fmt.Sprintf(podDaemonsetsQuery, cfg.ClusterFilter, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryPodDaemonsets, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryNormalizationOverTime(start, end time.Time, resolution time.Duration) source.QueryResultsChan {
+	const normalizationQuery = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
+	// env.GetPromClusterFilter(), resStr, "")
+
+	cfg := pds.promConfig
+
+	resolution = snapResolutionMinute(resolution)
+	resMins := int64(resolution.Minutes())
+	resStr := formatResolutionMinutes(resMins)
+
+	queryNormalization := fmt.Sprintf(normalizationQuery, cfg.ClusterFilter, resStr, "")
+	ctx := pds.promContexts.NewNamedContext(ComputeCostDataRangeContextName)
+	return ctx.QueryRange(queryNormalization, start, end, resolution)
+}
+
+func (pds *PrometheusDataSource) QueryPVCost(start, end time.Time) source.QueryResultsChan {
+	const pvCostQuery = `avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume,provider_id)`
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVCost")
+	}
+
+	queryPVCost := fmt.Sprintf(pvCostQuery, pds.promConfig.ClusterFilter, durStr, pds.promConfig.ClusterLabel)
+
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVSize(start, end time.Time) source.QueryResultsChan {
+	const pvSizeQuery = `avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[%s])) by (%s, persistentvolume)`
+
+	cfg := pds.promConfig
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVSize")
+	}
+
+	queryPVSize := fmt.Sprintf(pvSizeQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVSize, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVStorageClass(start, end time.Time) source.QueryResultsChan {
+	// `avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, persistentvolume, storageclass)`
+	// , env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const pvStorageSizeQuery = `avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, persistentvolume, storageclass)`
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVStorageClass")
+	}
+
+	queryPVStorageClass := fmt.Sprintf(pvStorageSizeQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVStorageClass, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVUsedAverage(start, end time.Time) source.QueryResultsChan {
+	// `avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const pvUsedAverageQuery = `avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVUsedAverage")
+	}
+
+	queryPVUsedAvg := fmt.Sprintf(pvUsedAverageQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVUsedAvg, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVUsedMax(start, end time.Time) source.QueryResultsChan {
+	// `max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const pvUsedMaxQuery = `max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVUsedMax")
+	}
+
+	queryPVUsedMax := fmt.Sprintf(pvUsedMaxQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVUsedMax, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVCInfo(start, end time.Time) source.QueryResultsChan {
+	// `avg(avg_over_time(kube_persistentvolumeclaim_info{%s}[%s])) by (%s, volumename, persistentvolumeclaim, namespace)`
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const pvcInfoQuery = `avg(avg_over_time(kube_persistentvolumeclaim_info{%s}[%s])) by (%s, volumename, persistentvolumeclaim, namespace)`
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVCInfo")
+	}
+
+	queryPVCInfo := fmt.Sprintf(pvcInfoQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVCInfo, end)
+}
+
+func (pds *PrometheusDataSource) QueryPVActiveMinutes(start, end time.Time) source.QueryResultsChan {
+	// `avg(kube_persistentvolume_capacity_bytes{%s}) by (%s, persistentvolume)[%s:%dm]`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+	const pvActiveMinsQuery = `avg(kube_persistentvolume_capacity_bytes{%s}) by (%s, persistentvolume)[%s:%dm]`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryPVActiveMinutes")
+	}
+
+	queryPVActiveMins := fmt.Sprintf(pvActiveMinsQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryPVActiveMins, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageCost(start, end time.Time) source.QueryResultsChan {
+	// `sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+
+	const localStorageCostQuery = `sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+
+	cfg := pds.promConfig
+	resolution := cfg.DataResolution
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageCost")
+	}
+
+	// If the configured data resolution is below one minute, default to 1m.
+	minsPerResolution := int(resolution.Minutes())
+	if minsPerResolution == 0 {
+		minsPerResolution = 1
+		log.DedupedWarningf(3, "QueryLocalStorageCost: Configured resolution (%d seconds) is below the 60 second threshold. Overriding with 1 minute.", int(resolution.Seconds()))
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an
+	// hourly value, converts it to a cumulative value; i.e. [$/hr] *
+	// [min/res]*[hr/min] = [$/res]
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+	costPerGBHr := 0.04 / 730.0
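+	// Worked numbers (illustrative): with a 1-minute resolution, hourlyToCumulative = 1/60 ≈ 0.0167,
+	// and costPerGBHr = 0.04 / 730 ≈ $0.0000548 per GB-hour (a flat $0.04/GB-month local-disk price).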
+
+	queryLocalStorageCost := fmt.Sprintf(localStorageCostQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageUsedCost(start, end time.Time) source.QueryResultsChan {
+	// `sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+
+	const localStorageUsedCostQuery = `sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageUsedCost")
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an
+	// hourly value, converts it to a cumulative value; i.e. [$/hr] *
+	// [min/res]*[hr/min] = [$/res]
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+	costPerGBHr := 0.04 / 730.0
+
+	queryLocalStorageUsedCost := fmt.Sprintf(localStorageUsedCostQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageUsedCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageUsedAvg(start, end time.Time) source.QueryResultsChan {
+	// `avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	const localStorageUsedAvgQuery = `avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageUsedAvg")
+	}
+
+	queryLocalStorageUsedAvg := fmt.Sprintf(localStorageUsedAvgQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageUsedAvg, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageUsedMax(start, end time.Time) source.QueryResultsChan {
+	// `max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+	//  env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+	const localStorageUsedMaxQuery = `max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageUsedMax")
+	}
+
+	queryLocalStorageUsedMax := fmt.Sprintf(localStorageUsedMaxQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageUsedMax, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageBytes(start, end time.Time) source.QueryResultsChan {
+	// `avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm])`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+
+	const localStorageBytesQuery = `avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm])`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageBytes")
+	}
+
+	queryLocalStorageBytes := fmt.Sprintf(localStorageBytesQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageBytes, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageActiveMinutes(start, end time.Time) source.QueryResultsChan {
+	// `count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+
+	const localStorageActiveMinutesQuery = `count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLocalStorageActiveMinutes")
+	}
+
+	queryLocalStorageActiveMins := fmt.Sprintf(localStorageActiveMinutesQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLocalStorageActiveMins, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageBytesByProvider(provider string, start, end time.Time) source.QueryResultsChan {
+	var localStorageBytesQuery string
+
+	key := strings.ToLower(provider)
+	if f, ok := providerStorageQueries[key]; ok {
+		localStorageBytesQuery = f(pds.promConfig, start, end, false, false)
+	} else {
+		localStorageBytesQuery = ""
+	}
+
+	if localStorageBytesQuery == "" {
+		return newEmptyResult()
+	}
+
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(localStorageBytesQuery, end)
+}
+
+func (pds *PrometheusDataSource) QueryLocalStorageUsedByProvider(provider string, start, end time.Time) source.QueryResultsChan {
+	var localStorageUsedQuery string
+
+	key := strings.ToLower(provider)
+	if f, ok := providerStorageQueries[key]; ok {
+		localStorageUsedQuery = f(pds.promConfig, start, end, false, true)
+	} else {
+		localStorageUsedQuery = ""
+	}
+
+	if localStorageUsedQuery == "" {
+		return newEmptyResult()
+	}
+
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(localStorageUsedQuery, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeCPUHourlyCost(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeCPUHourlyCostQuery = `avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeCPUHourlyCost")
+	}
+
+	queryNodeCPUHourlyCost := fmt.Sprintf(nodeCPUHourlyCostQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeCPUHourlyCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeCPUCoresCapacity(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeCPUCoresCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeCPUCoresCapacity")
+	}
+
+	queryNodeCPUCoresCapacity := fmt.Sprintf(nodeCPUCoresCapacityQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeCPUCoresCapacity, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeCPUCoresAllocatable(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeCPUCoresAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_cpu_cores{%s}[%s])) by (%s, node)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeCPUCoresAllocatable")
+	}
+
+	queryNodeCPUCoresAllocatable := fmt.Sprintf(nodeCPUCoresAllocatableQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeCPUCoresAllocatable, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeRAMHourlyCost(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeRAMHourlyCostQuery = `avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeRAMHourlyCost")
+	}
+
+	queryNodeRAMHourlyCost := fmt.Sprintf(nodeRAMHourlyCostQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeRAMHourlyCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeRAMBytesCapacity(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeRAMBytesCapacityQuery = `avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeRAMBytesCapacity")
+	}
+
+	queryNodeRAMBytesCapacity := fmt.Sprintf(nodeRAMBytesCapacityQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeRAMBytesCapacity, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeRAMBytesAllocatable(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeRAMBytesAllocatableQuery = `avg(avg_over_time(kube_node_status_allocatable_memory_bytes{%s}[%s])) by (%s, node)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeRAMBytesAllocatable")
+	}
+
+	queryNodeRAMBytesAllocatable := fmt.Sprintf(nodeRAMBytesAllocatableQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeRAMBytesAllocatable, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeGPUCount(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeGPUCountQuery = `avg(avg_over_time(node_gpu_count{%s}[%s])) by (%s, node, provider_id)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeGPUCount")
+	}
+
+	queryNodeGPUCount := fmt.Sprintf(nodeGPUCountQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeGPUCount, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeGPUHourlyCost(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
+
+	const nodeGPUHourlyCostQuery = `avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeGPUHourlyCost")
+	}
+
+	queryNodeGPUHourlyCost := fmt.Sprintf(nodeGPUHourlyCostQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryNodeGPUHourlyCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeLabels(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, minsPerResolution)
+
+	const labelsQuery = `count_over_time(kube_node_labels{%s}[%s:%dm])`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeLabels")
+	}
+
+	queryLabels := fmt.Sprintf(labelsQuery, cfg.ClusterFilter, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLabels, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeActiveMinutes(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+
+	const activeMinsQuery = `avg(node_total_hourly_cost{%s}) by (node, %s, provider_id)[%s:%dm]`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeActiveMinutes")
+	}
+
+	queryActiveMins := fmt.Sprintf(activeMinsQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryActiveMins, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeIsSpot(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, minsPerResolution)
+
+	const isSpotQuery = `avg_over_time(kubecost_node_is_spot{%s}[%s:%dm])`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeIsSpot")
+	}
+
+	queryIsSpot := fmt.Sprintf(isSpotQuery, cfg.ClusterFilter, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryIsSpot, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeCPUModeTotal(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel())
+
+	const nodeCPUModeTotalQuery = `sum(rate(node_cpu_seconds_total{%s}[%s:%dm])) by (kubernetes_node, %s, mode)`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeCPUModeTotal")
+	}
+
+	queryCPUModeTotal := fmt.Sprintf(nodeCPUModeTotalQuery, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryCPUModeTotal, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeCPUModePercent(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryCPUModePct = `
+		sum(rate(node_cpu_seconds_total{%s}[%s])) by (%s, mode) / ignoring(mode)
+		group_left sum(rate(node_cpu_seconds_total{%s}[%s])) by (%s)
+	`
+	// env.GetPromClusterFilter(), windowStr, env.GetPromClusterLabel(), env.GetPromClusterFilter(), windowStr, fmtOffset, env.GetPromClusterLabel()
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeCPUModePercent")
+	}
+
+	queryCPUModePct := fmt.Sprintf(fmtQueryCPUModePct, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryCPUModePct, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeRAMSystemPercent(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	const nodeRAMSystemPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeRAMSystemPercent")
+	}
+
+	queryRAMSystemPct := fmt.Sprintf(nodeRAMSystemPctQuery, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryRAMSystemPct, end)
+}
+
+func (pds *PrometheusDataSource) QueryNodeRAMUserPercent(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	const nodeRAMUserPctQuery = `sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace!="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryNodeRAMUserPercent")
+	}
+
+	queryRAMUserPct := fmt.Sprintf(nodeRAMUserPctQuery, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryRAMUserPct, end)
+}
+
+func (pds *PrometheusDataSource) QueryLBCost(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel()
+
+	const lbCostQuery = `avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, %s, ingress_ip)`
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLBCost")
+	}
+
+	queryLBCost := fmt.Sprintf(lbCostQuery, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLBCost, end)
+}
+
+func (pds *PrometheusDataSource) QueryLBActiveMinutes(start, end time.Time) source.QueryResultsChan {
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+
+	const lbActiveMinutesQuery = `avg(kubecost_load_balancer_cost{%s}) by (namespace, service_name, %s, ingress_ip)[%s:%dm]`
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryLBActiveMinutes")
+	}
+
+	queryLBActiveMins := fmt.Sprintf(lbActiveMinutesQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryLBActiveMins, end)
+}
+
+func (pds *PrometheusDataSource) QueryDataCount(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryDataCount = `
+		count_over_time(sum(kube_node_status_capacity_cpu_cores{%s}) by (%s)[%s:%dm]) * %d
+	`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, minsPerResolution)
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryDataCount")
+	}
+
+	queryDataCount := fmt.Sprintf(fmtQueryDataCount, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, minsPerResolution)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryDataCount, end)
+}
+
+func (pds *PrometheusDataSource) QueryTotalGPU(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryTotalGPU = `
+		sum(
+			sum_over_time(node_gpu_hourly_cost{%s}[%s:%dm]) * %f
+		) by (%s)
+	`
+	// env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, hourlyToCumulative, env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryTotalGPU")
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
+	// value, converts it to a cumulative value; i.e.
+	// [$/hr] * [min/res]*[hr/min] = [$/res]
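+	// e.g. with a 5m resolution: 5 * (1/60) = 1/12 hr per step, so $0.12/hr becomes $0.01 per step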
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+
+	queryTotalGPU := fmt.Sprintf(fmtQueryTotalGPU, cfg.ClusterFilter, durStr, minsPerResolution, hourlyToCumulative, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryTotalGPU, end)
+}
+
+func (pds *PrometheusDataSource) QueryTotalCPU(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryTotalCPU = `
+		sum(
+			sum_over_time(avg(kube_node_status_capacity_cpu_cores{%s}) by (node, %s)[%s:%dm]) *
+			avg(avg_over_time(node_cpu_hourly_cost{%s}[%s:%dm])) by (node, %s) * %f
+		) by (%s)
+	`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel()
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryTotalCPU")
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
+	// value, converts it to a cumulative value; i.e.
+	// [$/hr] * [min/res]*[hr/min] = [$/res]
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+
+	queryTotalCPU := fmt.Sprintf(fmtQueryTotalCPU, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, hourlyToCumulative, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryTotalCPU, end)
+}
+
+func (pds *PrometheusDataSource) QueryTotalRAM(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryTotalRAM = `
+		sum(
+			sum_over_time(avg(kube_node_status_capacity_memory_bytes{%s}) by (node, %s)[%s:%dm]) / 1024 / 1024 / 1024 *
+			avg(avg_over_time(node_ram_hourly_cost{%s}[%s:%dm])) by (node, %s) * %f
+		) by (%s)
+	`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, env.GetPromClusterFilter(), windowStr, minsPerResolution, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryTotalRAM")
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
+	// value, converts it to a cumulative value; i.e.
+	// [$/hr] * [min/res]*[hr/min] = [$/res]
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+
+	queryTotalRAM := fmt.Sprintf(fmtQueryTotalRAM, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, hourlyToCumulative, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryTotalRAM, end)
+}
+
+func (pds *PrometheusDataSource) QueryTotalStorage(start, end time.Time) source.QueryResultsChan {
+	const fmtQueryTotalStorage = `
+		sum(
+			sum_over_time(avg(kube_persistentvolume_capacity_bytes{%s}) by (persistentvolume, %s)[%s:%dm]) / 1024 / 1024 / 1024 *
+			avg(avg_over_time(pv_hourly_cost{%s}[%s:%dm])) by (persistentvolume, %s) * %f
+		) by (%s)
+	`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, env.GetPromClusterFilter(), windowStr, minsPerResolution, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryTotalStorage")
+	}
+
+	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
+	// value, converts it to a cumulative value; i.e.
+	// [$/hr] * [min/res]*[hr/min] = [$/res]
+	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+
+	queryTotalStorage := fmt.Sprintf(fmtQueryTotalStorage, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, cfg.ClusterFilter, durStr, minsPerResolution, cfg.ClusterLabel, hourlyToCumulative, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryAtTime(queryTotalStorage, end)
+}
+
+func (pds *PrometheusDataSource) QueryClusterCores(start, end time.Time, step time.Duration) source.QueryResultsChan {
+	const queryClusterCores = `sum(
+		avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (node, %s) * avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (node, %s) * 730 +
+		avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (node, %s) * 730
+	  ) by (%s)`
+	// env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow,  env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	durStr := timeutil.DurationString(step)
+
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryClusterCores")
+	}
+
+	clusterCoresQuery := fmt.Sprintf(queryClusterCores, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryRange(clusterCoresQuery, start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterRAM(start, end time.Time, step time.Duration) source.QueryResultsChan {
+	const queryClusterRAM = `sum(
+		avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (node, %s) / 1024 / 1024 / 1024 * avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (node, %s) * 730
+	  ) by (%s)`
+	//  env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+
+	cfg := pds.promConfig
+	durStr := timeutil.DurationString(step)
+
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryClusterCores")
+	}
+
+	clusterRAMQuery := fmt.Sprintf(queryClusterRAM, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterLabel)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryRange(clusterRAMQuery, start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterStorage(start, end time.Time, step time.Duration) source.QueryResultsChan {
+	return pds.QueryClusterStorageByProvider("", start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterStorageByProvider(provider string, start, end time.Time, step time.Duration) source.QueryResultsChan {
+	const queryStorage = `sum(
+		avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (persistentvolume, %s) * 730
+		* avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[%s])) by (persistentvolume, %s) / 1024 / 1024 / 1024
+	  ) by (%s) %s`
+	// env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, env.GetPromClusterLabel(), env.GetPromClusterLabel(), localStorageQuery)
+
+	var localStorageQuery string
+	if provider != "" {
+		key := strings.ToLower(provider)
+		if f, ok := providerStorageQueries[key]; ok {
+			localStorageQuery = f(pds.promConfig, start, end, true, false)
+		} else {
+			localStorageQuery = ""
+		}
+	}
+
+	if localStorageQuery != "" {
+		localStorageQuery = fmt.Sprintf(" + %s", localStorageQuery)
+	}
+
+	cfg := pds.promConfig
+	durStr := timeutil.DurationString(step)
+
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryClusterCores")
+	}
+
+	clusterStorageQuery := fmt.Sprintf(queryStorage, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterFilter, durStr, cfg.ClusterLabel, cfg.ClusterLabel, localStorageQuery)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryRange(clusterStorageQuery, start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterTotal(start, end time.Time, step time.Duration) source.QueryResultsChan {
+	return pds.QueryClusterTotalByProvider("", start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterTotalByProvider(provider string, start, end time.Time, step time.Duration) source.QueryResultsChan {
+	const queryTotal = `sum(avg(node_total_hourly_cost{%s}) by (node, %s)) * 730 +
+	  sum(
+		avg(avg_over_time(pv_hourly_cost{%s}[1h])) by (persistentvolume, %s) * 730
+		* avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[1h])) by (persistentvolume, %s) / 1024 / 1024 / 1024
+	  ) by (%s) %s`
+
+	var localStorageQuery string
+	if provider != "" {
+		key := strings.ToLower(provider)
+		if f, ok := providerStorageQueries[key]; ok {
+			localStorageQuery = f(pds.promConfig, start, end, true, false)
+		} else {
+			localStorageQuery = ""
+		}
+	}
+
+	if localStorageQuery != "" {
+		localStorageQuery = fmt.Sprintf(" + %s", localStorageQuery)
+	}
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(step)
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryClusterTotalByProvider")
+	}
+
+	clusterTotalQuery := fmt.Sprintf(queryTotal, cfg.ClusterFilter, cfg.ClusterLabel, cfg.ClusterFilter, cfg.ClusterLabel, cfg.ClusterFilter, cfg.ClusterLabel, cfg.ClusterLabel, localStorageQuery)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryRange(clusterTotalQuery, start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterNodes(start, end time.Time, step time.Duration) source.QueryResultsChan {
+	return pds.QueryClusterNodesByProvider("", start, end, step)
+}
+
+func (pds *PrometheusDataSource) QueryClusterNodesByProvider(provider string, start, end time.Time, step time.Duration) source.QueryResultsChan {
+	const queryNodes = `sum(avg(node_total_hourly_cost{%s}) by (node, %s)) * 730 %s`
+	// env.GetPromClusterFilter(), env.GetPromClusterLabel(), localStorageQuery)
+
+	var localStorageQuery string
+	if provider != "" {
+		key := strings.ToLower(provider)
+		if f, ok := providerStorageQueries[key]; ok {
+			localStorageQuery = f(pds.promConfig, start, end, true, false)
+		} else {
+			localStorageQuery = ""
+		}
+	}
+
+	if localStorageQuery != "" {
+		localStorageQuery = fmt.Sprintf(" + %s", localStorageQuery)
+	}
+
+	cfg := pds.promConfig
+
+	durStr := timeutil.DurationString(step)
+	if durStr == "" {
+		panic("failed to parse duration string passed to QueryClusterNodesByProvider")
+	}
+
+	clusterNodesCostQuery := fmt.Sprintf(queryNodes, cfg.ClusterFilter, cfg.ClusterLabel, localStorageQuery)
+	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
+	return ctx.QueryRange(clusterNodesCostQuery, start, end, step)
+}
+
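+// newEmptyResult returns a QueryResultsChan that yields a single empty QueryResults,
+// used when no provider-specific query is available for a request.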
+func newEmptyResult() source.QueryResultsChan {
+	ch := make(source.QueryResultsChan)
+	go func() {
+		results := source.NewQueryResults("")
+		ch <- results
+	}()
+	return ch
+}
+
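+// snapResolutionMinute truncates a resolution to whole minutes, flooring at one minute.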
+func snapResolutionMinute(res time.Duration) time.Duration {
+	resMins := int64(math.Trunc(res.Minutes()))
+	if resMins <= 0 {
+		resMins = 1
+	}
+	return time.Duration(resMins) * time.Minute
+}
+
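+// formatResolutionMinutes renders a minute count as a PromQL duration string, using
+// hours when the count is an exact multiple of 60.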
+func formatResolutionMinutes(resMins int64) string {
+	if resMins%60 == 0 {
+		return fmt.Sprintf("%dh", resMins/60)
+	}
+
+	return fmt.Sprintf("%dm", resMins)
+}

+ 27 - 20
pkg/prom/diagnostics.go → modules/prometheus-source/pkg/prom/diagnostics.go

@@ -2,9 +2,9 @@ package prom
 
 import (
 	"fmt"
-	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/pkg/env"
 	prometheus "github.com/prometheus/client_golang/api"
 )
@@ -139,6 +139,13 @@ var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnos
 	},
 }
 
+// RequestCounter is implemented by prometheus clients that track the number of
+// queued and outbound requests.
+type RequestCounter interface {
+	TotalQueuedRequests() int
+	TotalOutboundRequests() int
+}
+
 // QueuedPromRequest is a representation of a request waiting to be sent by the prometheus
 // client.
 type QueuedPromRequest struct {
@@ -158,7 +165,7 @@ type PrometheusQueueState struct {
 
 // GetPrometheusQueueState is a diagnostic function that probes the prometheus request queue and gathers
 // query, context, and queue statistics.
-func GetPrometheusQueueState(client prometheus.Client) (*PrometheusQueueState, error) {
+func GetPrometheusQueueState(client prometheus.Client, config *OpenCostPrometheusConfig) (*PrometheusQueueState, error) {
 	rlpc, ok := client.(*RateLimitedPrometheusClient)
 	if !ok {
 		return nil, fmt.Errorf("Failed to get prometheus queue state for the provided client. Must be of type RateLimitedPrometheusClient.")
@@ -167,11 +174,11 @@ func GetPrometheusQueueState(client prometheus.Client) (*PrometheusQueueState, e
 	outbound := rlpc.TotalOutboundRequests()
 
 	requests := []*QueuedPromRequest{}
-	rlpc.queue.Each(func(_ int, req *workRequest) {
+	rlpc.EachQueuedRequest(func(ctx string, query string, queueTimeMs int64) {
 		requests = append(requests, &QueuedPromRequest{
-			Context:   req.contextName,
-			Query:     req.query,
-			QueueTime: time.Since(req.start).Milliseconds(),
+			Context:   ctx,
+			Query:     query,
+			QueueTime: queueTimeMs,
 		})
 	})
 
@@ -179,14 +186,14 @@ func GetPrometheusQueueState(client prometheus.Client) (*PrometheusQueueState, e
 		QueuedRequests:      requests,
 		OutboundRequests:    outbound,
 		TotalRequests:       outbound + len(requests),
-		MaxQueryConcurrency: env.GetMaxQueryConcurrency(),
+		MaxQueryConcurrency: config.ClientConfig.QueryConcurrency,
 	}, nil
 }
 
 // LogPrometheusClientState logs the current state, with respect to outbound requests, if that
 // information is available.
 func LogPrometheusClientState(client prometheus.Client) {
-	if rc, ok := client.(requestCounter); ok {
+	if rc, ok := client.(RequestCounter); ok {
 		queued := rc.TotalQueuedRequests()
 		outbound := rc.TotalOutboundRequests()
 		total := queued + outbound
@@ -196,8 +203,8 @@ func LogPrometheusClientState(client prometheus.Client) {
 }
 
 // GetPrometheusMetrics returns a list of the state of Prometheus metric used by kubecost using the provided client
-func GetPrometheusMetrics(client prometheus.Client, offset string) PrometheusDiagnostics {
-	ctx := NewNamedContext(client, DiagnosticContextName)
+func GetPrometheusMetrics(client prometheus.Client, config *OpenCostPrometheusConfig, offset string) PrometheusDiagnostics {
+	ctx := NewNamedContext(client, config, DiagnosticContextName)
 
 	var result []*PrometheusDiagnostic
 	for _, definition := range diagnosticDefinitions {
@@ -215,8 +222,8 @@ func GetPrometheusMetrics(client prometheus.Client, offset string) PrometheusDia
 }
 
 // GetPrometheusMetricsByID returns a list of the state of specific Prometheus metrics by identifier.
-func GetPrometheusMetricsByID(ids []string, client prometheus.Client, offset string) PrometheusDiagnostics {
-	ctx := NewNamedContext(client, DiagnosticContextName)
+func GetPrometheusMetricsByID(ids []string, client prometheus.Client, config *OpenCostPrometheusConfig, offset string) PrometheusDiagnostics {
+	ctx := NewNamedContext(client, config, DiagnosticContextName)
 
 	var result []*PrometheusDiagnostic
 	for _, id := range ids {
@@ -284,13 +291,13 @@ func (pdd *diagnosticDefinition) NewDiagnostic(offset string) *PrometheusDiagnos
 
 // PrometheusDiagnostic holds information about a metric and the query to ensure it is functional
 type PrometheusDiagnostic struct {
-	ID          string         `json:"id"`
-	Query       string         `json:"query"`
-	Label       string         `json:"label"`
-	Description string         `json:"description"`
-	DocLink     string         `json:"docLink"`
-	Result      []*QueryResult `json:"result"`
-	Passed      bool           `json:"passed"`
+	ID          string                `json:"id"`
+	Query       string                `json:"query"`
+	Label       string                `json:"label"`
+	Description string                `json:"description"`
+	DocLink     string                `json:"docLink"`
+	Result      []*source.QueryResult `json:"result"`
+	Passed      bool                  `json:"passed"`
 }
 
 // executePrometheusDiagnosticQuery executes a PrometheusDiagnostic query using the given context
@@ -301,7 +308,7 @@ func (pd *PrometheusDiagnostic) executePrometheusDiagnosticQuery(ctx *Context) e
 		return fmt.Errorf("prometheus diagnostic %s failed with error: %s", pd.ID, err)
 	}
 	if result == nil {
-		result = []*QueryResult{}
+		result = []*source.QueryResult{}
 	}
 	pd.Result = result
 	pd.Passed = len(result) == 0

+ 0 - 0
pkg/prom/helpers.go → modules/prometheus-source/pkg/prom/helpers.go


+ 0 - 0
pkg/prom/ids.go → modules/prometheus-source/pkg/prom/ids.go


+ 10 - 26
pkg/prom/prom.go → modules/prometheus-source/pkg/prom/prom.go

@@ -18,13 +18,10 @@ import (
 	"github.com/opencost/opencost/core/pkg/util/fileutil"
 	"github.com/opencost/opencost/core/pkg/util/httputil"
 	"github.com/opencost/opencost/core/pkg/version"
-	"github.com/opencost/opencost/pkg/env"
 
 	golog "log"
 
 	prometheus "github.com/prometheus/client_golang/api"
-	restclient "k8s.io/client-go/rest"
-	certutil "k8s.io/client-go/util/cert"
 )
 
 var UserAgent = fmt.Sprintf("Opencost/%s", version.Version)
@@ -132,13 +129,6 @@ type RateLimitedPrometheusClient struct {
 	headerXScopeOrgId string
 }
 
-// requestCounter is used to determine if the prometheus client keeps track of
-// the concurrent outbound requests
-type requestCounter interface {
-	TotalQueuedRequests() int
-	TotalOutboundRequests() int
-}
-
 // NewRateLimitedClient creates a prometheus client which limits the number of concurrent outbound
 // prometheus requests.
 func NewRateLimitedClient(
@@ -224,6 +214,14 @@ func (rlpc *RateLimitedPrometheusClient) URL(ep string, args map[string]string)
 	return rlpc.client.URL(ep, args)
 }
 
+// EachQueuedRequest safely iterates over all queued requests, passing each request's
+// context name, query, and time spent in the queue (in milliseconds) to f.
+func (rlpc *RateLimitedPrometheusClient) EachQueuedRequest(f func(ctx string, query string, queueTimeMs int64)) {
+	rlpc.queue.Each(func(_ int, req *workRequest) {
+		f(req.contextName, req.query, time.Since(req.start).Milliseconds())
+	})
+}
+
 // workRequest is used to queue requests
 type workRequest struct {
 	ctx      context.Context
@@ -374,26 +372,12 @@ type PrometheusClientConfig struct {
 	QueryConcurrency      int
 	QueryLogFile          string
 	HeaderXScopeOrgId     string
+	RootCAs               *x509.CertPool
 }
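+
+// Callers that previously relied on the in-package kube-rbac-proxy handling must now
+// supply RootCAs (and any bearer token auth) themselves, e.g. by building the cert
+// pool from the mounted service-ca.crt via certutil.NewPool.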
 
 // NewPrometheusClient creates a new rate limited client which limits by outbound concurrent requests.
 func NewPrometheusClient(address string, config *PrometheusClientConfig) (prometheus.Client, error) {
 
-	var tlsCaCert *x509.CertPool
-	// We will use the service account token and service-ca.crt to authenticate with the Prometheus server via kube-rbac-proxy.
-	// We need to ensure that the service account has the necessary permissions to access the Prometheus server by binding it to the appropriate role.
-	if env.IsKubeRbacProxyEnabled() {
-		restConfig, err := restclient.InClusterConfig()
-		if err != nil {
-			log.Errorf("KUBE_RBAC_PROXY_ENABLED was set to true but failed to get in-cluster config: %s", err)
-		}
-		config.Auth.BearerToken = restConfig.BearerToken
-		tlsCaCert, err = certutil.NewPool(`/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt`)
-		if err != nil {
-			log.Errorf("KUBE_RBAC_PROXY_ENABLED was set to true but failed to load service-ca.crt: %s", err)
-		}
-	}
-
 	// may be necessary for long prometheus queries
 	rt := httputil.NewUserAgentTransport(UserAgent, &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
@@ -404,7 +388,7 @@ func NewPrometheusClient(address string, config *PrometheusClientConfig) (promet
 		TLSHandshakeTimeout: config.TLSHandshakeTimeout,
 		TLSClientConfig: &tls.Config{
 			InsecureSkipVerify: config.TLSInsecureSkipVerify,
-			RootCAs:            tlsCaCert,
+			RootCAs:            config.RootCAs,
 		},
 	})
 	pc := prometheus.Config{

+ 92 - 42
pkg/prom/query.go → modules/prometheus-source/pkg/prom/query.go

@@ -9,52 +9,73 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/httputil"
 	"github.com/opencost/opencost/core/pkg/util/json"
-	"github.com/opencost/opencost/pkg/env"
+
 	"github.com/opencost/opencost/pkg/errors"
 	prometheus "github.com/prometheus/client_golang/api"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 )
 
 const (
-	apiPrefix    = "/api/v1"
 	epQuery      = apiPrefix + "/query"
 	epQueryRange = apiPrefix + "/query_range"
 )
 
-// prometheus query offset to apply to each non-range query
-// package scope to prevent calling duration parse each use
-var promQueryOffset time.Duration = env.GetPrometheusQueryOffset()
+// ContextFactory is a factory for creating new Contexts for prometheus queries.
+type ContextFactory struct {
+	client prometheus.Client
+	config *OpenCostPrometheusConfig
+}
+
+// NewContextFactory creates a new ContextFactory with the provided prometheus client and configuration.
+func NewContextFactory(client prometheus.Client, promConfig *OpenCostPrometheusConfig) *ContextFactory {
+	return &ContextFactory{
+		client: client,
+		config: promConfig,
+	}
+}
+
+// NewContext creates a new prometheus query context.
+func (cf *ContextFactory) NewContext() *Context {
+	return NewContext(cf.client, cf.config)
+}
+
+// NewNamedContext creates a new named prometheus query context.
+func (cf *ContextFactory) NewNamedContext(name string) *Context {
+	return NewNamedContext(cf.client, cf.config, name)
+}
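+
+// Minimal usage sketch (assuming the Await helper on source.QueryResultsChan):
+//
+//	cf := NewContextFactory(client, promConfig)
+//	ctx := cf.NewNamedContext(ClusterContextName)
+//	results, err := ctx.QueryAtTime("up", time.Now()).Await()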
 
 // Context wraps a Prometheus client and provides methods for querying and
 // parsing query responses and errors.
 type Context struct {
 	Client         prometheus.Client
+	config         *OpenCostPrometheusConfig
 	name           string
-	errorCollector *QueryErrorCollector
+	errorCollector *source.QueryErrorCollector
 }
 
 // NewContext creates a new Prometheus querying context from the given client
-func NewContext(client prometheus.Client) *Context {
-	var ec QueryErrorCollector
+func NewContext(client prometheus.Client, config *OpenCostPrometheusConfig) *Context {
+	var ec source.QueryErrorCollector
 
 	return &Context{
 		Client:         client,
+		config:         config,
 		name:           "",
 		errorCollector: &ec,
 	}
 }
 
 // NewNamedContext creates a new named Prometheus querying context from the given client
-func NewNamedContext(client prometheus.Client, name string) *Context {
-	ctx := NewContext(client)
+func NewNamedContext(client prometheus.Client, config *OpenCostPrometheusConfig, name string) *Context {
+	ctx := NewContext(client, config)
 	ctx.name = name
 	return ctx
 }
 
 // Warnings returns the warnings collected from the Context's ErrorCollector
-func (ctx *Context) Warnings() []*QueryWarning {
+func (ctx *Context) Warnings() []*source.QueryWarning {
 	return ctx.errorCollector.Warnings()
 }
 
@@ -64,7 +85,7 @@ func (ctx *Context) HasWarnings() bool {
 }
 
 // Errors returns the errors collected from the Context's ErrorCollector.
-func (ctx *Context) Errors() []*QueryError {
+func (ctx *Context) Errors() []*source.QueryError {
 	return ctx.errorCollector.Errors()
 }
 
@@ -87,8 +108,8 @@ func (ctx *Context) ErrorCollection() error {
 // Query returns a QueryResultsChan, then runs the given query and sends the
 // results on the provided channel. Receiver is responsible for closing the
 // channel, preferably using the Read method.
-func (ctx *Context) Query(query string) QueryResultsChan {
-	resCh := make(QueryResultsChan)
+func (ctx *Context) Query(query string) source.QueryResultsChan {
+	resCh := make(source.QueryResultsChan)
 
 	go runQuery(query, ctx, resCh, time.Now(), "")
 
@@ -99,8 +120,8 @@ func (ctx *Context) Query(query string) QueryResultsChan {
 // given time (see time parameter here: https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries)
 // and sends the results on the provided channel. Receiver is responsible for
 // closing the channel, preferably using the Read method.
-func (ctx *Context) QueryAtTime(query string, t time.Time) QueryResultsChan {
-	resCh := make(QueryResultsChan)
+func (ctx *Context) QueryAtTime(query string, t time.Time) source.QueryResultsChan {
+	resCh := make(source.QueryResultsChan)
 
 	go runQuery(query, ctx, resCh, t, "")
 
@@ -110,8 +131,8 @@ func (ctx *Context) QueryAtTime(query string, t time.Time) QueryResultsChan {
 // ProfileQuery returns a QueryResultsChan, then runs the given query with a profile
 // label and sends the results on the provided channel. Receiver is responsible for closing the
 // channel, preferably using the Read method.
-func (ctx *Context) ProfileQuery(query string, profileLabel string) QueryResultsChan {
-	resCh := make(QueryResultsChan)
+func (ctx *Context) ProfileQuery(query string, profileLabel string) source.QueryResultsChan {
+	resCh := make(source.QueryResultsChan)
 
 	go runQuery(query, ctx, resCh, time.Now(), profileLabel)
 
@@ -122,8 +143,8 @@ func (ctx *Context) ProfileQuery(query string, profileLabel string) QueryResults
 // each query concurrently and returns results on each channel, respectively,
 // in the order they were provided; i.e. the response to queries[1] will be
 // sent on channel resChs[1].
-func (ctx *Context) QueryAll(queries ...string) []QueryResultsChan {
-	resChs := []QueryResultsChan{}
+func (ctx *Context) QueryAll(queries ...string) []source.QueryResultsChan {
+	resChs := []source.QueryResultsChan{}
 
 	for _, q := range queries {
 		resChs = append(resChs, ctx.Query(q))
@@ -136,8 +157,8 @@ func (ctx *Context) QueryAll(queries ...string) []QueryResultsChan {
 // each ProfileQuery concurrently and returns results on each channel, respectively,
 // in the order they were provided; i.e. the response to queries[1] will be
 // sent on channel resChs[1].
-func (ctx *Context) ProfileQueryAll(queries ...string) []QueryResultsChan {
-	resChs := []QueryResultsChan{}
+func (ctx *Context) ProfileQueryAll(queries ...string) []source.QueryResultsChan {
+	resChs := []source.QueryResultsChan{}
 
 	for _, q := range queries {
 		resChs = append(resChs, ctx.ProfileQuery(q, fmt.Sprintf("Query #%d", len(resChs)+1)))
@@ -146,13 +167,16 @@ func (ctx *Context) ProfileQueryAll(queries ...string) []QueryResultsChan {
 	return resChs
 }
 
-func (ctx *Context) QuerySync(query string) ([]*QueryResult, v1.Warnings, error) {
+func (ctx *Context) QuerySync(query string) ([]*source.QueryResult, v1.Warnings, error) {
 	raw, warnings, err := ctx.query(query, time.Now())
 	if err != nil {
 		return nil, warnings, err
 	}
 
-	results := NewQueryResults(query, raw)
+	// create result keys from custom cluster label
+	resultKeys := source.ClusterKeyWithDefaults(ctx.config.ClusterLabel)
+
+	results := NewQueryResults(query, raw, resultKeys)
 	if results.Error != nil {
 		return nil, warnings, results.Error
 	}
@@ -167,15 +191,27 @@ func (ctx *Context) QueryURL() *url.URL {
 
 // runQuery executes the prometheus query asynchronously, collects results and
 // errors, and passes them through the results channel.
-func runQuery(query string, ctx *Context, resCh QueryResultsChan, t time.Time, profileLabel string) {
+func runQuery(query string, ctx *Context, resCh source.QueryResultsChan, t time.Time, profileLabel string) {
 	defer errors.HandlePanic()
 	startQuery := time.Now()
 
 	raw, warnings, requestError := ctx.query(query, t)
-	results := NewQueryResults(query, raw)
+
+	var parseError error
+
+	var results *source.QueryResults
+	if requestError != nil {
+		results = NewQueryResultError(query, requestError)
+	} else {
+		// create result keys from custom cluster label
+		resultKeys := source.ClusterKeyWithDefaults(ctx.config.ClusterLabel)
+		results = NewQueryResults(query, raw, resultKeys)
+
+		parseError = results.Error
+	}
 
 	// report all warnings, request, and parse errors (nils will be ignored)
-	ctx.errorCollector.Report(query, warnings, requestError, results.Error)
+	ctx.errorCollector.Report(query, warnings, requestError, parseError)
 
 	if profileLabel != "" {
 		log.Profile(startQuery, profileLabel)
@@ -225,7 +261,7 @@ func (ctx *Context) RawQuery(query string, t time.Time) ([]byte, error) {
 	statusCode := resp.StatusCode
 	statusText := http.StatusText(statusCode)
 	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-		return nil, CommErrorf("%d (%s) URL: '%s', Body: '%s' Query: '%s'", statusCode, statusText, req.URL, body, query)
+		return nil, source.CommErrorf("%d (%s) URL: '%s', Body: '%s' Query: '%s'", statusCode, statusText, req.URL, body, query)
 	}
 
 	return body, err
@@ -248,8 +284,8 @@ func (ctx *Context) query(query string, t time.Time) (interface{}, v1.Warnings,
 		// NoStoreAPIWarning is a warning that we would consider an error. It returns partial data relating only to the
 		// store apis which were reachable. In order to ensure integrity of data across all clusters, we'll need to identify
 		// this warning and convert it to an error.
-		if IsNoStoreAPIWarning(w) {
-			return nil, warnings, CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
+		if source.IsNoStoreAPIWarning(w) {
+			return nil, warnings, source.CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
 		}
 
 		log.Warnf("fetching query '%s': %s", query, w)
@@ -266,8 +302,8 @@ func (ctx *Context) isRequestStepAligned(start, end time.Time, step time.Duratio
 	return startInUnix%stepInSeconds == 0 && endInUnix%stepInSeconds == 0
 }
 
-func (ctx *Context) QueryRange(query string, start, end time.Time, step time.Duration) QueryResultsChan {
-	resCh := make(QueryResultsChan)
+func (ctx *Context) QueryRange(query string, start, end time.Time, step time.Duration) source.QueryResultsChan {
+	resCh := make(source.QueryResultsChan)
 
 	if !ctx.isRequestStepAligned(start, end, step) {
 		start, end = ctx.alignWindow(start, end, step)
@@ -278,21 +314,23 @@ func (ctx *Context) QueryRange(query string, start, end time.Time, step time.Dur
 	return resCh
 }
 
-func (ctx *Context) ProfileQueryRange(query string, start, end time.Time, step time.Duration, profileLabel string) QueryResultsChan {
-	resCh := make(QueryResultsChan)
+func (ctx *Context) ProfileQueryRange(query string, start, end time.Time, step time.Duration, profileLabel string) source.QueryResultsChan {
+	resCh := make(source.QueryResultsChan)
 
 	go runQueryRange(query, start, end, step, ctx, resCh, profileLabel)
 
 	return resCh
 }
 
-func (ctx *Context) QueryRangeSync(query string, start, end time.Time, step time.Duration) ([]*QueryResult, v1.Warnings, error) {
+func (ctx *Context) QueryRangeSync(query string, start, end time.Time, step time.Duration) ([]*source.QueryResult, v1.Warnings, error) {
 	raw, warnings, err := ctx.queryRange(query, start, end, step)
 	if err != nil {
 		return nil, warnings, err
 	}
 
-	results := NewQueryResults(query, raw)
+	// create result keys from custom cluster label
+	resultKeys := source.ClusterKeyWithDefaults(ctx.config.ClusterLabel)
+	results := NewQueryResults(query, raw, resultKeys)
 	if results.Error != nil {
 		return nil, warnings, results.Error
 	}
@@ -307,15 +345,27 @@ func (ctx *Context) QueryRangeURL() *url.URL {
 
 // runQueryRange executes the prometheus queryRange asynchronously, collects results and
 // errors, and passes them through the results channel.
-func runQueryRange(query string, start, end time.Time, step time.Duration, ctx *Context, resCh QueryResultsChan, profileLabel string) {
+func runQueryRange(query string, start, end time.Time, step time.Duration, ctx *Context, resCh source.QueryResultsChan, profileLabel string) {
 	defer errors.HandlePanic()
 	startQuery := time.Now()
 
 	raw, warnings, requestError := ctx.queryRange(query, start, end, step)
-	results := NewQueryResults(query, raw)
+
+	var parseError error
+
+	var results *source.QueryResults
+	if requestError != nil {
+		results = NewQueryResultError(query, requestError)
+	} else {
+		// create result keys from custom cluster label
+		resultKeys := source.ClusterKeyWithDefaults(ctx.config.ClusterLabel)
+		results = NewQueryResults(query, raw, resultKeys)
+
+		parseError = results.Error
+	}
 
 	// report all warnings, request, and parse errors (nils will be ignored)
-	ctx.errorCollector.Report(query, warnings, requestError, results.Error)
+	ctx.errorCollector.Report(query, warnings, requestError, parseError)
 
 	if profileLabel != "" {
 		log.Profile(startQuery, profileLabel)
@@ -361,7 +411,7 @@ func (ctx *Context) RawQueryRange(query string, start, end time.Time, step time.
 	statusCode := resp.StatusCode
 	statusText := http.StatusText(statusCode)
 	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-		return nil, CommErrorf("%d (%s) Body: %s Query: %s", statusCode, statusText, body, query)
+		return nil, source.CommErrorf("%d (%s) Body: %s Query: %s", statusCode, statusText, body, query)
 	}
 
 	return body, err
@@ -385,8 +435,8 @@ func (ctx *Context) queryRange(query string, start, end time.Time, step time.Dur
 		// NoStoreAPIWarning is a warning that we would consider an error. It returns partial data relating only to the
 		// store apis which were reachable. In order to ensure integrity of data across all clusters, we'll need to identify
 		// this warning and convert it to an error.
-		if IsNoStoreAPIWarning(w) {
-			return nil, warnings, CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
+		if source.IsNoStoreAPIWarning(w) {
+			return nil, warnings, source.CommErrorf("Error: %s, Body: %s, Query: %s", w, body, query)
 		}
 
 		log.Warnf("fetching query '%s': %s", query, w)

+ 5 - 3
pkg/prom/query_test.go → modules/prometheus-source/pkg/prom/query_test.go

@@ -1,10 +1,12 @@
 package prom
 
 import (
-	"github.com/prometheus/client_golang/api"
 	"reflect"
 	"testing"
 	"time"
+
+	"github.com/opencost/opencost/core/pkg/source"
+	"github.com/prometheus/client_golang/api"
 )
 
 func TestWarningsFrom(t *testing.T) {
@@ -35,7 +37,7 @@ func TestContext_isRequestStepAligned(t *testing.T) {
 	type fields struct {
 		Client         api.Client
 		name           string
-		errorCollector *QueryErrorCollector
+		errorCollector *source.QueryErrorCollector
 	}
 	type args struct {
 		start time.Time
@@ -107,7 +109,7 @@ func TestContext_alignWindow(t *testing.T) {
 	type fields struct {
 		Client         api.Client
 		name           string
-		errorCollector *QueryErrorCollector
+		errorCollector *source.QueryErrorCollector
 	}
 	type args struct {
 		start time.Time

+ 0 - 0
pkg/prom/ratelimitedclient_test.go → modules/prometheus-source/pkg/prom/ratelimitedclient_test.go


+ 12 - 119
pkg/prom/result.go → modules/prometheus-source/pkg/prom/result.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 )
 
@@ -33,7 +34,7 @@ func MetricFieldFormatErr(query string) error {
 }
 
 func NoDataErr(query string) error {
-	return NewNoDataError(query)
+	return source.NewNoDataError(query)
 }
 
 func PromUnexpectedResponseErr(query string) error {
@@ -41,7 +42,7 @@ func PromUnexpectedResponseErr(query string) error {
 }
 
 func QueryResultNilErr(query string) error {
-	return NewCommError(query)
+	return source.NewCommError(query)
 }
 
 func ResultFieldDoesNotExistErr(query string) error {
@@ -64,40 +65,17 @@ func ValueFieldFormatErr(query string) error {
 	return fmt.Errorf("Values field is improperly formatted fetching query '%s'", query)
 }
 
-// QueryResultsChan is a channel of query results
-type QueryResultsChan chan *QueryResults
-
-// Await returns query results, blocking until they are made available, and
-// deferring the closure of the underlying channel
-func (qrc QueryResultsChan) Await() ([]*QueryResult, error) {
-	defer close(qrc)
-
-	results := <-qrc
-	if results.Error != nil {
-		return nil, results.Error
-	}
-
-	return results.Results, nil
-}
-
-// QueryResults contains all of the query results and the source query string.
-type QueryResults struct {
-	Query   string
-	Error   error
-	Results []*QueryResult
-}
-
-// QueryResult contains a single result from a prometheus query. It's common
-// to refer to query results as a slice of QueryResult
-type QueryResult struct {
-	Metric map[string]interface{} `json:"metric"`
-	Values []*util.Vector         `json:"values"`
+// NewQueryResultError returns a QueryResults object with an error set and does not parse a result.
+func NewQueryResultError(query string, err error) *source.QueryResults {
+	qrs := source.NewQueryResults(query)
+	qrs.Error = err
+	return qrs
 }
 
 // NewQueryResults accepts the raw prometheus query result and returns an array of
 // QueryResult objects
-func NewQueryResults(query string, queryResult interface{}) *QueryResults {
-	qrs := &QueryResults{Query: query}
+func NewQueryResults(query string, queryResult interface{}, resultKeys *source.ResultKeys) *source.QueryResults {
+	qrs := source.NewQueryResults(query)
 
 	if queryResult == nil {
 		qrs.Error = QueryResultNilErr(query)
@@ -133,7 +111,7 @@ func NewQueryResults(query string, queryResult interface{}) *QueryResults {
 	}
 
 	// Result vectors from the query
-	var results []*QueryResult
+	var results []*source.QueryResult
 
 	// Parse raw results and into QueryResults
 	for _, val := range resultsData {
@@ -205,98 +183,13 @@ func NewQueryResults(query string, queryResult interface{}) *QueryResults {
 			}
 		}
 
-		results = append(results, &QueryResult{
-			Metric: metricMap,
-			Values: vectors,
-		})
+		results = append(results, source.NewQueryResult(metricMap, vectors, resultKeys))
 	}
 
 	qrs.Results = results
 	return qrs
 }
 
-// GetString returns the requested field, or an error if it does not exist
-func (qr *QueryResult) GetString(field string) (string, error) {
-	f, ok := qr.Metric[field]
-	if !ok {
-		return "", fmt.Errorf("'%s' field does not exist in data result vector", field)
-	}
-
-	strField, ok := f.(string)
-	if !ok {
-		return "", fmt.Errorf("'%s' field is improperly formatted and cannot be converted to string", field)
-	}
-
-	return strField, nil
-}
-
-// GetStrings returns the requested fields, or an error if it does not exist
-func (qr *QueryResult) GetStrings(fields ...string) (map[string]string, error) {
-	values := map[string]string{}
-
-	for _, field := range fields {
-		f, ok := qr.Metric[field]
-		if !ok {
-			return nil, fmt.Errorf("'%s' field does not exist in data result vector", field)
-		}
-
-		value, ok := f.(string)
-		if !ok {
-			return nil, fmt.Errorf("'%s' field is improperly formatted and cannot be converted to string", field)
-		}
-
-		values[field] = value
-	}
-
-	return values, nil
-}
-
-// GetLabels returns all labels and their values from the query result
-func (qr *QueryResult) GetLabels() map[string]string {
-	result := make(map[string]string)
-
-	// Find All keys with prefix label_, remove prefix, add to labels
-	for k, v := range qr.Metric {
-		if !strings.HasPrefix(k, "label_") {
-			continue
-		}
-
-		label := strings.TrimPrefix(k, "label_")
-		value, ok := v.(string)
-		if !ok {
-			log.Warnf("Failed to parse label value for label: '%s'", label)
-			continue
-		}
-
-		result[label] = value
-	}
-
-	return result
-}
-
-// GetAnnotations returns all annotations and their values from the query result
-func (qr *QueryResult) GetAnnotations() map[string]string {
-	result := make(map[string]string)
-
-	// Find All keys with prefix annotation_, remove prefix, add to annotations
-	for k, v := range qr.Metric {
-		if !strings.HasPrefix(k, "annotation_") {
-			continue
-		}
-
-		annotations := strings.TrimPrefix(k, "annotation_")
-		value, ok := v.(string)
-		if !ok {
-			log.Warnf("Failed to parse label value for label: '%s'", annotations)
-			continue
-		}
-
-		result[annotations] = value
-	}
-
-	return result
-}
-
 // parseDataPoint parses a data point from raw prometheus query results and returns
 // a new Vector instance containing the parsed data along with any warnings or errors.
 func parseDataPoint(query string, dataPoint interface{}) (*util.Vector, warning, error) {

+ 60 - 0
modules/prometheus-source/pkg/prom/thanos.go

@@ -0,0 +1,60 @@
+package prom
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+
+	prometheus "github.com/prometheus/client_golang/api"
+)
+
+// MaxSourceResulution is the query parameter key used to designate the resolution
+// to use when executing a query.
+const MaxSourceResulution = "max_source_resolution"
+
+// NewThanosClient creates a new `prometheus.Client` with the specific thanos configuration, with a
+// thanos client identifier.
+func NewThanosClient(address string, thanosConfig *OpenCostThanosConfig) (prometheus.Client, error) {
+	config := thanosConfig.ClientConfig
+
+	tc := prometheus.Config{
+		Address: address,
+		RoundTripper: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   config.Timeout,
+				KeepAlive: config.KeepAlive,
+			}).DialContext,
+			TLSHandshakeTimeout: config.TLSHandshakeTimeout,
+			TLSClientConfig: &tls.Config{
+				InsecureSkipVerify: config.TLSInsecureSkipVerify,
+			},
+		},
+	}
+
+	client, err := prometheus.NewClient(tc)
+	if err != nil {
+		return nil, err
+	}
+
+	// max source resolution decorator
+	maxSourceDecorator := func(path string, queryParams url.Values) url.Values {
+		if strings.Contains(path, "query") {
+			queryParams.Set(MaxSourceResulution, thanosConfig.MaxSourceResulution)
+		}
+		return queryParams
+	}
+
+	return NewRateLimitedClient(
+		ThanosClientID,
+		client,
+		config.QueryConcurrency,
+		config.Auth,
+		maxSourceDecorator,
+		config.RateLimitRetryOpts,
+		config.QueryLogFile,
+		"",
+	)
+}

+ 15 - 12
pkg/prom/validate.go → modules/prometheus-source/pkg/prom/validate.go

@@ -3,15 +3,10 @@ package prom
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/env"
-
 	prometheus "github.com/prometheus/client_golang/api"
 )
 
-var (
-	prometheusValidateQuery string = "up"
-	thanosValidateQuery     string = fmt.Sprintf("up offset %s", env.GetThanosOffset())
-)
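+// UpQuery is the base expression used to validate that the configured prometheus
+// instance is reachable and scraping targets.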
+const UpQuery = "up"
 
 // PrometheusMetadata represents a validation result for prometheus/thanos running
 // opencost.
@@ -21,17 +16,25 @@ type PrometheusMetadata struct {
 }
 
 // Validate tells the model what data prometheus has on it.
-func Validate(cli prometheus.Client) (*PrometheusMetadata, error) {
+func Validate(cli prometheus.Client, config *OpenCostPrometheusConfig) (*PrometheusMetadata, error) {
 	if IsThanos(cli) {
-		return validate(cli, thanosValidateQuery)
+		return validate(cli, validationQueryFor(config), config)
+	}
+
+	return validate(cli, validationQueryFor(config), config)
+}
+
+func validationQueryFor(config *OpenCostPrometheusConfig) string {
+	if config.Offset != "" {
+		return fmt.Sprintf("%s offset %s", UpQuery, config.Offset)
 	}
 
-	return validate(cli, prometheusValidateQuery)
+	return UpQuery
 }
 
 // validate executes the prometheus query against the provided client.
-func validate(cli prometheus.Client, q string) (*PrometheusMetadata, error) {
-	ctx := NewContext(cli)
+func validate(cli prometheus.Client, q string, config *OpenCostPrometheusConfig) (*PrometheusMetadata, error) {
+	ctx := NewContext(cli, config)
 
 	resUp, _, err := ctx.QuerySync(q)
 	if err != nil {
@@ -57,7 +60,7 @@ func validate(cli prometheus.Client, q string) (*PrometheusMetadata, error) {
 			}, fmt.Errorf("up query does not have job names")
 		}
 
-		if job == "kubecost" {
+		if job == config.JobName {
 			return &PrometheusMetadata{
 				Running:            true,
 				KubecostDataExists: true,

+ 0 - 0
pkg/prom/warning.go → modules/prometheus-source/pkg/prom/warning.go


+ 56 - 0
modules/prometheus-source/pkg/thanos/thanos.go

@@ -0,0 +1,56 @@
+package thanos
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/modules/prometheus-source/pkg/env"
+)
+
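+// package-level settings are read from the environment once at package initialization.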
+var (
+	lock           = new(sync.Mutex)
+	enabled        = env.IsThanosEnabled()
+	queryUrl       = env.GetThanosQueryUrl()
+	offset         = env.GetThanosOffset()
+	maxSourceRes   = env.GetThanosMaxSourceResolution()
+	offsetDuration *time.Duration
+	queryOffset    = fmt.Sprintf(" offset %s", offset)
+)
+
+// IsEnabled returns true if Thanos is enabled.
+func IsEnabled() bool {
+	return enabled
+}
+
+// QueryURL returns the configured Thanos query URL.
+func QueryURL() string {
+	return queryUrl
+}
+
+// Offset returns the duration string for the query offset that should be applied to thanos queries.
+func Offset() string {
+	return offset
+}
+
+// OffsetDuration returns the Offset as a parsed duration
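+// The offset is parsed once and cached; if parsing fails, a zero duration is returned.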
+func OffsetDuration() time.Duration {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if offsetDuration == nil {
+		d, err := time.ParseDuration(offset)
+		if err != nil {
+			d = 0
+		}
+
+		offsetDuration = &d
+	}
+
+	return *offsetDuration
+}
+
+// QueryOffset returns a string in the format: " offset %s" substituting in the Offset() string.
+func QueryOffset() string {
+	return queryOffset
+}

+ 0 - 6
pkg/cloud/alibaba/provider.go

@@ -9,7 +9,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
@@ -747,11 +746,6 @@ func (alibaba *Alibaba) GetManagementPlatform() (string, error) {
 	return "", nil
 }
 
-// Will look at this in Next PR if needed
-func (alibaba *Alibaba) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	return ""
-}
-
 // Will look at this in Next PR if needed
 func (alibaba *Alibaba) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
 

+ 0 - 4
pkg/cloud/aws/provider.go

@@ -352,10 +352,6 @@ var volTypes = map[string]string{
 var loadedAWSSecret bool = false
 var awsSecret *AWSAccessKey = nil
 
-func (aws *AWS) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	return ""
-}
-
 // KubeAttrConversion maps the k8s labels for region to an AWS key
 func (aws *AWS) KubeAttrConversion(region, instanceType, operatingSystem string) string {
 	operatingSystem = strings.ToLower(operatingSystem)

+ 0 - 4
pkg/cloud/azure/provider.go

@@ -1609,10 +1609,6 @@ func (az *Azure) PVPricing(pvk models.PVKey) (*models.PV, error) {
 	return pricing.PV, nil
 }
 
-func (az *Azure) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	return ""
-}
-
 func (az *Azure) ServiceAccountStatus() *models.ServiceAccountStatus {
 	return az.ServiceAccountChecks.GetStatus()
 }

+ 0 - 31
pkg/cloud/gcp/provider.go

@@ -136,37 +136,6 @@ type multiKeyGCPAllocation struct {
 	Cost    float64
 }
 
-// GetLocalStorageQuery returns the cost of local storage for the given window. Setting rate=true
-// returns hourly spend. Setting used=true only tracks used storage, not total.
-func (gcp *GCP) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	// TODO Set to the price for the appropriate storage class. It's not trivial to determine the local storage disk type
-	// See https://cloud.google.com/compute/disks-image-pricing#persistentdisk
-	localStorageCost := 0.04
-
-	baseMetric := "container_fs_limit_bytes"
-	if used {
-		baseMetric = "container_fs_usage_bytes"
-	}
-
-	fmtOffset := timeutil.DurationToPromOffsetString(offset)
-
-	fmtCumulativeQuery := `sum(
-		sum_over_time(%s{device!="tmpfs", id="/", %s}[%s:1m]%s)
-	) by (%s) / 60 / 730 / 1024 / 1024 / 1024 * %f`
-
-	fmtMonthlyQuery := `sum(
-		avg_over_time(%s{device!="tmpfs", id="/", %s}[%s:1m]%s)
-	) by (%s) / 1024 / 1024 / 1024 * %f`
-
-	fmtQuery := fmtCumulativeQuery
-	if rate {
-		fmtQuery = fmtMonthlyQuery
-	}
-	fmtWindow := timeutil.DurationString(window)
-
-	return fmt.Sprintf(fmtQuery, baseMetric, env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), localStorageCost)
-}
-
 func (gcp *GCP) GetConfig() (*models.CustomPricing, error) {
 	c, err := gcp.Config.GetCustomPricingData()
 	if err != nil {

+ 0 - 2
pkg/cloud/models/models.go

@@ -7,7 +7,6 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
-	"time"
 
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/core/pkg/log"
@@ -320,7 +319,6 @@ type Provider interface {
 	UpdateConfigFromConfigMap(map[string]string) (*CustomPricing, error)
 	GetConfig() (*CustomPricing, error)
 	GetManagementPlatform() (string, error)
-	GetLocalStorageQuery(time.Duration, time.Duration, bool, bool) string
 	ApplyReservedInstancePricing(map[string]*Node)
 	ServiceAccountStatus() *ServiceAccountStatus
 	PricingSourceStatus() map[string]*PricingSource

+ 0 - 5
pkg/cloud/oracle/provider.go

@@ -5,7 +5,6 @@ import (
 	"io"
 	"strconv"
 	"sync"
-	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
@@ -298,10 +297,6 @@ func (o *Oracle) GetOrphanedResources() ([]models.OrphanedResource, error) {
 	return nil, nil
 }
 
-func (o *Oracle) GetLocalStorageQuery(duration time.Duration, duration2 time.Duration, b bool, b2 bool) string {
-	return ""
-}
-
 func (o *Oracle) ApplyReservedInstancePricing(m map[string]*models.Node) {}
 
 func (o *Oracle) ServiceAccountStatus() *models.ServiceAccountStatus {

+ 0 - 6
pkg/cloud/otc/provider.go

@@ -8,7 +8,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
@@ -532,11 +531,6 @@ func (otc *OTC) GetManagementPlatform() (string, error) {
 	return "", nil
 }
 
-// TODO: Implement method
-func (otc *OTC) GetLocalStorageQuery(start, end time.Duration, isPVC, isDeleted bool) string {
-	return ""
-}
-
 // TODO: Implement method
 func (otc *OTC) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
 }

+ 0 - 5
pkg/cloud/provider/customprovider.go

@@ -6,7 +6,6 @@ import (
 	"io"
 	"strconv"
 	"sync"
-	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
@@ -81,10 +80,6 @@ func (*CustomProvider) ClusterManagementPricing() (string, float64, error) {
 	return "", 0.0, nil
 }
 
-func (*CustomProvider) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	return ""
-}
-
 func (cp *CustomProvider) GetConfig() (*models.CustomPricing, error) {
 	return cp.Config.GetCustomPricingData()
 }

+ 0 - 5
pkg/cloud/scaleway/provider.go

@@ -7,7 +7,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -358,10 +357,6 @@ func (scw *Scaleway) GetConfig() (*models.CustomPricing, error) {
 	return c, nil
 }
 
-func (*Scaleway) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
-	return ""
-}
-
 func (scw *Scaleway) GetManagementPlatform() (string, error) {
 	nodes := scw.Clientset.GetAllNodes()
 

+ 1 - 1
pkg/cmd/agent/agent.go

@@ -12,6 +12,7 @@ import (
 	"github.com/opencost/opencost/pkg/util/watcher"
 
 	"github.com/opencost/opencost/core/pkg/version"
+	"github.com/opencost/opencost/modules/prometheus-source/pkg/prom"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
@@ -20,7 +21,6 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
-	"github.com/opencost/opencost/pkg/prom"
 
 	prometheus "github.com/prometheus/client_golang/api"
 	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"

+ 49 - 47
pkg/costmodel/aggregation.go

@@ -13,10 +13,10 @@ import (
 	"github.com/julienschmidt/httprouter"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/patrickmn/go-cache"
-	prometheusClient "github.com/prometheus/client_golang/api"
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/httputil"
 	"github.com/opencost/opencost/core/pkg/util/json"
@@ -25,8 +25,6 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/errors"
-	"github.com/opencost/opencost/pkg/prom"
-	"github.com/opencost/opencost/pkg/thanos"
 )
 
 const (
@@ -199,7 +197,7 @@ func GetTotalContainerCost(costData map[string]*CostData, rate string, cp models
 	return totalContainerCost
 }
 
-func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli prometheusClient.Client, cp models.Provider, discount float64, customDiscount float64, window, offset time.Duration) (map[string]float64, error) {
+func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, discount float64, customDiscount float64, window, offset time.Duration) (map[string]float64, error) {
 	coefficients := make(map[string]float64)
 
 	profileName := "ComputeIdleCoefficient: ComputeClusterCosts"
@@ -212,7 +210,7 @@ func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli pro
 	if data, valid := a.ClusterCostsCache.Get(key); valid {
 		clusterCosts = data.(map[string]*ClusterCosts)
 	} else {
-		clusterCosts, err = a.ComputeClusterCosts(cli, cp, window, offset, false)
+		clusterCosts, err = a.ComputeClusterCosts(a.DataSource, a.CloudProvider, window, offset, false)
 		if err != nil {
 			return nil, err
 		}
@@ -234,7 +232,7 @@ func (a *Accesses) ComputeIdleCoefficient(costData map[string]*CostData, cli pro
 		totalContainerCost := 0.0
 		for _, costDatum := range costData {
 			if costDatum.ClusterID == cid {
-				cpuv, ramv, gpuv, pvvs, _ := getPriceVectors(cp, costDatum, "", discount, customDiscount, 1)
+				cpuv, ramv, gpuv, pvvs, _ := getPriceVectors(a.CloudProvider, costDatum, "", discount, customDiscount, 1)
 				totalContainerCost += totalVectors(cpuv)
 				totalContainerCost += totalVectors(ramv)
 				totalContainerCost += totalVectors(gpuv)
@@ -1051,7 +1049,7 @@ func DefaultAggregateQueryOpts() *AggregateQueryOpts {
 
 // ComputeAggregateCostModel computes cost data for the given window, then aggregates it by the given fields.
 // Data is cached on two levels: the aggregation is cached as well as the underlying cost data.
-func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client, window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error) {
+func (a *Accesses) ComputeAggregateCostModel(window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error) {
 	// Window is the range of the query, i.e. (start, end)
 	// It must be closed, i.e. neither start nor end can be nil
 	if window.IsOpen() {
@@ -1361,16 +1359,18 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 	// parametrize cache key by all request parameters
 	aggKey := GenerateAggKey(window, field, subfields, opts)
 
-	thanosOffset := time.Now().Add(-thanos.OffsetDuration())
-	if a.ThanosClient != nil && window.End().After(thanosOffset) {
-		log.Infof("ComputeAggregateCostModel: setting end time backwards to first present data")
+	/*
+		thanosOffset := time.Now().Add(-thanos.OffsetDuration())
+		if a.ThanosClient != nil && window.End().After(thanosOffset) {
+			log.Infof("ComputeAggregateCostModel: setting end time backwards to first present data")
 
-		// Apply offsets to both end and start times to maintain correct time range
-		deltaDuration := window.End().Sub(thanosOffset)
-		s := window.Start().Add(-1 * deltaDuration)
-		e := time.Now().Add(-thanos.OffsetDuration())
-		window.Set(&s, &e)
-	}
+			// Apply offsets to both end and start times to maintain correct time range
+			deltaDuration := window.End().Sub(thanosOffset)
+			s := window.Start().Add(-1 * deltaDuration)
+			e := time.Now().Add(-thanos.OffsetDuration())
+			window.Set(&s, &e)
+		}
+	*/
 
 	dur, off := window.DurationOffsetStrings()
 	key := fmt.Sprintf(`%s:%s:%fh:%t`, dur, off, resolution.Hours(), remoteEnabled)
@@ -1384,7 +1384,7 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 		if !ok {
 			// disable cache and recompute if type cast fails
 			log.Errorf("ComputeAggregateCostModel: caching error: failed to cast aggregate data to struct: %s", aggKey)
-			return a.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
+			return a.ComputeAggregateCostModel(window, field, subfields, opts)
 		}
 		return result, fmt.Sprintf("aggregate cache hit: %s", aggKey), nil
 	}
@@ -1412,12 +1412,12 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 	} else {
 		log.Infof("ComputeAggregateCostModel: missed cache: %s (found %t, disableAggregateCostModelCache %t, noCache %t)", key, found, disableAggregateCostModelCache, noCache)
 
-		costData, err = a.Model.ComputeCostDataRange(promClient, a.CloudProvider, window, resolution, "", "", remoteEnabled)
+		costData, err = a.Model.ComputeCostDataRange(window, resolution, "", "")
 		if err != nil {
-			if prom.IsErrorCollection(err) {
+			if source.IsErrorCollection(err) {
 				return nil, "", err
 			}
-			if pce, ok := err.(prom.CommError); ok {
+			if pce, ok := err.(source.CommError); ok {
 				return nil, "", pce
 			}
 			if strings.Contains(err.Error(), "data is empty") {
@@ -1470,27 +1470,29 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 			return nil, "", err
 		}
 
-		if a.ThanosClient != nil && off < thanos.OffsetDuration() {
-			// Determine difference between the Thanos offset and the requested
-			// offset; e.g. off=1h, thanosOffsetDuration=3h => diff=2h
-			diff := thanos.OffsetDuration() - off
+		/*
+			if a.ThanosClient != nil && off < thanos.OffsetDuration() {
+				// Determine difference between the Thanos offset and the requested
+				// offset; e.g. off=1h, thanosOffsetDuration=3h => diff=2h
+				diff := thanos.OffsetDuration() - off
 
-			// Reduce duration by difference and increase offset by difference
-			// e.g. 24h offset 0h => 21h offset 3h
-			dur = dur - diff
-			off = thanos.OffsetDuration()
+				// Reduce duration by difference and increase offset by difference
+				// e.g. 24h offset 0h => 21h offset 3h
+				dur = dur - diff
+				off = thanos.OffsetDuration()
 
-			log.Infof("ComputeAggregateCostModel: setting duration, offset to %s, %s due to Thanos", dur, off)
+				log.Infof("ComputeAggregateCostModel: setting duration, offset to %s, %s due to Thanos", dur, off)
 
-			// Idle computation cannot be fulfilled for some windows, specifically
-			// those with sum(duration, offset) < Thanos offset, because there is
-			// no data within that window.
-			if dur <= 0 {
-				return nil, "", fmt.Errorf("requested idle coefficients from Thanos for illegal duration, offset: %s, %s (original window %s)", dur, off, window)
+				// Idle computation cannot be fulfilled for some windows, specifically
+				// those with sum(duration, offset) < Thanos offset, because there is
+				// no data within that window.
+				if dur <= 0 {
+					return nil, "", fmt.Errorf("requested idle coefficients from Thanos for illegal duration, offset: %s, %s (original window %s)", dur, off, window)
+				}
 			}
-		}
+		*/
 
-		idleCoefficients, err = a.ComputeIdleCoefficient(costData, promClient, a.CloudProvider, discount, customDiscount, dur, off)
+		idleCoefficients, err = a.ComputeIdleCoefficient(costData, discount, customDiscount, dur, off)
 		if err != nil {
 			durStr, offStr := timeutil.DurationOffsetStrings(dur, off)
 			log.Errorf("ComputeAggregateCostModel: error computing idle coefficient: duration=%s, offset=%s, err=%s", durStr, offStr, err)
@@ -1729,7 +1731,7 @@ func GenerateAggKey(window opencost.Window, field string, subfields []string, op
 // a brutal interface, which should be cleaned up, but it's necessary for
 // being able to swap in an ETL-backed implementation.
 type Aggregator interface {
-	ComputeAggregateCostModel(promClient prometheusClient.Client, window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error)
+	ComputeAggregateCostModel(window opencost.Window, field string, subfields []string, opts *AggregateQueryOpts) (map[string]*Aggregation, string, error)
 }
 
 func (a *Accesses) warmAggregateCostModelCache() {
@@ -1741,13 +1743,15 @@ func (a *Accesses) warmAggregateCostModelCache() {
 	// if the default parameters change, the old cached defaults with eventually expire. Thus, the
 	// timing of the cache expiry/refresh is the only mechanism ensuring 100% cache warmth.
 	warmFunc := func(duration, offset time.Duration, cacheEfficiencyData bool) (error, error) {
-		if a.ThanosClient != nil {
-			duration = thanos.OffsetDuration()
-			log.Infof("Setting Offset to %s", duration)
-		}
+		/*
+			if a.ThanosClient != nil {
+				duration = thanos.OffsetDuration()
+				log.Infof("Setting Offset to %s", duration)
+			}
+		*/
 		fmtDuration, fmtOffset := timeutil.DurationOffsetStrings(duration, offset)
 		durationHrs, err := timeutil.FormatDurationStringDaysToHours(fmtDuration)
-		promClient := a.GetPrometheusClient(true)
+		//promClient := a.GetPrometheusClient(true)
 
 		windowStr := fmt.Sprintf("%s offset %s", fmtDuration, fmtOffset)
 		window, err := opencost.ParseWindowUTC(windowStr)
@@ -1782,12 +1786,12 @@ func (a *Accesses) warmAggregateCostModelCache() {
 		log.Infof("aggregation: cache warming defaults: %s", aggKey)
 		key := fmt.Sprintf("%s:%s", durationHrs, fmtOffset)
 
-		_, _, aggErr := a.ComputeAggregateCostModel(promClient, window, field, subfields, aggOpts)
+		_, _, aggErr := a.ComputeAggregateCostModel(window, field, subfields, aggOpts)
 		if aggErr != nil {
 			log.Infof("Error building cache %s: %s", window, aggErr)
 		}
 
-		totals, err := a.ComputeClusterCosts(promClient, a.CloudProvider, duration, offset, cacheEfficiencyData)
+		totals, err := a.ComputeClusterCosts(a.DataSource, a.CloudProvider, duration, offset, cacheEfficiencyData)
 		if err != nil {
 			log.Infof("Error building cluster costs cache %s", key)
 		}
@@ -2059,11 +2063,9 @@ func (a *Accesses) AggregateCostModelHandler(w http.ResponseWriter, r *http.Requ
 	// enable remote if it is available and not disabled
 	opts.RemoteEnabled = remote && env.IsRemoteEnabled()
 
-	promClient := a.GetPrometheusClient(remote)
-
 	var data map[string]*Aggregation
 	var message string
-	data, message, err = a.AggAPI.ComputeAggregateCostModel(promClient, window, field, subfields, opts)
+	data, message, err = a.AggAPI.ComputeAggregateCostModel(window, field, subfields, opts)
 
 	// Find any warnings in http request context
 	warning, _ := httputil.GetWarning(r)

+ 6 - 5
pkg/costmodel/allocation.go

@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 
 	"github.com/opencost/opencost/core/pkg/log"
@@ -124,7 +125,7 @@ func (cm *CostModel) Name() string {
 func (cm *CostModel) ComputeAllocation(start, end time.Time, resolution time.Duration) (*opencost.AllocationSet, error) {
 
 	// If the duration is short enough, compute the AllocationSet directly
-	if end.Sub(start) <= cm.MaxPrometheusQueryDuration {
+	if end.Sub(start) <= cm.BatchDuration {
 		as, _, err := cm.computeAllocation(start, end, resolution)
 		return as, err
 	}
@@ -145,8 +146,8 @@ func (cm *CostModel) ComputeAllocation(start, end time.Time, resolution time.Dur
 		// any individual query duration exceed the configured max Prometheus
 		// query duration.
 		duration := end.Sub(e)
-		if duration > cm.MaxPrometheusQueryDuration {
-			duration = cm.MaxPrometheusQueryDuration
+		if duration > cm.BatchDuration {
+			duration = cm.BatchDuration
 		}
 
 		// Set start and end parameters (s, e) for next individual computation.
@@ -514,7 +515,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	queryGetGPUInfo := fmt.Sprintf(queryFmtGetGPuInfo, env.GetPromClusterFilter(), durStr)
 	resChGetGPUInfo := ctx.QueryAtTime(queryGetGPUInfo, end)
 
-	var resChNodeLabels prom.QueryResultsChan
+	var resChNodeLabels source.QueryResultsChan
 	if env.GetAllocationNodeLabelsEnabled() {
 		queryNodeLabels := fmt.Sprintf(queryFmtNodeLabels, env.GetPromClusterFilter(), durStr)
 		resChNodeLabels = ctx.QueryAtTime(queryNodeLabels, end)
@@ -601,7 +602,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	resNetInternetGiB, _ := resChNetInternetGiB.Await()
 	resNetInternetCostPerGiB, _ := resChNetInternetCostPerGiB.Await()
 
-	var resNodeLabels []*prom.QueryResult
+	var resNodeLabels []*source.QueryResult
 	if env.GetAllocationNodeLabelsEnabled() {
 		if env.GetAllocationNodeLabelsEnabled() {
 			resNodeLabels, _ = resChNodeLabels.Await()

+ 60 - 59
pkg/costmodel/allocation_helpers.go

@@ -9,6 +9,7 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 	"github.com/opencost/opencost/pkg/cloud/provider"
@@ -64,7 +65,7 @@ func (cm *CostModel) buildPodMap(window opencost.Window, resolution, maxBatchSiz
 			batchEnd = end
 		}
 
-		var resPods []*prom.QueryResult
+		var resPods []*source.QueryResult
 		var err error
 		maxTries := 3
 		numTries := 0
@@ -75,7 +76,7 @@ func (cm *CostModel) buildPodMap(window opencost.Window, resolution, maxBatchSiz
 			durStr := timeutil.DurationString(batchEnd.Sub(batchStart))
 			if durStr == "" {
 				// Negative duration, so set empty results and don't query
-				resPods = []*prom.QueryResult{}
+				resPods = []*source.QueryResult{}
 				err = nil
 				break
 			}
@@ -107,7 +108,7 @@ func (cm *CostModel) buildPodMap(window opencost.Window, resolution, maxBatchSiz
 		// default setup of Kubecost having replicated kube_pod_container_status_running and
 		// included KSM kube_pod_container_status_running. Querying w/ UID will return both.
 		if ingestPodUID {
-			var resPodsUID []*prom.QueryResult
+			var resPodsUID []*source.QueryResult
 
 			for _, res := range resPods {
 				_, err := res.GetString("uid")
@@ -132,14 +133,14 @@ func (cm *CostModel) buildPodMap(window opencost.Window, resolution, maxBatchSiz
 	return nil
 }
 
-func applyPodResults(window opencost.Window, resolution time.Duration, podMap map[podKey]*pod, clusterStart, clusterEnd map[string]time.Time, resPods []*prom.QueryResult, ingestPodUID bool, podUIDKeyMap map[podKey][]podKey) {
+func applyPodResults(window opencost.Window, resolution time.Duration, podMap map[podKey]*pod, clusterStart, clusterEnd map[string]time.Time, resPods []*source.QueryResult, ingestPodUID bool, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resPods {
 		if len(res.Values) == 0 {
 			log.Warnf("CostModel.ComputeAllocation: empty minutes result")
 			continue
 		}
 
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -210,7 +211,7 @@ func applyPodResults(window opencost.Window, resolution time.Duration, podMap ma
 	}
 }
 
-func applyCPUCoresAllocated(podMap map[podKey]*pod, resCPUCoresAllocated []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyCPUCoresAllocated(podMap map[podKey]*pod, resCPUCoresAllocated []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resCPUCoresAllocated {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -254,7 +255,7 @@ func applyCPUCoresAllocated(podMap map[podKey]*pod, resCPUCoresAllocated []*prom
 			hours := thisPod.Allocations[container].Minutes() / 60.0
 			thisPod.Allocations[container].CPUCoreHours = cpuCores * hours
 
-			node, err := res.GetString("node")
+			node, err := res.GetNode()
 			if err != nil {
 				log.Warnf("CostModel.ComputeAllocation: CPU allocation query result missing 'node': %s", key)
 				continue
@@ -265,7 +266,7 @@ func applyCPUCoresAllocated(podMap map[podKey]*pod, resCPUCoresAllocated []*prom
 	}
 }
 
-func applyCPUCoresRequested(podMap map[podKey]*pod, resCPUCoresRequested []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyCPUCoresRequested(podMap map[podKey]*pod, resCPUCoresRequested []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resCPUCoresRequested {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -312,7 +313,7 @@ func applyCPUCoresRequested(podMap map[podKey]*pod, resCPUCoresRequested []*prom
 				log.Infof("[WARNING] Very large cpu allocation, clamping! to %f", res.Values[0].Value*(thisPod.Allocations[container].Minutes()/60.0))
 				thisPod.Allocations[container].CPUCoreHours = res.Values[0].Value * (thisPod.Allocations[container].Minutes() / 60.0)
 			}
-			node, err := res.GetString("node")
+			node, err := res.GetNode()
 			if err != nil {
 				log.Warnf("CostModel.ComputeAllocation: CPU request query result missing 'node': %s", key)
 				continue
@@ -323,7 +324,7 @@ func applyCPUCoresRequested(podMap map[podKey]*pod, resCPUCoresRequested []*prom
 	}
 }
 
-func applyCPUCoresUsedAvg(podMap map[podKey]*pod, resCPUCoresUsedAvg []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyCPUCoresUsedAvg(podMap map[podKey]*pod, resCPUCoresUsedAvg []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resCPUCoresUsedAvg {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -371,7 +372,7 @@ func applyCPUCoresUsedAvg(podMap map[podKey]*pod, resCPUCoresUsedAvg []*prom.Que
 	}
 }
 
-func applyCPUCoresUsedMax(podMap map[podKey]*pod, resCPUCoresUsedMax []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyCPUCoresUsedMax(podMap map[podKey]*pod, resCPUCoresUsedMax []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resCPUCoresUsedMax {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -421,7 +422,7 @@ func applyCPUCoresUsedMax(podMap map[podKey]*pod, resCPUCoresUsedMax []*prom.Que
 	}
 }
 
-func applyRAMBytesAllocated(podMap map[podKey]*pod, resRAMBytesAllocated []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyRAMBytesAllocated(podMap map[podKey]*pod, resRAMBytesAllocated []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resRAMBytesAllocated {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -461,7 +462,7 @@ func applyRAMBytesAllocated(podMap map[podKey]*pod, resRAMBytesAllocated []*prom
 			hours := thisPod.Allocations[container].Minutes() / 60.0
 			thisPod.Allocations[container].RAMByteHours = ramBytes * hours
 
-			node, err := res.GetString("node")
+			node, err := res.GetNode()
 			if err != nil {
 				log.Warnf("CostModel.ComputeAllocation: RAM allocation query result missing 'node': %s", key)
 				continue
@@ -472,7 +473,7 @@ func applyRAMBytesAllocated(podMap map[podKey]*pod, resRAMBytesAllocated []*prom
 	}
 }
 
-func applyRAMBytesRequested(podMap map[podKey]*pod, resRAMBytesRequested []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyRAMBytesRequested(podMap map[podKey]*pod, resRAMBytesRequested []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resRAMBytesRequested {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -516,7 +517,7 @@ func applyRAMBytesRequested(podMap map[podKey]*pod, resRAMBytesRequested []*prom
 				pod.Allocations[container].RAMByteHours = res.Values[0].Value * (pod.Allocations[container].Minutes() / 60.0)
 			}
 
-			node, err := res.GetString("node")
+			node, err := res.GetNode()
 			if err != nil {
 				log.Warnf("CostModel.ComputeAllocation: RAM request query result missing 'node': %s", key)
 				continue
@@ -527,7 +528,7 @@ func applyRAMBytesRequested(podMap map[podKey]*pod, resRAMBytesRequested []*prom
 	}
 }
 
-func applyRAMBytesUsedAvg(podMap map[podKey]*pod, resRAMBytesUsedAvg []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyRAMBytesUsedAvg(podMap map[podKey]*pod, resRAMBytesUsedAvg []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resRAMBytesUsedAvg {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -571,7 +572,7 @@ func applyRAMBytesUsedAvg(podMap map[podKey]*pod, resRAMBytesUsedAvg []*prom.Que
 	}
 }
 
-func applyRAMBytesUsedMax(podMap map[podKey]*pod, resRAMBytesUsedMax []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyRAMBytesUsedMax(podMap map[podKey]*pod, resRAMBytesUsedMax []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resRAMBytesUsedMax {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -622,7 +623,7 @@ func applyRAMBytesUsedMax(podMap map[podKey]*pod, resRAMBytesUsedMax []*prom.Que
 }
 
 // same func is used for both GPUUsageAvg and GPUUsageMax
-func applyGPUUsage(podMap map[podKey]*pod, resGPUUsageAvgOrMax []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, mode string) {
+func applyGPUUsage(podMap map[podKey]*pod, resGPUUsageAvgOrMax []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, mode string) {
 	// Example PromQueryResult: {container="dcgmproftester12", namespace="gpu", pod="dcgmproftester3-deployment-fc89c8dd6-ph7z5"} 0.997307
 	for _, res := range resGPUUsageAvgOrMax {
 		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
@@ -720,7 +721,7 @@ func applyGPUUsage(podMap map[podKey]*pod, resGPUUsageAvgOrMax []*prom.QueryResu
 	}
 }
 
-func applyGPUsAllocated(podMap map[podKey]*pod, resGPUsRequested []*prom.QueryResult, resGPUsAllocated []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyGPUsAllocated(podMap map[podKey]*pod, resGPUsRequested []*source.QueryResult, resGPUsAllocated []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	if len(resGPUsAllocated) > 0 { // Use the new query, when it's become available in a window
 		resGPUsRequested = resGPUsAllocated
 	}
@@ -779,7 +780,7 @@ func applyGPUsAllocated(podMap map[podKey]*pod, resGPUsRequested []*prom.QueryRe
 	}
 }
 
-func applyNetworkTotals(podMap map[podKey]*pod, resNetworkTransferBytes []*prom.QueryResult, resNetworkReceiveBytes []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+func applyNetworkTotals(podMap map[podKey]*pod, resNetworkTransferBytes []*source.QueryResult, resNetworkReceiveBytes []*source.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	for _, res := range resNetworkTransferBytes {
 		podKey, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
 		if err != nil {
@@ -842,11 +843,11 @@ func applyNetworkTotals(podMap map[podKey]*pod, resNetworkTransferBytes []*prom.
 	}
 }
 
-func applyNetworkAllocation(podMap map[podKey]*pod, resNetworkGiB []*prom.QueryResult, resNetworkCostPerGiB []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, networkCostSubType string) {
+func applyNetworkAllocation(podMap map[podKey]*pod, resNetworkGiB []*source.QueryResult, resNetworkCostPerGiB []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, networkCostSubType string) {
 	costPerGiBByCluster := map[string]float64{}
 
 	for _, res := range resNetworkCostPerGiB {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -899,7 +900,7 @@ func applyNetworkAllocation(podMap map[podKey]*pod, resNetworkGiB []*prom.QueryR
 	}
 }
 
-func resToNodeLabels(resNodeLabels []*prom.QueryResult) map[nodeKey]map[string]string {
+func resToNodeLabels(resNodeLabels []*source.QueryResult) map[nodeKey]map[string]string {
 	nodeLabels := map[nodeKey]map[string]string{}
 
 	for _, res := range resNodeLabels {
@@ -935,7 +936,7 @@ func resToNodeLabels(resNodeLabels []*prom.QueryResult) map[nodeKey]map[string]s
 	return nodeLabels
 }
 
-func resToNamespaceLabels(resNamespaceLabels []*prom.QueryResult) map[namespaceKey]map[string]string {
+func resToNamespaceLabels(resNamespaceLabels []*source.QueryResult) map[namespaceKey]map[string]string {
 	namespaceLabels := map[namespaceKey]map[string]string{}
 
 	for _, res := range resNamespaceLabels {
@@ -956,7 +957,7 @@ func resToNamespaceLabels(resNamespaceLabels []*prom.QueryResult) map[namespaceK
 	return namespaceLabels
 }
 
-func resToPodLabels(resPodLabels []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]map[string]string {
+func resToPodLabels(resPodLabels []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]map[string]string {
 	podLabels := map[podKey]map[string]string{}
 
 	for _, res := range resPodLabels {
@@ -991,11 +992,11 @@ func resToPodLabels(resPodLabels []*prom.QueryResult, podUIDKeyMap map[podKey][]
 	return podLabels
 }
 
-func resToNamespaceAnnotations(resNamespaceAnnotations []*prom.QueryResult) map[string]map[string]string {
+func resToNamespaceAnnotations(resNamespaceAnnotations []*source.QueryResult) map[string]map[string]string {
 	namespaceAnnotations := map[string]map[string]string{}
 
 	for _, res := range resNamespaceAnnotations {
-		namespace, err := res.GetString("namespace")
+		namespace, err := res.GetNamespace()
 		if err != nil {
 			continue
 		}
@@ -1012,7 +1013,7 @@ func resToNamespaceAnnotations(resNamespaceAnnotations []*prom.QueryResult) map[
 	return namespaceAnnotations
 }
 
-func resToPodAnnotations(resPodAnnotations []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]map[string]string {
+func resToPodAnnotations(resPodAnnotations []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]map[string]string {
 	podAnnotations := map[podKey]map[string]string{}
 
 	for _, res := range resPodAnnotations {
@@ -1127,7 +1128,7 @@ func applyAnnotations(podMap map[podKey]*pod, namespaceAnnotations map[string]ma
 	}
 }
 
-func resToDeploymentLabels(resDeploymentLabels []*prom.QueryResult) map[controllerKey]map[string]string {
+func resToDeploymentLabels(resDeploymentLabels []*source.QueryResult) map[controllerKey]map[string]string {
 	deploymentLabels := map[controllerKey]map[string]string{}
 
 	for _, res := range resDeploymentLabels {
@@ -1160,7 +1161,7 @@ func resToDeploymentLabels(resDeploymentLabels []*prom.QueryResult) map[controll
 	return deploymentLabels
 }
 
-func resToStatefulSetLabels(resStatefulSetLabels []*prom.QueryResult) map[controllerKey]map[string]string {
+func resToStatefulSetLabels(resStatefulSetLabels []*source.QueryResult) map[controllerKey]map[string]string {
 	statefulSetLabels := map[controllerKey]map[string]string{}
 
 	for _, res := range resStatefulSetLabels {
@@ -1222,7 +1223,7 @@ func labelsToPodControllerMap(podLabels map[podKey]map[string]string, controller
 	return podControllerMap
 }
 
-func resToPodDaemonSetMap(resDaemonSetLabels []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
+func resToPodDaemonSetMap(resDaemonSetLabels []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
 	daemonSetLabels := map[podKey]controllerKey{}
 
 	for _, res := range resDaemonSetLabels {
@@ -1258,7 +1259,7 @@ func resToPodDaemonSetMap(resDaemonSetLabels []*prom.QueryResult, podUIDKeyMap m
 	return daemonSetLabels
 }
 
-func resToPodJobMap(resJobLabels []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
+func resToPodJobMap(resJobLabels []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
 	jobLabels := map[podKey]controllerKey{}
 
 	for _, res := range resJobLabels {
@@ -1301,7 +1302,7 @@ func resToPodJobMap(resJobLabels []*prom.QueryResult, podUIDKeyMap map[podKey][]
 	return jobLabels
 }
 
-func resToPodReplicaSetMap(resPodsWithReplicaSetOwner []*prom.QueryResult, resReplicaSetsWithoutOwners []*prom.QueryResult, resReplicaSetsWithRolloutOwner []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
+func resToPodReplicaSetMap(resPodsWithReplicaSetOwner []*source.QueryResult, resReplicaSetsWithoutOwners []*source.QueryResult, resReplicaSetsWithRolloutOwner []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) map[podKey]controllerKey {
 	// Build out set of ReplicaSets that have no owners, themselves, such that
 	// the ReplicaSet should be used as the owner of the Pods it controls.
 	// (This should exclude, for example, ReplicaSets that are controlled by
@@ -1388,7 +1389,7 @@ func applyControllersToPods(podMap map[podKey]*pod, podControllerMap map[podKey]
 
 /* Service Helpers */
 
-func getServiceLabels(resServiceLabels []*prom.QueryResult) map[serviceKey]map[string]string {
+func getServiceLabels(resServiceLabels []*source.QueryResult) map[serviceKey]map[string]string {
 	serviceLabels := map[serviceKey]map[string]string{}
 
 	for _, res := range resServiceLabels {
@@ -1464,7 +1465,7 @@ func applyServicesToPods(podMap map[podKey]*pod, podLabels map[podKey]map[string
 	}
 }
 
-func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMins []*prom.QueryResult, resolution time.Duration, window opencost.Window) {
+func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMins []*source.QueryResult, resolution time.Duration, window opencost.Window) {
 	for _, res := range resLBActiveMins {
 		serviceKey, err := resultServiceKey(res, env.GetPromClusterLabel(), "namespace", "service_name")
 		if err != nil || len(res.Values) == 0 {
@@ -1598,14 +1599,14 @@ func applyLoadBalancersToPods(window opencost.Window, podMap map[podKey]*pod, lb
 
 /* Node Helpers */
 
-func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr []*prom.QueryResult) {
+func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr []*source.QueryResult) {
 	for _, res := range resNodeCostPerCPUHr {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 			continue
@@ -1616,7 +1617,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
 
-		providerID, err := res.GetString("provider_id")
+		providerID, err := res.GetProviderID()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node CPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
@@ -1634,14 +1635,14 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 	}
 }
 
-func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRAMGiBHr []*prom.QueryResult) {
+func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRAMGiBHr []*source.QueryResult) {
 	for _, res := range resNodeCostPerRAMGiBHr {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: \"%s\" for node \"%s\"", err, node)
 			continue
@@ -1652,7 +1653,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
 
-		providerID, err := res.GetString("provider_id")
+		providerID, err := res.GetProviderID()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node RAM cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
@@ -1670,14 +1671,14 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 	}
 }
 
-func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr []*prom.QueryResult) {
+func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr []*source.QueryResult) {
 	for _, res := range resNodeCostPerGPUHr {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 			continue
@@ -1688,7 +1689,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
 
-		providerID, err := res.GetString("provider_id")
+		providerID, err := res.GetProviderID()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node GPU cost query result missing field: \"%s\" for node \"%s\"", err, node)
 		}
@@ -1706,14 +1707,14 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 	}
 }
 
-func applyNodeSpot(nodeMap map[nodeKey]*nodePricing, resNodeIsSpot []*prom.QueryResult) {
+func applyNodeSpot(nodeMap map[nodeKey]*nodePricing, resNodeIsSpot []*source.QueryResult) {
 	for _, res := range resNodeIsSpot {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node spot query result missing field: %s", err)
 			continue
@@ -1917,7 +1918,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*nodePricing, nodeKey no
 
 /* PV/PVC Helpers */
 
-func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHour, resPVActiveMins, resPVMeta []*prom.QueryResult, window opencost.Window) {
+func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHour, resPVActiveMins, resPVMeta []*source.QueryResult, window opencost.Window) {
 	for _, result := range resPVActiveMins {
 		key, err := resultPVKey(result, env.GetPromClusterLabel(), "persistentvolume")
 		if err != nil {
@@ -1964,7 +1965,7 @@ func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHo
 
 		// only add metadata for disks that exist in the other metrics
 		if _, ok := pvMap[key]; ok {
-			provId, err := result.GetString("provider_id")
+			provId, err := result.GetProviderID()
 			if err != nil {
 				log.Warnf("error getting provider id for PV %v: %v", key, err)
 				continue
@@ -1975,7 +1976,7 @@ func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHo
 	}
 }
 
-func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*prom.QueryResult) {
+func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*source.QueryResult) {
 	for _, res := range resPVBytes {
 		key, err := resultPVKey(res, env.GetPromClusterLabel(), "persistentvolume")
 		if err != nil {
@@ -1998,9 +1999,9 @@ func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*prom.QueryResult) {
 	}
 }
 
-func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvKey]*pv, resPVCInfo []*prom.QueryResult, window opencost.Window) {
+func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvKey]*pv, resPVCInfo []*source.QueryResult, window opencost.Window) {
 	for _, res := range resPVCInfo {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -2043,7 +2044,7 @@ func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvK
 	}
 }
 
-func applyPVCBytesRequested(pvcMap map[pvcKey]*pvc, resPVCBytesRequested []*prom.QueryResult) {
+func applyPVCBytesRequested(pvcMap map[pvcKey]*pvc, resPVCBytesRequested []*source.QueryResult) {
 	for _, res := range resPVCBytesRequested {
 		key, err := resultPVCKey(res, env.GetPromClusterLabel(), "namespace", "persistentvolumeclaim")
 		if err != nil {
@@ -2058,9 +2059,9 @@ func applyPVCBytesRequested(pvcMap map[pvcKey]*pvc, resPVCBytesRequested []*prom
 	}
 }
 
-func buildPodPVCMap(podPVCMap map[podKey][]*pvc, pvMap map[pvKey]*pv, pvcMap map[pvcKey]*pvc, podMap map[podKey]*pod, resPodPVCAllocation []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) {
+func buildPodPVCMap(podPVCMap map[podKey][]*pvc, pvMap map[pvKey]*pv, pvcMap map[pvcKey]*pvc, podMap map[podKey]*pod, resPodPVCAllocation []*source.QueryResult, podUIDKeyMap map[podKey][]podKey, ingestPodUID bool) {
 	for _, res := range resPodPVCAllocation {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -2364,7 +2365,7 @@ func getUnmountedPodForNamespace(window opencost.Window, podMap map[podKey]*pod,
 	return thisPod
 }
 
-func calculateStartAndEnd(result *prom.QueryResult, resolution time.Duration, window opencost.Window) (time.Time, time.Time) {
+func calculateStartAndEnd(result *source.QueryResult, resolution time.Duration, window opencost.Window) (time.Time, time.Time) {
 	// Start and end for a range vector are pulled from the timestamps of the
 	// first and final values in the range. There is no "offsetting" required
 	// of the start or the end, as we used to do. If you query for a duration

+ 13 - 13
pkg/costmodel/allocation_helpers_test.go

@@ -6,8 +6,8 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
-	"github.com/opencost/opencost/pkg/prom"
 )
 
 const Ki = 1024
@@ -213,13 +213,13 @@ func TestBuildPVMap(t *testing.T) {
 
 	testCases := map[string]struct {
 		resolution              time.Duration
-		resultsPVCostPerGiBHour []*prom.QueryResult
-		resultsActiveMinutes    []*prom.QueryResult
+		resultsPVCostPerGiBHour []*source.QueryResult
+		resultsActiveMinutes    []*source.QueryResult
 		expected                map[pvKey]*pv
 	}{
 		"pvMap1": {
 			resolution: time.Hour * 6,
-			resultsPVCostPerGiBHour: []*prom.QueryResult{
+			resultsPVCostPerGiBHour: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id": "cluster1",
@@ -265,7 +265,7 @@ func TestBuildPVMap(t *testing.T) {
 					},
 				},
 			},
-			resultsActiveMinutes: []*prom.QueryResult{
+			resultsActiveMinutes: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id":       "cluster1",
@@ -354,7 +354,7 @@ func TestBuildPVMap(t *testing.T) {
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
 			pvMap := make(map[pvKey]*pv)
-			buildPVMap(testCase.resolution, pvMap, testCase.resultsPVCostPerGiBHour, testCase.resultsActiveMinutes, []*prom.QueryResult{}, window)
+			buildPVMap(testCase.resolution, pvMap, testCase.resultsPVCostPerGiBHour, testCase.resultsActiveMinutes, []*source.QueryResult{}, window)
 			if len(pvMap) != len(testCase.expected) {
 				t.Errorf("pv map does not have the expected length %d : %d", len(pvMap), len(testCase.expected))
 			}
@@ -460,13 +460,13 @@ func TestCalculateStartAndEnd(t *testing.T) {
 		resolution    time.Duration
 		expectedStart time.Time
 		expectedEnd   time.Time
-		result        *prom.QueryResult
+		result        *source.QueryResult
 	}{
 		"1 hour resolution, 1 hour window": {
 			resolution:    time.Hour,
 			expectedStart: windowStart,
 			expectedEnd:   windowStart.Add(time.Hour),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat,
@@ -481,7 +481,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			resolution:    time.Minute * 30,
 			expectedStart: windowStart,
 			expectedEnd:   windowStart.Add(time.Hour),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat,
@@ -499,7 +499,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			resolution:    time.Minute * 15,
 			expectedStart: windowStart,
 			expectedEnd:   windowStart.Add(time.Minute * 45),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat + (minute * 0),
@@ -520,7 +520,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			resolution:    time.Minute,
 			expectedStart: windowStart.Add(time.Minute * 15),
 			expectedEnd:   windowStart.Add(time.Minute * 20),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat + (minute * 15),
@@ -547,7 +547,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			resolution:    time.Minute,
 			expectedStart: windowStart.Add(time.Minute * 14).Add(time.Second * 30),
 			expectedEnd:   windowStart.Add(time.Minute * 15).Add(time.Second * 30),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat + (minute * 15),
@@ -559,7 +559,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			resolution:    time.Minute,
 			expectedStart: windowStart,
 			expectedEnd:   windowStart.Add(time.Second * 30),
-			result: &prom.QueryResult{
+			result: &source.QueryResult{
 				Values: []*util.Vector{
 					{
 						Timestamp: startFloat,

+ 13 - 12
pkg/costmodel/allocation_incubating.go

@@ -8,6 +8,7 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/prom"
 )
@@ -243,9 +244,9 @@ func appendNodeData(nodeMap map[string]*NodeTotals, s, e time.Time, nodeData map
 // feature for extending the node details that can be returned with allocation
 // data
 type extendedNodeQueryResults struct {
-	nodeCPUCoreResults  []*prom.QueryResult
-	nodeRAMByteResults  []*prom.QueryResult
-	nodeGPUCountResults []*prom.QueryResult
+	nodeCPUCoreResults  []*source.QueryResult
+	nodeRAMByteResults  []*source.QueryResult
+	nodeGPUCountResults []*source.QueryResult
 }
 
 // queryExtendedNodeData makes additional prometheus queries for node data to append on
@@ -284,14 +285,14 @@ func applyExtendedNodeData(nodeMap map[nodeKey]*nodePricing, results *extendedNo
 	applyNodeGPUCount(nodeMap, results.nodeGPUCountResults)
 }
 
-func applyNodeCPUCores(nodeMap map[nodeKey]*nodePricing, nodeCPUCoreResults []*prom.QueryResult) {
+func applyNodeCPUCores(nodeMap map[nodeKey]*nodePricing, nodeCPUCoreResults []*source.QueryResult) {
 	for _, res := range nodeCPUCoreResults {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node CPU Cores query result missing field: %s", err)
 			continue
@@ -309,14 +310,14 @@ func applyNodeCPUCores(nodeMap map[nodeKey]*nodePricing, nodeCPUCoreResults []*p
 	}
 }
 
-func applyNodeRAMBytes(nodeMap map[nodeKey]*nodePricing, nodeRAMByteResults []*prom.QueryResult) {
+func applyNodeRAMBytes(nodeMap map[nodeKey]*nodePricing, nodeRAMByteResults []*source.QueryResult) {
 	for _, res := range nodeRAMByteResults {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node CPU Cores query result missing field: %s", err)
 			continue
@@ -334,14 +335,14 @@ func applyNodeRAMBytes(nodeMap map[nodeKey]*nodePricing, nodeRAMByteResults []*p
 	}
 }
 
-func applyNodeGPUCount(nodeMap map[nodeKey]*nodePricing, nodeGPUCountResults []*prom.QueryResult) {
+func applyNodeGPUCount(nodeMap map[nodeKey]*nodePricing, nodeGPUCountResults []*source.QueryResult) {
 	for _, res := range nodeGPUCountResults {
-		cluster, err := res.GetString(env.GetPromClusterLabel())
+		cluster, err := res.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := res.GetString("node")
+		node, err := res.GetNode()
 		if err != nil {
 			log.Warnf("CostModel.ComputeAllocation: Node CPU Cores query result missing field: %s", err)
 			continue

+ 3 - 3
pkg/costmodel/assets.go

@@ -156,15 +156,15 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*opencost.AssetSet, er
 }
 
 func (cm *CostModel) ClusterDisks(start, end time.Time) (map[DiskIdentifier]*Disk, error) {
-	return ClusterDisks(cm.PrometheusClient, cm.Provider, start, end)
+	return ClusterDisks(cm.DataSource, cm.Provider, start, end)
 }
 
 func (cm *CostModel) ClusterLoadBalancers(start, end time.Time) (map[LoadBalancerIdentifier]*LoadBalancer, error) {
-	return ClusterLoadBalancers(cm.PrometheusClient, start, end)
+	return ClusterLoadBalancers(cm.DataSource, start, end)
 }
 
 func (cm *CostModel) ClusterNodes(start, end time.Time) (map[NodeIdentifier]*Node, error) {
-	return ClusterNodes(cm.Provider, cm.PrometheusClient, start, end)
+	return ClusterNodes(cm.DataSource, cm.Provider, start, end)
 }
 
 // propertiesFromCluster populates static cluster properties to individual asset properties

+ 161 - 390
pkg/costmodel/cluster.go

@@ -8,15 +8,14 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/pkg/cloud/provider"
-	prometheus "github.com/prometheus/client_golang/api"
 	"golang.org/x/exp/slices"
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
 )
 
 const (
@@ -152,42 +151,18 @@ type DiskIdentifier struct {
 	Name    string
 }
 
-func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.Time) (map[DiskIdentifier]*Disk, error) {
-	// Start from the time "end", querying backwards
-	t := end
-
-	// minsPerResolution determines accuracy and resource use for the following
-	// queries. Smaller values (higher resolution) result in better accuracy,
-	// but more expensive queries, and vice-a-versa.
+func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, start, end time.Time) (map[DiskIdentifier]*Disk, error) {
 	resolution := env.GetETLResolution()
-	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
-	var minsPerResolution int
-	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
-		minsPerResolution = 1
-		log.DedupedWarningf(3, "ClusterDisks(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
-	}
 
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", opencost.NewClosedWindow(start, end))
-	}
+	grp := source.NewQueryGroup()
 
-	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
-	queryPVCost := fmt.Sprintf(`avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume,provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryPVSize := fmt.Sprintf(`avg(avg_over_time(kube_persistentvolume_capacity_bytes{%s}[%s])) by (%s, persistentvolume)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryActiveMins := fmt.Sprintf(`avg(kube_persistentvolume_capacity_bytes{%s}) by (%s, persistentvolume)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-	queryPVStorageClass := fmt.Sprintf(`avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, persistentvolume, storageclass)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryPVUsedAvg := fmt.Sprintf(`avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryPVUsedMax := fmt.Sprintf(`max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryPVCInfo := fmt.Sprintf(`avg(avg_over_time(kube_persistentvolumeclaim_info{%s}[%s])) by (%s, volumename, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-
-	resChPVCost := ctx.QueryAtTime(queryPVCost, t)
-	resChPVSize := ctx.QueryAtTime(queryPVSize, t)
-	resChActiveMins := ctx.QueryAtTime(queryActiveMins, t)
-	resChPVStorageClass := ctx.QueryAtTime(queryPVStorageClass, t)
-	resChPVUsedAvg := ctx.QueryAtTime(queryPVUsedAvg, t)
-	resChPVUsedMax := ctx.QueryAtTime(queryPVUsedMax, t)
-	resChPVCInfo := ctx.QueryAtTime(queryPVCInfo, t)
+	resChPVCost := grp.With(dataSource.QueryPVCost(start, end))
+	resChPVSize := grp.With(dataSource.QueryPVSize(start, end))
+	resChActiveMins := grp.With(dataSource.QueryPVActiveMinutes(start, end))
+	resChPVStorageClass := grp.With(dataSource.QueryPVStorageClass(start, end))
+	resChPVUsedAvg := grp.With(dataSource.QueryPVUsedAverage(start, end))
+	resChPVUsedMax := grp.With(dataSource.QueryPVUsedMax(start, end))
+	resChPVCInfo := grp.With(dataSource.QueryPVCInfo(start, end))
 
 	resPVCost, _ := resChPVCost.Await()
 	resPVSize, _ := resChPVSize.Await()
@@ -206,36 +181,20 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/RootDeviceStorage.html
 	// https://learn.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview#temporary-disk
 	// https://cloud.google.com/compute/docs/disks/local-ssd
-	resLocalStorageCost := []*prom.QueryResult{}
-	resLocalStorageUsedCost := []*prom.QueryResult{}
-	resLocalStorageUsedAvg := []*prom.QueryResult{}
-	resLocalStorageUsedMax := []*prom.QueryResult{}
-	resLocalStorageBytes := []*prom.QueryResult{}
-	resLocalActiveMins := []*prom.QueryResult{}
+	resLocalStorageCost := []*source.QueryResult{}
+	resLocalStorageUsedCost := []*source.QueryResult{}
+	resLocalStorageUsedAvg := []*source.QueryResult{}
+	resLocalStorageUsedMax := []*source.QueryResult{}
+	resLocalStorageBytes := []*source.QueryResult{}
+	resLocalActiveMins := []*source.QueryResult{}
+
 	if env.GetAssetIncludeLocalDiskCost() {
-		// hourlyToCumulative is a scaling factor that, when multiplied by an
-		// hourly value, converts it to a cumulative value; i.e. [$/hr] *
-		// [min/res]*[hr/min] = [$/res]
-		hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
-		costPerGBHr := 0.04 / 730.0
-
-		// container_fs metrics contains metrics for disks that are not local storage of the node. While not perfect to
-		// attempt to identify the correct device which is being used as local storage we first filter for devices mounted
-		// at paths `/dev/nvme.*` or `/dev/sda.*`. There still may be multiple devices mounted at paths matching the regex
-		// so later on we will select the device with the highest `container_fs_limit_bytes` per instance to create a local disk asset
-		queryLocalStorageCost := fmt.Sprintf(`sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-		queryLocalStorageUsedCost := fmt.Sprintf(`sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-		queryLocalStorageUsedAvg := fmt.Sprintf(`avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-		queryLocalStorageUsedMax := fmt.Sprintf(`max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-		queryLocalStorageBytes := fmt.Sprintf(`avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm])`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-		queryLocalActiveMins := fmt.Sprintf(`count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-
-		resChLocalStorageCost := ctx.QueryAtTime(queryLocalStorageCost, t)
-		resChLocalStorageUsedCost := ctx.QueryAtTime(queryLocalStorageUsedCost, t)
-		resChLocalStoreageUsedAvg := ctx.QueryAtTime(queryLocalStorageUsedAvg, t)
-		resChLocalStoreageUsedMax := ctx.QueryAtTime(queryLocalStorageUsedMax, t)
-		resChLocalStorageBytes := ctx.QueryAtTime(queryLocalStorageBytes, t)
-		resChLocalActiveMins := ctx.QueryAtTime(queryLocalActiveMins, t)
+		resChLocalStorageCost := grp.With(dataSource.QueryLocalStorageCost(start, end))
+		resChLocalStorageUsedCost := grp.With(dataSource.QueryLocalStorageUsedCost(start, end))
+		resChLocalStoreageUsedAvg := grp.With(dataSource.QueryLocalStorageUsedAvg(start, end))
+		resChLocalStoreageUsedMax := grp.With(dataSource.QueryLocalStorageUsedMax(start, end))
+		resChLocalStorageBytes := grp.With(dataSource.QueryLocalStorageBytes(start, end))
+		resChLocalActiveMins := grp.With(dataSource.QueryLocalStorageActiveMinutes(start, end))
 
 		resLocalStorageCost, _ = resChLocalStorageCost.Await()
 		resLocalStorageUsedCost, _ = resChLocalStorageUsedCost.Await()
@@ -245,14 +204,14 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 		resLocalActiveMins, _ = resChLocalActiveMins.Await()
 	}
 
-	if ctx.HasErrors() {
-		return nil, ctx.ErrorCollection()
+	if grp.HasErrors() {
+		return nil, grp.Error()
 	}
 
 	diskMap := map[DiskIdentifier]*Disk{}
 
 	for _, result := range resPVCInfo {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -267,7 +226,7 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 			log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 			continue
 		}
-		claimNamespace, err := result.GetString("namespace")
+		claimNamespace, err := result.GetNamespace()
 		if err != nil {
 			log.Debugf("ClusterDisks: pv claim data missing namespace")
 			continue
@@ -299,18 +258,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	// Start with local storage bytes so that the device with the largest size which has passed the
 	// query filters can be determined
 	for _, result := range resLocalStorageBytes {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetString("device")
+		device, err := result.GetDevice()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
@@ -341,18 +300,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	}
 
 	for _, result := range resLocalStorageCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetString("device")
+		device, err := result.GetDevice()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
@@ -369,18 +328,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	}
 
 	for _, result := range resLocalStorageUsedCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage usage data missing instance")
 			continue
 		}
 
-		device, err := result.GetString("device")
+		device, err := result.GetDevice()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
@@ -396,18 +355,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	}
 
 	for _, result := range resLocalStorageUsedAvg {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetString("device")
+		device, err := result.GetDevice()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
@@ -423,18 +382,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	}
 
 	for _, result := range resLocalStorageUsedMax {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetString("device")
+		device, err := result.GetDevice()
 		if err != nil {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
@@ -450,18 +409,18 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	}
 
 	for _, result := range resLocalActiveMins {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.DedupedWarningf(5, "ClusterDisks: local active mins data missing instance")
 			continue
 		}
 
-		providerID, err := result.GetString("provider_id")
+		providerID, err := result.GetProviderID()
 		if err != nil {
 			log.DedupedWarningf(5, "ClusterDisks: local active mins data missing instance")
 			continue
@@ -498,7 +457,7 @@ func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.
 	var unTracedDiskLogData []DiskIdentifier
 	//Iterating through Persistent Volume given by custom metrics kubecost_pv_info and assign the storage class if known and __unknown__ if not populated.
 	for _, result := range resPVStorageClass {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -624,61 +583,29 @@ func costTimesMinute(activeDataMap map[NodeIdentifier]activeData, costMap map[No
 	}
 }
 
-func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.Time) (map[NodeIdentifier]*Node, error) {
-	// Start from the time "end", querying backwards
-	t := end
-
-	// minsPerResolution determines accuracy and resource use for the following
-	// queries. Smaller values (higher resolution) result in better accuracy,
-	// but more expensive queries, and vice-a-versa.
+func ClusterNodes(dataSource source.OpenCostDataSource, cp models.Provider, start, end time.Time) (map[NodeIdentifier]*Node, error) {
 	resolution := env.GetETLResolution()
-	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
-	var minsPerResolution int
-	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
-		minsPerResolution = 1
-		log.DedupedWarningf(3, "ClusterNodes(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
-	}
-
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", opencost.NewClosedWindow(start, end))
-	}
 
-	requiredCtx := prom.NewNamedContext(client, prom.ClusterContextName)
-	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
-
-	queryNodeCPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeCPUCoresCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeCPUCoresAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_cpu_cores{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeRAMHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeRAMBytesCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeRAMBytesAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_memory_bytes{%s}[%s])) by (%s, node)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeGPUCount := fmt.Sprintf(`avg(avg_over_time(node_gpu_count{%s}[%s])) by (%s, node, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeGPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s])) by (%s, node, instance_type, provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryNodeCPUModeTotal := fmt.Sprintf(`sum(rate(node_cpu_seconds_total{%s}[%s:%dm])) by (kubernetes_node, %s, mode)`, env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel())
-	queryNodeRAMSystemPct := fmt.Sprintf(`sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`, env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	queryNodeRAMUserPct := fmt.Sprintf(`sum(sum_over_time(container_memory_working_set_bytes{container_name!="POD",container_name!="",namespace!="kube-system", %s}[%s:%dm])) by (instance, %s) / avg(label_replace(sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm])) by (node, %s), "instance", "$1", "node", "(.*)")) by (instance, %s)`, env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterFilter(), durStr, minsPerResolution, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	queryActiveMins := fmt.Sprintf(`avg(node_total_hourly_cost{%s}) by (node, %s, provider_id)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-	queryIsSpot := fmt.Sprintf(`avg_over_time(kubecost_node_is_spot{%s}[%s:%dm])`, env.GetPromClusterFilter(), durStr, minsPerResolution)
-	queryLabels := fmt.Sprintf(`count_over_time(kube_node_labels{%s}[%s:%dm])`, env.GetPromClusterFilter(), durStr, minsPerResolution)
-
-	// Return errors if these fail
-	resChNodeCPUHourlyCost := requiredCtx.QueryAtTime(queryNodeCPUHourlyCost, t)
-	resChNodeCPUCoresCapacity := requiredCtx.QueryAtTime(queryNodeCPUCoresCapacity, t)
-	resChNodeCPUCoresAllocatable := requiredCtx.QueryAtTime(queryNodeCPUCoresAllocatable, t)
-	resChNodeRAMHourlyCost := requiredCtx.QueryAtTime(queryNodeRAMHourlyCost, t)
-	resChNodeRAMBytesCapacity := requiredCtx.QueryAtTime(queryNodeRAMBytesCapacity, t)
-	resChNodeRAMBytesAllocatable := requiredCtx.QueryAtTime(queryNodeRAMBytesAllocatable, t)
-	resChNodeGPUCount := requiredCtx.QueryAtTime(queryNodeGPUCount, t)
-	resChNodeGPUHourlyCost := requiredCtx.QueryAtTime(queryNodeGPUHourlyCost, t)
-	resChActiveMins := requiredCtx.QueryAtTime(queryActiveMins, t)
-	resChIsSpot := requiredCtx.QueryAtTime(queryIsSpot, t)
+	requiredGrp := source.NewQueryGroup()
+	optionalGrp := source.NewQueryGroup()
+
+	// Return errors if these fail
+	resChNodeCPUHourlyCost := requiredGrp.With(dataSource.QueryNodeCPUHourlyCost(start, end))
+	resChNodeCPUCoresCapacity := requiredGrp.With(dataSource.QueryNodeCPUCoresCapacity(start, end))
+	resChNodeCPUCoresAllocatable := requiredGrp.With(dataSource.QueryNodeCPUCoresAllocatable(start, end))
+	resChNodeRAMHourlyCost := requiredGrp.With(dataSource.QueryNodeRAMHourlyCost(start, end))
+	resChNodeRAMBytesCapacity := requiredGrp.With(dataSource.QueryNodeRAMBytesCapacity(start, end))
+	resChNodeRAMBytesAllocatable := requiredGrp.With(dataSource.QueryNodeRAMBytesAllocatable(start, end))
+	resChNodeGPUCount := requiredGrp.With(dataSource.QueryNodeGPUCount(start, end))
+	resChNodeGPUHourlyCost := requiredGrp.With(dataSource.QueryNodeGPUHourlyCost(start, end))
+	resChActiveMins := requiredGrp.With(dataSource.QueryNodeActiveMinutes(start, end))
+	resChIsSpot := requiredGrp.With(dataSource.QueryNodeIsSpot(start, end))
 
 	// Do not return errors if these fail, but log warnings
-	resChNodeCPUModeTotal := optionalCtx.QueryAtTime(queryNodeCPUModeTotal, t)
-	resChNodeRAMSystemPct := optionalCtx.QueryAtTime(queryNodeRAMSystemPct, t)
-	resChNodeRAMUserPct := optionalCtx.QueryAtTime(queryNodeRAMUserPct, t)
-	resChLabels := optionalCtx.QueryAtTime(queryLabels, t)
+	resChNodeCPUModeTotal := optionalGrp.With(dataSource.QueryNodeCPUModeTotal(start, end))
+	resChNodeRAMSystemPct := optionalGrp.With(dataSource.QueryNodeRAMSystemPercent(start, end))
+	resChNodeRAMUserPct := optionalGrp.With(dataSource.QueryNodeRAMUserPercent(start, end))
+	resChLabels := optionalGrp.With(dataSource.QueryNodeLabels(start, end))
 
 	resNodeCPUHourlyCost, _ := resChNodeCPUHourlyCost.Await()
 	resNodeCPUCoresCapacity, _ := resChNodeCPUCoresCapacity.Await()
@@ -695,17 +622,17 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	resActiveMins, _ := resChActiveMins.Await()
 	resLabels, _ := resChLabels.Await()
 
-	if optionalCtx.HasErrors() {
-		for _, err := range optionalCtx.Errors() {
+	if optionalGrp.HasErrors() {
+		for _, err := range optionalGrp.Errors() {
 			log.Warnf("ClusterNodes: %s", err)
 		}
 	}
-	if requiredCtx.HasErrors() {
-		for _, err := range requiredCtx.Errors() {
+	if requiredGrp.HasErrors() {
+		for _, err := range requiredGrp.Errors() {
 			log.Errorf("ClusterNodes: %s", err)
 		}
 
-		return nil, requiredCtx.ErrorCollection()
+		return nil, requiredGrp.Error()
 	}
 
 	activeDataMap := buildActiveDataMap(resActiveMins, resolution, opencost.NewClosedWindow(start, end))
@@ -797,51 +724,29 @@ type LoadBalancer struct {
 	Ip         string
 }
 
-func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[LoadBalancerIdentifier]*LoadBalancer, error) {
-
-	// Start from the time "end", querying backwards
-	t := end
-
-	// minsPerResolution determines accuracy and resource use for the following
-	// queries. Smaller values (higher resolution) result in better accuracy,
-	// but more expensive queries, and vice-a-versa.
+func ClusterLoadBalancers(dataSource source.OpenCostDataSource, start, end time.Time) (map[LoadBalancerIdentifier]*LoadBalancer, error) {
 	resolution := env.GetETLResolution()
-	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
-	var minsPerResolution int
-	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) == 0 {
-		minsPerResolution = 1
-		log.DedupedWarningf(3, "ClusterLoadBalancers(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
-	}
-
-	// Query for the duration between start and end
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		return nil, fmt.Errorf("illegal duration value for %s", opencost.NewClosedWindow(start, end))
-	}
 
-	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
+	grp := source.NewQueryGroup()
 
-	queryLBCost := fmt.Sprintf(`avg(avg_over_time(kubecost_load_balancer_cost{%s}[%s])) by (namespace, service_name, %s, ingress_ip)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryActiveMins := fmt.Sprintf(`avg(kubecost_load_balancer_cost{%s}) by (namespace, service_name, %s, ingress_ip)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-
-	resChLBCost := ctx.QueryAtTime(queryLBCost, t)
-	resChActiveMins := ctx.QueryAtTime(queryActiveMins, t)
+	resChLBCost := grp.With(dataSource.QueryLBCost(start, end))
+	resChActiveMins := grp.With(dataSource.QueryLBActiveMinutes(start, end))
 
 	resLBCost, _ := resChLBCost.Await()
 	resActiveMins, _ := resChActiveMins.Await()
 
-	if ctx.HasErrors() {
-		return nil, ctx.ErrorCollection()
+	if grp.HasErrors() {
+		return nil, grp.Error()
 	}
 
 	loadBalancerMap := make(map[LoadBalancerIdentifier]*LoadBalancer, len(resActiveMins))
 
 	for _, result := range resActiveMins {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
-		namespace, err := result.GetString("namespace")
+		namespace, err := result.GetNamespace()
 		if err != nil {
 			log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
 			continue
@@ -893,11 +798,11 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 	}
 
 	for _, result := range resLBCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
-		namespace, err := result.GetString("namespace")
+		namespace, err := result.GetNamespace()
 		if err != nil {
 			log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
 			continue
@@ -956,139 +861,41 @@ func privateIPCheck(ip string) bool {
 }
 
 // ComputeClusterCosts gives the cumulative and monthly-rate cluster costs over a window of time for all clusters.
-func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models.Provider, window, offset time.Duration, withBreakdown bool) (map[string]*ClusterCosts, error) {
+func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, provider models.Provider, window, offset time.Duration, withBreakdown bool) (map[string]*ClusterCosts, error) {
 	if window < 10*time.Minute {
 		return nil, fmt.Errorf("minimum window of 10m required; got %s", window)
 	}
 
 	// Compute number of minutes in the full interval, for use interpolating missed scrapes or scaling missing data
 	start, end := timeutil.ParseTimeRange(window, offset)
-
 	mins := end.Sub(start).Minutes()
 
-	// minsPerResolution determines accuracy and resource use for the following
-	// queries. Smaller values (higher resolution) result in better accuracy,
-	// but more expensive queries, and vice-a-versa.
-	resolution := env.GetETLResolution()
-	//Ensuring if ETL_RESOLUTION_SECONDS is less than 60s default it to 1m
-	var minsPerResolution int
-	if minsPerResolution = int(resolution.Minutes()); int(resolution.Minutes()) < 1 {
-		minsPerResolution = 1
-		log.DedupedWarningf(3, "ComputeClusterCosts(): Configured ETL resolution (%d seconds) is below the 60 seconds threshold. Overriding with 1 minute.", int(resolution.Seconds()))
-	}
+	providerName := ""
 
-	windowStr := timeutil.DurationString(window)
-
-	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
-	// value, converts it to a cumulative value; i.e.
-	// [$/hr] * [min/res]*[hr/min] = [$/res]
-	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
-
-	const fmtQueryDataCount = `
-		count_over_time(sum(kube_node_status_capacity_cpu_cores{%s}) by (%s)[%s:%dm]%s) * %d
-	`
-
-	const fmtQueryTotalGPU = `
-		sum(
-			sum_over_time(node_gpu_hourly_cost{%s}[%s:%dm]%s) * %f
-		) by (%s)
-	`
-
-	const fmtQueryTotalCPU = `
-		sum(
-			sum_over_time(avg(kube_node_status_capacity_cpu_cores{%s}) by (node, %s)[%s:%dm]%s) *
-			avg(avg_over_time(node_cpu_hourly_cost{%s}[%s:%dm]%s)) by (node, %s) * %f
-		) by (%s)
-	`
-
-	const fmtQueryTotalRAM = `
-		sum(
-			sum_over_time(avg(kube_node_status_capacity_memory_bytes{%s}) by (node, %s)[%s:%dm]%s) / 1024 / 1024 / 1024 *
-			avg(avg_over_time(node_ram_hourly_cost{%s}[%s:%dm]%s)) by (node, %s) * %f
-		) by (%s)
-	`
-
-	const fmtQueryTotalStorage = `
-		sum(
-			sum_over_time(avg(kube_persistentvolume_capacity_bytes{%s}) by (persistentvolume, %s)[%s:%dm]%s) / 1024 / 1024 / 1024 *
-			avg(avg_over_time(pv_hourly_cost{%s}[%s:%dm]%s)) by (persistentvolume, %s) * %f
-		) by (%s)
-	`
-
-	const fmtQueryCPUModePct = `
-		sum(rate(node_cpu_seconds_total{%s}[%s]%s)) by (%s, mode) / ignoring(mode)
-		group_left sum(rate(node_cpu_seconds_total{%s}[%s]%s)) by (%s)
-	`
-
-	const fmtQueryRAMSystemPct = `
-		sum(sum_over_time(container_memory_usage_bytes{container_name!="",namespace="kube-system", %s}[%s:%dm]%s)) by (%s)
-		/ sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm]%s)) by (%s)
-	`
-
-	const fmtQueryRAMUserPct = `
-		sum(sum_over_time(kubecost_cluster_memory_working_set_bytes{%s}[%s:%dm]%s)) by (%s)
-		/ sum(sum_over_time(kube_node_status_capacity_memory_bytes{%s}[%s:%dm]%s)) by (%s)
-	`
-
-	// TODO niko/clustercost metric "kubelet_volume_stats_used_bytes" was deprecated in 1.12, then seems to have come back in 1.17
-	// const fmtQueryPVStorageUsePct = `(sum(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, storageclass,namespace) + on (persistentvolumeclaim,namespace)
-	// group_right(storageclass) sum(kubelet_volume_stats_used_bytes) by (persistentvolumeclaim,namespace))`
-
-	queryUsedLocalStorage := provider.GetLocalStorageQuery(window, offset, false, true)
-
-	queryTotalLocalStorage := provider.GetLocalStorageQuery(window, offset, false, false)
-	if queryTotalLocalStorage != "" {
-		queryTotalLocalStorage = fmt.Sprintf(" + %s", queryTotalLocalStorage)
+	if clusterInfo, err := provider.ClusterInfo(); err == nil {
+		providerName = clusterInfo["provider"]
 	}
 
-	fmtOffset := timeutil.DurationToPromOffsetString(offset)
+	grp := source.NewQueryGroup()
 
-	queryDataCount := fmt.Sprintf(fmtQueryDataCount, env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, fmtOffset, minsPerResolution)
-	queryTotalGPU := fmt.Sprintf(fmtQueryTotalGPU, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, hourlyToCumulative, env.GetPromClusterLabel())
-	queryTotalCPU := fmt.Sprintf(fmtQueryTotalCPU, env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel())
-	queryTotalRAM := fmt.Sprintf(fmtQueryTotalRAM, env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel())
-	queryTotalStorage := fmt.Sprintf(fmtQueryTotalStorage, env.GetPromClusterFilter(), env.GetPromClusterLabel(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), hourlyToCumulative, env.GetPromClusterLabel())
+	resChs := []*source.QueryGroupAsyncResult{}
 
-	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
+	queryDataCount := grp.With(dataSource.QueryDataCount(start, end))
+	queryTotalGPU := grp.With(dataSource.QueryTotalGPU(start, end))
+	queryTotalCPU := grp.With(dataSource.QueryTotalCPU(start, end))
+	queryTotalRAM := grp.With(dataSource.QueryTotalRAM(start, end))
+	queryTotalStorage := grp.With(dataSource.QueryTotalStorage(start, end))
+	queryTotalLocalStorage := grp.With(dataSource.QueryLocalStorageBytesByProvider(providerName, start, end))
 
-	resChs := ctx.QueryAll(
-		queryDataCount,
-		queryTotalGPU,
-		queryTotalCPU,
-		queryTotalRAM,
-		queryTotalStorage,
-	)
-
-	// Only submit the local storage query if it is valid. Otherwise Prometheus
-	// will return errors. Always append something to resChs, regardless, to
-	// maintain indexing.
-	if queryTotalLocalStorage != "" {
-		resChs = append(resChs, ctx.Query(queryTotalLocalStorage))
-	} else {
-		resChs = append(resChs, nil)
-	}
+	resChs = append(resChs, queryDataCount, queryTotalGPU, queryTotalCPU, queryTotalRAM, queryTotalStorage, queryTotalLocalStorage)
 
 	if withBreakdown {
-		queryCPUModePct := fmt.Sprintf(fmtQueryCPUModePct, env.GetPromClusterFilter(), windowStr, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), windowStr, fmtOffset, env.GetPromClusterLabel())
-		queryRAMSystemPct := fmt.Sprintf(fmtQueryRAMSystemPct, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel())
-		queryRAMUserPct := fmt.Sprintf(fmtQueryRAMUserPct, env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), windowStr, minsPerResolution, fmtOffset, env.GetPromClusterLabel())
-
-		bdResChs := ctx.QueryAll(
-			queryCPUModePct,
-			queryRAMSystemPct,
-			queryRAMUserPct,
-		)
-
-		// Only submit the local storage query if it is valid. Otherwise Prometheus
-		// will return errors. Always append something to resChs, regardless, to
-		// maintain indexing.
-		if queryUsedLocalStorage != "" {
-			bdResChs = append(bdResChs, ctx.Query(queryUsedLocalStorage))
-		} else {
-			bdResChs = append(bdResChs, nil)
-		}
+		queryCPUModePct := grp.With(dataSource.QueryNodeCPUModePercent(start, end))
+		queryRAMSystemPct := grp.With(dataSource.QueryNodeRAMSystemPercent(start, end))
+		queryRAMUserPct := grp.With(dataSource.QueryNodeRAMUserPercent(start, end))
+		queryUsedLocalStorage := grp.With(dataSource.QueryLocalStorageUsedByProvider(providerName, start, end))
 
-		resChs = append(resChs, bdResChs...)
+		resChs = append(resChs, queryCPUModePct, queryRAMSystemPct, queryRAMUserPct, queryUsedLocalStorage)
 	}
 
 	resDataCount, _ := resChs[0].Await()
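
For reference, the slice layout that the resChs[...] index accesses in this function rely on, spelled out as constants. These are illustrative only and not part of the change; the code itself keeps using literal indices.

// Illustrative only; the function uses literal indices. Entries 0-5 are always
// appended, 6-9 only when withBreakdown is true.
const (
	idxDataCount         = 0
	idxTotalGPU          = 1
	idxTotalCPU          = 2
	idxTotalRAM          = 3
	idxTotalStorage      = 4
	idxTotalLocalStorage = 5
	idxCPUModePct        = 6
	idxRAMSystemPct      = 7
	idxRAMUserPct        = 8
	idxUsedLocalStorage  = 9
)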
@@ -1096,15 +903,16 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 	resTotalCPU, _ := resChs[2].Await()
 	resTotalRAM, _ := resChs[3].Await()
 	resTotalStorage, _ := resChs[4].Await()
-	if ctx.HasErrors() {
-		return nil, ctx.ErrorCollection()
+
+	if grp.HasErrors() {
+		return nil, grp.Error()
 	}
 
 	defaultClusterID := env.GetClusterID()
 
 	dataMinsByCluster := map[string]float64{}
 	for _, result := range resDataCount {
-		clusterID, _ := result.GetString(env.GetPromClusterLabel())
+		clusterID, _ := result.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -1136,9 +944,9 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 
 	// Helper function to iterate over Prom query results, parsing the raw values into
 	// the intermediate costData structure.
-	setCostsFromResults := func(costData map[string]map[string]float64, results []*prom.QueryResult, name string, discount float64, customDiscount float64) {
+	setCostsFromResults := func(costData map[string]map[string]float64, results []*source.QueryResult, name string, discount float64, customDiscount float64) {
 		for _, result := range results {
-			clusterID, _ := result.GetString(env.GetPromClusterLabel())
+			clusterID, _ := result.GetCluster()
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -1157,11 +965,13 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 	// Apply only custom discount to GPU and storage
 	setCostsFromResults(costData, resTotalGPU, "gpu", 0.0, customDiscount)
 	setCostsFromResults(costData, resTotalStorage, "storage", 0.0, customDiscount)
-	if queryTotalLocalStorage != "" {
-		resTotalLocalStorage, err := resChs[5].Await()
-		if err != nil {
-			return nil, err
-		}
+
+	resTotalLocalStorage, err := resChs[5].Await()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(resTotalLocalStorage) > 0 {
 		setCostsFromResults(costData, resTotalLocalStorage, "localstorage", 0.0, customDiscount)
 	}
 
@@ -1172,12 +982,13 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 		resCPUModePct, _ := resChs[6].Await()
 		resRAMSystemPct, _ := resChs[7].Await()
 		resRAMUserPct, _ := resChs[8].Await()
-		if ctx.HasErrors() {
-			return nil, ctx.ErrorCollection()
+
+		if grp.HasErrors() {
+			return nil, grp.Error()
 		}
 
 		for _, result := range resCPUModePct {
-			clusterID, _ := result.GetString(env.GetPromClusterLabel())
+			clusterID, _ := result.GetCluster()
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -1205,7 +1016,7 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 		}
 
 		for _, result := range resRAMSystemPct {
-			clusterID, _ := result.GetString(env.GetPromClusterLabel())
+			clusterID, _ := result.GetCluster()
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -1216,7 +1027,7 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 			ramBD.System += result.Values[0].Value
 		}
 		for _, result := range resRAMUserPct {
-			clusterID, _ := result.GetString(env.GetPromClusterLabel())
+			clusterID, _ := result.GetCluster()
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -1234,26 +1045,25 @@ func (a *Accesses) ComputeClusterCosts(client prometheus.Client, provider models
 			ramBD.Idle = remaining
 		}
 
-		if queryUsedLocalStorage != "" {
-			resUsedLocalStorage, err := resChs[9].Await()
-			if err != nil {
-				return nil, err
-			}
-			for _, result := range resUsedLocalStorage {
-				clusterID, _ := result.GetString(env.GetPromClusterLabel())
-				if clusterID == "" {
-					clusterID = defaultClusterID
-				}
-				pvUsedCostMap[clusterID] += result.Values[0].Value
+		resUsedLocalStorage, err := resChs[9].Await()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, result := range resUsedLocalStorage {
+			clusterID, _ := result.GetCluster()
+			if clusterID == "" {
+				clusterID = defaultClusterID
 			}
+			pvUsedCostMap[clusterID] += result.Values[0].Value
 		}
 	}
 
-	if ctx.HasErrors() {
-		for _, err := range ctx.Errors() {
+	if grp.HasErrors() {
+		for _, err := range grp.Errors() {
 			log.Errorf("ComputeClusterCosts: %s", err)
 		}
-		return nil, ctx.ErrorCollection()
+		return nil, grp.Error()
 	}
 
 	// Convert intermediate structure to Costs instances
@@ -1295,7 +1105,7 @@ type Totals struct {
 	StorageCost [][]string `json:"storageCost"`
 }
 
-func resultToTotals(qrs []*prom.QueryResult) ([][]string, error) {
+func resultToTotals(qrs []*source.QueryResult) ([][]string, error) {
 	if len(qrs) == 0 {
 		return [][]string{}, fmt.Errorf("Not enough data available in the selected time range")
 	}
@@ -1315,63 +1125,27 @@ func resultToTotals(qrs []*prom.QueryResult) ([][]string, error) {
 }
 
 // ClusterCostsOverTime gives the full cluster costs over time
-func ClusterCostsOverTime(cli prometheus.Client, provider models.Provider, startString, endString string, window, offset time.Duration) (*Totals, error) {
-	localStorageQuery := provider.GetLocalStorageQuery(window, offset, true, false)
-	if localStorageQuery != "" {
-		localStorageQuery = fmt.Sprintf("+ %s", localStorageQuery)
-	}
-
-	layout := "2006-01-02T15:04:05.000Z"
-
-	start, err := time.Parse(layout, startString)
-	if err != nil {
-		log.Errorf("Error parsing time %s. Error: %s", startString, err.Error())
-		return nil, err
-	}
-	end, err := time.Parse(layout, endString)
-	if err != nil {
-		log.Errorf("Error parsing time %s. Error: %s", endString, err.Error())
-		return nil, err
-	}
-	fmtWindow := timeutil.DurationString(window)
+func ClusterCostsOverTime(dataSource source.OpenCostDataSource, provider models.Provider, start, end time.Time, window, offset time.Duration) (*Totals, error) {
+	providerName := ""
 
-	if fmtWindow == "" {
-		err := fmt.Errorf("window value invalid or missing")
-		log.Errorf("Error parsing time %v. Error: %s", window, err.Error())
-		return nil, err
+	if clusterInfo, err := provider.ClusterInfo(); err == nil {
+		providerName = clusterInfo["provider"]
 	}
 
-	fmtOffset := timeutil.DurationToPromOffsetString(offset)
-
-	qCores := fmt.Sprintf(queryClusterCores, env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	qRAM := fmt.Sprintf(queryClusterRAM, env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	qStorage := fmt.Sprintf(queryStorage, env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterFilter(), fmtWindow, fmtOffset, env.GetPromClusterLabel(), env.GetPromClusterLabel(), localStorageQuery)
-	qTotal := fmt.Sprintf(queryTotal, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), localStorageQuery)
-
-	ctx := prom.NewNamedContext(cli, prom.ClusterContextName)
-	resChClusterCores := ctx.QueryRange(qCores, start, end, window)
-	resChClusterRAM := ctx.QueryRange(qRAM, start, end, window)
-	resChStorage := ctx.QueryRange(qStorage, start, end, window)
-	resChTotal := ctx.QueryRange(qTotal, start, end, window)
-
-	resultClusterCores, err := resChClusterCores.Await()
-	if err != nil {
-		return nil, err
-	}
+	grp := source.NewQueryGroup()
 
-	resultClusterRAM, err := resChClusterRAM.Await()
-	if err != nil {
-		return nil, err
-	}
+	qCores := grp.With(dataSource.QueryClusterCores(start, end, window))
+	qRAM := grp.With(dataSource.QueryClusterRAM(start, end, window))
+	qStorage := grp.With(dataSource.QueryClusterStorageByProvider(providerName, start, end, window))
+	qTotal := grp.With(dataSource.QueryClusterTotalByProvider(providerName, start, end, window))
 
-	resultStorage, err := resChStorage.Await()
-	if err != nil {
-		return nil, err
-	}
+	resultClusterCores, _ := qCores.Await()
+	resultClusterRAM, _ := qRAM.Await()
+	resultStorage, _ := qStorage.Await()
+	resultTotal, _ := qTotal.Await()
 
-	resultTotal, err := resChTotal.Await()
-	if err != nil {
-		return nil, err
+	if grp.HasErrors() {
+		return nil, grp.Error()
 	}
 
 	coreTotal, err := resultToTotals(resultClusterCores)
@@ -1396,12 +1170,9 @@ func ClusterCostsOverTime(cli prometheus.Client, provider models.Provider, start
 		// If clusterTotal query failed, it's likely because there are no PVs, which
 		// causes the qTotal query to return no data. Instead, query only node costs.
 		// If that fails, return an error because something is actually wrong.
-		qNodes := fmt.Sprintf(queryNodes, env.GetPromClusterFilter(), env.GetPromClusterLabel(), localStorageQuery)
+		qNodes := grp.With(dataSource.QueryClusterNodesByProvider(providerName, start, end, window))
 
-		resultNodes, warnings, err := ctx.QueryRangeSync(qNodes, start, end, window)
-		for _, warning := range warnings {
-			log.Warnf(warning)
-		}
+		resultNodes, err := qNodes.Await()
 		if err != nil {
 			return nil, err
 		}
@@ -1421,9 +1192,9 @@ func ClusterCostsOverTime(cli prometheus.Client, provider models.Provider, start
 	}, nil
 }
 
-func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo []*prom.QueryResult, cp models.Provider, window opencost.Window) {
+func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo []*source.QueryResult, cp models.Provider, window opencost.Window) {
 	for _, result := range resActiveMins {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -1456,7 +1227,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVSize {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -1488,7 +1259,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -1525,14 +1296,14 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		}
 
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
-		providerID, _ := result.GetString("provider_id") // just put the providerID set up here, it's the simplest query.
+		providerID, _ := result.GetProviderID() // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {
 			diskMap[key].ProviderID = provider.ParsePVID(providerID)
 		}
 	}
 
 	for _, result := range resPVUsedAvg {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -1542,7 +1313,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			log.Debugf("ClusterDisks: pv usage data missing persistentvolumeclaim")
 			continue
 		}
-		claimNamespace, err := result.GetString("namespace")
+		claimNamespace, err := result.GetNamespace()
 		if err != nil {
 			log.Debugf("ClusterDisks: pv usage data missing namespace")
 			continue
@@ -1552,7 +1323,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		for _, thatRes := range resPVCInfo {
 
-			thatCluster, err := thatRes.GetString(env.GetPromClusterLabel())
+			thatCluster, err := thatRes.GetCluster()
 			if err != nil {
 				thatCluster = env.GetClusterID()
 			}
@@ -1569,7 +1340,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				continue
 			}
 
-			thatClaimNamespace, err := thatRes.GetString("namespace")
+			thatClaimNamespace, err := thatRes.GetNamespace()
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
 				continue
@@ -1595,7 +1366,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVUsedMax {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -1606,7 +1377,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			continue
 		}
 
-		claimNamespace, err := result.GetString("namespace")
+		claimNamespace, err := result.GetNamespace()
 		if err != nil {
 			log.Debugf("ClusterDisks: pv usage data missing namespace")
 			continue
@@ -1616,7 +1387,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		for _, thatRes := range resPVCInfo {
 
-			thatCluster, err := thatRes.GetString(env.GetPromClusterLabel())
+			thatCluster, err := thatRes.GetCluster()
 			if err != nil {
 				thatCluster = env.GetClusterID()
 			}
@@ -1633,7 +1404,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				continue
 			}
 
-			thatClaimNamespace, err := thatRes.GetString("namespace")
+			thatClaimNamespace, err := thatRes.GetNamespace()
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
 				continue
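
Throughout this file the named Prometheus context and its error collection are replaced with source.QueryGroup: queries are registered through With, awaited individually, and errors are checked afterwards via HasErrors, Errors, and Error. The sketch below shows a minimal QueryGroup with exactly that surface, under the assumption that With wraps whatever awaitable the data source hands back and records errors as results are awaited; the actual querygroup implementation in core/pkg/source may differ.

package source

import (
	"errors"
	"sync"
)

// asyncResults is a stand-in for whatever a DataSource query method returns.
type asyncResults interface {
	Await() ([]*QueryResult, error)
}

// QueryGroup collects errors from the queries registered with it.
type QueryGroup struct {
	mu   sync.Mutex
	errs []error
}

func NewQueryGroup() *QueryGroup {
	return &QueryGroup{}
}

// QueryGroupAsyncResult forwards Await to the underlying query and records
// any error on the owning group.
type QueryGroupAsyncResult struct {
	group *QueryGroup
	inner asyncResults
}

func (g *QueryGroup) With(inner asyncResults) *QueryGroupAsyncResult {
	return &QueryGroupAsyncResult{group: g, inner: inner}
}

func (r *QueryGroupAsyncResult) Await() ([]*QueryResult, error) {
	results, err := r.inner.Await()
	if err != nil {
		r.group.mu.Lock()
		r.group.errs = append(r.group.errs, err)
		r.group.mu.Unlock()
	}
	return results, err
}

func (g *QueryGroup) HasErrors() bool { return len(g.Errors()) > 0 }

func (g *QueryGroup) Errors() []error {
	g.mu.Lock()
	defer g.mu.Unlock()
	return append([]error(nil), g.errs...)
}

// Error flattens the collected errors into a single error, or nil if none.
func (g *QueryGroup) Error() error {
	return errors.Join(g.Errors()...)
}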

+ 42 - 42
pkg/costmodel/cluster_helpers.go

@@ -9,8 +9,8 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
 )
 
 // mergeTypeMaps takes two maps of (cluster name, node name) -> node type
@@ -31,7 +31,7 @@ func mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2 map[nodeIdentifi
 }
 
 func buildCPUCostMap(
-	resNodeCPUCost []*prom.QueryResult,
+	resNodeCPUCost []*source.QueryResult,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
 ) (
@@ -49,19 +49,19 @@ func buildCPUCostMap(
 	}
 
 	for _, result := range resNodeCPUCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: CPU cost data missing node")
 			continue
 		}
 
 		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -105,7 +105,7 @@ func buildCPUCostMap(
 }
 
 func buildRAMCostMap(
-	resNodeRAMCost []*prom.QueryResult,
+	resNodeRAMCost []*source.QueryResult,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
 ) (
@@ -123,19 +123,19 @@ func buildRAMCostMap(
 	}
 
 	for _, result := range resNodeRAMCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: RAM cost data missing node")
 			continue
 		}
 
 		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -179,7 +179,7 @@ func buildRAMCostMap(
 }
 
 func buildGPUCostMap(
-	resNodeGPUCost []*prom.QueryResult,
+	resNodeGPUCost []*source.QueryResult,
 	gpuCountMap map[NodeIdentifier]float64,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
@@ -198,19 +198,19 @@ func buildGPUCostMap(
 	}
 
 	for _, result := range resNodeGPUCost {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: GPU cost data missing node")
 			continue
 		}
 
 		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -260,25 +260,25 @@ func buildGPUCostMap(
 }
 
 func buildGPUCountMap(
-	resNodeGPUCount []*prom.QueryResult,
+	resNodeGPUCount []*source.QueryResult,
 ) map[NodeIdentifier]float64 {
 
 	gpuCountMap := make(map[NodeIdentifier]float64)
 
 	for _, result := range resNodeGPUCount {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: GPU count data missing node")
 			continue
 		}
 
 		gpuCount := result.Values[0].Value
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -292,18 +292,18 @@ func buildGPUCountMap(
 }
 
 func buildCPUCoresMap(
-	resNodeCPUCores []*prom.QueryResult,
+	resNodeCPUCores []*source.QueryResult,
 ) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeCPUCores {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: CPU cores data missing node")
 			continue
@@ -321,17 +321,17 @@ func buildCPUCoresMap(
 	return m
 }
 
-func buildRAMBytesMap(resNodeRAMBytes []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildRAMBytesMap(resNodeRAMBytes []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMBytes {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: RAM bytes data missing node")
 			continue
@@ -350,7 +350,7 @@ func buildRAMBytesMap(resNodeRAMBytes []*prom.QueryResult) map[nodeIdentifierNoP
 }
 
 // Mapping of cluster/node=cpu for computing resource efficiency
-func buildCPUBreakdownMap(resNodeCPUModeTotal []*prom.QueryResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
+func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.QueryResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
 
 	cpuBreakdownMap := make(map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown)
 
@@ -362,7 +362,7 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*prom.QueryResult) map[nodeIdent
 	// Build intermediate structures for CPU usage by (cluster, node) and by
 	// (cluster, node, mode) for computing resouce efficiency
 	for _, result := range resNodeCPUModeTotal {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
@@ -464,17 +464,17 @@ func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoPro
 	return m
 }
 
-func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildRAMUserPctMap(resNodeRAMUserPct []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMUserPct {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterNodes: RAM user percent missing node")
 			continue
@@ -493,17 +493,17 @@ func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifie
 	return m
 }
 
-func buildRAMSystemPctMap(resNodeRAMSystemPct []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMSystemPct {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("instance")
+		name, err := result.GetInstance()
 		if err != nil {
 			log.Warnf("ClusterNodes: RAM system percent missing node")
 			continue
@@ -528,23 +528,23 @@ type activeData struct {
 	minutes float64
 }
 
-func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Duration, window opencost.Window) map[NodeIdentifier]activeData {
+func buildActiveDataMap(resActiveMins []*source.QueryResult, resolution time.Duration, window opencost.Window) map[NodeIdentifier]activeData {
 
 	m := make(map[NodeIdentifier]activeData)
 
 	for _, result := range resActiveMins {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("node")
+		name, err := result.GetNode()
 		if err != nil {
 			log.Warnf("ClusterNodes: active mins missing node")
 			continue
 		}
 
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -573,13 +573,13 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 // Determine preemptibility with node labels
 // node id -> is preemptible?
 func buildPreemptibleMap(
-	resIsSpot []*prom.QueryResult,
+	resIsSpot []*source.QueryResult,
 ) map[NodeIdentifier]bool {
 
 	m := make(map[NodeIdentifier]bool)
 
 	for _, result := range resIsSpot {
-		nodeName, err := result.GetString("node")
+		nodeName, err := result.GetNode()
 		if err != nil {
 			continue
 		}
@@ -587,12 +587,12 @@ func buildPreemptibleMap(
 		// GCP preemptible label
 		pre := result.Values[0].Value
 
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
 
-		providerID, _ := result.GetString("provider_id")
+		providerID, _ := result.GetProviderID()
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -615,18 +615,18 @@ func buildPreemptibleMap(
 }
 
 func buildLabelsMap(
-	resLabels []*prom.QueryResult,
+	resLabels []*source.QueryResult,
 ) map[nodeIdentifierNoProviderID]map[string]string {
 
 	m := make(map[nodeIdentifierNoProviderID]map[string]string)
 
 	// Copy labels into node
 	for _, result := range resLabels {
-		cluster, err := result.GetString(env.GetPromClusterLabel())
+		cluster, err := result.GetCluster()
 		if err != nil {
 			cluster = env.GetClusterID()
 		}
-		node, err := result.GetString("node")
+		node, err := result.GetNode()
 		if err != nil {
 			log.DedupedWarningf(5, "ClusterNodes: label data missing node")
 			continue
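
Every map builder in this file repeats the same fallback: read the cluster with GetCluster and substitute env.GetClusterID() when the label is missing. The helper below just makes that convention explicit; it is not part of this change, and its placement is hypothetical (it assumes a spot where both source.QueryResult and pkg/env are importable).

// Not part of this change: a helper spelling out the cluster fallback that the
// loops above repeat. Hypothetical placement; assumes pkg/env is importable.
func clusterOrDefault(res *source.QueryResult) string {
	cluster, err := res.GetCluster()
	if err != nil {
		return env.GetClusterID()
	}
	return cluster
}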

+ 15 - 15
pkg/costmodel/cluster_helpers_test.go

@@ -7,10 +7,10 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
-	"github.com/opencost/opencost/pkg/prom"
 
 	"github.com/davecgh/go-spew/spew"
 )
@@ -731,13 +731,13 @@ func TestBuildNodeMap(t *testing.T) {
 func TestBuildGPUCostMap(t *testing.T) {
 	cases := []struct {
 		name       string
-		promResult []*prom.QueryResult
+		promResult []*source.QueryResult
 		countMap   map[NodeIdentifier]float64
 		expected   map[NodeIdentifier]float64
 	}{
 		{
 			name: "All Zeros",
-			promResult: []*prom.QueryResult{
+			promResult: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id":    "cluster1",
@@ -770,7 +770,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 		},
 		{
 			name: "Zero Node Count",
-			promResult: []*prom.QueryResult{
+			promResult: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id":    "cluster1",
@@ -803,7 +803,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 		},
 		{
 			name: "Missing Node Count",
-			promResult: []*prom.QueryResult{
+			promResult: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id":    "cluster1",
@@ -830,7 +830,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 		},
 		{
 			name: "missing cost data",
-			promResult: []*prom.QueryResult{
+			promResult: []*source.QueryResult{
 				{},
 			},
 			countMap: map[NodeIdentifier]float64{
@@ -844,7 +844,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 		},
 		{
 			name: "All values present",
-			promResult: []*prom.QueryResult{
+			promResult: []*source.QueryResult{
 				{
 					Metric: map[string]interface{}{
 						"cluster_id":    "cluster1",
@@ -899,7 +899,7 @@ func TestAssetCustompricing(t *testing.T) {
 
 	startTimestamp := float64(windowStart.Unix())
 
-	nodePromResult := []*prom.QueryResult{
+	nodePromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":    "cluster1",
@@ -916,7 +916,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvCostPromResult := []*prom.QueryResult{
+	pvCostPromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":       "cluster1",
@@ -932,7 +932,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvSizePromResult := []*prom.QueryResult{
+	pvSizePromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":       "cluster1",
@@ -948,7 +948,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvMinsPromResult := []*prom.QueryResult{
+	pvMinsPromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":       "cluster1",
@@ -968,7 +968,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvAvgUsagePromResult := []*prom.QueryResult{
+	pvAvgUsagePromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":            "cluster1",
@@ -988,7 +988,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvMaxUsagePromResult := []*prom.QueryResult{
+	pvMaxUsagePromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":            "cluster1",
@@ -1008,7 +1008,7 @@ func TestAssetCustompricing(t *testing.T) {
 		},
 	}
 
-	pvInfoPromResult := []*prom.QueryResult{
+	pvInfoPromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":            "cluster1",
@@ -1122,7 +1122,7 @@ func TestBuildLabelsMap(t *testing.T) {
 
 	startTimestamp := float64(windowStart.Unix())
 
-	nodePromResult := []*prom.QueryResult{
+	nodePromResult := []*source.QueryResult{
 		{
 			Metric: map[string]interface{}{
 				"cluster_id":             "cluster1",

+ 2 - 1
pkg/costmodel/clusters/clustermap.go

@@ -11,6 +11,7 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/clusters"
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/retry"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/thanos"
@@ -91,7 +92,7 @@ func (pcm *PrometheusClusterMap) loadClusters() (map[string]*clusters.ClusterInf
 	// Retry on failure
 	result, err := retry.Retry(context.Background(), tryQuery, uint(LoadRetries), LoadRetryDelay)
 
-	qr, ok := result.([]*prom.QueryResult)
+	qr, ok := result.([]*source.QueryResult)
 	if !ok || err != nil {
 		return nil, err
 	}

+ 19 - 36
pkg/costmodel/containerkeys.go

@@ -5,8 +5,8 @@ import (
 	"strings"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/pkg/clustercache"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 var (
@@ -155,51 +155,34 @@ func NewContainerMetricsFromPod(pod *clustercache.Pod, clusterID string) ([]*Con
 	return cs, nil
 }
 
-// NewContainerMetricFromPrometheus accepts the metrics map from a QueryResult and returns a new ContainerMetric
+// NewContainerMetricFromResult accepts a QueryResult and returns a new ContainerMetric
 // instance
-func NewContainerMetricFromPrometheus(metrics map[string]interface{}, defaultClusterID string) (*ContainerMetric, error) {
-	// TODO: Can we use *prom.QueryResult.GetString() here?
-	cName, ok := metrics["container_name"]
-	if !ok {
+func NewContainerMetricFromResult(result *source.QueryResult, defaultClusterID string) (*ContainerMetric, error) {
+	containerName, err := result.GetContainer()
+	if err != nil {
 		return nil, NoContainerErr
 	}
-	containerName, ok := cName.(string)
-	if !ok {
-		return nil, NoContainerNameErr
-	}
-	pName, ok := metrics["pod_name"]
-	if !ok {
-		return nil, NoPodErr
-	}
-	podName, ok := pName.(string)
-	if !ok {
+
+	podName, err := result.GetPod()
+	if err != nil {
 		return nil, NoPodNameErr
 	}
-	ns, ok := metrics["namespace"]
-	if !ok {
-		return nil, NoNamespaceErr
-	}
-	namespace, ok := ns.(string)
-	if !ok {
+
+	namespace, err := result.GetNamespace()
+	if err != nil {
 		return nil, NoNamespaceNameErr
 	}
-	node, ok := metrics["node"]
-	if !ok {
+
+	nodeName, err := result.GetNode()
+	if err != nil {
 		log.Debugf("Prometheus vector does not have node name")
-		node = ""
-	}
-	nodeName, ok := node.(string)
-	if !ok {
-		return nil, NoNodeNameErr
+		nodeName = ""
 	}
-	cid, ok := metrics[env.GetPromClusterLabel()]
-	if !ok {
+
+	clusterID, err := result.GetCluster()
+	if err != nil {
 		log.Debugf("Prometheus vector does not have cluster id")
-		cid = defaultClusterID
-	}
-	clusterID, ok := cid.(string)
-	if !ok {
-		return nil, NoClusterIDErr
+		clusterID = defaultClusterID
 	}
 
 	return &ContainerMetric{

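With the constructor now taking a *source.QueryResult directly, callers can fold a result set into container metrics without touching the raw metric map. A hedged sketch follows, assuming only the NewContainerMetricFromResult signature from this diff; the batch helper is hypothetical.

```go
// Illustrative only: batch conversion using the reworked constructor above.
func containerMetricsFromResults(results []*source.QueryResult, defaultClusterID string) []*ContainerMetric {
	metrics := make([]*ContainerMetric, 0, len(results))
	for _, res := range results {
		metric, err := NewContainerMetricFromResult(res, defaultClusterID)
		if err != nil {
			// results missing container, pod, or namespace labels are skipped
			continue
		}
		metrics = append(metrics, metric)
	}
	return metrics
}
```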
+ 181 - 152
pkg/costmodel/costmodel.go

@@ -12,14 +12,12 @@ import (
 	"github.com/opencost/opencost/core/pkg/clusters"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
-	prometheus "github.com/prometheus/client_golang/api"
-	prometheusClient "github.com/prometheus/client_golang/api"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -33,17 +31,6 @@ const (
 	profileThreshold = 1000 * 1000 * 1000 // 1s (in ns)
 
 	unmountedPVsContainer = "unmounted-pvs"
-
-	apiPrefix         = "/api/v1"
-	epAlertManagers   = apiPrefix + "/alertmanagers"
-	epLabelValues     = apiPrefix + "/label/:name/values"
-	epSeries          = apiPrefix + "/series"
-	epTargets         = apiPrefix + "/targets"
-	epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
-	epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
-	epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
-	epConfig          = apiPrefix + "/status/config"
-	epFlags           = apiPrefix + "/status/flags"
 )
 
 // isCron matches a CronJob name and captures the non-timestamp name
@@ -54,28 +41,35 @@ const (
 var isCron = regexp.MustCompile(`^(.+)-(\d{10}|\d{8})$`)
 
 type CostModel struct {
-	Cache                      clustercache.ClusterCache
-	ClusterMap                 clusters.ClusterMap
-	MaxPrometheusQueryDuration time.Duration
-	RequestGroup               *singleflight.Group
-	ScrapeInterval             time.Duration
-	PrometheusClient           prometheus.Client
-	Provider                   costAnalyzerCloud.Provider
-	pricingMetadata            *costAnalyzerCloud.PricingMatchMetadata
+	Cache           clustercache.ClusterCache
+	ClusterMap      clusters.ClusterMap
+	BatchDuration   time.Duration
+	RequestGroup    *singleflight.Group
+	RefreshInterval time.Duration
+	DataSource      source.OpenCostDataSource
+	Provider        costAnalyzerCloud.Provider
+	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
 }
 
-func NewCostModel(client prometheus.Client, provider costAnalyzerCloud.Provider, cache clustercache.ClusterCache, clusterMap clusters.ClusterMap, scrapeInterval time.Duration) *CostModel {
+func NewCostModel(
+	dataSource source.OpenCostDataSource,
+	provider costAnalyzerCloud.Provider,
+	cache clustercache.ClusterCache,
+	clusterMap clusters.ClusterMap,
+	batchDuration time.Duration,
+	refreshInterval time.Duration,
+) *CostModel {
 	// request grouping to prevent over-requesting the same data prior to caching
 	requestGroup := new(singleflight.Group)
 
 	return &CostModel{
-		Cache:                      cache,
-		ClusterMap:                 clusterMap,
-		MaxPrometheusQueryDuration: env.GetETLMaxPrometheusQueryDuration(),
-		PrometheusClient:           client,
-		Provider:                   provider,
-		RequestGroup:               requestGroup,
-		ScrapeInterval:             scrapeInterval,
+		Cache:           cache,
+		ClusterMap:      clusterMap,
+		BatchDuration:   batchDuration,
+		DataSource:      dataSource,
+		Provider:        provider,
+		RequestGroup:    requestGroup,
+		RefreshInterval: refreshInterval,
 	}
 }
 
@@ -234,25 +228,41 @@ const (
 	normalizationStr          = `max(count_over_time(kube_pod_container_resource_requests{resource="memory", unit="byte", %s}[%s] %s))`
 )
 
-func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
-	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
-	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
-	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
-	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
-	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
-	queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)
+func (cm *CostModel) ComputeCostData(window string, offset string, filterNamespace string) (map[string]*CostData, error) {
+
+	/*
+		queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
+		queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), window, offset, env.GetPromClusterLabel())
+		queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+		queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+		queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), window, "", env.GetPromClusterLabel())
+		queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), window, offset)
+
+		// Cluster ID is specific to the source cluster
+		clusterID := env.GetClusterID()
+
+		// Submit all Prometheus queries asynchronously
+		ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
+		resChRAMUsage := ctx.Query(queryRAMUsage)
+		resChCPUUsage := ctx.Query(queryCPUUsage)
+		resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
+		resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
+		resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
+		resChNormalization := ctx.Query(queryNormalization)
+	*/
 
 	// Cluster ID is specific to the source cluster
 	clusterID := env.GetClusterID()
+	cp := cm.Provider
 
-	// Submit all Prometheus queries asynchronously
-	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
-	resChRAMUsage := ctx.Query(queryRAMUsage)
-	resChCPUUsage := ctx.Query(queryCPUUsage)
-	resChNetZoneRequests := ctx.Query(queryNetZoneRequests)
-	resChNetRegionRequests := ctx.Query(queryNetRegionRequests)
-	resChNetInternetRequests := ctx.Query(queryNetInternetRequests)
-	resChNormalization := ctx.Query(queryNormalization)
+	grp := source.NewQueryGroup()
+
+	resChRAMUsage := grp.With(cm.DataSource.QueryRAMUsage(window, offset))
+	resChCPUUsage := grp.With(cm.DataSource.QueryCPUUsage(window, offset))
+	resChNetZoneRequests := grp.With(cm.DataSource.QueryNetworkInZoneRequests(window, offset))
+	resChNetRegionRequests := grp.With(cm.DataSource.QueryNetworkInRegionRequests(window, offset))
+	resChNetInternetRequests := grp.With(cm.DataSource.QueryNetworkInternetRequests(window, offset))
+	resChNormalization := grp.With(cm.DataSource.QueryNormalization(window, offset))
 
 	// Pull pod information from k8s API
 	podlist := cm.Cache.GetAllPods()
@@ -287,21 +297,21 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 
 	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
 	// NOTE: will not propagate unless coupled with errors.
-	if ctx.HasErrors() {
+	if grp.HasErrors() {
 		// To keep the context of where the errors are occurring, we log the errors here and pass the error
 		// back to the caller. The caller should handle the specific case where the error is an ErrorCollection
-		for _, promErr := range ctx.Errors() {
-			if promErr.Error != nil {
-				log.Errorf("ComputeCostData: Request Error: %s", promErr.Error)
+		for _, queryErr := range grp.Errors() {
+			if queryErr.Error != nil {
+				log.Errorf("ComputeCostData: Request Error: %s", queryErr.Error)
 			}
-			if promErr.ParseError != nil {
-				log.Errorf("ComputeCostData: Parsing Error: %s", promErr.ParseError)
+			if queryErr.ParseError != nil {
+				log.Errorf("ComputeCostData: Parsing Error: %s", queryErr.ParseError)
 			}
 		}
 
 		// ErrorCollection is a collection of errors wrapped in a single error implementation
 		// We opt to not return an error for the sake of running as a pure exporter.
-		log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", ctx.ErrorCollection().Error())
+		log.Warnf("ComputeCostData: continuing despite prometheus errors: %s", grp.Error())
 	}
 
 	defer measureTime(time.Now(), profileThreshold, "ComputeCostData: Processing Query Data")
@@ -309,7 +319,7 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 	normalizationValue, err := getNormalization(resNormalization)
 	if err != nil {
 		// We opt to not return an error for the sake of running as a pure exporter.
-		log.Warnf("ComputeCostData: continuing despite error parsing normalization values from %s: %s", queryNormalization, err.Error())
+		log.Warnf("ComputeCostData: continuing despite error parsing normalization values: %s", err.Error())
 	}
 
 	nodes, err := cm.GetNodeCost(cp)
@@ -679,12 +689,12 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 		}
 	}
 
-	err = findDeletedNodeInfo(cli, missingNodes, window, "")
+	err = findDeletedNodeInfo(cm.DataSource, missingNodes, window, "")
 	if err != nil {
 		log.Errorf("Error fetching historical node data: %s", err.Error())
 	}
 
-	err = findDeletedPodInfo(cli, missingContainers, window)
+	err = findDeletedPodInfo(cm.DataSource, missingContainers, window)
 	if err != nil {
 		log.Errorf("Error fetching historical pod data: %s", err.Error())
 	}
@@ -734,11 +744,11 @@ func findUnmountedPVCostData(clusterMap clusters.ClusterMap, unmountedPVs map[st
 	return costs
 }
 
-func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[string]*CostData, window string) error {
+func findDeletedPodInfo(dataSource source.OpenCostDataSource, missingContainers map[string]*CostData, window string) error {
 	if len(missingContainers) > 0 {
-		queryHistoricalPodLabels := fmt.Sprintf(`kube_pod_labels{%s}[%s]`, env.GetPromClusterFilter(), window)
 
-		podLabelsResult, _, err := prom.NewNamedContext(cli, prom.ComputeCostDataContextName).QuerySync(queryHistoricalPodLabels)
+		podLabelsResCh := dataSource.QueryHistoricalPodLabels(window, "")
+		podLabelsResult, err := podLabelsResCh.Await()
 		if err != nil {
 			log.Errorf("failed to parse historical pod labels: %s", err.Error())
 		}
@@ -766,29 +776,22 @@ func findDeletedPodInfo(cli prometheusClient.Client, missingContainers map[strin
 	return nil
 }
 
-func findDeletedNodeInfo(cli prometheusClient.Client, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
+func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[string]*costAnalyzerCloud.Node, window, offset string) error {
 	if len(missingNodes) > 0 {
 		defer measureTime(time.Now(), profileThreshold, "Finding Deleted Node Info")
 
-		offsetStr := ""
-		if offset != "" {
-			offsetStr = fmt.Sprintf("offset %s", offset)
-		}
+		grp := source.NewQueryGroup()
 
-		queryHistoricalCPUCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
-		queryHistoricalRAMCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
-		queryHistoricalGPUCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost{%s}[%s] %s)) by (node, instance, %s)`, env.GetPromClusterFilter(), window, offsetStr, env.GetPromClusterLabel())
-
-		ctx := prom.NewNamedContext(cli, prom.ComputeCostDataContextName)
-		cpuCostResCh := ctx.Query(queryHistoricalCPUCost)
-		ramCostResCh := ctx.Query(queryHistoricalRAMCost)
-		gpuCostResCh := ctx.Query(queryHistoricalGPUCost)
+		cpuCostResCh := grp.With(dataSource.QueryHistoricalCPUCost(window, offset))
+		ramCostResCh := grp.With(dataSource.QueryHistoricalRAMCost(window, offset))
+		gpuCostResCh := grp.With(dataSource.QueryHistoricalGPUCost(window, offset))
 
 		cpuCostRes, _ := cpuCostResCh.Await()
 		ramCostRes, _ := ramCostResCh.Await()
 		gpuCostRes, _ := gpuCostResCh.Await()
-		if ctx.HasErrors() {
-			return ctx.ErrorCollection()
+
+		if grp.HasErrors() {
+			return grp.Error()
 		}
 
 		cpuCosts, err := getCost(cpuCostRes)
@@ -1623,7 +1626,7 @@ func floorMultiple(value int64, multiple int64) int64 {
 
 // Attempt to create a key for the request. Reduce the times to minutes in order to more easily group requests based on
 // real time ranges. If for any reason, the key generation fails, return a uuid to ensure uniqueness.
-func requestKeyFor(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) string {
+func requestKeyFor(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string) string {
 	keyLayout := "2006-01-02T15:04Z"
 
 	// We "snap" start time and duration to their closest 5 min multiple less than itself, by
@@ -1640,22 +1643,22 @@ func requestKeyFor(window opencost.Window, resolution time.Duration, filterNames
 	startKey := sTime.Format(keyLayout)
 	endKey := eTime.Format(keyLayout)
 
-	return fmt.Sprintf("%s,%s,%s,%s,%s,%t", startKey, endKey, resolution.String(), filterNamespace, filterCluster, remoteEnabled)
+	return fmt.Sprintf("%s,%s,%s,%s,%s", startKey, endKey, resolution.String(), filterNamespace, filterCluster)
 }
 
 // ComputeCostDataRange executes a range query for cost data.
 // Note that "offset" represents the time between the function call and "endString", and is also passed for convenience
-func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
+func (cm *CostModel) ComputeCostDataRange(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string) (map[string]*CostData, error) {
 	// Create a request key for request grouping. This key will be used to represent the cost-model result
 	// for the specific inputs to prevent multiple queries for identical data.
-	key := requestKeyFor(window, resolution, filterNamespace, filterCluster, remoteEnabled)
+	key := requestKeyFor(window, resolution, filterNamespace, filterCluster)
 
 	log.Debugf("ComputeCostDataRange with Key: %s", key)
 
 	// If there is already a request out that uses the same data, wait for it to return to share the results.
 	// Otherwise, start executing.
 	result, err, _ := cm.RequestGroup.Do(key, func() (interface{}, error) {
-		return cm.costDataRange(cli, cp, window, resolution, filterNamespace, filterCluster, remoteEnabled)
+		return cm.costDataRange(window, resolution, filterNamespace, filterCluster)
 	})
 
 	data, ok := result.(map[string]*CostData)
@@ -1666,8 +1669,10 @@ func (cm *CostModel) ComputeCostDataRange(cli prometheusClient.Client, cp costAn
 	return data, err
 }
 
-func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerCloud.Provider, window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string, remoteEnabled bool) (map[string]*CostData, error) {
+func (cm *CostModel) costDataRange(window opencost.Window, resolution time.Duration, filterNamespace string, filterCluster string) (map[string]*CostData, error) {
 	clusterID := env.GetClusterID()
+	dataSource := cm.DataSource
+	cp := cm.Provider
 
 	// durHrs := end.Sub(start).Hours() + 1
 
@@ -1689,63 +1694,87 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 		log.Warnf("CostDataRange: window should be divisible by resolution or else samples may be missed: %s %% %s = %dm", window, resolution, int64(window.Minutes())%int64(resolution.Minutes()))
 	}
 
-	// Convert to Prometheus-style duration string in terms of m or h
-	resStr := fmt.Sprintf("%dm", resMins)
-	if resMins%60 == 0 {
-		resStr = fmt.Sprintf("%dh", resMins/60)
-	}
-
-	if remoteEnabled {
-		remoteLayout := "2006-01-02T15:04:05Z"
-		remoteStartStr := window.Start().Format(remoteLayout)
-		remoteEndStr := window.End().Format(remoteLayout)
-		log.Infof("Using remote database for query from %s to %s with window %s", remoteStartStr, remoteEndStr, resolution)
-		return CostDataRangeFromSQL("", "", resolution.String(), remoteStartStr, remoteEndStr)
-	}
-
-	scrapeIntervalSeconds := cm.ScrapeInterval.Seconds()
-
-	ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)
-
-	queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
-	queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
-	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
-	queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
-	queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
-	queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
-
-	// Submit all queries for concurrent evaluation
-	resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
-	resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
-	resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
-	resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
-	resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
-	resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
-	resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
-	resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
-	resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
-	resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
-	resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
-	resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
-	resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
-	resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
-	resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
-	resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
-	resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
+	/*
+		// Convert to Prometheus-style duration string in terms of m or h
+		resStr := fmt.Sprintf("%dm", resMins)
+		if resMins%60 == 0 {
+			resStr = fmt.Sprintf("%dh", resMins/60)
+		}
+
+		scrapeIntervalSeconds := cm.RefreshInterval.Seconds()
+	*/
+
+	grp := source.NewQueryGroup()
+
+	resChRAMRequests := grp.With(dataSource.QueryRAMRequestsOverTime(start, end, resolution))
+	resChRAMUsage := grp.With(dataSource.QueryRAMUsageOverTime(start, end, resolution))
+	resChRAMAlloc := grp.With(dataSource.QueryRAMAllocationOverTime(start, end, resolution))
+	resChCPURequests := grp.With(dataSource.QueryCPURequestsOverTime(start, end, resolution))
+	resChCPUUsage := grp.With(dataSource.QueryCPUUsageOverTime(start, end, resolution))
+	resChCPUAlloc := grp.With(dataSource.QueryCPUAllocationOverTime(start, end, resolution))
+	resChGPURequests := grp.With(dataSource.QueryGPURequestsOverTime(start, end, resolution))
+	resChPVRequests := grp.With(dataSource.QueryPVRequestsOverTime(start, end, resolution))
+	resChPVCAlloc := grp.With(dataSource.QueryPVCAllocationOverTime(start, end, resolution))
+	resChPVHourlyCost := grp.With(dataSource.QueryPVHourlyCostOverTime(start, end, resolution))
+	resChNetZoneRequests := grp.With(dataSource.QueryNetworkInZoneOverTime(start, end, resolution))
+	resChNetRegionRequests := grp.With(dataSource.QueryNetworkInRegionOverTime(start, end, resolution))
+	resChNetInternetRequests := grp.With(dataSource.QueryNetworkInternetOverTime(start, end, resolution))
+
+	resChNSLabels := grp.With(dataSource.QueryNamespaceLabelsOverTime(start, end, resolution))
+	resChPodLabels := grp.With(dataSource.QueryPodLabelsOverTime(start, end, resolution))
+	resChNSAnnotations := grp.With(dataSource.QueryNamespaceAnnotationsOverTime(start, end, resolution))
+	resChPodAnnotations := grp.With(dataSource.QueryPodAnnotationsOverTime(start, end, resolution))
+	resChServiceLabels := grp.With(dataSource.QueryServiceLabelsOverTime(start, end, resolution))
+	resChDeploymentLabels := grp.With(dataSource.QueryDeploymentLabelsOverTime(start, end, resolution))
+	resChStatefulsetLabels := grp.With(dataSource.QueryStatefulsetLabelsOverTime(start, end, resolution))
+	resChJobs := grp.With(dataSource.QueryPodJobsOverTime(start, end, resolution))
+	resChDaemonsets := grp.With(dataSource.QueryPodDaemonsetsOverTime(start, end, resolution))
+	resChNormalization := grp.With(dataSource.QueryNormalizationOverTime(start, end, resolution))
+	/*
+		ctx := prom.NewNamedContext(cli, prom.ComputeCostDataRangeContextName)
+
+		queryRAMAlloc := fmt.Sprintf(queryRAMAllocationByteHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+		queryCPUAlloc := fmt.Sprintf(queryCPUAllocationVCPUHours, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+		queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryCPURequests := fmt.Sprintf(queryCPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryCPUUsage := fmt.Sprintf(queryCPUUsageStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryGPURequests := fmt.Sprintf(queryGPURequestsStr, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryPVRequests := fmt.Sprintf(queryPVRequestsStr, env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel(), env.GetPromClusterFilter(), env.GetPromClusterLabel(), env.GetPromClusterLabel())
+		queryPVCAllocation := fmt.Sprintf(queryPVCAllocationFmt, env.GetPromClusterFilter(), resStr, env.GetPromClusterLabel(), scrapeIntervalSeconds)
+		queryPVHourlyCost := fmt.Sprintf(queryPVHourlyCostFmt, env.GetPromClusterFilter(), resStr)
+		queryNetZoneRequests := fmt.Sprintf(queryZoneNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryNetRegionRequests := fmt.Sprintf(queryRegionNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryNetInternetRequests := fmt.Sprintf(queryInternetNetworkUsage, env.GetPromClusterFilter(), resStr, "", env.GetPromClusterLabel())
+		queryNormalization := fmt.Sprintf(normalizationStr, env.GetPromClusterFilter(), resStr, "")
+
+		// Submit all queries for concurrent evaluation
+		resChRAMRequests := ctx.QueryRange(queryRAMRequests, start, end, resolution)
+		resChRAMUsage := ctx.QueryRange(queryRAMUsage, start, end, resolution)
+		resChRAMAlloc := ctx.QueryRange(queryRAMAlloc, start, end, resolution)
+		resChCPURequests := ctx.QueryRange(queryCPURequests, start, end, resolution)
+		resChCPUUsage := ctx.QueryRange(queryCPUUsage, start, end, resolution)
+		resChCPUAlloc := ctx.QueryRange(queryCPUAlloc, start, end, resolution)
+		resChGPURequests := ctx.QueryRange(queryGPURequests, start, end, resolution)
+		resChPVRequests := ctx.QueryRange(queryPVRequests, start, end, resolution)
+		resChPVCAlloc := ctx.QueryRange(queryPVCAllocation, start, end, resolution)
+		resChPVHourlyCost := ctx.QueryRange(queryPVHourlyCost, start, end, resolution)
+		resChNetZoneRequests := ctx.QueryRange(queryNetZoneRequests, start, end, resolution)
+		resChNetRegionRequests := ctx.QueryRange(queryNetRegionRequests, start, end, resolution)
+		resChNetInternetRequests := ctx.QueryRange(queryNetInternetRequests, start, end, resolution)
+
+		resChNSLabels := ctx.QueryRange(fmt.Sprintf(queryNSLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChPodLabels := ctx.QueryRange(fmt.Sprintf(queryPodLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChNSAnnotations := ctx.QueryRange(fmt.Sprintf(queryNSAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChPodAnnotations := ctx.QueryRange(fmt.Sprintf(queryPodAnnotations, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChServiceLabels := ctx.QueryRange(fmt.Sprintf(queryServiceLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChDeploymentLabels := ctx.QueryRange(fmt.Sprintf(queryDeploymentLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChStatefulsetLabels := ctx.QueryRange(fmt.Sprintf(queryStatefulsetLabels, env.GetPromClusterFilter(), resStr), start, end, resolution)
+		resChJobs := ctx.QueryRange(fmt.Sprintf(queryPodJobs, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
+		resChDaemonsets := ctx.QueryRange(fmt.Sprintf(queryPodDaemonsets, env.GetPromClusterFilter(), env.GetPromClusterLabel()), start, end, resolution)
+		resChNormalization := ctx.QueryRange(queryNormalization, start, end, resolution)
+
+	*/
 
 	// Pull k8s pod, controller, service, and namespace details
 	podlist := cm.Cache.GetAllPods()
@@ -1802,26 +1831,26 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	// NOTE: The way we currently handle errors and warnings only early returns if there is an error. Warnings
 	// NOTE: will not propagate unless coupled with errors.
-	if ctx.HasErrors() {
+	if grp.HasErrors() {
 		// To keep the context of where the errors are occurring, we log the errors here and pass the error
 		// back to the caller. The caller should handle the specific case where the error is an ErrorCollection
-		for _, promErr := range ctx.Errors() {
-			if promErr.Error != nil {
-				log.Errorf("CostDataRange: Request Error: %s", promErr.Error)
+		for _, queryErr := range grp.Errors() {
+			if queryErr.Error != nil {
+				log.Errorf("CostDataRange: Request Error: %s", queryErr.Error)
 			}
-			if promErr.ParseError != nil {
-				log.Errorf("CostDataRange: Parsing Error: %s", promErr.ParseError)
+			if queryErr.ParseError != nil {
+				log.Errorf("CostDataRange: Parsing Error: %s", queryErr.ParseError)
 			}
 		}
 
 		// ErrorCollection is a collection of errors wrapped in a single error implementation
-		return nil, ctx.ErrorCollection()
+		return nil, grp.Error()
 	}
 
 	normalizationValue, err := getNormalizations(resNormalization)
 	if err != nil {
 		msg := fmt.Sprintf("error computing normalization for start=%s, end=%s, res=%s", start, end, resolution)
-		return nil, prom.WrapError(err, msg)
+		return nil, source.WrapError(err, msg)
 	}
 
 	pvClaimMapping, err := GetPVInfo(resPVRequests, clusterID)
@@ -1934,7 +1963,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	RAMReqMap, err := GetNormalizedContainerMetricVectors(resRAMRequests, normalizationValue, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
+		return nil, source.WrapError(err, "GetNormalizedContainerMetricVectors(RAMRequests)")
 	}
 	for key := range RAMReqMap {
 		containers[key] = true
@@ -1942,7 +1971,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	RAMUsedMap, err := GetNormalizedContainerMetricVectors(resRAMUsage, normalizationValue, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
+		return nil, source.WrapError(err, "GetNormalizedContainerMetricVectors(RAMUsage)")
 	}
 	for key := range RAMUsedMap {
 		containers[key] = true
@@ -1950,7 +1979,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	CPUReqMap, err := GetNormalizedContainerMetricVectors(resCPURequests, normalizationValue, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
+		return nil, source.WrapError(err, "GetNormalizedContainerMetricVectors(CPURequests)")
 	}
 	for key := range CPUReqMap {
 		containers[key] = true
@@ -1960,7 +1989,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 	// rate(container_cpu_usage_seconds_total) which properly accounts for normalized rates
 	CPUUsedMap, err := GetContainerMetricVectors(resCPUUsage, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
+		return nil, source.WrapError(err, "GetContainerMetricVectors(CPUUsage)")
 	}
 	for key := range CPUUsedMap {
 		containers[key] = true
@@ -1968,7 +1997,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	RAMAllocMap, err := GetContainerMetricVectors(resRAMAlloc, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
+		return nil, source.WrapError(err, "GetContainerMetricVectors(RAMAllocations)")
 	}
 	for key := range RAMAllocMap {
 		containers[key] = true
@@ -1976,7 +2005,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	CPUAllocMap, err := GetContainerMetricVectors(resCPUAlloc, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
+		return nil, source.WrapError(err, "GetContainerMetricVectors(CPUAllocations)")
 	}
 	for key := range CPUAllocMap {
 		containers[key] = true
@@ -1984,7 +2013,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	GPUReqMap, err := GetNormalizedContainerMetricVectors(resGPURequests, normalizationValue, clusterID)
 	if err != nil {
-		return nil, prom.WrapError(err, "GetContainerMetricVectors(GPURequests)")
+		return nil, source.WrapError(err, "GetContainerMetricVectors(GPURequests)")
 	}
 	for key := range GPUReqMap {
 		containers[key] = true
@@ -2193,7 +2222,7 @@ func (cm *CostModel) costDataRange(cli prometheusClient.Client, cp costAnalyzerC
 
 	if window.Minutes() > 0 {
 		dur, off := window.DurationOffsetStrings()
-		err = findDeletedNodeInfo(cli, missingNodes, dur, off)
+		err = findDeletedNodeInfo(dataSource, missingNodes, dur, off)
 		if err != nil {
 			log.Errorf("Error fetching historical node data: %s", err.Error())
 		}

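The pattern above, NewQueryGroup plus With/Await plus HasErrors/Error, replaces prom.NewNamedContext throughout costmodel.go. Here is a minimal sketch of the same flow in isolation, assuming the QueryGroup and OpenCostDataSource methods exactly as they appear in this diff; the wrapper function is hypothetical.

```go
// Illustrative only: the QueryGroup pattern used by findDeletedNodeInfo above.
func historicalNodeCosts(ds source.OpenCostDataSource, window, offset string) ([]*source.QueryResult, []*source.QueryResult, error) {
	grp := source.NewQueryGroup()

	// With registers each in-flight query so failures aggregate on the group
	cpuCh := grp.With(ds.QueryHistoricalCPUCost(window, offset))
	ramCh := grp.With(ds.QueryHistoricalRAMCost(window, offset))

	// Await blocks until the individual query returns its results
	cpuRes, _ := cpuCh.Await()
	ramRes, _ := ramCh.Await()

	// a single error wraps every request and parse failure in the group
	if grp.HasErrors() {
		return nil, nil, grp.Error()
	}

	return cpuRes, ramRes, nil
}
```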
+ 15 - 15
pkg/costmodel/key.go

@@ -4,8 +4,8 @@ import (
 	"fmt"
 
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
 )
 
 type containerKey struct {
@@ -34,7 +34,7 @@ func newContainerKey(cluster, namespace, pod, container string) containerKey {
 // "cluster_id" as the containerKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultContainerKey(res *prom.QueryResult, clusterLabel, namespaceLabel, podLabel, containerLabel string) (containerKey, error) {
+func resultContainerKey(res *source.QueryResult, clusterLabel, namespaceLabel, podLabel, containerLabel string) (containerKey, error) {
 	key := containerKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -94,7 +94,7 @@ func getUnmountedPodKey(cluster string) podKey {
 // as the podKey's Cluster field. If a given field does not exist on the
 // result, an error is returned. (The only exception to that is clusterLabel,
 // which we expect may not exist, but has a default value.)
-func resultPodKey(res *prom.QueryResult, clusterLabel, namespaceLabel string) (podKey, error) {
+func resultPodKey(res *source.QueryResult, clusterLabel, namespaceLabel string) (podKey, error) {
 	key := podKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -143,7 +143,7 @@ func newNamespaceKey(cluster, namespace string) namespaceKey {
 // "cluster_id" as the namespaceKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultNamespaceKey(res *prom.QueryResult, clusterLabel, namespaceLabel string) (namespaceKey, error) {
+func resultNamespaceKey(res *source.QueryResult, clusterLabel, namespaceLabel string) (namespaceKey, error) {
 	key := namespaceKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -187,7 +187,7 @@ func newControllerKey(cluster, namespace, controllerKind, controller string) con
 // "cluster_id" as the controllerKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultControllerKey(controllerKind string, res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultControllerKey(controllerKind string, res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	key := controllerKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -215,37 +215,37 @@ func resultControllerKey(controllerKind string, res *prom.QueryResult, clusterLa
 
 // resultDeploymentKey creates a controllerKey for a Deployment.
 // (See resultControllerKey for more.)
-func resultDeploymentKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultDeploymentKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("deployment", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
 // resultStatefulSetKey creates a controllerKey for a StatefulSet.
 // (See resultControllerKey for more.)
-func resultStatefulSetKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultStatefulSetKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("statefulset", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
 // resultDaemonSetKey creates a controllerKey for a DaemonSet.
 // (See resultControllerKey for more.)
-func resultDaemonSetKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultDaemonSetKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("daemonset", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
 // resultJobKey creates a controllerKey for a Job.
 // (See resultControllerKey for more.)
-func resultJobKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultJobKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("job", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
 // resultReplicaSetKey creates a controllerKey for a ReplicaSet.
 // (See resultControllerKey for more.)
-func resultReplicaSetKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultReplicaSetKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("replicaset", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
 // resultReplicaSetRolloutKey creates a controllerKey for a Rollout.
 // (See resultControllerKey for more.)
-func resultReplicaSetRolloutKey(res *prom.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
+func resultReplicaSetRolloutKey(res *source.QueryResult, clusterLabel, namespaceLabel, controllerLabel string) (controllerKey, error) {
 	return resultControllerKey("rollout", res, clusterLabel, namespaceLabel, controllerLabel)
 }
 
@@ -273,7 +273,7 @@ func newServiceKey(cluster, namespace, service string) serviceKey {
 // "cluster_id" as the serviceKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultServiceKey(res *prom.QueryResult, clusterLabel, namespaceLabel, serviceLabel string) (serviceKey, error) {
+func resultServiceKey(res *source.QueryResult, clusterLabel, namespaceLabel, serviceLabel string) (serviceKey, error) {
 	key := serviceKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -319,7 +319,7 @@ func newNodeKey(cluster, node string) nodeKey {
 // "cluster_id" as the nodeKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultNodeKey(res *prom.QueryResult, clusterLabel, nodeLabel string) (nodeKey, error) {
+func resultNodeKey(res *source.QueryResult, clusterLabel, nodeLabel string) (nodeKey, error) {
 	key := nodeKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -361,7 +361,7 @@ func newPVCKey(cluster, namespace, persistentVolumeClaim string) pvcKey {
 // "cluster_id" as the pvcKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultPVCKey(res *prom.QueryResult, clusterLabel, namespaceLabel, pvcLabel string) (pvcKey, error) {
+func resultPVCKey(res *source.QueryResult, clusterLabel, namespaceLabel, pvcLabel string) (pvcKey, error) {
 	key := pvcKey{}
 
 	cluster, err := res.GetString(clusterLabel)
@@ -407,7 +407,7 @@ func newPVKey(cluster, persistentVolume string) pvKey {
 // "cluster_id" as the pvKey's Cluster field. If a given field does not
 // exist on the result, an error is returned. (The only exception to that is
 // clusterLabel, which we expect may not exist, but has a default value.)
-func resultPVKey(res *prom.QueryResult, clusterLabel, persistentVolumeLabel string) (pvKey, error) {
+func resultPVKey(res *source.QueryResult, clusterLabel, persistentVolumeLabel string) (pvKey, error) {
 	key := pvKey{}
 
 	cluster, err := res.GetString(clusterLabel)

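All of the result*Key helpers now accept a *source.QueryResult. A hedged example of building pod keys, assuming the "namespace" label argument that callers appear to pass elsewhere; the wrapper function is hypothetical.

```go
// Illustrative only: building pod keys from query results with the new result type.
func podKeysFrom(results []*source.QueryResult) []podKey {
	keys := make([]podKey, 0, len(results))
	for _, res := range results {
		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
		if err != nil {
			// only the cluster label has a default; other missing labels are an error
			continue
		}
		keys = append(keys, key)
	}
	return keys
}
```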
+ 3 - 2
pkg/costmodel/metrics.go

@@ -9,6 +9,7 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/clusters"
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/atomic"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
@@ -454,11 +455,11 @@ func (cmme *CostModelMetricsEmitter) Start() bool {
 			}
 
 			// TODO: Pass PrometheusClient and CloudProvider into CostModel on instantiation so this isn't so awkward
-			data, err := cmme.Model.ComputeCostData(cmme.PrometheusClient, cmme.CloudProvider, "2m", "", "")
+			data, err := cmme.Model.ComputeCostData("2m", "", "")
 			if err != nil {
 				// For an error collection, we'll just log the length of the errors (ComputeCostData already logs the
 				// actual errors)
-				if prom.IsErrorCollection(err) {
+				if source.IsErrorCollection(err) {
 					if ec, ok := err.(prom.QueryErrorCollection); ok {
 						log.Errorf("Error in price recording: %d errors occurred", len(ec.Errors()))
 					}

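A hedged sketch of the aggregated-error handling used here. Note that it mirrors the transitional mix above, where source.IsErrorCollection guards a type assertion to the prom package's QueryErrorCollection, rather than a final API.

```go
// Illustrative only: distinguishing an error collection from a single error.
if err != nil {
	if source.IsErrorCollection(err) {
		if ec, ok := err.(prom.QueryErrorCollection); ok {
			// log only the count; individual errors were already logged upstream
			log.Errorf("Error in price recording: %d errors occurred", len(ec.Errors()))
		}
	} else {
		log.Errorf("Error in price recording: %s", err)
	}
}
```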
+ 5 - 6
pkg/costmodel/networkcosts.go

@@ -2,10 +2,9 @@ package costmodel
 
 import (
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
-	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
 )
 
 // NetworkUsageData contains the network usage values for egress network traffic
@@ -28,7 +27,7 @@ type NetworkUsageVector struct {
 
 // GetNetworkUsageData performs a join of the results of zone, region, and internet usage queries to return a single
 // map containing network costs for each namespace+pod
-func GetNetworkUsageData(zr []*prom.QueryResult, rr []*prom.QueryResult, ir []*prom.QueryResult, defaultClusterID string) (map[string]*NetworkUsageData, error) {
+func GetNetworkUsageData(zr []*source.QueryResult, rr []*source.QueryResult, ir []*source.QueryResult, defaultClusterID string) (map[string]*NetworkUsageData, error) {
 	zoneNetworkMap, err := getNetworkUsage(zr, defaultClusterID)
 	if err != nil {
 		return nil, err
@@ -138,7 +137,7 @@ func GetNetworkCost(usage *NetworkUsageData, cloud costAnalyzerCloud.Provider) (
 	return results, nil
 }
 
-func getNetworkUsage(qrs []*prom.QueryResult, defaultClusterID string) (map[string]*NetworkUsageVector, error) {
+func getNetworkUsage(qrs []*source.QueryResult, defaultClusterID string) (map[string]*NetworkUsageVector, error) {
 	ncdmap := make(map[string]*NetworkUsageVector)
 
 	for _, val := range qrs {
@@ -147,12 +146,12 @@ func getNetworkUsage(qrs []*prom.QueryResult, defaultClusterID string) (map[stri
 			return nil, err
 		}
 
-		namespace, err := val.GetString("namespace")
+		namespace, err := val.GetNamespace()
 		if err != nil {
 			return nil, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, err := val.GetCluster()
 		if clusterID == "" {
 			log.Debugf("Prometheus vector does not have cluster id")
 			clusterID = defaultClusterID

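GetNetworkUsageData joins the zone, region, and internet egress result sets into per namespace+pod usage. A minimal sketch of fetching those three sets through the data source and joining them, assuming the query method names used in costmodel.go above; the wrapper function is hypothetical.

```go
// Illustrative only: fetching and joining the three egress result sets.
func networkUsageFor(ds source.OpenCostDataSource, window, offset, clusterID string) (map[string]*NetworkUsageData, error) {
	grp := source.NewQueryGroup()

	zoneCh := grp.With(ds.QueryNetworkInZoneRequests(window, offset))
	regionCh := grp.With(ds.QueryNetworkInRegionRequests(window, offset))
	internetCh := grp.With(ds.QueryNetworkInternetRequests(window, offset))

	zr, _ := zoneCh.Await()
	rr, _ := regionCh.Await()
	ir, _ := internetCh.Await()

	if grp.HasErrors() {
		return nil, grp.Error()
	}

	// join zone, region, and internet egress into per namespace+pod usage
	return GetNetworkUsageData(zr, rr, ir, clusterID)
}
```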
+ 52 - 52
pkg/costmodel/promparsers.go → pkg/costmodel/resultparsers.go

@@ -6,10 +6,10 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/clustercache"
-	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/prom"
 )
 
@@ -46,16 +46,16 @@ func GetPVInfoLocal(cache clustercache.ClusterCache, defaultClusterID string) (m
 
 // TODO niko/prom move parsing functions from costmodel.go
 
-func GetPVInfo(qrs []*prom.QueryResult, defaultClusterID string) (map[string]*PersistentVolumeClaimData, error) {
+func GetPVInfo(qrs []*source.QueryResult, defaultClusterID string) (map[string]*PersistentVolumeClaimData, error) {
 	toReturn := make(map[string]*PersistentVolumeClaimData)
 
 	for _, val := range qrs {
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
@@ -92,21 +92,21 @@ func GetPVInfo(qrs []*prom.QueryResult, defaultClusterID string) (map[string]*Pe
 	return toReturn, nil
 }
 
-func GetPVAllocationMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string][]*PersistentVolumeClaimData, error) {
+func GetPVAllocationMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string][]*PersistentVolumeClaimData, error) {
 	toReturn := make(map[string][]*PersistentVolumeClaimData)
 
 	for _, val := range qrs {
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		pod, err := val.GetString("pod")
+		pod, err := val.GetPod()
 		if err != nil {
 			return toReturn, err
 		}
@@ -138,11 +138,11 @@ func GetPVAllocationMetrics(qrs []*prom.QueryResult, defaultClusterID string) (m
 	return toReturn, nil
 }
 
-func GetPVCostMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]*costAnalyzerCloud.PV, error) {
+func GetPVCostMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]*costAnalyzerCloud.PV, error) {
 	toReturn := make(map[string]*costAnalyzerCloud.PV)
 
 	for _, val := range qrs {
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -161,17 +161,17 @@ func GetPVCostMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[str
 	return toReturn, nil
 }
 
-func GetNamespaceLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetNamespaceLabelsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
 		// We want Namespace and ClusterID for key generation purposes
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -188,22 +188,22 @@ func GetNamespaceLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string)
 	return toReturn, nil
 }
 
-func GetPodLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetPodLabelsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
 		// We want Pod, Namespace and ClusterID for key generation purposes
-		pod, err := val.GetString("pod")
+		pod, err := val.GetPod()
 		if err != nil {
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -222,17 +222,17 @@ func GetPodLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[
 	return toReturn, nil
 }
 
-func GetNamespaceAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetNamespaceAnnotationsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
 		// We want Namespace and ClusterID for key generation purposes
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -249,22 +249,22 @@ func GetNamespaceAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID st
 	return toReturn, nil
 }
 
-func GetPodAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetPodAnnotationsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
 		// We want Pod, Namespace and ClusterID for key generation purposes
-		pod, err := val.GetString("pod")
+		pod, err := val.GetPod()
 		if err != nil {
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 
 		if clusterID == "" {
 			clusterID = defaultClusterID
@@ -283,7 +283,7 @@ func GetPodAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID string)
 	return toReturn, nil
 }
 
-func GetStatefulsetMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetStatefulsetMatchLabelsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
@@ -293,12 +293,12 @@ func GetStatefulsetMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -310,7 +310,7 @@ func GetStatefulsetMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID
 	return toReturn, nil
 }
 
-func GetPodDaemonsetsWithMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]string, error) {
+func GetPodDaemonsetsWithMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]string, error) {
 	toReturn := make(map[string]string)
 
 	for _, val := range qrs {
@@ -319,17 +319,17 @@ func GetPodDaemonsetsWithMetrics(qrs []*prom.QueryResult, defaultClusterID strin
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
 
-		pod, err := val.GetString("pod")
+		pod, err := val.GetPod()
 		if err != nil {
 			return toReturn, err
 		}
@@ -341,7 +341,7 @@ func GetPodDaemonsetsWithMetrics(qrs []*prom.QueryResult, defaultClusterID strin
 	return toReturn, nil
 }
 
-func GetPodJobsWithMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]string, error) {
+func GetPodJobsWithMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]string, error) {
 	toReturn := make(map[string]string)
 
 	for _, val := range qrs {
@@ -350,17 +350,17 @@ func GetPodJobsWithMetrics(qrs []*prom.QueryResult, defaultClusterID string) (ma
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
 
-		pod, err := val.GetString("pod")
+		pod, err := val.GetPod()
 		if err != nil {
 			return toReturn, err
 		}
@@ -372,7 +372,7 @@ func GetPodJobsWithMetrics(qrs []*prom.QueryResult, defaultClusterID string) (ma
 	return toReturn, nil
 }
 
-func GetDeploymentMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetDeploymentMatchLabelsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
@@ -382,12 +382,12 @@ func GetDeploymentMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -399,7 +399,7 @@ func GetDeploymentMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 	return toReturn, nil
 }
 
-func GetServiceSelectorLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
+func GetServiceSelectorLabelsMetrics(qrs []*source.QueryResult, defaultClusterID string) (map[string]map[string]string, error) {
 	toReturn := make(map[string]map[string]string)
 
 	for _, val := range qrs {
@@ -409,12 +409,12 @@ func GetServiceSelectorLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 			return toReturn, err
 		}
 
-		ns, err := val.GetString("namespace")
+		ns, err := val.GetNamespace()
 		if err != nil {
 			return toReturn, err
 		}
 
-		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetCluster()
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -426,10 +426,10 @@ func GetServiceSelectorLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 	return toReturn, nil
 }
 
-func GetContainerMetricVector(qrs []*prom.QueryResult, normalize bool, normalizationValue float64, defaultClusterID string) (map[string][]*util.Vector, error) {
+func GetContainerMetricVector(qrs []*source.QueryResult, normalize bool, normalizationValue float64, defaultClusterID string) (map[string][]*util.Vector, error) {
 	containerData := make(map[string][]*util.Vector)
 	for _, val := range qrs {
-		containerMetric, err := NewContainerMetricFromPrometheus(val.Metric, defaultClusterID)
+		containerMetric, err := NewContainerMetricFromResult(val, defaultClusterID)
 		if err != nil {
 			return nil, err
 		}
@@ -444,10 +444,10 @@ func GetContainerMetricVector(qrs []*prom.QueryResult, normalize bool, normaliza
 	return containerData, nil
 }
 
-func GetContainerMetricVectors(qrs []*prom.QueryResult, defaultClusterID string) (map[string][]*util.Vector, error) {
+func GetContainerMetricVectors(qrs []*source.QueryResult, defaultClusterID string) (map[string][]*util.Vector, error) {
 	containerData := make(map[string][]*util.Vector)
 	for _, val := range qrs {
-		containerMetric, err := NewContainerMetricFromPrometheus(val.Metric, defaultClusterID)
+		containerMetric, err := NewContainerMetricFromResult(val, defaultClusterID)
 		if err != nil {
 			return nil, err
 		}
@@ -456,10 +456,10 @@ func GetContainerMetricVectors(qrs []*prom.QueryResult, defaultClusterID string)
 	return containerData, nil
 }
 
-func GetNormalizedContainerMetricVectors(qrs []*prom.QueryResult, normalizationValues []*util.Vector, defaultClusterID string) (map[string][]*util.Vector, error) {
+func GetNormalizedContainerMetricVectors(qrs []*source.QueryResult, normalizationValues []*util.Vector, defaultClusterID string) (map[string][]*util.Vector, error) {
 	containerData := make(map[string][]*util.Vector)
 	for _, val := range qrs {
-		containerMetric, err := NewContainerMetricFromPrometheus(val.Metric, defaultClusterID)
+		containerMetric, err := NewContainerMetricFromResult(val, defaultClusterID)
 		if err != nil {
 			return nil, err
 		}
@@ -468,11 +468,11 @@ func GetNormalizedContainerMetricVectors(qrs []*prom.QueryResult, normalizationV
 	return containerData, nil
 }
 
-func getCost(qrs []*prom.QueryResult) (map[string][]*util.Vector, error) {
+func getCost(qrs []*source.QueryResult) (map[string][]*util.Vector, error) {
 	toReturn := make(map[string][]*util.Vector)
 
 	for _, val := range qrs {
-		instance, err := val.GetString("node")
+		instance, err := val.GetNode()
 		if err != nil {
 			return toReturn, err
 		}
@@ -485,7 +485,7 @@ func getCost(qrs []*prom.QueryResult) (map[string][]*util.Vector, error) {
 
 // TODO niko/prom retain message:
 // normalization data is empty: time window may be invalid or kube-state-metrics or node-exporter may not be running
-func getNormalization(qrs []*prom.QueryResult) (float64, error) {
+func getNormalization(qrs []*source.QueryResult) (float64, error) {
 	if len(qrs) == 0 {
 		return 0.0, prom.NoDataErr("getNormalization")
 	}
@@ -497,7 +497,7 @@ func getNormalization(qrs []*prom.QueryResult) (float64, error) {
 
 // TODO niko/prom retain message:
 // normalization data is empty: time window may be invalid or kube-state-metrics or node-exporter may not be running
-func getNormalizations(qrs []*prom.QueryResult) ([]*util.Vector, error) {
+func getNormalizations(qrs []*source.QueryResult) ([]*util.Vector, error) {
 	if len(qrs) == 0 {
 		return nil, prom.NoDataErr("getNormalizations")
 	}
@@ -505,7 +505,7 @@ func getNormalizations(qrs []*prom.QueryResult) ([]*util.Vector, error) {
 	return qrs[0].Values, nil
 }
 
-func parsePodLabels(qrs []*prom.QueryResult) (map[string]map[string]string, error) {
+func parsePodLabels(qrs []*source.QueryResult) (map[string]map[string]string, error) {
 	podLabels := map[string]map[string]string{}
 
 	for _, result := range qrs {

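Note: the rewritten parsers above lean on typed accessors of source.QueryResult instead of the old raw GetString label lookups. A minimal sketch of what those accessors might look like follows, assuming QueryResult keeps the metric labels and already exposes a GetString helper; the label names (especially the cluster label, shown here as the old default "cluster_id") are placeholder assumptions, not the actual core/pkg/source implementation.

	// Sketch only; the real queryresult.go may resolve the cluster label differently.
	func (qr *QueryResult) GetNamespace() (string, error) { return qr.GetString("namespace") }

	func (qr *QueryResult) GetPod() (string, error) { return qr.GetString("pod") }

	func (qr *QueryResult) GetNode() (string, error) { return qr.GetString("node") }

	// Callers above ignore the error and substitute a default cluster id when the value is empty.
	func (qr *QueryResult) GetCluster() (string, error) { return qr.GetString("cluster_id") }
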
+ 186 - 400
pkg/costmodel/router.go

@@ -15,7 +15,9 @@ import (
 
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util/httputil"
+	"github.com/opencost/opencost/core/pkg/util/retry"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 	"github.com/opencost/opencost/core/pkg/version"
 	"github.com/opencost/opencost/pkg/cloud/aws"
@@ -39,16 +41,14 @@ import (
 	sysenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/modules/prometheus-source/pkg/prom"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/errors"
-	"github.com/opencost/opencost/pkg/prom"
-	"github.com/opencost/opencost/pkg/thanos"
 	prometheus "github.com/prometheus/client_golang/api"
-	prometheusAPI "github.com/prometheus/client_golang/api/prometheus/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/patrickmn/go-cache"
@@ -66,7 +66,6 @@ const (
 	maxCacheMinutes30d   = 137
 	CustomPricingSetting = "CustomPricing"
 	DiscountSetting      = "Discount"
-	epRules              = apiPrefix + "/rules"
 )
 
 var (
@@ -77,8 +76,9 @@ var (
 // Accesses defines a singleton application instance, providing access to
 // Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	PrometheusClient    prometheus.Client
-	ThanosClient        prometheus.Client
+	//PrometheusClient    prometheus.Client
+	//ThanosClient        prometheus.Client
+	DataSource          source.OpenCostDataSource
 	KubeClientSet       kubernetes.Interface
 	ClusterCache        clustercache.ClusterCache
 	ClusterMap          clusters.ClusterMap
@@ -105,6 +105,7 @@ type Accesses struct {
 
 // GetPrometheusClient decides whether the default Prometheus client or the Thanos client
 // should be used.
+/*
 func (a *Accesses) GetPrometheusClient(remote bool) prometheus.Client {
 	// Use Thanos Client if it exists (enabled) and remote flag set
 	var pc prometheus.Client
@@ -117,6 +118,7 @@ func (a *Accesses) GetPrometheusClient(remote bool) prometheus.Client {
 
 	return pc
 }
+*/
 
 // GetCacheExpiration looks up and returns custom cache expiration for the given duration.
 // If one does not exists, it returns the default cache expiration, which is defined by
@@ -147,14 +149,14 @@ func (a *Accesses) ClusterCostsFromCacheHandler(w http.ResponseWriter, r *http.R
 	offset := time.Minute
 	durationHrs := "24h"
 	fmtOffset := "1m"
-	pClient := a.GetPrometheusClient(true)
+	dataSource := a.DataSource
 
 	key := fmt.Sprintf("%s:%s", durationHrs, fmtOffset)
 	if data, valid := a.ClusterCostsCache.Get(key); valid {
 		clusterCosts := data.(map[string]*ClusterCosts)
 		w.Write(WrapDataWithMessage(clusterCosts, nil, "clusterCosts cache hit"))
 	} else {
-		data, err := a.ComputeClusterCosts(pClient, a.CloudProvider, duration, offset, true)
+		data, err := a.ComputeClusterCosts(dataSource, a.CloudProvider, duration, offset, true)
 		w.Write(WrapDataWithMessage(data, err, fmt.Sprintf("clusterCosts cache miss: %s", key)))
 	}
 }
@@ -396,7 +398,7 @@ func (a *Accesses) CostDataModel(w http.ResponseWriter, r *http.Request, ps http
 		offset = "offset " + offset
 	}
 
-	data, err := a.Model.ComputeCostData(a.PrometheusClient, a.CloudProvider, window, offset, namespace)
+	data, err := a.Model.ComputeCostData(window, offset, namespace)
 
 	if fields != "" {
 		filteredData := filterFields(fields, data)
@@ -433,24 +435,26 @@ func (a *Accesses) ClusterCosts(w http.ResponseWriter, r *http.Request, ps httpr
 			return
 		}
 	}
+	/*
+		useThanos, _ := strconv.ParseBool(r.URL.Query().Get("multi"))
 
-	useThanos, _ := strconv.ParseBool(r.URL.Query().Get("multi"))
+		if useThanos && !thanos.IsEnabled() {
+			w.Write(WrapData(nil, fmt.Errorf("Multi=true while Thanos is not enabled.")))
+			return
+		}
 
-	if useThanos && !thanos.IsEnabled() {
-		w.Write(WrapData(nil, fmt.Errorf("Multi=true while Thanos is not enabled.")))
-		return
-	}
 
-	var client prometheus.Client
-	if useThanos {
-		client = a.ThanosClient
-		offsetDur = thanos.OffsetDuration()
+		var client prometheus.Client
+		if useThanos {
+			client = a.ThanosClient
+			offsetDur = thanos.OffsetDuration()
 
-	} else {
-		client = a.PrometheusClient
-	}
+		} else {
+			client = a.PrometheusClient
+		}
+	*/
 
-	data, err := a.ComputeClusterCosts(client, a.CloudProvider, windowDur, offsetDur, true)
+	data, err := a.ComputeClusterCosts(a.DataSource, a.CloudProvider, windowDur, offsetDur, true)
 	w.Write(WrapData(data, err))
 }
 
@@ -458,8 +462,8 @@ func (a *Accesses) ClusterCostsOverTime(w http.ResponseWriter, r *http.Request,
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	start := r.URL.Query().Get("start")
-	end := r.URL.Query().Get("end")
+	startString := r.URL.Query().Get("start")
+	endString := r.URL.Query().Get("end")
 	window := r.URL.Query().Get("window")
 	offset := r.URL.Query().Get("offset")
 
@@ -483,7 +487,23 @@ func (a *Accesses) ClusterCostsOverTime(w http.ResponseWriter, r *http.Request,
 		}
 	}
 
-	data, err := ClusterCostsOverTime(a.PrometheusClient, a.CloudProvider, start, end, windowDur, offsetDur)
+	const layout = "2006-01-02T15:04:05.000Z"
+
+	start, err := time.Parse(layout, startString)
+	if err != nil {
+		log.Errorf("Error parsing time %s. Error: %s", startString, err.Error())
+		w.Write(WrapData(nil, fmt.Errorf("error parsing 'start': %s: %w", startString, err)))
+		return
+	}
+
+	end, err := time.Parse(layout, endString)
+	if err != nil {
+		log.Errorf("Error parsing time %s. Error: %s", endString, err.Error())
+		w.Write(WrapData(nil, fmt.Errorf("error parsing 'end': %s: %w", endString, err)))
+		return
+	}
+
+	data, err := ClusterCostsOverTime(a.DataSource, a.CloudProvider, start, end, windowDur, offsetDur)
 	w.Write(WrapData(data, err))
 }
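For reference, the layout above is strict: it accepts only values ending in a literal "Z" with exactly three fractional-second digits, so a plain RFC3339 timestamp without milliseconds is rejected. A small illustration with hypothetical values (not taken from the handler):

	// Accepted by the layout used above.
	t, err := time.Parse("2006-01-02T15:04:05.000Z", "2024-01-15T00:00:00.000Z")

	// Rejected: the ".000" fraction is required by this layout.
	_, err = time.Parse("2006-01-02T15:04:05.000Z", "2024-01-15T00:00:00Z")
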
 
@@ -524,14 +544,16 @@ func (a *Accesses) CostDataModelRange(w http.ResponseWriter, r *http.Request, ps
 	}
 
 	// Use Thanos Client if it exists (enabled) and remote flag set
-	var pClient prometheus.Client
-	if remote != "false" && a.ThanosClient != nil {
-		pClient = a.ThanosClient
-	} else {
-		pClient = a.PrometheusClient
-	}
+	/*
+		var pClient prometheus.Client
+		if remote != "false" && a.ThanosClient != nil {
+			pClient = a.ThanosClient
+		} else {
+			pClient = a.PrometheusClient
+		}
+	*/
 
-	data, err := a.Model.ComputeCostDataRange(pClient, a.CloudProvider, window, resolution, namespace, cluster, remoteEnabled)
+	data, err := a.Model.ComputeCostDataRange(window, resolution, namespace, cluster)
 	if err != nil {
 		w.Write(WrapData(nil, err))
 	}
@@ -702,151 +724,6 @@ func (a *Accesses) GetPricingSourceSummary(w http.ResponseWriter, r *http.Reques
 	w.Write(WrapData(data, nil))
 }
 
-func (a *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	w.Write(WrapData(prom.Validate(a.PrometheusClient)))
-}
-
-func (a *Accesses) PrometheusQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	qp := httputil.NewQueryParams(r.URL.Query())
-	query := qp.Get("query", "")
-	if query == "" {
-		w.Write(WrapData(nil, fmt.Errorf("Query Parameter 'query' is unset'")))
-		return
-	}
-
-	// Attempt to parse time as either a unix timestamp or as an RFC3339 value
-	var timeVal time.Time
-	timeStr := qp.Get("time", "")
-	if len(timeStr) > 0 {
-		if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
-			timeVal = time.Unix(t, 0)
-		} else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
-			timeVal = t
-		}
-
-		// If time is given, but not parse-able, return an error
-		if timeVal.IsZero() {
-			http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
-		}
-	}
-
-	ctx := prom.NewNamedContext(a.PrometheusClient, prom.FrontendContextName)
-	body, err := ctx.RawQuery(query, timeVal)
-	if err != nil {
-		w.Write(WrapData(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
-		return
-	}
-
-	w.Write(body)
-}
-
-func (a *Accesses) PrometheusQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	qp := httputil.NewQueryParams(r.URL.Query())
-	query := qp.Get("query", "")
-	if query == "" {
-		fmt.Fprintf(w, "Error parsing query from request parameters.")
-		return
-	}
-
-	start, end, duration, err := toStartEndStep(qp)
-	if err != nil {
-		fmt.Fprintf(w, err.Error())
-		return
-	}
-
-	ctx := prom.NewNamedContext(a.PrometheusClient, prom.FrontendContextName)
-	body, err := ctx.RawQueryRange(query, start, end, duration)
-	if err != nil {
-		fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
-		return
-	}
-
-	w.Write(body)
-}
-
-func (a *Accesses) ThanosQuery(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	if !thanos.IsEnabled() {
-		w.Write(WrapData(nil, fmt.Errorf("ThanosDisabled")))
-		return
-	}
-
-	qp := httputil.NewQueryParams(r.URL.Query())
-	query := qp.Get("query", "")
-	if query == "" {
-		w.Write(WrapData(nil, fmt.Errorf("Query Parameter 'query' is unset'")))
-		return
-	}
-
-	// Attempt to parse time as either a unix timestamp or as an RFC3339 value
-	var timeVal time.Time
-	timeStr := qp.Get("time", "")
-	if len(timeStr) > 0 {
-		if t, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
-			timeVal = time.Unix(t, 0)
-		} else if t, err := time.Parse(time.RFC3339, timeStr); err == nil {
-			timeVal = t
-		}
-
-		// If time is given, but not parse-able, return an error
-		if timeVal.IsZero() {
-			http.Error(w, fmt.Sprintf("time must be a unix timestamp or RFC3339 value; illegal value given: %s", timeStr), http.StatusBadRequest)
-		}
-	}
-
-	ctx := prom.NewNamedContext(a.ThanosClient, prom.FrontendContextName)
-	body, err := ctx.RawQuery(query, timeVal)
-	if err != nil {
-		w.Write(WrapData(nil, fmt.Errorf("Error running query %s. Error: %s", query, err)))
-		return
-	}
-
-	w.Write(body)
-}
-
-func (a *Accesses) ThanosQueryRange(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	if !thanos.IsEnabled() {
-		w.Write(WrapData(nil, fmt.Errorf("ThanosDisabled")))
-		return
-	}
-
-	qp := httputil.NewQueryParams(r.URL.Query())
-	query := qp.Get("query", "")
-	if query == "" {
-		fmt.Fprintf(w, "Error parsing query from request parameters.")
-		return
-	}
-
-	start, end, duration, err := toStartEndStep(qp)
-	if err != nil {
-		fmt.Fprintf(w, err.Error())
-		return
-	}
-
-	ctx := prom.NewNamedContext(a.ThanosClient, prom.FrontendContextName)
-	body, err := ctx.RawQueryRange(query, start, end, duration)
-	if err != nil {
-		fmt.Fprintf(w, "Error running query %s. Error: %s", query, err)
-		return
-	}
-
-	w.Write(body)
-}
-
 // helper for query range proxy requests
 func toStartEndStep(qp httputil.QueryParams) (start, end time.Time, step time.Duration, err error) {
 	var e error
@@ -876,105 +753,6 @@ func toStartEndStep(qp httputil.QueryParams) (start, end time.Time, step time.Du
 	return
 }
 
-func (a *Accesses) GetPrometheusQueueState(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	promQueueState, err := prom.GetPrometheusQueueState(a.PrometheusClient)
-	if err != nil {
-		w.Write(WrapData(nil, err))
-		return
-	}
-
-	result := map[string]*prom.PrometheusQueueState{
-		"prometheus": promQueueState,
-	}
-
-	if thanos.IsEnabled() {
-		thanosQueueState, err := prom.GetPrometheusQueueState(a.ThanosClient)
-		if err != nil {
-			log.Warnf("Error getting Thanos queue state: %s", err)
-		} else {
-			result["thanos"] = thanosQueueState
-		}
-	}
-
-	w.Write(WrapData(result, nil))
-}
-
-// GetPrometheusMetrics retrieves availability of Prometheus and Thanos metrics
-func (a *Accesses) GetPrometheusMetrics(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	promMetrics := prom.GetPrometheusMetrics(a.PrometheusClient, "")
-
-	result := map[string][]*prom.PrometheusDiagnostic{
-		"prometheus": promMetrics,
-	}
-
-	if thanos.IsEnabled() {
-		thanosMetrics := prom.GetPrometheusMetrics(a.ThanosClient, thanos.QueryOffset())
-		result["thanos"] = thanosMetrics
-	}
-
-	w.Write(WrapData(result, nil))
-}
-
-func (a *Accesses) PrometheusRecordingRules(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	u := a.PrometheusClient.URL(epRules, nil)
-
-	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-	if err != nil {
-		fmt.Fprintf(w, "Error creating Prometheus rule request: "+err.Error())
-	}
-
-	_, body, err := a.PrometheusClient.Do(r.Context(), req)
-	if err != nil {
-		fmt.Fprintf(w, "Error making Prometheus rule request: "+err.Error())
-	} else {
-		w.Write(body)
-	}
-}
-
-func (a *Accesses) PrometheusConfig(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	pConfig := map[string]string{
-		"address": env.GetPrometheusServerEndpoint(),
-	}
-
-	body, err := json.Marshal(pConfig)
-	if err != nil {
-		fmt.Fprintf(w, "Error marshalling prometheus config")
-	} else {
-		w.Write(body)
-	}
-}
-
-func (a *Accesses) PrometheusTargets(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	u := a.PrometheusClient.URL(epTargets, nil)
-
-	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-	if err != nil {
-		fmt.Fprintf(w, "Error creating Prometheus rule request: "+err.Error())
-	}
-
-	_, body, err := a.PrometheusClient.Do(r.Context(), req)
-	if err != nil {
-		fmt.Fprintf(w, "Error making Prometheus rule request: "+err.Error())
-	} else {
-		w.Write(body)
-	}
-}
-
 func (a *Accesses) GetOrphanedPods(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
@@ -1111,21 +889,10 @@ func (a *Accesses) GetHelmValues(w http.ResponseWriter, r *http.Request, ps http
 	w.Write(result)
 }
 
-func (a *Accesses) Status(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	promServer := env.GetPrometheusServerEndpoint()
+// FIXME: Prometheus Status EP
+/*
 
-	api := prometheusAPI.NewAPI(a.PrometheusClient)
-	result, err := api.Buildinfo(r.Context())
-	if err != nil {
-		fmt.Fprintf(w, "Using Prometheus at "+promServer+". Error: "+err.Error())
-	} else {
-
-		fmt.Fprintf(w, "Using Prometheus at "+promServer+". Version: "+result.Version)
-	}
-}
+ */
 
 // captures the panic event in sentry
 func capturePanicEvent(err string, stack string) {
@@ -1171,75 +938,106 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 		}
 	}
 
-	address := env.GetPrometheusServerEndpoint()
-	if address == "" {
-		log.Fatalf("No address for prometheus set in $%s. Aborting.", env.PrometheusServerEndpointEnvVar)
-	}
-
-	queryConcurrency := env.GetMaxQueryConcurrency()
-	log.Infof("Prometheus/Thanos Client Max Concurrency set to %d", queryConcurrency)
+	const maxRetries = 10
+	const retryInterval = 10 * time.Second
 
-	timeout := 120 * time.Second
-	keepAlive := 120 * time.Second
-	tlsHandshakeTimeout := 10 * time.Second
-	scrapeInterval := env.GetKubecostScrapeInterval()
+	var fatalErr error
 
-	var rateLimitRetryOpts *prom.RateLimitRetryOpts = nil
-	if env.IsPrometheusRetryOnRateLimitResponse() {
-		rateLimitRetryOpts = &prom.RateLimitRetryOpts{
-			MaxRetries:       env.GetPrometheusRetryOnRateLimitMaxRetries(),
-			DefaultRetryWait: env.GetPrometheusRetryOnRateLimitDefaultWait(),
-		}
-	}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	dataSource, err := retry.Retry(
+		ctx,
+		func() (source.OpenCostDataSource, error) {
+			ds, e := prom.NewDefaultPrometheusDataSource()
+			if e != nil {
+				// Retryable errors (e.g. Prometheus not reachable yet) are
+				// surfaced so retry.Retry can attempt the connection again.
+				if source.IsRetryable(e) {
+					return nil, e
+				}
+				// Anything else is fatal: record it and cancel the retry
+				// context so no further attempts are made.
+				fatalErr = e
+				cancel()
+			}
 
-	promCli, err := prom.NewPrometheusClient(address, &prom.PrometheusClientConfig{
-		Timeout:               timeout,
-		KeepAlive:             keepAlive,
-		TLSHandshakeTimeout:   tlsHandshakeTimeout,
-		TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
-		RateLimitRetryOpts:    rateLimitRetryOpts,
-		Auth: &prom.ClientAuth{
-			Username:    env.GetDBBasicAuthUsername(),
-			Password:    env.GetDBBasicAuthUserPassword(),
-			BearerToken: env.GetDBBearerToken(),
+			return ds, e
 		},
-		QueryConcurrency:  queryConcurrency,
-		QueryLogFile:      "",
-		HeaderXScopeOrgId: env.GetPrometheusHeaderXScopeOrgId(),
-	})
-	if err != nil {
-		log.Fatalf("Failed to create prometheus client, Error: %v", err)
+		maxRetries,
+		retryInterval,
+	)
+
+	if fatalErr != nil {
+		log.Fatalf("Failed to create Prometheus data source: %s", fatalErr)
+		panic(fatalErr)
+	}
+	// Retries exhausted: do not continue with a nil data source.
+	if err != nil {
+		log.Fatalf("Failed to create Prometheus data source after %d attempts: %s", maxRetries, err)
+	}
 
-	m, err := prom.Validate(promCli)
-	if err != nil || !m.Running {
+	/*
+		address := env.GetPrometheusServerEndpoint()
+		if address == "" {
+			log.Fatalf("No address for prometheus set in $%s. Aborting.", env.PrometheusServerEndpointEnvVar)
+		}
+
+		queryConcurrency := env.GetMaxQueryConcurrency()
+		log.Infof("Prometheus/Thanos Client Max Concurrency set to %d", queryConcurrency)
+
+		timeout := 120 * time.Second
+		keepAlive := 120 * time.Second
+		tlsHandshakeTimeout := 10 * time.Second
+		scrapeInterval := env.GetKubecostScrapeInterval()
+
+		var rateLimitRetryOpts *prom.RateLimitRetryOpts = nil
+		if env.IsPrometheusRetryOnRateLimitResponse() {
+			rateLimitRetryOpts = &prom.RateLimitRetryOpts{
+				MaxRetries:       env.GetPrometheusRetryOnRateLimitMaxRetries(),
+				DefaultRetryWait: env.GetPrometheusRetryOnRateLimitDefaultWait(),
+			}
+		}
+
+		promCli, err := prom.NewPrometheusClient(address, &prom.PrometheusClientConfig{
+			Timeout:               timeout,
+			KeepAlive:             keepAlive,
+			TLSHandshakeTimeout:   tlsHandshakeTimeout,
+			TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
+			RateLimitRetryOpts:    rateLimitRetryOpts,
+			Auth: &prom.ClientAuth{
+				Username:    env.GetDBBasicAuthUsername(),
+				Password:    env.GetDBBasicAuthUserPassword(),
+				BearerToken: env.GetDBBearerToken(),
+			},
+			QueryConcurrency:  queryConcurrency,
+			QueryLogFile:      "",
+			HeaderXScopeOrgId: env.GetPrometheusHeaderXScopeOrgId(),
+		})
 		if err != nil {
-			log.Errorf("Failed to query prometheus at %s. Error: %s . Troubleshooting help available at: %s", address, err.Error(), prom.PrometheusTroubleshootingURL)
-		} else if !m.Running {
-			log.Errorf("Prometheus at %s is not running. Troubleshooting help available at: %s", address, prom.PrometheusTroubleshootingURL)
+			log.Fatalf("Failed to create prometheus client, Error: %v", err)
 		}
-	} else {
-		log.Infof("Success: retrieved the 'up' query against prometheus at: " + address)
-	}
 
-	api := prometheusAPI.NewAPI(promCli)
-	_, err = api.Buildinfo(context.Background())
-	if err != nil {
-		log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/mimir/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
-	} else {
-		log.Infof("Retrieved a prometheus config file from: %s", address)
-	}
+		m, err := prom.Validate(promCli)
+		if err != nil || !m.Running {
+			if err != nil {
+				log.Errorf("Failed to query prometheus at %s. Error: %s . Troubleshooting help available at: %s", address, err.Error(), prom.PrometheusTroubleshootingURL)
+			} else if !m.Running {
+				log.Errorf("Prometheus at %s is not running. Troubleshooting help available at: %s", address, prom.PrometheusTroubleshootingURL)
+			}
+		} else {
+			log.Infof("Success: retrieved the 'up' query against prometheus at: " + address)
+		}
 
-	if scrapeInterval == 0 {
-		scrapeInterval = time.Minute
-		// Lookup scrape interval for kubecost job, update if found
-		si, err := prom.ScrapeIntervalFor(promCli, env.GetKubecostJobName())
-		if err == nil {
-			scrapeInterval = si
+		api := prometheusAPI.NewAPI(promCli)
+		_, err = api.Buildinfo(context.Background())
+		if err != nil {
+			log.Infof("No valid prometheus config file at %s. Error: %s . Troubleshooting help available at: %s. Ignore if using cortex/mimir/thanos here.", address, err.Error(), prom.PrometheusTroubleshootingURL)
+		} else {
+			log.Infof("Retrieved a prometheus config file from: %s", address)
 		}
-	}
 
-	log.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
+		if scrapeInterval == 0 {
+			scrapeInterval = time.Minute
+			// Lookup scrape interval for kubecost job, update if found
+			si, err := prom.ScrapeIntervalFor(promCli, env.GetKubecostJobName())
+			if err == nil {
+				scrapeInterval = si
+			}
+		}
+
+		log.Infof("Using scrape interval of %f", scrapeInterval.Seconds())
+	*/
 
 	// Kubernetes API setup
 	kubeClientset, err := kubeconfig.LoadKubeClient("")
@@ -1286,41 +1084,44 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 		}
 	}
 
-	// Thanos Client
-	var thanosClient prometheus.Client
-	if thanos.IsEnabled() {
-		thanosAddress := thanos.QueryURL()
-
-		if thanosAddress != "" {
-			thanosCli, _ := thanos.NewThanosClient(thanosAddress, &prom.PrometheusClientConfig{
-				Timeout:               timeout,
-				KeepAlive:             keepAlive,
-				TLSHandshakeTimeout:   tlsHandshakeTimeout,
-				TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
-				RateLimitRetryOpts:    rateLimitRetryOpts,
-				Auth: &prom.ClientAuth{
-					Username:    env.GetMultiClusterBasicAuthUsername(),
-					Password:    env.GetMultiClusterBasicAuthPassword(),
-					BearerToken: env.GetMultiClusterBearerToken(),
-				},
-				QueryConcurrency: queryConcurrency,
-				QueryLogFile:     env.GetQueryLoggingFile(),
-			})
-
-			_, err = prom.Validate(thanosCli)
-			if err != nil {
-				log.Warnf("Failed to query Thanos at %s. Error: %s.", thanosAddress, err.Error())
-				thanosClient = thanosCli
-			} else {
-				log.Infof("Success: retrieved the 'up' query against Thanos at: " + thanosAddress)
+	/*
+
+		// Thanos Client
+		var thanosClient prometheus.Client
+		if thanos.IsEnabled() {
+			thanosAddress := thanos.QueryURL()
+
+			if thanosAddress != "" {
+				thanosCli, _ := thanos.NewThanosClient(thanosAddress, &prom.PrometheusClientConfig{
+					Timeout:               timeout,
+					KeepAlive:             keepAlive,
+					TLSHandshakeTimeout:   tlsHandshakeTimeout,
+					TLSInsecureSkipVerify: env.GetInsecureSkipVerify(),
+					RateLimitRetryOpts:    rateLimitRetryOpts,
+					Auth: &prom.ClientAuth{
+						Username:    env.GetMultiClusterBasicAuthUsername(),
+						Password:    env.GetMultiClusterBasicAuthPassword(),
+						BearerToken: env.GetMultiClusterBearerToken(),
+					},
+					QueryConcurrency: queryConcurrency,
+					QueryLogFile:     env.GetQueryLoggingFile(),
+				})
+
+				_, err = prom.Validate(thanosCli)
+				if err != nil {
+					log.Warnf("Failed to query Thanos at %s. Error: %s.", thanosAddress, err.Error())
+					thanosClient = thanosCli
+				} else {
+					log.Infof("Success: retrieved the 'up' query against Thanos at: " + thanosAddress)
+
+					thanosClient = thanosCli
+				}
 
-				thanosClient = thanosCli
+			} else {
+				log.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
 			}
-
-		} else {
-			log.Infof("Error resolving environment variable: $%s", env.ThanosQueryUrlEnvVar)
 		}
-	}
+	*/
 
 	// ClusterInfo Provider to provide the cluster map with local and remote cluster data
 	var clusterInfoProvider clusters.ClusterInfoProvider
@@ -1365,12 +1166,13 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 		pc = promCli
 	}
 	costModel := NewCostModel(pc, cloudProvider, k8sCache, clusterMap, scrapeInterval)
-	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)
+	metricsEmitter := NewCostModelMetricsEmitter(dataSource, k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{
-		httpServices:        services.NewCostModelServices(),
-		PrometheusClient:    promCli,
-		ThanosClient:        thanosClient,
+		httpServices: services.NewCostModelServices(),
+		//PrometheusClient:    promCli,
+		//ThanosClient:        thanosClient,
+		DataSource:          dataSource,
 		KubeClientSet:       kubeClientset,
 		ClusterCache:        k8sCache,
 		ClusterMap:          clusterMap,
@@ -1413,6 +1215,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	}
 
 	a.httpServices.RegisterAll(router)
+	a.DataSource.RegisterEndPoints(router)
 
 	router.GET("/costDataModel", a.CostDataModel)
 	router.GET("/costDataModelRange", a.CostDataModelRange)
@@ -1424,7 +1227,6 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
 	router.GET("/clusterCosts", a.ClusterCosts)
 	router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
-	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
 	router.GET("/managementPlatform", a.ManagementPlatform)
 	router.GET("/clusterInfo", a.ClusterInfo)
 	router.GET("/clusterInfoMap", a.GetClusterInfoMap)
@@ -1432,27 +1234,11 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
 	router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
 	router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)
-
-	// endpoints migrated from server
-	router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
-	router.GET("/prometheusConfig", a.PrometheusConfig)
-	router.GET("/prometheusTargets", a.PrometheusTargets)
 	router.GET("/orphanedPods", a.GetOrphanedPods)
 	router.GET("/installNamespace", a.GetInstallNamespace)
 	router.GET("/installInfo", a.GetInstallInfo)
 	router.POST("/serviceKey", a.AddServiceKey)
 	router.GET("/helmValues", a.GetHelmValues)
-	router.GET("/status", a.Status)
-
-	// prom query proxies
-	router.GET("/prometheusQuery", a.PrometheusQuery)
-	router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
-	router.GET("/thanosQuery", a.ThanosQuery)
-	router.GET("/thanosQueryRange", a.ThanosQueryRange)
-
-	// diagnostics
-	router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
-	router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)
 
 	return a
 }

+ 0 - 375
pkg/costmodel/sql.go

@@ -1,375 +0,0 @@
-package costmodel
-
-import (
-	"database/sql"
-	"fmt"
-	"time"
-
-	"github.com/opencost/opencost/core/pkg/log"
-	"github.com/opencost/opencost/core/pkg/util"
-	"github.com/opencost/opencost/core/pkg/util/json"
-	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
-	"github.com/opencost/opencost/pkg/env"
-
-	_ "github.com/lib/pq"
-)
-
-func getPVCosts(db *sql.DB) (map[string]*costAnalyzerCloud.PV, error) {
-	pvs := make(map[string]*costAnalyzerCloud.PV)
-	query := `SELECT name, avg(value),labels->>'volumename' AS volumename, labels->>'cluster_id' AS clusterid
-	FROM metrics
-	WHERE (name='pv_hourly_cost')  AND value != 'NaN' AND value != 0
-	GROUP BY volumename,name,clusterid;`
-	rows, err := db.Query(query)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-	for rows.Next() {
-		var (
-			name       string
-			avg        float64
-			volumename string
-			clusterid  string
-		)
-		if err := rows.Scan(&name, &avg, &volumename, &clusterid); err != nil {
-			return nil, err
-		}
-		pvs[volumename] = &costAnalyzerCloud.PV{
-			Cost: fmt.Sprintf("%f", avg),
-		}
-	}
-	return pvs, nil
-}
-
-func getNodeCosts(db *sql.DB) (map[string]*costAnalyzerCloud.Node, error) {
-
-	nodes := make(map[string]*costAnalyzerCloud.Node)
-
-	query := `SELECT name, avg(value),labels->>'instance' AS instance, labels->>'cluster_id' AS clusterid
-	FROM metrics
-	WHERE (name='node_cpu_hourly_cost' OR name='node_ram_hourly_cost' OR name='node_gpu_hourly_cost')  AND value != 'NaN' AND value != 0
-	GROUP BY instance,name,clusterid`
-	rows, err := db.Query(query)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-	for rows.Next() {
-		var (
-			name      string
-			avg       float64
-			instance  string
-			clusterid string
-		)
-		if err := rows.Scan(&name, &avg, &instance, &clusterid); err != nil {
-			return nil, err
-		}
-		if data, ok := nodes[instance]; ok {
-			if name == "node_cpu_hourly_cost" {
-				data.VCPUCost = fmt.Sprintf("%f", avg)
-			} else if name == "node_ram_hourly_cost" {
-				data.RAMCost = fmt.Sprintf("%f", avg)
-			} else if name == "node_gpu_hourly_cost" {
-				data.GPUCost = fmt.Sprintf("%f", avg)
-			}
-		} else {
-			nodes[instance] = &costAnalyzerCloud.Node{}
-			data := nodes[instance]
-			if name == "node_cpu_hourly_cost" {
-				data.VCPUCost = fmt.Sprintf("%f", avg)
-			} else if name == "node_ram_hourly_cost" {
-				data.RAMCost = fmt.Sprintf("%f", avg)
-			} else if name == "node_gpu_hourly_cost" {
-				data.GPUCost = fmt.Sprintf("%f", avg)
-			}
-		}
-
-	}
-
-	return nodes, nil
-}
-
-func CostDataRangeFromSQL(field string, value string, window string, start string, end string) (map[string]*CostData, error) {
-	pw := env.GetRemotePW()
-	address := env.GetSQLAddress()
-	connStr := fmt.Sprintf("postgres://postgres:%s@%s:5432?sslmode=disable", pw, address)
-	db, err := sql.Open("postgres", connStr)
-	if err != nil {
-		return nil, err
-	}
-	defer db.Close()
-	nodes, err := getNodeCosts(db)
-	if err != nil {
-		return nil, err
-	}
-	model := make(map[string]*CostData)
-	query := `SELECT time_bucket($1, time) AS bucket, name, avg(value),labels->>'container' AS container,labels->>'pod' AS pod,labels->>'namespace' AS namespace, labels->>'instance' AS instance, labels->>'cluster_id' AS clusterid
-	FROM metrics
-	WHERE (name='container_cpu_allocation') AND
-	  time > $2 AND time < $3 AND value != 'NaN'
-	GROUP BY container,pod,bucket,namespace,instance,clusterid,name
-	ORDER BY container,bucket;
-	`
-	rows, err := db.Query(query, window, start, end)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-
-	for rows.Next() {
-		var (
-			bucket    string
-			name      string
-			sum       float64
-			container string
-			pod       string
-			namespace string
-			instance  string
-			clusterid string
-		)
-		if err := rows.Scan(&bucket, &name, &sum, &container, &pod, &namespace, &instance, &clusterid); err != nil {
-			return nil, err
-		}
-		layout := "2006-01-02T15:04:05Z"
-		t, err := time.Parse(layout, bucket)
-		if err != nil {
-			return nil, err
-		}
-
-		k := NewContainerMetricFromValues(namespace, pod, container, instance, clusterid)
-		key := k.Key()
-		allocationVector := &util.Vector{
-			Timestamp: float64(t.Unix()),
-			Value:     sum,
-		}
-		if data, ok := model[key]; ok {
-			if name == "container_cpu_allocation" {
-				data.CPUAllocation = append(data.CPUAllocation, allocationVector)
-			} else if name == "container_memory_allocation_bytes" {
-				data.RAMAllocation = append(data.RAMAllocation, allocationVector)
-			} else if name == "container_gpu_allocation" {
-				data.GPUReq = append(data.GPUReq, allocationVector)
-			}
-		} else {
-			node, ok := nodes[instance]
-			if !ok {
-				return nil, fmt.Errorf("No node found")
-			}
-			model[key] = &CostData{
-				Name:          container,
-				PodName:       pod,
-				NodeName:      instance,
-				NodeData:      node,
-				CPUAllocation: []*util.Vector{},
-				RAMAllocation: []*util.Vector{},
-				GPUReq:        []*util.Vector{},
-				Namespace:     namespace,
-				ClusterID:     clusterid,
-			}
-			data := model[key]
-			if name == "container_cpu_allocation" {
-				data.CPUAllocation = append(data.CPUAllocation, allocationVector)
-			} else if name == "container_memory_allocation_bytes" {
-				data.RAMAllocation = append(data.RAMAllocation, allocationVector)
-			} else if name == "container_gpu_allocation" {
-				data.GPUReq = append(data.GPUReq, allocationVector)
-			}
-		}
-	}
-	query = `SELECT time_bucket($1, time) AS bucket, name, avg(value),labels->>'container' AS container,labels->>'pod' AS pod,labels->>'namespace' AS namespace, labels->>'instance' AS instance, labels->>'cluster_id' AS clusterid
-	FROM metrics
-	WHERE (name='container_memory_allocation_bytes') AND
-		time > $2 AND time < $3 AND value != 'NaN'
-	GROUP BY container,pod,bucket,namespace,instance,clusterid,name
-	ORDER BY container,bucket;
-	`
-	rows, err = db.Query(query, window, start, end)
-	if err != nil {
-		return nil, err
-	}
-	for rows.Next() {
-		var (
-			bucket    string
-			name      string
-			sum       float64
-			container string
-			pod       string
-			namespace string
-			instance  string
-			clusterid string
-		)
-		if err := rows.Scan(&bucket, &name, &sum, &container, &pod, &namespace, &instance, &clusterid); err != nil {
-			return nil, err
-		}
-		layout := "2006-01-02T15:04:05Z"
-		t, err := time.Parse(layout, bucket)
-		if err != nil {
-			return nil, err
-		}
-
-		k := NewContainerMetricFromValues(namespace, pod, container, instance, clusterid)
-		key := k.Key()
-		allocationVector := &util.Vector{
-			Timestamp: float64(t.Unix()),
-			Value:     sum,
-		}
-		if data, ok := model[key]; ok {
-			if name == "container_cpu_allocation" {
-				data.CPUAllocation = append(data.CPUAllocation, allocationVector)
-			} else if name == "container_memory_allocation_bytes" {
-				data.RAMAllocation = append(data.RAMAllocation, allocationVector)
-			} else if name == "container_gpu_allocation" {
-				data.GPUReq = append(data.GPUReq, allocationVector)
-			}
-		} else {
-			node, ok := nodes[instance]
-			if !ok {
-				return nil, fmt.Errorf("No node found")
-			}
-			model[key] = &CostData{
-				Name:          container,
-				PodName:       pod,
-				NodeName:      instance,
-				NodeData:      node,
-				CPUAllocation: []*util.Vector{},
-				RAMAllocation: []*util.Vector{},
-				GPUReq:        []*util.Vector{},
-				Namespace:     namespace,
-				ClusterID:     clusterid,
-			}
-			data := model[key]
-			if name == "container_cpu_allocation" {
-				data.CPUAllocation = append(data.CPUAllocation, allocationVector)
-			} else if name == "container_memory_allocation_bytes" {
-				data.RAMAllocation = append(data.RAMAllocation, allocationVector)
-			} else if name == "container_gpu_allocation" {
-				data.GPUReq = append(data.GPUReq, allocationVector)
-			}
-		}
-	}
-	query = `SELECT DISTINCT ON (labels->>'namespace') * FROM METRICS WHERE name='kube_namespace_labels' ORDER BY labels->>'namespace',time DESC;`
-	rows, err = db.Query(query)
-	if err != nil {
-		return nil, err
-	}
-	cols, err := rows.Columns()
-	if err != nil {
-		return nil, err
-	}
-	rawResult := make([][]byte, len(cols))
-	result := make([]string, len(cols))
-	dest := make([]interface{}, len(cols)) // A temporary interface{} slice
-	for i := range rawResult {
-		dest[i] = &rawResult[i] // Put pointers to each string in the interface slice
-	}
-	nsToLabels := make(map[string]map[string]string)
-	for rows.Next() {
-		err = rows.Scan(dest...)
-		if err != nil {
-			return nil, err
-		}
-
-		for i, raw := range rawResult {
-			if raw == nil {
-				result[i] = "\\N"
-			} else {
-				result[i] = string(raw)
-			}
-		}
-
-		var dat map[string]string
-		err := json.Unmarshal([]byte(result[4]), &dat)
-		if err != nil {
-			return nil, err
-		}
-
-		ns, ok := dat["namespace"]
-		if !ok {
-			return nil, fmt.Errorf("No namespace found")
-		}
-		nsToLabels[ns] = dat
-	}
-
-	for _, cd := range model {
-		ns := cd.Namespace
-		if labels, ok := nsToLabels[ns]; ok {
-			cd.NamespaceLabels = labels
-			cd.Labels = labels // TODO: override with podlabels
-		}
-	}
-
-	volumes, err := getPVCosts(db)
-	if err != nil {
-		log.Infof("Error fetching pv data from sql: %s. Skipping PVData", err.Error())
-	} else {
-		query = `SELECT time_bucket($1, time) AS bucket, name, avg(value), labels->>'persistentvolumeclaim' AS claim, labels->>'pod' AS pod,labels->>'namespace' AS namespace, labels->>'persistentvolume' AS volumename, labels->>'cluster_id' AS clusterid
-		FROM metrics
-		WHERE (name='pod_pvc_allocation') AND
-			time > $2 AND time < $3 AND value != 'NaN'
-		GROUP BY claim,pod,bucket,namespace,volumename,clusterid,name
-		ORDER BY pod,bucket;`
-
-		rows, err = db.Query(query, window, start, end)
-		if err != nil {
-			return nil, err
-		}
-		pvcData := make(map[string]*PersistentVolumeClaimData)
-		for rows.Next() {
-			var (
-				bucket     string
-				name       string
-				sum        float64
-				claim      string
-				pod        string
-				namespace  string
-				volumename sql.NullString
-				clusterid  string
-			)
-			if err := rows.Scan(&bucket, &name, &sum, &claim, &pod, &namespace, &volumename, &clusterid); err != nil {
-				return nil, err
-			}
-			layout := "2006-01-02T15:04:05Z"
-			t, err := time.Parse(layout, bucket)
-			if err != nil {
-				return nil, err
-			}
-			allocationVector := &util.Vector{
-				Timestamp: float64(t.Unix()),
-				Value:     sum,
-			}
-			if pvcd, ok := pvcData[claim]; ok {
-				pvcd.Values = append(pvcd.Values, allocationVector)
-			} else {
-				if volumename.Valid {
-					vname := volumename.String
-					d := &PersistentVolumeClaimData{
-						Namespace:  namespace,
-						ClusterID:  clusterid,
-						VolumeName: vname,
-						Claim:      claim,
-					}
-					if volume, ok := volumes[vname]; ok {
-						volume.Size = fmt.Sprintf("%f", sum) // Just assume the claim is the whole volume for now
-						d.Volume = volume
-					}
-					d.Values = append(d.Values, allocationVector)
-					pvcData[claim] = d
-					for _, cd := range model { // TODO: make this not doubly nested
-						if cd.PodName == pod && cd.Namespace == namespace {
-							if len(cd.PVCData) > 0 {
-								cd.PVCData = append(cd.PVCData, d)
-							} else {
-								cd.PVCData = []*PersistentVolumeClaimData{d}
-							}
-							break // break so we only assign to the first
-						}
-					}
-				}
-
-			}
-		}
-	}
-
-	return model, nil
-}

+ 5 - 190
pkg/env/costmodelenv.go

@@ -1,7 +1,6 @@
 package env
 
 import (
-	"fmt"
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/env"
@@ -25,13 +24,9 @@ const (
 	AzureDownloadBillingDataToDiskEnvVar = "AZURE_DOWNLOAD_BILLING_DATA_TO_DISK"
 
 	KubecostNamespaceEnvVar        = "KUBECOST_NAMESPACE"
-	KubecostScrapeIntervalEnvVar   = "KUBECOST_SCRAPE_INTERVAL"
 	PodNameEnvVar                  = "POD_NAME"
 	ClusterIDEnvVar                = "CLUSTER_ID"
 	ClusterProfileEnvVar           = "CLUSTER_PROFILE"
-	PrometheusServerEndpointEnvVar = "PROMETHEUS_SERVER_ENDPOINT"
-	MaxQueryConcurrencyEnvVar      = "MAX_QUERY_CONCURRENCY"
-	QueryLoggingFileEnvVar         = "QUERY_LOGGING_FILE"
 	RemoteEnabledEnvVar            = "REMOTE_WRITE_ENABLED"
 	RemotePWEnvVar                 = "REMOTE_WRITE_PASSWORD"
 	SQLAddressEnvVar               = "SQL_ADDRESS"
@@ -51,11 +46,6 @@ const (
 	EmitKsmV1MetricsEnvVar = "EMIT_KSM_V1_METRICS"
 	EmitKsmV1MetricsOnly   = "EMIT_KSM_V1_METRICS_ONLY"
 
-	ThanosEnabledEnvVar      = "THANOS_ENABLED"
-	ThanosQueryUrlEnvVar     = "THANOS_QUERY_URL"
-	ThanosOffsetEnvVar       = "THANOS_QUERY_OFFSET"
-	ThanosMaxSourceResEnvVar = "THANOS_MAX_SOURCE_RESOLUTION"
-
 	PProfEnabledEnvVar = "PPROF_ENABLED"
 
 	LogCollectionEnabledEnvVar    = "LOG_COLLECTION_ENABLED"
@@ -63,29 +53,16 @@ const (
 	ErrorReportingEnabledEnvVar   = "ERROR_REPORTING_ENABLED"
 	ValuesReportingEnabledEnvVar  = "VALUES_REPORTING_ENABLED"
 
-	DBBasicAuthUsername = "DB_BASIC_AUTH_USERNAME"
-	DBBasicAuthPassword = "DB_BASIC_AUTH_PW"
-	DBBearerToken       = "DB_BEARER_TOKEN"
-
-	MultiClusterBasicAuthUsername = "MC_BASIC_AUTH_USERNAME"
-	MultiClusterBasicAuthPassword = "MC_BASIC_AUTH_PW"
-	MultiClusterBearerToken       = "MC_BEARER_TOKEN"
-
-	InsecureSkipVerify = "INSECURE_SKIP_VERIFY"
 	KubeRbacProxyEnabled = "KUBE_RBAC_PROXY_ENABLED"
 
 	KubeConfigPathEnvVar = "KUBECONFIG_PATH"
 
-	UTCOffsetEnvVar                  = "UTC_OFFSET"
-	CurrentClusterIdFilterEnabledVar = "CURRENT_CLUSTER_ID_FILTER_ENABLED"
-
-	CacheWarmingEnabledEnvVar            = "CACHE_WARMING_ENABLED"
-	ETLEnabledEnvVar                     = "ETL_ENABLED"
-	ETLMaxPrometheusQueryDurationMinutes = "ETL_MAX_PROMETHEUS_QUERY_DURATION_MINUTES"
-	ETLResolutionSeconds                 = "ETL_RESOLUTION_SECONDS"
-	LegacyExternalAPIDisabledVar         = "LEGACY_EXTERNAL_API_DISABLED"
+	UTCOffsetEnvVar = "UTC_OFFSET"
 
-	PromClusterIDLabelEnvVar = "PROM_CLUSTER_ID_LABEL"
+	CacheWarmingEnabledEnvVar    = "CACHE_WARMING_ENABLED"
+	ETLEnabledEnvVar             = "ETL_ENABLED"
+	ETLResolutionSeconds         = "ETL_RESOLUTION_SECONDS"
+	LegacyExternalAPIDisabledVar = "LEGACY_EXTERNAL_API_DISABLED"
 
 	PricingConfigmapName  = "PRICING_CONFIGMAP_NAME"
 	MetricsConfigmapName  = "METRICS_CONFIGMAP_NAME"
@@ -95,13 +72,6 @@ const (
 	ClusterInfoFileEnabledEnvVar  = "CLUSTER_INFO_FILE_ENABLED"
 	ClusterCacheFileEnabledEnvVar = "CLUSTER_CACHE_FILE_ENABLED"
 
-	PrometheusQueryOffsetEnvVar                 = "PROMETHEUS_QUERY_OFFSET"
-	PrometheusRetryOnRateLimitResponseEnvVar    = "PROMETHEUS_RETRY_ON_RATE_LIMIT"
-	PrometheusRetryOnRateLimitMaxRetriesEnvVar  = "PROMETHEUS_RETRY_ON_RATE_LIMIT_MAX_RETRIES"
-	PrometheusRetryOnRateLimitDefaultWaitEnvVar = "PROMETHEUS_RETRY_ON_RATE_LIMIT_DEFAULT_WAIT"
-
-	PrometheusHeaderXScopeOrgIdEnvVar = "PROMETHEUS_HEADER_X_SCOPE_ORGID"
-
 	IngestPodUIDEnvVar = "INGEST_POD_UID"
 
 	ETLReadOnlyMode = "ETL_READ_ONLY"
@@ -158,10 +128,6 @@ func GetExportCSVLabelsAll() bool {
 	return env.GetBool(ExportCSVLabelsAll, false)
 }
 
-func GetKubecostScrapeInterval() time.Duration {
-	return env.GetDuration(KubecostScrapeIntervalEnvVar, 0)
-}
-
 func GetExportCSVLabelsList() []string {
 	return env.GetList(ExportCSVLabelsList, ",")
 }
@@ -198,53 +164,6 @@ func IsClusterCacheFileEnabled() bool {
 	return env.GetBool(ClusterCacheFileEnabledEnvVar, false)
 }
 
-// IsPrometheusRetryOnRateLimitResponse will attempt to retry if a 429 response is received OR a 400 with a body containing
-// ThrottleException (common in AWS services like AMP)
-func IsPrometheusRetryOnRateLimitResponse() bool {
-	return env.GetBool(PrometheusRetryOnRateLimitResponseEnvVar, true)
-}
-
-// GetPrometheusRetryOnRateLimitMaxRetries returns the maximum number of retries that should be attempted prior to failing.
-// Only used if IsPrometheusRetryOnRateLimitResponse() is true.
-func GetPrometheusRetryOnRateLimitMaxRetries() int {
-	return env.GetInt(PrometheusRetryOnRateLimitMaxRetriesEnvVar, 5)
-}
-
-// GetPrometheusRetryOnRateLimitDefaultWait returns the default wait time for a retriable rate limit response without a
-// Retry-After header.
-func GetPrometheusRetryOnRateLimitDefaultWait() time.Duration {
-	return env.GetDuration(PrometheusRetryOnRateLimitDefaultWaitEnvVar, 100*time.Millisecond)
-}
-
-// GetPrometheusHeaderXScopeOrgId returns the default value for X-Scope-OrgID header used for requests in Mimir/Cortex-Tenant API.
-// To use Mimir(or Cortex-Tenant) instead of Prometheus add variable from cluster settings:
-// "PROMETHEUS_HEADER_X_SCOPE_ORGID": "my-cluster-name"
-// Then set Prometheus URL to prometheus API endpoint:
-// "PROMETHEUS_SERVER_ENDPOINT": "http://mimir-url/prometheus/"
-func GetPrometheusHeaderXScopeOrgId() string {
-	return env.Get(PrometheusHeaderXScopeOrgIdEnvVar, "")
-}
-
-// GetPrometheusQueryOffset returns the time.Duration to offset all prometheus queries by. NOTE: This env var is applied
-// to all non-range queries made via our query context. This should only be applied when there is a significant delay in
-// data arriving in the target prom db. For example, if supplying a thanos or cortex querier for the prometheus server, using
-// a 3h offset will ensure that current time = current time - 3h.
-//
-// This offset is NOT the same as the GetThanosOffset() option, as that is only applied to queries made specifically targeting
-// thanos. This offset is applied globally.
-func GetPrometheusQueryOffset() time.Duration {
-	offset := env.Get(PrometheusQueryOffsetEnvVar, "")
-	if offset == "" {
-		return 0
-	}
-
-	dur, err := timeutil.ParseDuration(offset)
-	if err != nil {
-		return 0
-	}
-	return dur
-}
-
 func GetPricingConfigmapName() string {
 	return env.Get(PricingConfigmapName, "pricing-configs")
 }
@@ -366,25 +285,6 @@ func GetClusterID() string {
 	return env.Get(ClusterIDEnvVar, "")
 }
 
-// GetPromClusterFilter returns environment variable value CurrentClusterIdFilterEnabledVar which
-// represents additional prometheus filter for all metrics for current cluster id
-func GetPromClusterFilter() string {
-	if env.GetBool(CurrentClusterIdFilterEnabledVar, false) {
-		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), GetClusterID())
-	}
-	return ""
-}
-
-// GetPrometheusServerEndpoint returns the environment variable value for PrometheusServerEndpointEnvVar which
-// represents the prometheus server endpoint used to execute prometheus queries.
-func GetPrometheusServerEndpoint() string {
-	return env.Get(PrometheusServerEndpointEnvVar, "")
-}
-
-func GetInsecureSkipVerify() bool {
-	return env.GetBool(InsecureSkipVerify, false)
-}
-
 func IsKubeRbacProxyEnabled() bool {
 	return env.GetBool(KubeRbacProxyEnabled, false)
 }
@@ -461,43 +361,6 @@ func GetCloudProviderAPIKey() string {
 	return env.Get(CloudProviderAPIKeyEnvVar, "")
 }
 
-// IsThanosEnabled returns the environment variable value for ThanosEnabledEnvVar which represents whether
-// or not thanos is enabled.
-func IsThanosEnabled() bool {
-	return env.GetBool(ThanosEnabledEnvVar, false)
-}
-
-// GetThanosQueryUrl returns the environment variable value for ThanosQueryUrlEnvVar which represents the
-// target query endpoint for hitting thanos.
-func GetThanosQueryUrl() string {
-	return env.Get(ThanosQueryUrlEnvVar, "")
-}
-
-// GetThanosOffset returns the environment variable value for ThanosOffsetEnvVar which represents the total
-// amount of time to offset all queries made to thanos.
-func GetThanosOffset() string {
-	return env.Get(ThanosOffsetEnvVar, "3h")
-}
-
-// GetThanosMaxSourceResolution returns the environment variable value for ThanosMaxSourceResEnvVar which represents
-// the max source resolution to use when querying thanos.
-func GetThanosMaxSourceResolution() string {
-	res := env.Get(ThanosMaxSourceResEnvVar, "raw")
-
-	switch res {
-	case "raw":
-		return "0s"
-	case "0s":
-		fallthrough
-	case "5m":
-		fallthrough
-	case "1h":
-		return res
-	default:
-		return "0s"
-	}
-}
-
 // IsLogCollectionEnabled returns the environment variable value for LogCollectionEnabledEnvVar which represents
 // whether or not log collection has been enabled for kubecost deployments.
 func IsLogCollectionEnabled() bool {
@@ -519,43 +382,6 @@ func IsValuesReportingEnabled() bool {
 	return env.GetBool(ValuesReportingEnabledEnvVar, true)
 }
 
-// GetMaxQueryConcurrency returns the environment variable value for MaxQueryConcurrencyEnvVar
-func GetMaxQueryConcurrency() int {
-	return env.GetInt(MaxQueryConcurrencyEnvVar, 5)
-}
-
-// GetQueryLoggingFile returns a file location if query logging is enabled. Otherwise, empty string
-func GetQueryLoggingFile() string {
-	return env.Get(QueryLoggingFileEnvVar, "")
-}
-
-func GetDBBasicAuthUsername() string {
-	return env.Get(DBBasicAuthUsername, "")
-}
-
-func GetDBBasicAuthUserPassword() string {
-	return env.Get(DBBasicAuthPassword, "")
-
-}
-
-func GetDBBearerToken() string {
-	return env.Get(DBBearerToken, "")
-}
-
-// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
-func GetMultiClusterBasicAuthUsername() string {
-	return env.Get(MultiClusterBasicAuthUsername, "")
-}
-
-// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
-func GetMultiClusterBasicAuthPassword() string {
-	return env.Get(MultiClusterBasicAuthPassword, "")
-}
-
-func GetMultiClusterBearerToken() string {
-	return env.Get(MultiClusterBearerToken, "")
-}
-
 // GetKubeConfigPath returns the environment variable value for KubeConfigPathEnvVar
 func GetKubeConfigPath() string {
 	return env.Get(KubeConfigPathEnvVar, "")
@@ -589,12 +415,6 @@ func IsETLEnabled() bool {
 	return env.GetBool(ETLEnabledEnvVar, true)
 }
 
-func GetETLMaxPrometheusQueryDuration() time.Duration {
-	dayMins := 60 * 24
-	mins := time.Duration(env.GetInt64(ETLMaxPrometheusQueryDurationMinutes, int64(dayMins)))
-	return mins * time.Minute
-}
-
 // GetETLResolution determines the resolution of ETL queries. The smaller the
 // duration, the higher the resolution; the higher the resolution, the more
 // accurate the query results, but the more computationally expensive.
@@ -609,11 +429,6 @@ func LegacyExternalCostsAPIDisabled() bool {
 	return env.GetBool(LegacyExternalAPIDisabledVar, false)
 }
 
-// GetPromClusterLabel returns the environment variable value for PromClusterIDLabel
-func GetPromClusterLabel() string {
-	return env.Get(PromClusterIDLabelEnvVar, "cluster_id")
-}
-
 // IsIngestingPodUID returns the env variable from ingestPodUID, which alters the
 // contents of podKeys in Allocation
 func IsIngestingPodUID() bool {
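
GetPromClusterFilter above returns either an empty string or a single label matcher such as cluster_id="my-cluster". Below is a minimal sketch of how such a filter string can be spliced into a PromQL selector; the helper and metric names are hypothetical and not part of this change:

package example

import "fmt"

// withClusterFilter wraps a metric name in a label selector when a cluster
// filter string is configured, and returns the metric unchanged when the
// filter is empty (cluster-id filtering disabled).
func withClusterFilter(metric, clusterFilter string) string {
	if clusterFilter == "" {
		return metric
	}
	return fmt.Sprintf("%s{%s}", metric, clusterFilter)
}

// withClusterFilter("container_cpu_usage_seconds_total", `cluster_id="my-cluster"`)
//   => container_cpu_usage_seconds_total{cluster_id="my-cluster"}
// withClusterFilter("container_cpu_usage_seconds_total", "")
//   => container_cpu_usage_seconds_total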

+ 0 - 109
pkg/thanos/thanos.go

@@ -1,109 +0,0 @@
-package thanos
-
-import (
-	"crypto/tls"
-	"fmt"
-	"net"
-	"net/http"
-	"net/url"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/opencost/opencost/pkg/env"
-	"github.com/opencost/opencost/pkg/prom"
-
-	prometheus "github.com/prometheus/client_golang/api"
-)
-
-// MaxSourceResulution is the query parameter key used to designate the resolution
-// to use when executing a query.
-const MaxSourceResulution = "max_source_resolution"
-
-var (
-	lock           = new(sync.Mutex)
-	enabled        = env.IsThanosEnabled()
-	queryUrl       = env.GetThanosQueryUrl()
-	offset         = env.GetThanosOffset()
-	maxSourceRes   = env.GetThanosMaxSourceResolution()
-	offsetDuration *time.Duration
-	queryOffset    = fmt.Sprintf(" offset %s", offset)
-)
-
-// IsEnabled returns true if Thanos is enabled.
-func IsEnabled() bool {
-	return enabled
-}
-
-// QueryURL returns the configured Thanos query endpoint URL.
-func QueryURL() string {
-	return queryUrl
-}
-
-// Offset returns the duration string for the query offset that should be applied to thanos
-func Offset() string {
-	return offset
-}
-
-// OffsetDuration returns the Offset as a parsed duration
-func OffsetDuration() time.Duration {
-	lock.Lock()
-	defer lock.Unlock()
-
-	if offsetDuration == nil {
-		d, err := time.ParseDuration(offset)
-		if err != nil {
-			d = 0
-		}
-
-		offsetDuration = &d
-	}
-
-	return *offsetDuration
-}
-
-// QueryOffset returns a string in the format: " offset %s" substituting in the Offset() string.
-func QueryOffset() string {
-	return queryOffset
-}
-
-func NewThanosClient(address string, config *prom.PrometheusClientConfig) (prometheus.Client, error) {
-	tc := prometheus.Config{
-		Address: address,
-		RoundTripper: &http.Transport{
-			Proxy: http.ProxyFromEnvironment,
-			DialContext: (&net.Dialer{
-				Timeout:   config.Timeout,
-				KeepAlive: config.KeepAlive,
-			}).DialContext,
-			TLSHandshakeTimeout: config.TLSHandshakeTimeout,
-			TLSClientConfig: &tls.Config{
-				InsecureSkipVerify: config.TLSInsecureSkipVerify,
-			},
-		},
-	}
-
-	client, err := prometheus.NewClient(tc)
-	if err != nil {
-		return nil, err
-	}
-
-	// max source resolution decorator
-	maxSourceDecorator := func(path string, queryParams url.Values) url.Values {
-		if strings.Contains(path, "query") {
-			queryParams.Set(MaxSourceResulution, maxSourceRes)
-		}
-		return queryParams
-	}
-
-	return prom.NewRateLimitedClient(
-		prom.ThanosClientID,
-		client,
-		config.QueryConcurrency,
-		config.Auth,
-		maxSourceDecorator,
-		config.RateLimitRetryOpts,
-		config.QueryLogFile,
-		"",
-	)
-}
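
The removed NewThanosClient wires the max_source_resolution parameter in through opencost's rate-limited client. Below is a minimal sketch of the same decoration technique using only the upstream github.com/prometheus/client_golang/api package; the wrapper type and constructor names are hypothetical and not part of this change:

package example

import (
	"context"
	"net/http"
	"strings"

	prometheus "github.com/prometheus/client_golang/api"
)

// maxSourceResolutionClient wraps a prometheus API client and appends Thanos'
// max_source_resolution parameter to query requests, mirroring the decorator
// removed above without the rate-limited client.
type maxSourceResolutionClient struct {
	prometheus.Client
	resolution string // "0s", "5m", or "1h"
}

func (c maxSourceResolutionClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
	// Decorate only query endpoints (/api/v1/query, /api/v1/query_range).
	// Note: the upstream v1 API may also submit query parameters in a POST
	// body, so a production wrapper would decorate the form body as well.
	if strings.Contains(req.URL.Path, "query") {
		q := req.URL.Query()
		q.Set("max_source_resolution", c.resolution)
		req.URL.RawQuery = q.Encode()
	}
	return c.Client.Do(ctx, req)
}

// newThanosQueryClient builds a plain client against a Thanos Query endpoint
// with a fixed max source resolution.
func newThanosQueryClient(address, resolution string) (prometheus.Client, error) {
	base, err := prometheus.NewClient(prometheus.Config{Address: address})
	if err != nil {
		return nil, err
	}
	return maxSourceResolutionClient{Client: base, resolution: resolution}, nil
}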