Explorar el Código

Sth/kcm 4076 (#3147)

Signed-off-by: Sean Holcomb <seanholcomb@gmail.com>
Sean Holcomb hace 1 año
padre
commit
993db8f67f
Se han modificado 37 ficheros con 2357 adiciones y 411 borrados
  1. 4 0
      core/pkg/source/decoders.go
  2. 54 103
      modules/collector-source/pkg/collector/collector.go
  3. 0 60
      modules/collector-source/pkg/collector/collector_test.go
  4. 33 0
      modules/collector-source/pkg/collector/collectorprovider.go
  5. 0 1
      modules/collector-source/pkg/collector/collectorprovider_test.go
  6. 3 3
      modules/collector-source/pkg/collector/config.go
  7. 191 14
      modules/collector-source/pkg/collector/metricsquerier.go
  8. 838 33
      modules/collector-source/pkg/collector/metricsquerier_test.go
  9. 27 17
      modules/collector-source/pkg/metric/aggregator/activeminutes.go
  10. 119 0
      modules/collector-source/pkg/metric/aggregator/activeminutes_test.go
  11. 1 1
      modules/collector-source/pkg/metric/aggregator/aggregator.go
  12. 27 12
      modules/collector-source/pkg/metric/aggregator/avgovertime.go
  13. 110 0
      modules/collector-source/pkg/metric/aggregator/avgovertime_test.go
  14. 36 15
      modules/collector-source/pkg/metric/aggregator/increase.go
  15. 93 0
      modules/collector-source/pkg/metric/aggregator/increase_test.go
  16. 18 12
      modules/collector-source/pkg/metric/aggregator/info.go
  17. 71 0
      modules/collector-source/pkg/metric/aggregator/info_test.go
  18. 78 0
      modules/collector-source/pkg/metric/aggregator/iratemax.go
  19. 137 0
      modules/collector-source/pkg/metric/aggregator/iratemax_test.go
  20. 22 12
      modules/collector-source/pkg/metric/aggregator/maxovertime.go
  21. 84 0
      modules/collector-source/pkg/metric/aggregator/maxovertime_test.go
  22. 76 0
      modules/collector-source/pkg/metric/aggregator/rate.go
  23. 115 0
      modules/collector-source/pkg/metric/aggregator/rate_test.go
  24. 8 5
      modules/collector-source/pkg/metric/collector.go
  25. 28 7
      modules/collector-source/pkg/metric/repository.go
  26. 1 1
      modules/collector-source/pkg/metric/store.go
  27. 16 14
      modules/collector-source/pkg/metric/updater.go
  28. 20 20
      modules/collector-source/pkg/scrape/clustercache.go
  29. 21 21
      modules/collector-source/pkg/scrape/clustercache_test.go
  30. 22 5
      modules/collector-source/pkg/scrape/dcgm.go
  31. 64 0
      modules/collector-source/pkg/scrape/dcgm_test.go
  32. 2 9
      modules/collector-source/pkg/scrape/network.go
  33. 8 8
      modules/collector-source/pkg/scrape/statsummary.go
  34. 9 9
      modules/collector-source/pkg/scrape/statsummary_test.go
  35. 8 1
      modules/collector-source/pkg/scrape/targetscraper.go
  36. 8 7
      modules/collector-source/pkg/scrape/targetscraper_test.go
  37. 5 21
      modules/prometheus-source/pkg/prom/metricsquerier.go

+ 4 - 0
core/pkg/source/decoders.go

@@ -12,6 +12,7 @@ const (
 	InstanceTypeLabel    = "instance_type"
 	ContainerLabel       = "container"
 	PodLabel             = "pod"
+	PodNameLabel         = "pod_name"
 	ProviderIDLabel      = "provider_id"
 	DeviceLabel          = "device"
 	PVCLabel             = "persistentvolumeclaim"
@@ -1036,6 +1037,7 @@ func DecodeNetTransferBytesResult(result *QueryResult) *NetTransferBytesResult {
 }
 
 type NamespaceAnnotationsResult struct {
+	Cluster     string
 	Namespace   string
 	Annotations map[string]string
 
@@ -1043,10 +1045,12 @@ type NamespaceAnnotationsResult struct {
 }
 
 func DecodeNamespaceAnnotationsResult(result *QueryResult) *NamespaceAnnotationsResult {
+	cluster, _ := result.GetCluster()
 	namespace, _ := result.GetNamespace()
 	annotations := result.GetAnnotations()
 
 	return &NamespaceAnnotationsResult{
+		Cluster:     cluster,
 		Namespace:   namespace,
 		Annotations: annotations,
 		Data:        result.Values,

+ 54 - 103
modules/collector-source/pkg/collector/collector.go

@@ -18,8 +18,7 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewPVUsedMaxMetricCollector())
 	memStore.Register(NewPVCInfoMetricCollector())
 	memStore.Register(NewPVActiveMinutesMetricCollector())
-	memStore.Register(NewLocalStorageCostMetricCollector())
-	memStore.Register(NewLocalStorageUsedCostMetricCollector())
+	memStore.Register(NewLocalStorageUsedActiveMinutesMetricCollector())
 	memStore.Register(NewLocalStorageUsedAverageMetricCollector())
 	memStore.Register(NewLocalStorageUsedMaxMetricCollector())
 	memStore.Register(NewLocalStorageBytesMetricCollector())
@@ -60,7 +59,6 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewPodPVCAllocationMetricCollector())
 	memStore.Register(NewPVCBytesRequestedMetricCollector())
 	memStore.Register(NewPVBytesMetricCollector())
-	memStore.Register(NewPVCostPerGiBHourMetricCollector())
 	memStore.Register(NewPVInfoMetricCollector())
 	memStore.Register(NewNetZoneGiBMetricCollector())
 	memStore.Register(NewNetZonePricePerGiBMetricCollector())
@@ -173,7 +171,9 @@ func NewPVCInfoMetricCollector() *metric.MetricCollector {
 			source.StorageClassLabel,
 		},
 		aggregator.Info,
-		nil, // TODO missing filter
+		func(labels map[string]string) bool {
+			return labels[source.VolumeNameLabel] != ""
+		},
 	)
 }
 
@@ -195,33 +195,6 @@ func NewPVActiveMinutesMetricCollector() *metric.MetricCollector {
 	)
 }
 
-// todo revisit this
-//
-//	sum_over_time(
-//		sum(
-//			container_fs_limit_bytes{
-//				device=~"/dev/(nvme|sda).*",
-//				id="/",
-//				<some_custom_filter>
-//			}
-//		) by (instance, device, cluster_id)[%s:%dm]
-//	) / 1024 / 1024 / 1024 * %f * %f
-func NewLocalStorageCostMetricCollector() *metric.MetricCollector {
-	return metric.NewMetricCollector(
-		metric.LocalStorageCostID,
-		scrape.NodeFSCapacityBytes,
-		[]string{
-			source.InstanceLabel,
-			source.DeviceLabel,
-		},
-		aggregator.AverageOverTime,
-		func(labels map[string]string) bool {
-			// todo this filter needs a regex
-			return true
-		},
-	)
-}
-
 // sum_over_time(
 //
 //	sum(
@@ -233,19 +206,18 @@ func NewLocalStorageCostMetricCollector() *metric.MetricCollector {
 //	) by (instance, device, cluster_id)[%s:%dm]
 //
 // ) / 1024 / 1024 / 1024 * %f * %f`
-func NewLocalStorageUsedCostMetricCollector() *metric.MetricCollector {
+// NewLocalStorageUsedActiveMinutesMetricCollector does not have an associated query endpoint but is used in the results
+// of QueryLocalStorageUsedCost
+func NewLocalStorageUsedActiveMinutesMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
-		metric.LocalStorageUsedCostID,
+		metric.LocalStorageUsedActiveMinutesID,
 		scrape.ContainerFSUsageBytes,
 		[]string{
 			source.InstanceLabel,
 			source.DeviceLabel,
 		},
-		aggregator.AverageOverTime,
-		func(labels map[string]string) bool {
-			// todo this filter needs a regex
-			return true
-		},
+		aggregator.ActiveMinutes,
+		nil, // filter not required here because only container root file system is being scraped
 	)
 }
 
@@ -270,10 +242,7 @@ func NewLocalStorageUsedAverageMetricCollector() *metric.MetricCollector {
 			source.DeviceLabel,
 		},
 		aggregator.AverageOverTime,
-		func(labels map[string]string) bool {
-			// todo this filter needs a regex
-			return true
-		},
+		nil, // filter not required here because only container root file system is being scraped
 	)
 }
 
@@ -299,10 +268,7 @@ func NewLocalStorageUsedMaxMetricCollector() *metric.MetricCollector {
 			source.DeviceLabel,
 		},
 		aggregator.MaxOverTime,
-		func(labels map[string]string) bool {
-			// todo this filter needs a regex
-			return true
-		},
+		nil, // filter not required here because only container root file system is being scraped
 	)
 }
 
@@ -326,10 +292,7 @@ func NewLocalStorageBytesMetricCollector() *metric.MetricCollector {
 			source.DeviceLabel,
 		},
 		aggregator.AverageOverTime,
-		func(labels map[string]string) bool {
-			// todo this filter needs a regex
-			return true
-		},
+		nil, // filter not required here because only node root file system is being scraped
 	)
 }
 
@@ -508,7 +471,7 @@ func NewNodeCPUModeTotalMetricCollector() *metric.MetricCollector {
 			source.KubernetesNodeLabel,
 			source.ModeLabel,
 		},
-		aggregator.Increase,
+		aggregator.Rate,
 		nil,
 	)
 }
@@ -862,14 +825,26 @@ func NewCPUUsageAverageMetricCollector() *metric.MetricCollector {
 			source.PodLabel,
 			source.ContainerLabel,
 		},
-		aggregator.Increase,
+		aggregator.Rate,
 		func(labels map[string]string) bool {
 			return labels[source.ContainerLabel] != "" && labels[source.ContainerLabel] != "POD"
 		},
 	)
 }
 
-// TODO this is a special case
+// max(
+//
+//	max_over_time(
+//		irate(
+//			container_cpu_usage_seconds_total{
+//				container!="POD",
+//				container!="",
+//				<some_custom_filter>
+//			}[1h]
+//		)[%s:%s]
+//	)
+//
+// ) by (container, pod_name, pod, namespace, node, instance, cluster_id)
 func NewCPUUsageMaxMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
 		metric.CPUUsageMaxID,
@@ -881,8 +856,10 @@ func NewCPUUsageMaxMetricCollector() *metric.MetricCollector {
 			source.PodLabel,
 			source.ContainerLabel,
 		},
-		aggregator.MaxOverTime,
-		nil,
+		aggregator.IRateMax,
+		func(labels map[string]string) bool {
+			return labels[source.ContainerLabel] != "" && labels[source.ContainerLabel] != "POD"
+		},
 	)
 }
 
@@ -1000,7 +977,7 @@ func NewGPUsAllocatedMetricCollector() *metric.MetricCollector {
 //				<some_custom_filter>
 //			}[1h]
 //		)
-//	) by (container, pod, namespace, node, resource) // TODO is this missing cluster
+//	) by (container, pod, namespace, node, resource, cluster_id)
 
 func NewIsGPUSharedMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
@@ -1026,7 +1003,7 @@ func NewIsGPUSharedMetricCollector() *metric.MetricCollector {
 //				<some_custom_filter>
 //			}[1h]
 //		)
-//	) by (container, pod, namespace, device, modelName, UUID) // TODO is this missing cluster
+//	) by (container, pod, namespace, device, modelName, UUID, cluster_id)
 
 func NewGPUInfoMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
@@ -1196,27 +1173,6 @@ func NewPVBytesMetricCollector() *metric.MetricCollector {
 	)
 }
 
-//	avg(
-//		avg_over_time(
-//			pv_hourly_cost{
-//				<some_custom_filter>
-//			}[1h]
-//		)
-//	) by (volumename, cluster_id)
-//
-// TODO what is going on here, does not appear to be a query
-func NewPVCostPerGiBHourMetricCollector() *metric.MetricCollector {
-	return metric.NewMetricCollector(
-		metric.PVCostPerGiBHourID,
-		scrape.PVHourlyCost,
-		[]string{
-			source.VolumeNameLabel,
-		},
-		aggregator.AverageOverTime,
-		nil,
-	)
-}
-
 //	avg(
 //		avg_over_time(
 //			kubecost_pv_info{
@@ -1250,15 +1206,14 @@ func NewPVInfoMetricCollector() *metric.MetricCollector {
 //		)
 //	) by (pod_name, namespace, cluster_id) / 1024 / 1024 / 1024
 //
-// TODO double check that changing "pod_name" to the source.PodLabel did not break something
+
 func NewNetZoneGiBMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
 		metric.NetZoneGiBID,
 		scrape.KubecostPodNetworkEgressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
-			source.ServiceLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1275,7 +1230,7 @@ func NewNetZoneGiBMetricCollector() *metric.MetricCollector {
 //		)
 //	) by (cluster_id)
 //
-// TODO check that this works with no labels
+
 func NewNetZonePricePerGiBMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
 		metric.NetZonePricePerGiBID,
@@ -1303,8 +1258,7 @@ func NewNetRegionGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkEgressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
-			source.ServiceLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1346,8 +1300,7 @@ func NewNetInternetGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkEgressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
-			source.ServiceLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1386,10 +1339,10 @@ func NewNetInternetPricePerGiBMetricCollector() *metric.MetricCollector {
 func NewNetInternetServiceGiBMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
 		metric.NetInternetServiceGiBID,
-		scrape.KubecostNetworkInternetEgressCost,
+		scrape.KubecostPodNetworkEgressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
+			source.PodNameLabel,
 			source.ServiceLabel,
 		},
 		aggregator.Increase,
@@ -1415,7 +1368,6 @@ func NewNetReceiveBytesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
-			source.ContainerLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1441,13 +1393,13 @@ func NewNetZoneIngressGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkIngressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
-			return labels[source.InternetLabel] != "false" &&
-				labels[source.SameZoneLabel] != "false" &&
-				labels[source.SameRegionLabel] != "true"
+			return labels[source.InternetLabel] == "false" &&
+				labels[source.SameZoneLabel] == "false" &&
+				labels[source.SameRegionLabel] == "true"
 		},
 	)
 }
@@ -1469,13 +1421,13 @@ func NewNetRegionIngressGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkIngressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
-			return labels[source.InternetLabel] != "false" &&
-				labels[source.SameZoneLabel] != "false" &&
-				labels[source.SameRegionLabel] != "false"
+			return labels[source.InternetLabel] == "false" &&
+				labels[source.SameZoneLabel] == "false" &&
+				labels[source.SameRegionLabel] == "false"
 		},
 	)
 }
@@ -1495,11 +1447,11 @@ func NewNetInternetIngressGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkIngressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
+			source.PodNameLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
-			return labels[source.InternetLabel] != "true"
+			return labels[source.InternetLabel] == "true"
 		},
 	)
 }
@@ -1519,12 +1471,12 @@ func NewNetInternetServiceIngressGiBMetricCollector() *metric.MetricCollector {
 		scrape.KubecostPodNetworkIngressBytesTotal,
 		[]string{
 			source.NamespaceLabel,
-			source.PodLabel,
+			source.PodNameLabel,
 			source.ServiceLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
-			return labels[source.InternetLabel] != "true"
+			return labels[source.InternetLabel] == "true"
 		},
 	)
 }
@@ -1545,7 +1497,6 @@ func NewNetTransferBytesMetricCollector() *metric.MetricCollector {
 		[]string{
 			source.NamespaceLabel,
 			source.PodLabel,
-			source.ContainerLabel,
 		},
 		aggregator.Increase,
 		func(labels map[string]string) bool {
@@ -1576,7 +1527,7 @@ func NewNamespaceLabelsMetricCollector() *metric.MetricCollector {
 //		kube_namespace_annotations{
 //			<some_custom_filter>
 //		}[1h]
-//	) // TODO decoder missing cluster
+//	)
 
 func NewNamespaceAnnotationsMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(

+ 0 - 60
modules/collector-source/pkg/collector/collector_test.go

@@ -1,60 +0,0 @@
-package collector
-
-import (
-	"testing"
-
-	"github.com/opencost/opencost/modules/collector-source/pkg/metric"
-	"github.com/opencost/opencost/modules/collector-source/pkg/scrape"
-)
-
-func TestBasicCollectorFunctionality(t *testing.T) {
-	// avg of 55 (sum of [1,10]) / data points (10) = 5.5
-	const expected = 55.0 / 10.0
-
-	labelsA := map[string]string{
-		"container": "container-a",
-		"uid":       "uid-a",
-		"pod":       "pod-a",
-		"namespace": "namespace-a",
-		"instance":  "instance-a",
-		"node":      "node-a",
-		"cluster":   "cluster-a",
-	}
-
-	labelsB := map[string]string{
-		"container": "container-b",
-		"uid":       "uid-b",
-		"pod":       "pod-b",
-		"namespace": "namespace-b",
-		"instance":  "instance-b",
-		"node":      "node-b",
-		"cluster":   "cluster-a",
-	}
-
-	collector := NewOpenCostMetricStore()
-
-	for i := 1; i <= 10; i++ {
-		collector.Update(scrape.ContainerMemoryWorkingSetBytes, labelsA, float64(i), nil, nil)
-		collector.Update(scrape.ContainerMemoryWorkingSetBytes, labelsB, float64(i), nil, nil)
-	}
-
-	results, err := collector.Query(metric.RAMUsageAverageID)
-	if err != nil {
-		t.Fatalf("error: %v", err)
-	}
-
-	if len(results) != 2 {
-		t.Fatalf("expected 2 results, got %d", len(results))
-	}
-
-	for _, result := range results {
-		if result.Values[0].Value != expected {
-			t.Fatalf("expected %f, got %f", expected, result.Values[0].Value)
-		}
-
-		t.Logf("+-- Result -------------------------------")
-		t.Logf("| Labels: %v", result.MetricLabels)
-		t.Logf("| Value: %v", result.Values[0].Value)
-		t.Logf("+----------------------------------------")
-	}
-}

+ 33 - 0
modules/collector-source/pkg/collector/collectorprovider.go

@@ -1,9 +1,11 @@
 package collector
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
 	"github.com/opencost/opencost/modules/collector-source/pkg/metric"
 	"github.com/opencost/opencost/modules/collector-source/pkg/util"
 )
@@ -12,6 +14,7 @@ import (
 // that was designed to make queries against a continuous datasource but now stores its data in discrete blocks
 type StoreProvider interface {
 	GetStore(start, end time.Time) metric.MetricStore
+	GetDailyDataCoverage(limitDays int) (time.Time, time.Time, error)
 }
 
 // repoStoreProvider is a StoreProvider implementation which uses a Repository and the Intervals of its Resolutions that it is
@@ -69,3 +72,33 @@ func (r *repoStoreProvider) getStoreKeys(start, end time.Time) (string, time.Tim
 	}
 	return minKey, minStart
 }
+
+// GetDailyDataCoverage this is a bit of a hacky add-on to help fulfill the metricsquerier interface
+func (r *repoStoreProvider) GetDailyDataCoverage(limitDays int) (time.Time, time.Time, error) {
+	coverage := r.repo.Coverage()
+	dailyCoverage, ok := coverage["1d"]
+	if !ok {
+		return time.Time{}, time.Time{}, fmt.Errorf("daily resolution is not configured")
+	}
+	if len(dailyCoverage) == 0 {
+		return time.Time{}, time.Time{}, fmt.Errorf("daily coverage not available")
+	}
+	start := dailyCoverage[0]
+	end := dailyCoverage[0]
+	for _, window := range dailyCoverage {
+		if start.After(window) {
+			start = window
+		}
+		if end.Before(window) {
+			end = window
+		}
+	}
+	limit := time.Now().UTC().Truncate(timeutil.Day).Add(-timeutil.Day * time.Duration(limitDays))
+	if start.Before(limit) {
+		start = limit
+	}
+	// since all times that we have been looking at are window start times,
+	// add a day to end time to create the actual coverage
+	end = end.Add(timeutil.Day)
+	return start, end, nil
+}

+ 0 - 1
modules/collector-source/pkg/collector/collectorprovider_test.go

@@ -9,7 +9,6 @@ import (
 )
 
 func Test_repoStoreProvider_getStoreKeys(t *testing.T) {
-
 	defaultResConfigs := []util.ResolutionConfiguration{
 		{
 			Interval: "10m",

+ 3 - 3
modules/collector-source/pkg/collector/config.go

@@ -20,15 +20,15 @@ func NewOpenCostCollectorConfigFromEnv() CollectorConfig {
 		Resolutions: []util.ResolutionConfiguration{
 			{
 				Interval:  "10m",
-				Retention: 2, // TODO UNDO env.GetCollector10mResolutionRetention(),
+				Retention: env.GetCollector10mResolutionRetention(),
 			},
 			{
 				Interval:  "1h",
-				Retention: 1, // TODO UNDO env.GetCollector1hResolutionRetention(),
+				Retention: env.GetCollector1hResolutionRetention(),
 			},
 			{
 				Interval:  "1d",
-				Retention: 1, // TODO UNDO env.GetCollection1dResolutionRetention(),
+				Retention: env.GetCollection1dResolutionRetention(),
 			},
 		},
 		ScrapeInterval: time.Second * time.Duration(env.GetCollectorScrapeIntervalSeconds()),

+ 191 - 14
modules/collector-source/pkg/collector/metricsquerier.go

@@ -8,6 +8,9 @@ import (
 	"github.com/opencost/opencost/modules/collector-source/pkg/util"
 )
 
+const GiB = 1024 * 1024 * 1024
+const LocalStorageCostPerGiBHr = 0.04 / 730.0
+
 type collectorMetricsQuerier struct {
 	collectorProvider StoreProvider
 }
@@ -35,6 +38,26 @@ func queryCollector[T any](c *collectorMetricsQuerier, start, end time.Time, id
 
 }
 
+func queryCollectorGiB[T any](c *collectorMetricsQuerier, start, end time.Time, id metric.MetricCollectorID, decoder source.ResultDecoder[T]) *source.Future[T] {
+	queryResults := source.NewQueryResults(string(id))
+	collector := c.collectorProvider.GetStore(start, end)
+	if collector != nil {
+		results, err := collector.Query(id)
+		queryResults.Error = err
+		for _, result := range results {
+			for i := range result.Values {
+				result.Values[i].Value /= GiB
+			}
+			queryResults.Results = append(queryResults.Results, result.ToQueryResult())
+		}
+	}
+	ch := make(source.QueryResultsChan, 1)
+	ch <- queryResults
+	f := source.NewFuture[T](decoder, ch)
+	return f
+
+}
+
 func (c *collectorMetricsQuerier) QueryPVActiveMinutes(start, end time.Time) *source.Future[source.PVActiveMinutesResult] {
 	return queryCollector(c, start, end, metric.PVActiveMinutesID, source.DecodePVActiveMinutesResult)
 }
@@ -52,12 +75,99 @@ func (c *collectorMetricsQuerier) QueryLocalStorageActiveMinutes(start, end time
 }
 
 func (c *collectorMetricsQuerier) QueryLocalStorageCost(start, end time.Time) *source.Future[source.LocalStorageCostResult] {
-	return queryCollector(c, start, end, metric.LocalStorageCostID, source.DecodeLocalStorageCostResult)
+	queryResults := source.NewQueryResults("LocalStorageCost")
+	collector := c.collectorProvider.GetStore(start, end)
+	if collector != nil {
+		minutesResults, err := collector.Query(metric.LocalStorageActiveMinutesID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		minutesByNode := map[string]float64{}
+		for _, result := range minutesResults {
+			node := result.MetricLabels[source.NodeLabel]
+			if node == "" || len(result.Values) == 0 {
+				continue
+			}
+			nodeStart := result.Values[0].Timestamp
+			nodeEnd := result.Values[len(result.Values)-1].Timestamp
+			if nodeStart == nil || nodeEnd == nil {
+				continue
+			}
+			minutesByNode[node] = nodeEnd.Sub(*nodeStart).Minutes()
 
+		}
+		bytesResults, err := collector.Query(metric.LocalStorageBytesID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		for _, result := range bytesResults {
+			instance := result.MetricLabels[source.InstanceLabel]
+			if instance == "" || len(result.Values) == 0 {
+				continue
+			}
+			minutes, ok := minutesByNode[instance]
+			if !ok {
+				continue
+			}
+			queryResult := result.ToQueryResult()
+			bytes := queryResult.Values[0].Value
+			GiBs := bytes / GiB
+			hours := minutes / 60
+			queryResult.Values[0].Value = GiBs * hours * LocalStorageCostPerGiBHr
+			queryResults.Results = append(queryResults.Results, queryResult)
+		}
+	}
+	ch := make(source.QueryResultsChan, 1)
+	ch <- queryResults
+	return source.NewFuture(source.DecodeLocalStorageCostResult, ch)
 }
 
 func (c *collectorMetricsQuerier) QueryLocalStorageUsedCost(start, end time.Time) *source.Future[source.LocalStorageUsedCostResult] {
-	return queryCollector(c, start, end, metric.LocalStorageUsedCostID, source.DecodeLocalStorageUsedCostResult)
+	queryResults := source.NewQueryResults("LocalStorageUsedCost")
+	collector := c.collectorProvider.GetStore(start, end)
+	if collector != nil {
+		minutesResults, err := collector.Query(metric.LocalStorageUsedActiveMinutesID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		minutesByNode := map[string]float64{}
+		for _, result := range minutesResults {
+			node := result.MetricLabels[source.InstanceLabel]
+			if node == "" || len(result.Values) == 0 {
+				continue
+			}
+			nodeStart := result.Values[0].Timestamp
+			nodeEnd := result.Values[len(result.Values)-1].Timestamp
+			if nodeStart == nil || nodeEnd == nil {
+				continue
+			}
+			minutesByNode[node] = nodeEnd.Sub(*nodeStart).Minutes()
+
+		}
+		bytesResults, err := collector.Query(metric.LocalStorageUsedAverageID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		for _, result := range bytesResults {
+			instance := result.MetricLabels[source.InstanceLabel]
+			if instance == "" || len(result.Values) == 0 {
+				continue
+			}
+			minutes, ok := minutesByNode[instance]
+			if !ok {
+				continue
+			}
+			queryResult := result.ToQueryResult()
+			bytes := queryResult.Values[0].Value
+			GiBs := bytes / GiB
+			hours := minutes / 60
+			queryResult.Values[0].Value = GiBs * hours * LocalStorageCostPerGiBHr
+			queryResults.Results = append(queryResults.Results, queryResult)
+		}
+	}
+	ch := make(source.QueryResultsChan, 1)
+	ch <- queryResults
+	return source.NewFuture(source.DecodeLocalStorageUsedCostResult, ch)
 }
 
 func (c *collectorMetricsQuerier) QueryLocalStorageUsedAvg(start, end time.Time) *source.Future[source.LocalStorageUsedAvgResult] {
@@ -106,11 +216,79 @@ func (c *collectorMetricsQuerier) QueryNodeIsSpot(start, end time.Time) *source.
 }
 
 func (c *collectorMetricsQuerier) QueryNodeRAMSystemPercent(start, end time.Time) *source.Future[source.NodeRAMSystemPercentResult] {
-	return queryCollector(c, start, end, metric.NodeRAMSystemUsageAverageID, source.DecodeNodeRAMSystemPercentResult)
+	queryResults := source.NewQueryResults("NodeRAMSystemPercent")
+	collector := c.collectorProvider.GetStore(start, end)
+	if collector != nil {
+		capacityResult, err := collector.Query(metric.NodeRAMBytesCapacityID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		nodeCapacities := map[string]float64{}
+		for _, result := range capacityResult {
+			node := result.MetricLabels[source.NodeLabel]
+			if node == "" || len(result.Values) == 0 {
+				continue
+			}
+			nodeCapacities[node] = result.Values[0].Value
+		}
+
+		results, err := collector.Query(metric.NodeRAMSystemUsageAverageID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		for _, result := range results {
+			instance := result.MetricLabels[source.InstanceLabel]
+
+			capacity, ok := nodeCapacities[instance]
+			if !ok || len(result.Values) == 0 {
+				continue
+			}
+			result.Values[0].Value /= capacity
+			queryResults.Results = append(queryResults.Results, result.ToQueryResult())
+		}
+	}
+	ch := make(source.QueryResultsChan, 1)
+	ch <- queryResults
+	f := source.NewFuture(source.DecodeNodeRAMSystemPercentResult, ch)
+	return f
 }
 
 func (c *collectorMetricsQuerier) QueryNodeRAMUserPercent(start, end time.Time) *source.Future[source.NodeRAMUserPercentResult] {
-	return queryCollector(c, start, end, metric.NodeRAMUserUsageAverageID, source.DecodeNodeRAMUserPercentResult)
+	queryResults := source.NewQueryResults("NodeRAMUserPercent")
+	collector := c.collectorProvider.GetStore(start, end)
+	if collector != nil {
+		capacityResult, err := collector.Query(metric.NodeRAMBytesCapacityID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		nodeCapacities := map[string]float64{}
+		for _, result := range capacityResult {
+			node := result.MetricLabels[source.NodeLabel]
+			if node == "" || len(result.Values) == 0 {
+				continue
+			}
+			nodeCapacities[node] = result.Values[0].Value
+		}
+
+		results, err := collector.Query(metric.NodeRAMUserUsageAverageID)
+		if err != nil {
+			queryResults.Error = err
+		}
+		for _, result := range results {
+			instance := result.MetricLabels[source.InstanceLabel]
+
+			capacity, ok := nodeCapacities[instance]
+			if !ok || len(result.Values) == 0 {
+				continue
+			}
+			result.Values[0].Value /= capacity
+			queryResults.Results = append(queryResults.Results, result.ToQueryResult())
+		}
+	}
+	ch := make(source.QueryResultsChan, 1)
+	ch <- queryResults
+	f := source.NewFuture(source.DecodeNodeRAMUserPercentResult, ch)
+	return f
 }
 
 func (c *collectorMetricsQuerier) QueryLBActiveMinutes(start, end time.Time) *source.Future[source.LBActiveMinutesResult] {
@@ -231,7 +409,7 @@ func (c *collectorMetricsQuerier) QueryPVInfo(start, end time.Time) *source.Futu
 }
 
 func (c *collectorMetricsQuerier) QueryNetZoneGiB(start, end time.Time) *source.Future[source.NetZoneGiBResult] {
-	return queryCollector(c, start, end, metric.NetZoneGiBID, source.DecodeNetZoneGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetZoneGiBID, source.DecodeNetZoneGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetZonePricePerGiB(start, end time.Time) *source.Future[source.NetZonePricePerGiBResult] {
@@ -239,7 +417,7 @@ func (c *collectorMetricsQuerier) QueryNetZonePricePerGiB(start, end time.Time)
 }
 
 func (c *collectorMetricsQuerier) QueryNetRegionGiB(start, end time.Time) *source.Future[source.NetRegionGiBResult] {
-	return queryCollector(c, start, end, metric.NetRegionGiBID, source.DecodeNetRegionGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetRegionGiBID, source.DecodeNetRegionGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetRegionPricePerGiB(start, end time.Time) *source.Future[source.NetRegionPricePerGiBResult] {
@@ -247,7 +425,7 @@ func (c *collectorMetricsQuerier) QueryNetRegionPricePerGiB(start, end time.Time
 }
 
 func (c *collectorMetricsQuerier) QueryNetInternetGiB(start, end time.Time) *source.Future[source.NetInternetGiBResult] {
-	return queryCollector(c, start, end, metric.NetInternetGiBID, source.DecodeNetInternetGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetInternetGiBID, source.DecodeNetInternetGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetInternetPricePerGiB(start, end time.Time) *source.Future[source.NetInternetPricePerGiBResult] {
@@ -255,7 +433,7 @@ func (c *collectorMetricsQuerier) QueryNetInternetPricePerGiB(start, end time.Ti
 }
 
 func (c *collectorMetricsQuerier) QueryNetInternetServiceGiB(start, end time.Time) *source.Future[source.NetInternetServiceGiBResult] {
-	return queryCollector(c, start, end, metric.NetInternetServiceGiBID, source.DecodeNetInternetServiceGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetInternetServiceGiBID, source.DecodeNetInternetServiceGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetTransferBytes(start, end time.Time) *source.Future[source.NetTransferBytesResult] {
@@ -263,19 +441,19 @@ func (c *collectorMetricsQuerier) QueryNetTransferBytes(start, end time.Time) *s
 }
 
 func (c *collectorMetricsQuerier) QueryNetZoneIngressGiB(start, end time.Time) *source.Future[source.NetZoneIngressGiBResult] {
-	return queryCollector(c, start, end, metric.NetZoneIngressGiBID, source.DecodeNetZoneIngressGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetZoneIngressGiBID, source.DecodeNetZoneIngressGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetRegionIngressGiB(start, end time.Time) *source.Future[source.NetRegionIngressGiBResult] {
-	return queryCollector(c, start, end, metric.NetRegionIngressGiBID, source.DecodeNetRegionIngressGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetRegionIngressGiBID, source.DecodeNetRegionIngressGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetInternetIngressGiB(start, end time.Time) *source.Future[source.NetInternetIngressGiBResult] {
-	return queryCollector(c, start, end, metric.NetInternetIngressGiBID, source.DecodeNetInternetIngressGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetInternetIngressGiBID, source.DecodeNetInternetIngressGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetInternetServiceIngressGiB(start, end time.Time) *source.Future[source.NetInternetServiceIngressGiBResult] {
-	return queryCollector(c, start, end, metric.NetInternetServiceIngressGiBID, source.DecodeNetInternetServiceIngressGiBResult)
+	return queryCollectorGiB(c, start, end, metric.NetInternetServiceIngressGiBID, source.DecodeNetInternetServiceIngressGiBResult)
 }
 
 func (c *collectorMetricsQuerier) QueryNetReceiveBytes(start, end time.Time) *source.Future[source.NetReceiveBytesResult] {
@@ -335,6 +513,5 @@ func (c *collectorMetricsQuerier) QueryReplicaSetsWithRollout(start, end time.Ti
 }
 
 func (c *collectorMetricsQuerier) QueryDataCoverage(limitDays int) (time.Time, time.Time, error) {
-	// TODO immplement me
-	panic("implement me")
+	return c.collectorProvider.GetDailyDataCoverage(limitDays)
 }

+ 838 - 33
modules/collector-source/pkg/collector/metricsquerier_test.go

@@ -11,59 +11,240 @@ import (
 	"github.com/opencost/opencost/modules/collector-source/pkg/scrape"
 )
 
-var Start1Str = "2025-01-01T00:00:00Z00:00"
-var End1Str = "2025-01-01T00:01:00Z00:00"
+var Start1Str = "2025-01-01T00:00:00Z"
+var End1Str = "2025-01-01T01:00:00Z"
 
-type MockCollectorProvider struct {
+type MockStoreProvider struct {
 	metricsCollector metric.MetricStore
 }
 
-func (m *MockCollectorProvider) GetStore(start, end time.Time) metric.MetricStore {
+func (m *MockStoreProvider) GetStore(start, end time.Time) metric.MetricStore {
 	return m.metricsCollector
 }
 
+// GetDailyDataCoverage is a stub for this mock; it returns zero times and no error.
+func (m *MockStoreProvider) GetDailyDataCoverage(limitDays int) (time.Time, time.Time, error) {
+	return time.Time{}, time.Time{}, nil
+}
+
 func GetMockCollectorProvider() StoreProvider {
 	collector := NewOpenCostMetricStore()
 
-	start1, _ := time.Parse(time.RFC3339, Start1Str)
-	end1, _ := time.Parse(time.RFC3339, End1Str)
+	start, _ := time.Parse(time.RFC3339, Start1Str)
+	time1 := time.Date(2025, 1, 1, 0, 30, 0, 0, time.UTC)
+	end, _ := time.Parse(time.RFC3339, End1Str)
 
 	node1Info := map[string]string{
 		"node":        "node1",
 		"provider_id": "node1",
 	}
 
+	localStorage1Info := map[string]string{
+		source.InstanceLabel: "node1",
+		source.DeviceLabel:   "local",
+	}
+
 	cluster1Info := map[string]string{
 		"provisioner_name": "GKE",
 	}
 
 	gpu1Info := map[string]string{
-		"namespace":  "namespace1",
-		"pod":        "pod1",
-		"container":  "container1",
-		"gpu":        "0",
-		"UUID":       "GPU-1",
-		"pci_bus_id": "00000000:00:0A.0",
-		"device":     "nvidia0",
-		"modelName":  "Tesla T4",
-		"Hostname":   "localhost",
+		source.NamespaceLabel: "namespace1",
+		source.PodLabel:       "pod1",
+		"container":           "container1",
+		"gpu":                 "0",
+		"UUID":                "GPU-1",
+		"pci_bus_id":          "00000000:00:0A.0",
+		"device":              "nvidia0",
+		"modelName":           "Tesla T4",
+		"Hostname":            "localhost",
+	}
+
+	pod1Info := map[string]string{
+		source.NamespaceLabel: "namespace1",
+		source.NodeLabel:      "node1",
+		source.InstanceLabel:  "node1",
+		source.PodLabel:       "pod1",
+		source.UIDLabel:       "pod-uuid1",
 	}
 
-	collector.Update(scrape.NodeTotalHourlyCost, node1Info, 0, &start1, nil)
-	collector.Update(scrape.NodeTotalHourlyCost, node1Info, 0, &end1, nil)
+	container1Info := map[string]string{
+		source.NamespaceLabel: "namespace1",
+		source.NodeLabel:      "node1",
+		source.InstanceLabel:  "node1",
+		source.PodLabel:       "pod1",
+		source.UIDLabel:       "pod-uuid1",
+		source.ContainerLabel: "container1",
+	}
+
+	container2Info := map[string]string{
+		source.NamespaceLabel: "kube-system",
+		source.NodeLabel:      "node1",
+		source.InstanceLabel:  "node1",
+		source.PodLabel:       "pod2",
+		source.UIDLabel:       "pod-uuid2",
+		source.ContainerLabel: "container2",
+	}
+
+	networkZone1Info := map[string]string{
+		source.PodNameLabel:    "pod1",
+		source.NamespaceLabel:  "namespace1",
+		source.InternetLabel:   "false",
+		source.SameRegionLabel: "true",
+		source.SameZoneLabel:   "false",
+		source.ServiceLabel:    "service1",
+	}
 
-	collector.Update(scrape.KubecostClusterManagementCost, cluster1Info, 0.1, &start1, nil)
-	collector.Update(scrape.KubecostClusterManagementCost, cluster1Info, 0.1, &end1, nil)
+	networkRegion1Info := map[string]string{
+		source.PodNameLabel:    "pod1",
+		source.NamespaceLabel:  "namespace1",
+		source.InternetLabel:   "false",
+		source.SameRegionLabel: "false",
+		source.SameZoneLabel:   "false",
+		source.ServiceLabel:    "service1",
+	}
 
-	collector.Update(scrape.DCGMFIDEVDECUTIL, gpu1Info, 0, &start1, nil)
-	collector.Update(scrape.DCGMFIPROFGRENGINEACTIVE, gpu1Info, 0, &start1, nil)
-	collector.Update(scrape.DCGMFIPROFGRENGINEACTIVE, gpu1Info, 1, &end1, nil)
+	networkInternet1Info := map[string]string{
+		source.PodNameLabel:    "pod1",
+		source.NamespaceLabel:  "namespace1",
+		source.InternetLabel:   "true",
+		source.SameRegionLabel: "false",
+		source.SameZoneLabel:   "false",
+		source.ServiceLabel:    "service1",
+	}
 
-	return &MockCollectorProvider{
+	networkInternet2Info := map[string]string{
+		source.PodNameLabel:    "pod1",
+		source.NamespaceLabel:  "namespace1",
+		source.InternetLabel:   "true",
+		source.SameRegionLabel: "false",
+		source.SameZoneLabel:   "false",
+		source.ServiceLabel:    "service2",
+	}
+
+	collector.Update(scrape.NodeTotalHourlyCost, node1Info, 0, start, nil)
+	collector.Update(scrape.NodeTotalHourlyCost, node1Info, 0, end, nil)
+
+	collector.Update(scrape.NodeFSCapacityBytes, localStorage1Info, 2*GiB, start, nil)
+	collector.Update(scrape.ContainerFSUsageBytes, localStorage1Info, 1*GiB, start, nil)
+	collector.Update(scrape.ContainerFSUsageBytes, localStorage1Info, 1*GiB, end, nil)
+
+	collector.Update(scrape.KubeNodeStatusCapacityMemoryBytes, node1Info, 4*GiB, start, nil)
+	collector.Update(scrape.ContainerMemoryWorkingSetBytes, container1Info, 1*GiB, start, nil)
+	collector.Update(scrape.ContainerMemoryWorkingSetBytes, container2Info, 2*GiB, start, nil)
+
+	collector.Update(scrape.ContainerCPUUsageSecondsTotal, container1Info, 0, start, nil)
+	collector.Update(scrape.ContainerCPUUsageSecondsTotal, container1Info, 60*60*4, time1, nil)
+	collector.Update(scrape.ContainerCPUUsageSecondsTotal, container1Info, 60*60*10, end, nil)
+
+	collector.Update(scrape.KubecostClusterManagementCost, cluster1Info, 0.1, start, nil)
+	collector.Update(scrape.KubecostClusterManagementCost, cluster1Info, 0.1, end, nil)
+
+	collector.Update(scrape.DCGMFIDEVDECUTIL, gpu1Info, 0, start, nil)
+	collector.Update(scrape.DCGMFIPROFGRENGINEACTIVE, gpu1Info, 0, start, nil)
+	collector.Update(scrape.DCGMFIPROFGRENGINEACTIVE, gpu1Info, 1, end, nil)
+
+	collector.Update(scrape.KubecostNetworkZoneEgressCost, nil, 1, start, nil)
+	collector.Update(scrape.KubecostNetworkRegionEgressCost, nil, 2, start, nil)
+	collector.Update(scrape.KubecostNetworkInternetEgressCost, nil, 3, start, nil)
+
+	collector.Update(scrape.ContainerNetworkTransmitBytesTotal, pod1Info, 3*GiB, start, nil)
+	collector.Update(scrape.ContainerNetworkTransmitBytesTotal, pod1Info, 13*GiB, end, nil)
+
+	collector.Update(scrape.ContainerNetworkReceiveBytesTotal, pod1Info, 30*GiB, start, nil)
+	collector.Update(scrape.ContainerNetworkReceiveBytesTotal, pod1Info, 130*GiB, end, nil)
+
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkRegion1Info, 1*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkZone1Info, 0*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkInternet1Info, 1*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkInternet2Info, 1*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkRegion1Info, 2*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkZone1Info, 2*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkInternet1Info, 4*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkEgressBytesTotal, networkInternet2Info, 5*GiB, end, nil)
+
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkRegion1Info, 10*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkZone1Info, 0*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkInternet1Info, 10*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkInternet2Info, 10*GiB, start, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkRegion1Info, 20*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkZone1Info, 20*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkInternet1Info, 40*GiB, end, nil)
+	collector.Update(scrape.KubecostPodNetworkIngressBytesTotal, networkInternet2Info, 50*GiB, end, nil)
+
+	return &MockStoreProvider{
 		metricsCollector: collector,
 	}
 }
 
+func TestCollectorMetricsQuerier_QueryLocalStorageCost(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryLocalStorageCost(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.LocalStorageCostResult{
+		{
+			Cluster:  "",
+			Instance: "node1",
+			Device:   "local",
+			Data: []*util.Vector{
+				{
+					Value: LocalStorageCostPerGiBHr * 2,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func TestCollectorMetricsQuerier_QueryLocalStorageUsedCost(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryLocalStorageUsedCost(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.LocalStorageUsedCostResult{
+		{
+			Cluster:  "",
+			Instance: "node1",
+			Device:   "local",
+			Data: []*util.Vector{
+				{
+					Value: LocalStorageCostPerGiBHr,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
 func TestCollectorMetricsQuerier_QueryNodeActiveMinutes(t *testing.T) {
 	start1, _ := time.Parse(time.RFC3339, Start1Str)
 	end1, _ := time.Parse(time.RFC3339, End1Str)
@@ -93,8 +274,79 @@ func TestCollectorMetricsQuerier_QueryNodeActiveMinutes(t *testing.T) {
 			},
 		},
 	}
-	if !reflect.DeepEqual(res, expected) {
-		t.Errorf("QueryNodeActiveMinutes() = %v, want %v", res, expected)
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func TestCollectorMetricsQuerier_QueryNodeRAMSystemPercent(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNodeRAMSystemPercent(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NodeRAMSystemPercentResult{
+		{
+			Cluster:  "",
+			Instance: "node1",
+			Data: []*util.Vector{
+				{
+					Value: .5,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func TestCollectorMetricsQuerier_QueryNodeRAMUserPercent(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNodeRAMUserPercent(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NodeRAMUserPercentResult{
+		{
+			Cluster:  "",
+			Instance: "node1",
+			Data: []*util.Vector{
+				{
+					Value: .25,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
 	}
 }
 
@@ -126,10 +378,88 @@ func TestCollectorMetricsQuerier_QueryClusterManagementDuration(t *testing.T) {
 			},
 		},
 	}
-	if !reflect.DeepEqual(res, expected) {
-		t.Errorf("QueryNodeActiveMinutes() = %v, want %v", res, expected)
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
 	}
+}
 
+func Test_collectorMetricsQuerier_QueryCPUUsageAvg(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryCPUUsageAvg(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.CPUUsageAvgResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Node:      "node1",
+			Instance:  "node1",
+			Pod:       "pod1",
+			Container: "container1",
+			Data: []*util.Vector{
+				{
+					Value: 10,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryCPUUsageMax(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryCPUUsageMax(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.CPUUsageMaxResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Node:      "node1",
+			Instance:  "node1",
+			Pod:       "pod1",
+			Container: "container1",
+			Data: []*util.Vector{
+				{
+					Value: 12,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
 }
 
 func TestCollectorMetricsQuerier_QueryGPUsUsageAvg(t *testing.T) {
@@ -157,8 +487,13 @@ func TestCollectorMetricsQuerier_QueryGPUsUsageAvg(t *testing.T) {
 			},
 		},
 	}
-	if !reflect.DeepEqual(res, expected) {
-		t.Errorf("QueryGPUsUsageAvg() = %v, want %v", res, expected)
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
 	}
 }
 
@@ -187,8 +522,13 @@ func TestCollectorMetricsQuerier_QueryGPUsUsageMax(t *testing.T) {
 			},
 		},
 	}
-	if !reflect.DeepEqual(res, expected) {
-		t.Errorf("QueryGPUsUsageMax() = %v, want %v", res, expected)
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
 	}
 }
 
@@ -220,7 +560,472 @@ func TestCollectorMetricsQuerier_QueryGPUInfo(t *testing.T) {
 			},
 		},
 	}
-	if !reflect.DeepEqual(res, expected) {
-		t.Errorf("QueryGPUInfo() = %v, want %v", res, expected)
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetZoneGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetZoneGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetZoneGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 2,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetZonePricePerGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetZonePricePerGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetZonePricePerGiBResult{
+		{
+			Cluster: "",
+			Data: []*util.Vector{
+				{
+					Value: 1,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetRegionGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetRegionGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetRegionGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 1,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetRegionPricePerGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetRegionPricePerGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetRegionPricePerGiBResult{
+		{
+			Cluster: "",
+			Data: []*util.Vector{
+				{
+					Value: 2,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetInternetGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetInternetGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetInternetGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 7,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetInternetPricePerGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetInternetPricePerGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetInternetPricePerGiBResult{
+		{
+			Cluster: "",
+			Data: []*util.Vector{
+				{
+					Value: 3,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetInternetServiceGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetInternetServiceGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetInternetServiceGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Service:   "service1",
+			Data: []*util.Vector{
+				{
+					Value: 3,
+				},
+			},
+		},
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Service:   "service2",
+			Data: []*util.Vector{
+				{
+					Value: 4,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetTransferBytes(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetTransferBytes(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetTransferBytesResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 10 * GiB,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetZoneIngressGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetZoneIngressGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetZoneIngressGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 20,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetRegionIngressGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetRegionIngressGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetRegionIngressGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 10,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetInternetIngressGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetInternetIngressGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetInternetIngressGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 70,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetInternetServiceIngressGiB(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetInternetServiceIngressGiB(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetInternetServiceIngressGiBResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Service:   "service1",
+			Data: []*util.Vector{
+				{
+					Value: 30,
+				},
+			},
+		},
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Service:   "service2",
+			Data: []*util.Vector{
+				{
+					Value: 40,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
+	}
+}
+
+func Test_collectorMetricsQuerier_QueryNetReceiveBytes(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+	end1, _ := time.Parse(time.RFC3339, End1Str)
+
+	c := collectorMetricsQuerier{
+		collectorProvider: GetMockCollectorProvider(),
+	}
+	resCh := c.QueryNetReceiveBytes(start1, end1)
+	res, err := resCh.Await()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err.Error())
+	}
+	expected := []*source.NetReceiveBytesResult{
+		{
+			Cluster:   "",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Data: []*util.Vector{
+				{
+					Value: 100 * GiB,
+				},
+			},
+		},
+	}
+	if len(res) != len(expected) {
+		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
+	}
+	for i, got := range res {
+		if !reflect.DeepEqual(got, expected[i]) {
+			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
+		}
 	}
 }

+ 27 - 17
modules/collector-source/pkg/metric/aggregator/activeminutes.go

@@ -1,10 +1,13 @@
 package aggregator
 
 import (
+	"sync"
 	"time"
 )
 
-type ActiveMinutesAggregator struct {
+// activeMinutesAggregator is a MetricAggregator which records the first and last timestamp of updates called on it
+type activeMinutesAggregator struct {
+	lock        sync.Mutex
 	name        string
 	labelValues []string
 	start       *time.Time
@@ -12,37 +15,44 @@ type ActiveMinutesAggregator struct {
 }
 
 func ActiveMinutes(name string, labelValues []string) MetricAggregator {
-	return &ActiveMinutesAggregator{
+	return &activeMinutesAggregator{
 		name:        name,
 		labelValues: labelValues,
 	}
 }
 
-func (m *ActiveMinutesAggregator) Name() string {
-	return m.name
+func (a *activeMinutesAggregator) Name() string {
+	return a.name
 }
 
-func (m *ActiveMinutesAggregator) AdditionInfo() map[string]string {
+func (a *activeMinutesAggregator) AdditionInfo() map[string]string {
 	return nil
 }
 
-func (m *ActiveMinutesAggregator) LabelValues() []string {
-	return m.labelValues
+func (a *activeMinutesAggregator) LabelValues() []string {
+	return a.labelValues
 }
 
-func (m *ActiveMinutesAggregator) Update(value float64, timestamp *time.Time, additionalInfo map[string]string) {
-	if timestamp == nil {
-		return
+func (a *activeMinutesAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if a.start == nil {
+		a.start = &timestamp
 	}
-	if m.start == nil {
-		m.start = timestamp
+	if !timestamp.Equal(*a.start) {
+		a.end = &timestamp
 	}
-	m.end = timestamp
 }
 
-func (m *ActiveMinutesAggregator) Value() []MetricValue {
-	return []MetricValue{
-		{Value: 1, Timestamp: m.start},
-		{Value: 1, Timestamp: m.end},
+func (a *activeMinutesAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	metricValues := make([]MetricValue, 0)
+	if a.start != nil {
+		metricValues = append(metricValues, MetricValue{Value: 1, Timestamp: a.start})
 	}
+	if a.end != nil {
+		metricValues = append(metricValues, MetricValue{Value: 1, Timestamp: a.end})
+	}
+	return metricValues
 }

+ 119 - 0
modules/collector-source/pkg/metric/aggregator/activeminutes_test.go

@@ -0,0 +1,119 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestActiveMinutesAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	time2 := time.Date(1, 1, 1, 0, 0, 1, 0, time.UTC)
+	time3 := time.Date(1, 1, 1, 0, 0, 2, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value:     1,
+					Timestamp: &time1,
+				},
+			},
+		},
+		"two sequential updates": {
+			updates: []update{
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     1,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value:     1,
+					Timestamp: &time1,
+				},
+				{
+					Value:     1,
+					Timestamp: &time2,
+				},
+			},
+		},
+		"multi update on single time": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value:     1,
+					Timestamp: &time1,
+				},
+			},
+		},
+		"three sequential updates": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     1,
+					timestamp: time2,
+				},
+				{
+					value:     1,
+					timestamp: time3,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value:     1,
+					Timestamp: &time1,
+				},
+				{
+					Value:     1,
+					Timestamp: &time3,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := activeMinutesAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 1 - 1
modules/collector-source/pkg/metric/aggregator/aggregator.go

@@ -49,7 +49,7 @@ func (mr *MetricResult) ToQueryResult() *source.QueryResult {
 type MetricAggregator interface {
 	Name() string
 	AdditionInfo() map[string]string
-	Update(value float64, timestamp *time.Time, additionalInfo map[string]string)
+	Update(value float64, timestamp time.Time, additionalInfo map[string]string)
 	Value() []MetricValue
 	LabelValues() []string
 }

+ 27 - 12
modules/collector-source/pkg/metric/aggregator/avgovertime.go

@@ -1,42 +1,57 @@
 package aggregator
 
 import (
+	"sync"
 	"time"
 )
 
-type AverageOverTimeAggregator struct {
+// averageOverTimeAggregator is a MetricAggregator which returns the average of values it is aggregating by dividing the
+// total of all values by the count of unique timestamps
+type averageOverTimeAggregator struct {
+	lock        sync.Mutex
 	name        string
 	labelValues []string
 	total       float64
 	count       int
+	currentTime *time.Time
 }
 
 func AverageOverTime(name string, labelValues []string) MetricAggregator {
-	return &AverageOverTimeAggregator{
+	return &averageOverTimeAggregator{
 		name:        name,
 		labelValues: labelValues,
 	}
 }
 
-func (m *AverageOverTimeAggregator) Name() string {
-	return m.name
+func (a *averageOverTimeAggregator) Name() string {
+	return a.name
 }
 
-func (m *AverageOverTimeAggregator) AdditionInfo() map[string]string {
+func (a *averageOverTimeAggregator) AdditionInfo() map[string]string {
 	return nil
 }
 
-func (m *AverageOverTimeAggregator) LabelValues() []string {
-	return m.labelValues
+func (a *averageOverTimeAggregator) LabelValues() []string {
+	return a.labelValues
 }
 
-func (m *AverageOverTimeAggregator) Update(value float64, timestamp *time.Time, additionalInfo map[string]string) {
-	m.total += value
-	m.count++
+func (a *averageOverTimeAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	a.total += value
+	if a.currentTime == nil || !timestamp.Equal(*a.currentTime) {
+		a.currentTime = &timestamp
+		a.count++
+	}
 }
 
-func (m *AverageOverTimeAggregator) Value() []MetricValue {
+func (a *averageOverTimeAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if a.count == 0 {
+		return []MetricValue{}
+	}
 	return []MetricValue{
-		{m.total / float64(m.count), nil},
+		{a.total / float64(a.count), nil},
 	}
 }

+ 110 - 0
modules/collector-source/pkg/metric/aggregator/avgovertime_test.go

@@ -0,0 +1,110 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestAvgOverTimeAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	time2 := time.Date(1, 1, 1, 0, 1, 0, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1,
+				},
+			},
+		},
+		"multiple updates": {
+			updates: []update{
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     1,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1.5,
+				},
+			},
+		},
+		"aggregated updates": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 3,
+				},
+			},
+		},
+		"multiple aggregated updates": {
+			updates: []update{
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     4,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 6,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := averageOverTimeAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 36 - 15
modules/collector-source/pkg/metric/aggregator/increase.go

@@ -1,46 +1,67 @@
 package aggregator
 
 import (
+	"sync"
 	"time"
 )
 
-type IncreaseAggregator struct {
+type increaseAggregator struct {
+	lock        sync.Mutex
 	name        string
 	labelValues []string
-	initiated   bool
+	initialized bool
+	initialTime time.Time
+	currentTime time.Time
 	initial     float64
 	current     float64
 }
 
 func Increase(name string, labelValues []string) MetricAggregator {
-	return &IncreaseAggregator{
+	return &increaseAggregator{
 		name:        name,
 		labelValues: labelValues,
 	}
 }
 
-func (m *IncreaseAggregator) Name() string {
-	return m.name
+func (a *increaseAggregator) Name() string {
+	return a.name
 }
 
-func (m *IncreaseAggregator) AdditionInfo() map[string]string {
+func (a *increaseAggregator) AdditionInfo() map[string]string {
 	return nil
 }
 
-func (m *IncreaseAggregator) LabelValues() []string {
-	return m.labelValues
+func (a *increaseAggregator) LabelValues() []string {
+	return a.labelValues
 }
 
-func (m *IncreaseAggregator) Update(value float64, timestamp *time.Time, additionalInfo map[string]string) {
-	if !m.initiated {
-		m.initiated = true
-		m.initial = value
+func (a *increaseAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		a.initialTime = timestamp
+		a.currentTime = timestamp
+		a.initialized = true
 	}
-	m.current = value
+	if a.initialTime == timestamp {
+		a.initial += value
+	}
+
+	if a.currentTime.Before(timestamp) {
+		a.currentTime = timestamp
+		a.current = 0
+	}
+
+	a.current += value
 }
 
-func (m *IncreaseAggregator) Value() []MetricValue {
+func (a *increaseAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		return []MetricValue{}
+	}
 	return []MetricValue{
-		{Value: m.current - m.initial},
+		{Value: a.current - a.initial},
 	}
 }

+ 93 - 0
modules/collector-source/pkg/metric/aggregator/increase_test.go

@@ -0,0 +1,93 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestIncreaseAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	time2 := time.Date(1, 1, 1, 0, 15, 0, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 0,
+				},
+			},
+		},
+		"normal increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1,
+				},
+			},
+		},
+		"double increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     3,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 4,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := increaseAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("IncreaseAggregator.Value() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 18 - 12
modules/collector-source/pkg/metric/aggregator/info.go

@@ -2,40 +2,46 @@ package aggregator
 
 import (
 	"maps"
+	"sync"
 	"time"
 )
 
-// InfoAggregator is metric aggregator meant to record label values and addition information
-type InfoAggregator struct {
+// infoAggregator is a MetricAggregator meant to record label values and additional information
+type infoAggregator struct {
+	lock           sync.RWMutex
 	name           string
 	labelValues    []string
 	additionalInfo map[string]string
 }
 
 func Info(name string, labelValues []string) MetricAggregator {
-	return &InfoAggregator{
+	return &infoAggregator{
 		name:        name,
 		labelValues: labelValues,
 	}
 }
 
-func (m *InfoAggregator) Name() string {
-	return m.name
+func (a *infoAggregator) Name() string {
+	return a.name
 }
 
-func (m *InfoAggregator) AdditionInfo() map[string]string {
-	return m.additionalInfo
+func (a *infoAggregator) AdditionInfo() map[string]string {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	return maps.Clone(a.additionalInfo)
 }
 
-func (m *InfoAggregator) LabelValues() []string {
-	return m.labelValues
+func (a *infoAggregator) LabelValues() []string {
+	return a.labelValues
 }
 
-func (m *InfoAggregator) Update(value float64, timestamp *time.Time, additionalInfo map[string]string) {
-	m.additionalInfo = maps.Clone(additionalInfo)
+func (a *infoAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	a.additionalInfo = maps.Clone(additionalInfo)
 }
 
-func (m *InfoAggregator) Value() []MetricValue {
+func (a *infoAggregator) Value() []MetricValue {
 	return []MetricValue{
 		{Value: 1},
 	}

+ 71 - 0
modules/collector-source/pkg/metric/aggregator/info_test.go

@@ -0,0 +1,71 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestInfoAggregator_AdditionInfo(t *testing.T) {
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    map[string]string
+	}{
+		"no update": {
+			updates: []update{},
+			want:    nil,
+		},
+		"empty update": {
+			updates: []update{
+				{},
+			},
+			want: nil,
+		},
+		"single update": {
+			updates: []update{
+				{
+					additionalInformation: map[string]string{
+						"test": "test",
+					},
+				},
+			},
+			want: map[string]string{
+				"test": "test",
+			},
+		},
+		"double update": {
+			updates: []update{
+				{
+					additionalInformation: map[string]string{
+						"test": "test",
+					},
+				},
+				{
+					additionalInformation: map[string]string{
+						"test2": "test2",
+					},
+				},
+			},
+			want: map[string]string{
+				"test2": "test2",
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := infoAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.AdditionInfo()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 78 - 0
modules/collector-source/pkg/metric/aggregator/iratemax.go

@@ -0,0 +1,78 @@
+package aggregator
+
+import (
+	"sync"
+	"time"
+)
+
+// iRateMaxAggregator is a MetricAggregator which returns the max rate per second between any two samples.
+// To function properly, calls to Update must use a timestamp greater than or equal to that of the last call to Update.
+type iRateMaxAggregator struct {
+	lock         sync.Mutex
+	name         string
+	labelValues  []string
+	initialized  bool
+	previousTime time.Time
+	currentTime  time.Time
+	previous     float64
+	current      float64
+	max          float64
+}
+
+func IRateMax(name string, labelValues []string) MetricAggregator {
+	return &iRateMaxAggregator{
+		name:        name,
+		labelValues: labelValues,
+	}
+}
+
+func (a *iRateMaxAggregator) Name() string {
+	return a.name
+}
+
+func (a *iRateMaxAggregator) AdditionInfo() map[string]string {
+	return nil
+}
+
+func (a *iRateMaxAggregator) LabelValues() []string {
+	return a.labelValues
+}
+
+func (a *iRateMaxAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		a.previousTime = timestamp
+		a.currentTime = timestamp
+		a.initialized = true
+	}
+
+	if a.currentTime.Before(timestamp) {
+		a.previousTime = a.currentTime
+		a.previous = a.current
+		a.currentTime = timestamp
+		a.current = 0
+	}
+	a.current += value
+
+	seconds := a.currentTime.Sub(a.previousTime).Seconds()
+	if seconds == 0 {
+		return
+	}
+	increase := a.current - a.previous
+	irate := increase / seconds
+	if irate > a.max {
+		a.max = irate
+	}
+}
+
+func (a *iRateMaxAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		return []MetricValue{}
+	}
+	return []MetricValue{
+		{Value: a.max},
+	}
+}

+ 137 - 0
modules/collector-source/pkg/metric/aggregator/iratemax_test.go

@@ -0,0 +1,137 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestIRateMaxAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	time2 := time.Date(1, 1, 1, 0, 0, 1, 0, time.UTC)
+	time3 := time.Date(1, 1, 1, 0, 0, 2, 0, time.UTC)
+	time4 := time.Date(1, 1, 1, 0, 0, 4, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 0,
+				},
+			},
+		},
+		"normal increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1,
+				},
+			},
+		},
+		"multi increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time3,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 2,
+				},
+			},
+		},
+		"aggregated increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     3,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 4,
+				},
+			},
+		},
+		"missing sample": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     3,
+					timestamp: time2,
+				},
+				{
+					value:     6,
+					timestamp: time4,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 2,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := iRateMaxAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 22 - 12
modules/collector-source/pkg/metric/aggregator/maxovertime.go

@@ -1,42 +1,52 @@
 package aggregator
 
 import (
+	"sync"
 	"time"
 )
 
-type MaxOverTimeAggregator struct {
+// maxOverTimeAggregator is a MetricAggregator which returns the max value passed to it through the Update function
+type maxOverTimeAggregator struct {
+	lock        sync.Mutex
 	name        string
 	labelValues []string
 	max         float64
 }
 
 func MaxOverTime(name string, labelValues []string) MetricAggregator {
-	return &MaxOverTimeAggregator{
+	return &maxOverTimeAggregator{
 		name:        name,
 		labelValues: labelValues,
 	}
 }
 
-func (m *MaxOverTimeAggregator) Name() string {
-	return m.name
+func (a *maxOverTimeAggregator) Name() string {
+	return a.name
 }
 
-func (m *MaxOverTimeAggregator) AdditionInfo() map[string]string {
+func (a *maxOverTimeAggregator) AdditionInfo() map[string]string {
 	return nil
 }
 
-func (m *MaxOverTimeAggregator) LabelValues() []string {
-	return m.labelValues
+func (a *maxOverTimeAggregator) LabelValues() []string {
+	return a.labelValues
 }
 
-func (m *MaxOverTimeAggregator) Update(value float64, timestamp *time.Time, additionalInfo map[string]string) {
-	if value > m.max {
-		m.max = value
+func (a *maxOverTimeAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if value > a.max {
+		a.max = value
 	}
 }
 
-func (m *MaxOverTimeAggregator) Value() []MetricValue {
+func (a *maxOverTimeAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if a.max == 0 {
+		return []MetricValue{}
+	}
 	return []MetricValue{
-		{Value: m.max},
+		{Value: a.max},
 	}
 }

+ 84 - 0
modules/collector-source/pkg/metric/aggregator/maxovertime_test.go

@@ -0,0 +1,84 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestMaxOverTimeAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1,
+				},
+			},
+		},
+		"max first": {
+			updates: []update{
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 2,
+				},
+			},
+		},
+		"max last": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 2,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := maxOverTimeAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 76 - 0
modules/collector-source/pkg/metric/aggregator/rate.go

@@ -0,0 +1,76 @@
+package aggregator
+
+import (
+	"sync"
+	"time"
+)
+
+// rateAggregator is a MetricAggregator which returns the average rate per second change of the samples that it tracks.
+// To function properly, calls to Update must use a timestamp greater than or equal to that of the last call to Update.
+type rateAggregator struct {
+	lock        sync.Mutex
+	name        string
+	labelValues []string
+	initialized bool
+	initialTime time.Time
+	currentTime time.Time
+	initial     float64
+	current     float64
+}
+
+func Rate(name string, labelValues []string) MetricAggregator {
+	return &rateAggregator{
+		name:        name,
+		labelValues: labelValues,
+	}
+}
+
+func (a *rateAggregator) Name() string {
+	return a.name
+}
+
+func (a *rateAggregator) AdditionInfo() map[string]string {
+	return nil
+}
+
+func (a *rateAggregator) LabelValues() []string {
+	return a.labelValues
+}
+
+func (a *rateAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		a.initialTime = timestamp
+		a.currentTime = timestamp
+		a.initialized = true
+	}
+	if a.initialTime == timestamp {
+		a.initial += value
+	}
+
+	if a.currentTime.Before(timestamp) {
+		a.currentTime = timestamp
+		a.current = 0
+	}
+
+	a.current += value
+}
+
+func (a *rateAggregator) Value() []MetricValue {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	if !a.initialized {
+		return []MetricValue{}
+	}
+	seconds := a.currentTime.Sub(a.initialTime).Seconds()
+	if seconds == 0 {
+		return []MetricValue{
+			{Value: 0},
+		}
+	}
+	increase := a.current - a.initial
+	return []MetricValue{
+		{Value: increase / seconds},
+	}
+}

+ 115 - 0
modules/collector-source/pkg/metric/aggregator/rate_test.go

@@ -0,0 +1,115 @@
+package aggregator
+
+import (
+	"reflect"
+	"testing"
+	"time"
+)
+
+func TestRateAggregator_Value(t *testing.T) {
+	time1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
+	time2 := time.Date(1, 1, 1, 0, 0, 1, 0, time.UTC)
+	time3 := time.Date(1, 1, 1, 0, 0, 2, 0, time.UTC)
+	type update struct {
+		value                 float64
+		timestamp             time.Time
+		additionalInformation map[string]string
+	}
+	tests := map[string]struct {
+		updates []update
+		want    []MetricValue
+	}{
+		"no update": {
+			updates: []update{},
+			want:    []MetricValue{},
+		},
+		"single update": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 0,
+				},
+			},
+		},
+		"normal increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1,
+				},
+			},
+		},
+		"multi increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time3,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 1.5,
+				},
+			},
+		},
+		"aggregated increase": {
+			updates: []update{
+				{
+					value:     1,
+					timestamp: time1,
+				},
+				{
+					value:     2,
+					timestamp: time1,
+				},
+				{
+					value:     3,
+					timestamp: time2,
+				},
+				{
+					value:     4,
+					timestamp: time2,
+				},
+			},
+			want: []MetricValue{
+				{
+					Value: 4,
+				},
+			},
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			agg := rateAggregator{}
+			for _, u := range tt.updates {
+				agg.Update(u.value, u.timestamp, u.additionalInformation)
+			}
+			got := agg.Value()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 8 - 5
modules/collector-source/pkg/metric/collector.go

@@ -2,6 +2,7 @@ package metric
 
 import (
 	"maps"
+	"sort"
 	"time"
 
 	"github.com/opencost/opencost/modules/collector-source/pkg/metric/aggregator"
@@ -20,8 +21,7 @@ const (
 	PVUsedMaxID                     MetricCollectorID = "PVUsedMax"
 	PVCInfoID                       MetricCollectorID = "PVCInfo"
 	PVActiveMinutesID               MetricCollectorID = "PVActiveMinutes"
-	LocalStorageCostID              MetricCollectorID = "LocalStorageCost"
-	LocalStorageUsedCostID          MetricCollectorID = "LocalStorageUsedCost"
+	LocalStorageUsedActiveMinutesID MetricCollectorID = "LocalStorageUsedCost"
 	LocalStorageUsedAverageID       MetricCollectorID = "LocalStorageUsedAverage"
 	LocalStorageUsedMaxID           MetricCollectorID = "LocalStorageUsedMax"
 	LocalStorageBytesID             MetricCollectorID = "LocalStorageBytesID"
@@ -62,7 +62,6 @@ const (
 	PodPVCAllocationID              MetricCollectorID = "PodPVCAllocation"
 	PVCBytesRequestedID             MetricCollectorID = "PVCBytesRequested"
 	PVBytesID                       MetricCollectorID = "PVBytesID"
-	PVCostPerGiBHourID              MetricCollectorID = "PVCostPerGiBHour"
 	PVInfoID                        MetricCollectorID = "PVInfo"
 	NetZoneGiBID                    MetricCollectorID = "NetZoneGiB"
 	NetZonePricePerGiBID            MetricCollectorID = "NetZonePricePerGiB"
@@ -91,7 +90,7 @@ const (
 	ReplicaSetsWithRolloutID        MetricCollectorID = "ReplicaSetsWithRollout"
 )
 
-// MetricCollector is a data structure that represents a specific metric metric instance that contains it's own breakdown
+// MetricCollector is a data structure that represents a specific metric instance that contains its own breakdown
 // of stored metrics by a specific label set.
 type MetricCollector struct {
 	id                MetricCollectorID // ie: RAMUsageAverage
@@ -115,7 +114,7 @@ func NewMetricCollector(id MetricCollectorID, metricName string, labels []string
 	}
 }
 
-func (mi *MetricCollector) Update(labels map[string]string, value float64, timestamp *time.Time, additionalInfo map[string]string) {
+func (mi *MetricCollector) Update(labels map[string]string, value float64, timestamp time.Time, additionalInfo map[string]string) {
 	if mi.filter != nil && !mi.filter(labels) {
 		return
 	}
@@ -147,6 +146,10 @@ func (mi *MetricCollector) Get() []*aggregator.MetricResult {
 		results = append(results, mr)
 	}
 
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].Name < results[j].Name
+	})
+
 	return results
 }
 

+ 28 - 7
modules/collector-source/pkg/metric/repository.go

@@ -56,21 +56,32 @@ func (r *MetricRepository) Update(
 	metricName string,
 	labels map[string]string,
 	value float64,
-	timestamp *time.Time,
+	timestamp time.Time,
 	additionalInformation map[string]string,
 ) {
 	r.lock.Lock()
 	defer r.lock.Unlock()
-	if timestamp == nil {
-		timestamp = util.Ptr(time.Now().UTC())
-	}
-	t := *timestamp
+
 	// Call update on the collectors for each resolution
 	for _, resCollector := range r.resolutionStores {
-		resCollector.update(metricName, labels, value, t, additionalInformation)
+		resCollector.update(metricName, labels, value, timestamp, additionalInformation)
 	}
 }
 
+func (r *MetricRepository) Coverage() map[string][]time.Time {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	result := make(map[string][]time.Time)
+	for resKey, resCollector := range r.resolutionStores {
+		var windowStarts []time.Time
+		for _, key := range resCollector.getKeys() {
+			windowStarts = append(windowStarts, time.Unix(key, 0).UTC())
+		}
+		result[resKey] = windowStarts
+	}
+	return result
+}
+
 // resolutionStores is a grouping of a resolution and the instances of MetricStore that it is used to manage
 type resolutionStores struct {
 	lock       sync.Mutex
@@ -138,7 +149,7 @@ func (r *resolutionStores) update(
 		collector = r.factory()
 		r.collectors[key] = collector
 	}
-	collector.Update(metricName, labels, value, &timestamp, additionalInformation)
+	collector.Update(metricName, labels, value, timestamp, additionalInformation)
 }
 
 func (r *resolutionStores) getCollector(t time.Time) (MetricStore, error) {
@@ -161,3 +172,13 @@ func (r *resolutionStores) getCollector(t time.Time) (MetricStore, error) {
 
 	return collector, nil
 }
+
+func (r *resolutionStores) getKeys() []int64 {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	var keys []int64
+	for key := range r.collectors {
+		keys = append(keys, key)
+	}
+	return keys
+}

+ 1 - 1
modules/collector-source/pkg/metric/store.go

@@ -88,7 +88,7 @@ func (m *InMemoryMetricStore) Update(
 	metricName string,
 	labels map[string]string,
 	value float64,
-	timestamp *time.Time,
+	timestamp time.Time,
 	additionalInformation map[string]string,
 ) {
 	m.lock.Lock()

+ 16 - 14
modules/collector-source/pkg/metric/updater.go

@@ -11,7 +11,7 @@ type MetricUpdater interface {
 	// Update accepts the name of a metric, the label set and values to update the metric, the updated Value, and a Timestamp.
 	// This method does not accept a `MetricCollectorID` because it provides updates across many potential MetricCollector instances
 	// which utilize the same metric.
-	Update(metricName string, labels map[string]string, value float64, timestamp *time.Time, additionalInformation map[string]string)
+	Update(metricName string, labels map[string]string, value float64, timestamp time.Time, additionalInformation map[string]string)
 }
 
 // ArgRecordUpdater is a mock MetricStore which records the arguments passed to the update function in an array
@@ -19,7 +19,7 @@ type ArgRecordUpdater struct {
 	UpdateArgs []UpdateArgs
 }
 
-func (u *ArgRecordUpdater) Update(metricName string, labels map[string]string, value float64, timestamp *time.Time, additionalInformation map[string]string) {
+func (u *ArgRecordUpdater) Update(metricName string, labels map[string]string, value float64, timestamp time.Time, additionalInformation map[string]string) {
 	u.UpdateArgs = append(u.UpdateArgs, UpdateArgs{
 		MetricName:            metricName,
 		Labels:                labels,
@@ -33,11 +33,24 @@ type UpdateArgs struct {
 	MetricName            string
 	Labels                map[string]string
 	Value                 float64
-	Timestamp             *time.Time
+	Timestamp             time.Time
 	AdditionalInformation map[string]string
 }
 
 func (u UpdateArgs) Equals(that UpdateArgs) error {
+	err := u.ValueEquals(that)
+	if err != nil {
+		return err
+	}
+
+	if !u.Timestamp.Equal(that.Timestamp) {
+		return fmt.Errorf("expected Timestamp %s, got %s", u.Timestamp, that.Timestamp)
+	}
+
+	return nil
+}
+
+func (u UpdateArgs) ValueEquals(that UpdateArgs) error {
 	if u.MetricName != that.MetricName {
 		return fmt.Errorf("expected metric name %s, got %s", u.MetricName, that.MetricName)
 	}
@@ -50,17 +63,6 @@ func (u UpdateArgs) Equals(that UpdateArgs) error {
 		return fmt.Errorf("expected Value %f, got %f", u.Value, that.Value)
 	}
 
-	if that.Timestamp != nil {
-		if u.Timestamp == nil {
-			return fmt.Errorf("expected Timestamp nil, got %v", that.Timestamp)
-		}
-		if !u.Timestamp.Equal(*that.Timestamp) {
-			return fmt.Errorf("expected Timestamp %s, got %s", u.Timestamp, that.Timestamp)
-		}
-	} else if u.Timestamp != nil {
-		return fmt.Errorf("expected Timestamp %v, got nil", u.Timestamp)
-	}
-
 	if !maps.Equal(u.AdditionalInformation, that.AdditionalInformation) {
 		return fmt.Errorf("expected AdditionalInformation %v, got %v", u.AdditionalInformation, that.AdditionalInformation)
 	}

+ 20 - 20
modules/collector-source/pkg/scrape/clustercache.go

@@ -88,12 +88,12 @@ func (ccs *ClusterCacheScraper) scrapeNodes(nodes []*clustercache.Node, timestam
 		if node.Status.Capacity != nil {
 			if quantity, ok := node.Status.Capacity[v1.ResourceCPU]; ok {
 				_, _, value := toResourceUnitValue(v1.ResourceCPU, quantity)
-				ccs.updater.Update(KubeNodeStatusCapacityCPUCores, nodeInfo, value, &timestamp, nil)
+				ccs.updater.Update(KubeNodeStatusCapacityCPUCores, nodeInfo, value, timestamp, nil)
 			}
 
 			if quantity, ok := node.Status.Capacity[v1.ResourceMemory]; ok {
 				_, _, value := toResourceUnitValue(v1.ResourceMemory, quantity)
-				ccs.updater.Update(KubeNodeStatusCapacityMemoryBytes, nodeInfo, value, &timestamp, nil)
+				ccs.updater.Update(KubeNodeStatusCapacityMemoryBytes, nodeInfo, value, timestamp, nil)
 			}
 		}
 
@@ -101,12 +101,12 @@ func (ccs *ClusterCacheScraper) scrapeNodes(nodes []*clustercache.Node, timestam
 		if node.Status.Allocatable != nil {
 			if quantity, ok := node.Status.Allocatable[v1.ResourceCPU]; ok {
 				_, _, value := toResourceUnitValue(v1.ResourceCPU, quantity)
-				ccs.updater.Update(KubeNodeStatusAllocatableCPUCores, nodeInfo, value, &timestamp, nil)
+				ccs.updater.Update(KubeNodeStatusAllocatableCPUCores, nodeInfo, value, timestamp, nil)
 			}
 
 			if quantity, ok := node.Status.Allocatable[v1.ResourceMemory]; ok {
 				_, _, value := toResourceUnitValue(v1.ResourceMemory, quantity)
-				ccs.updater.Update(KubeNodeStatusAllocatableMemoryBytes, nodeInfo, value, &timestamp, nil)
+				ccs.updater.Update(KubeNodeStatusAllocatableMemoryBytes, nodeInfo, value, timestamp, nil)
 			}
 		}
 
@@ -114,7 +114,7 @@ func (ccs *ClusterCacheScraper) scrapeNodes(nodes []*clustercache.Node, timestam
 		labelNames, labelValues := promutil.KubeLabelsToLabels(node.Labels)
 		nodeLabels := util.ToMap(labelNames, labelValues)
 
-		ccs.updater.Update(KubeNodeLabels, nodeInfo, 0, &timestamp, nodeLabels)
+		ccs.updater.Update(KubeNodeLabels, nodeInfo, 0, timestamp, nodeLabels)
 
 	}
 }
@@ -130,7 +130,7 @@ func (ccs *ClusterCacheScraper) scrapeDeployments(deployments []*clustercache.De
 		labelNames, labelValues := promutil.KubeLabelsToLabels(deployment.MatchLabels)
 		deploymentLabels := util.ToMap(labelNames, labelValues)
 
-		ccs.updater.Update(DeploymentMatchLabels, deploymentInfo, 0, &timestamp, deploymentLabels)
+		ccs.updater.Update(DeploymentMatchLabels, deploymentInfo, 0, timestamp, deploymentLabels)
 
 	}
 }
@@ -144,12 +144,12 @@ func (ccs *ClusterCacheScraper) scrapeNamespaces(namespaces []*clustercache.Name
 		// namespace labels
 		labelNames, labelValues := promutil.KubeLabelsToLabels(namespace.Labels)
 		namespaceLabels := util.ToMap(labelNames, labelValues)
-		ccs.updater.Update(KubeNamespaceLabels, namespaceInfo, 0, &timestamp, namespaceLabels)
+		ccs.updater.Update(KubeNamespaceLabels, namespaceInfo, 0, timestamp, namespaceLabels)
 
 		// namespace annotations
 		annotationNames, annotationValues := promutil.KubeAnnotationsToLabels(namespace.Annotations)
 		namespaceAnnotations := util.ToMap(annotationNames, annotationValues)
-		ccs.updater.Update(KubeNamespaceAnnotations, namespaceInfo, 0, &timestamp, namespaceAnnotations)
+		ccs.updater.Update(KubeNamespaceAnnotations, namespaceInfo, 0, timestamp, namespaceAnnotations)
 	}
 }
 
@@ -166,19 +166,19 @@ func (ccs *ClusterCacheScraper) scrapePods(pods []*clustercache.Pod, timestamp t
 		// pod labels
 		labelNames, labelValues := promutil.KubeLabelsToLabels(pod.Labels)
 		podLabels := util.ToMap(labelNames, labelValues)
-		ccs.updater.Update(KubePodLabels, podInfo, 0, &timestamp, podLabels)
+		ccs.updater.Update(KubePodLabels, podInfo, 0, timestamp, podLabels)
 
 		// pod annotations
 		annotationNames, annotationValues := promutil.KubeAnnotationsToLabels(pod.Annotations)
 		podAnnotations := util.ToMap(annotationNames, annotationValues)
-		ccs.updater.Update(KubePodAnnotations, podInfo, 0, &timestamp, podAnnotations)
+		ccs.updater.Update(KubePodAnnotations, podInfo, 0, timestamp, podAnnotations)
 
 		// Pod owner metric
 		for _, owner := range pod.OwnerReferences {
 			ownerInfo := maps.Clone(podInfo)
 			ownerInfo[source.OwnerKindLabel] = owner.Kind
 			ownerInfo[source.OwnerNameLabel] = owner.Name
-			ccs.updater.Update(KubePodOwner, ownerInfo, 0, &timestamp, nil)
+			ccs.updater.Update(KubePodOwner, ownerInfo, 0, timestamp, nil)
 		}
 
 		// Container Status
@@ -186,7 +186,7 @@ func (ccs *ClusterCacheScraper) scrapePods(pods []*clustercache.Pod, timestamp t
 			if status.State.Running != nil {
 				containerInfo := maps.Clone(podInfo)
 				containerInfo[source.ContainerLabel] = status.Name
-				ccs.updater.Update(KubePodContainerStatusRunning, containerInfo, 0, &timestamp, nil)
+				ccs.updater.Update(KubePodContainerStatusRunning, containerInfo, 0, timestamp, nil)
 			}
 		}
 
@@ -211,7 +211,7 @@ func (ccs *ClusterCacheScraper) scrapePods(pods []*clustercache.Pod, timestamp t
 					resourceRequestInfo := maps.Clone(containerInfo)
 					resourceRequestInfo[source.ResourceLabel] = resource
 					resourceRequestInfo[source.UnitLabel] = unit
-					ccs.updater.Update(KubePodContainerResourceRequests, resourceRequestInfo, value, &timestamp, nil)
+					ccs.updater.Update(KubePodContainerResourceRequests, resourceRequestInfo, value, timestamp, nil)
 				}
 			}
 		}
@@ -227,10 +227,10 @@ func (ccs *ClusterCacheScraper) scrapePVCs(pvcs []*clustercache.PersistentVolume
 			source.StorageClassLabel: getPersistentVolumeClaimClass(pvc),
 		}
 
-		ccs.updater.Update(KubePersistentVolumeClaimInfo, pvcInfo, 0, &timestamp, nil)
+		ccs.updater.Update(KubePersistentVolumeClaimInfo, pvcInfo, 0, timestamp, nil)
 
 		if storage, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok {
-			ccs.updater.Update(KubePersistentVolumeClaimResourceRequestsStorageBytes, pvcInfo, float64(storage.Value()), &timestamp, nil)
+			ccs.updater.Update(KubePersistentVolumeClaimResourceRequestsStorageBytes, pvcInfo, float64(storage.Value()), timestamp, nil)
 		}
 	}
 }
@@ -248,10 +248,10 @@ func (ccs *ClusterCacheScraper) scrapePVs(pvs []*clustercache.PersistentVolume,
 			source.ProviderIDLabel:   providerID,
 		}
 
-		ccs.updater.Update(KubecostPVInfo, pvInfo, 0, &timestamp, nil)
+		ccs.updater.Update(KubecostPVInfo, pvInfo, 0, timestamp, nil)
 
 		if storage, ok := pv.Spec.Capacity[v1.ResourceStorage]; ok {
-			ccs.updater.Update(KubePersistentVolumeCapacityBytes, pvInfo, float64(storage.Value()), &timestamp, nil)
+			ccs.updater.Update(KubePersistentVolumeCapacityBytes, pvInfo, float64(storage.Value()), timestamp, nil)
 		}
 	}
 }
@@ -266,7 +266,7 @@ func (ccs *ClusterCacheScraper) scrapeServices(services []*clustercache.Service,
 		// service labels
 		labelNames, labelValues := promutil.KubeLabelsToLabels(service.SpecSelector)
 		serviceLabels := util.ToMap(labelNames, labelValues)
-		ccs.updater.Update(ServiceSelectorLabels, serviceInfo, 0, &timestamp, serviceLabels)
+		ccs.updater.Update(ServiceSelectorLabels, serviceInfo, 0, timestamp, serviceLabels)
 
 	}
 }
@@ -281,7 +281,7 @@ func (ccs *ClusterCacheScraper) scrapeStatefulSets(statefulSets []*clustercache.
 		// statefulSet labels
 		labelNames, labelValues := promutil.KubeLabelsToLabels(statefulSet.SpecSelector.MatchLabels)
 		statefulSetLabels := util.ToMap(labelNames, labelValues)
-		ccs.updater.Update(StatefulSetMatchLabels, statefulSetInfo, 0, &timestamp, statefulSetLabels)
+		ccs.updater.Update(StatefulSetMatchLabels, statefulSetInfo, 0, timestamp, statefulSetLabels)
 
 	}
 }
@@ -297,7 +297,7 @@ func (ccs *ClusterCacheScraper) scrapeReplicaSets(replicaSets []*clustercache.Re
 			ownerInfo := maps.Clone(replicaSetInfo)
 			ownerInfo[source.OwnerKindLabel] = owner.Kind
 			ownerInfo[source.OwnerNameLabel] = owner.Name
-			ccs.updater.Update(KubeReplicasetOwner, ownerInfo, 0, &timestamp, nil)
+			ccs.updater.Update(KubeReplicasetOwner, ownerInfo, 0, timestamp, nil)
 		}
 	}
 }

+ 21 - 21
modules/collector-source/pkg/scrape/clustercache_test.go

@@ -63,7 +63,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 						source.ProviderIDLabel: "i-1",
 					},
 					Value:                 2.0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -73,7 +73,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 						source.ProviderIDLabel: "i-1",
 					},
 					Value:                 2048.0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -83,7 +83,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 						source.ProviderIDLabel: "i-1",
 					},
 					Value:                 1.0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -93,7 +93,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 						source.ProviderIDLabel: "i-1",
 					},
 					Value:                 1024.0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -103,7 +103,7 @@ func Test_kubernetesScraper_scrapeNodes(t *testing.T) {
 						source.ProviderIDLabel: "i-1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -176,7 +176,7 @@ func Test_kubernetesScraper_scrapeDeployments(t *testing.T) {
 						source.NamespaceLabel:  "namespace1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -250,7 +250,7 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -262,7 +262,7 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"annotation_test3": "blah3",
 						"annotation_test4": "blah4",
@@ -373,7 +373,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.InstanceLabel:  "node1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -389,7 +389,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.InstanceLabel:  "node1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"annotation_test3": "blah3",
 						"annotation_test4": "blah4",
@@ -407,7 +407,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.OwnerNameLabel: "deployment1",
 					},
 					Value:                 0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -421,7 +421,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.ContainerLabel: "container1",
 					},
 					Value:                 0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -437,7 +437,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.UnitLabel:      "core",
 					},
 					Value:                 0.5,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -453,7 +453,7 @@ func Test_kubernetesScraper_scrapePods(t *testing.T) {
 						source.UnitLabel:      "byte",
 					},
 					Value:                 512,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 			},
@@ -529,7 +529,7 @@ func Test_kubernetesScraper_scrapePVCs(t *testing.T) {
 						source.StorageClassLabel: "storageClass1",
 					},
 					Value:                 0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -541,7 +541,7 @@ func Test_kubernetesScraper_scrapePVCs(t *testing.T) {
 						source.StorageClassLabel: "storageClass1",
 					},
 					Value:                 4096,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 			},
@@ -617,7 +617,7 @@ func Test_kubernetesScraper_scrapePVs(t *testing.T) {
 						source.StorageClassLabel: "storageClass1",
 					},
 					Value:                 0,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 				{
@@ -628,7 +628,7 @@ func Test_kubernetesScraper_scrapePVs(t *testing.T) {
 						source.StorageClassLabel: "storageClass1",
 					},
 					Value:                 4096,
-					Timestamp:             &start1,
+					Timestamp:             start1,
 					AdditionalInformation: nil,
 				},
 			},
@@ -697,7 +697,7 @@ func Test_kubernetesScraper_scrapeServices(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -771,7 +771,7 @@ func Test_kubernetesScraper_scrapeStatefulSets(t *testing.T) {
 						source.NamespaceLabel:   "namespace1",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 					AdditionalInformation: map[string]string{
 						"label_test1": "blah",
 						"label_test2": "blah2",
@@ -847,7 +847,7 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 						source.OwnerKindLabel: "Rollout",
 					},
 					Value:     0,
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 			},
 		},

+ 22 - 5
modules/collector-source/pkg/scrape/dcgm.go

@@ -2,6 +2,7 @@ package scrape
 
 import (
 	"fmt"
+	"regexp"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/core/pkg/log"
@@ -9,6 +10,8 @@ import (
 	"github.com/opencost/opencost/modules/collector-source/pkg/scrape/target"
 )
 
+var dcgmRegex = regexp.MustCompile("(?i)(.*dcgm-exporter.*)")
+
 // DCGM metrics
 const (
 	DCGMFIPROFGRENGINEACTIVE = "DCGM_FI_PROF_GR_ENGINE_ACTIVE"
@@ -45,11 +48,7 @@ func (p *DCGMTargetProvider) GetTargets() []target.ScrapeTarget {
 	svcs := p.clusterCache.GetAllServices()
 	var targets []target.ScrapeTarget
 	for _, svc := range svcs {
-		if svc.ClusterIP == "" || svc.SpecSelector == nil {
-			continue
-		}
-		// TODO do something in relation to Thomas' comment https://github.com/opencost/opencost/pull/3110
-		if name := svc.SpecSelector["app.kubernetes.io/name"]; name != "dcgm-collector" {
+		if svc.ClusterIP == "" || !isDCGM(svc.SpecSelector) {
 			continue
 		}
 		port := 9400
@@ -60,3 +59,21 @@ func (p *DCGMTargetProvider) GetTargets() []target.ScrapeTarget {
 
 	return targets
 }
+
+func isDCGM(labels map[string]string) bool {
+	keys := []string{
+		"app",
+		"app.kubernetes.io/name",
+		"app.kubernetes.io/component",
+	}
+
+	for _, key := range keys {
+		if value, ok := labels[key]; ok {
+			if dcgmRegex.MatchString(value) {
+				return true
+			}
+		}
+	}
+
+	return false
+}

+ 64 - 0
modules/collector-source/pkg/scrape/dcgm_test.go

@@ -0,0 +1,64 @@
+package scrape
+
+import (
+	"testing"
+)
+
+func Test_isDCGM(t *testing.T) {
+	tests := map[string]struct {
+		labels map[string]string
+		want   bool
+	}{
+		"nil": {
+			labels: nil,
+			want:   false,
+		},
+		"empty": {
+			labels: map[string]string{},
+			want:   false,
+		},
+		"app": {
+			labels: map[string]string{
+				"app": "dcgm-exporter",
+			},
+			want: true,
+		},
+		"app.kubernetes.io/name": {
+			labels: map[string]string{
+				"app.kubernetes.io/name": "dcgm-exporter",
+			},
+			want: true,
+		},
+		"app.kubernetes.io/component": {
+			labels: map[string]string{
+				"app.kubernetes.io/component": "dcgm-exporter",
+			},
+			want: true,
+		},
+		"invalid key": {
+			labels: map[string]string{
+				"invalid-key": "dcgm-exporter",
+			},
+			want: false,
+		},
+		"invalid value": {
+			labels: map[string]string{
+				"app.kubernetes.io/name": "dcgmExporter",
+			},
+			want: false,
+		},
+		"case insensitive": {
+			labels: map[string]string{
+				"app.kubernetes.io/name": "jhlkjhlkDcGm-eXpoRterlkjhlkuh",
+			},
+			want: true,
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			if got := isDCGM(tt.labels); got != tt.want {
+				t.Errorf("isDCGM() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 2 - 9
modules/collector-source/pkg/scrape/network.go

@@ -52,20 +52,13 @@ func NewNetworkTargetProvider(releaseName string, port int, clusterCache cluster
 
 func (n *NetworkTargetProvider) GetTargets() []target.ScrapeTarget {
 	pods := n.clusterCache.GetAllPods()
-	//pods, err := k8s.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{
-	//	LabelSelector: fmt.Sprintf("app=%s-network-costs", n.releaseName),
-	//})
-	//if err != nil {
-	//	log.Errorf("NetworkTargetProvider: failed to retieve pods from kubernetes client: %s", err.Error())
-	//	return nil
-	//}
 
 	var targets []target.ScrapeTarget
 	for _, pod := range pods {
 		instance := pod.Labels["app.kubernetes.io/instance"]
 		name := pod.Labels["app.kubernetes.io/name"]
-		if name == "network-costs" && instance == "kubecost" {
-			log.Debugf("Network: found target for %s", name)
+		if name == "network-costs" && instance == "kubecost" && pod.Status.Phase == "Running" {
+			log.Debugf("Network: found target for http://%s:%d/metrics", pod.Status.PodIP, n.port)
 			t := target.NewUrlTarget(fmt.Sprintf("http://%s:%d/metrics", pod.Status.PodIP, n.port))
 			targets = append(targets, t)
 		}

+ 8 - 8
modules/collector-source/pkg/scrape/statsummary.go

@@ -52,7 +52,7 @@ func (s *StatSummaryScraper) Scrape() {
 					source.ModeLabel:           "", // TODO
 				},
 				float64(*stat.Node.CPU.UsageCoreNanoSeconds)*1e-9,
-				&stat.Node.CPU.Time.Time,
+				stat.Node.CPU.Time.Time,
 				nil,
 			)
 		}
@@ -65,7 +65,7 @@ func (s *StatSummaryScraper) Scrape() {
 					source.DeviceLabel:   "local", // This value has to be populated but isn't important here
 				},
 				float64(*stat.Node.Fs.CapacityBytes),
-				&stat.Node.Fs.Time.Time,
+				stat.Node.Fs.Time.Time,
 				nil,
 			)
 		}
@@ -85,7 +85,7 @@ func (s *StatSummaryScraper) Scrape() {
 							source.NamespaceLabel: namespace,
 						},
 						float64(*pod.Network.RxBytes),
-						&pod.Network.Time.Time,
+						pod.Network.Time.Time,
 						nil,
 					)
 				}
@@ -99,7 +99,7 @@ func (s *StatSummaryScraper) Scrape() {
 							source.NamespaceLabel: namespace,
 						},
 						float64(*pod.Network.TxBytes),
-						&pod.Network.Time.Time,
+						pod.Network.Time.Time,
 						nil,
 					)
 				}
@@ -119,7 +119,7 @@ func (s *StatSummaryScraper) Scrape() {
 						source.NamespaceLabel: volumeStats.PVCRef.Namespace,
 					},
 					float64(*volumeStats.UsedBytes),
-					&volumeStats.Time.Time,
+					volumeStats.Time.Time,
 					nil,
 				)
 				seenPVC[*volumeStats.PVCRef] = struct{}{}
@@ -137,7 +137,7 @@ func (s *StatSummaryScraper) Scrape() {
 							source.InstanceLabel:  nodeName,
 						},
 						float64(*container.CPU.UsageCoreNanoSeconds)*1e-9,
-						&container.CPU.Time.Time,
+						container.CPU.Time.Time,
 						nil,
 					)
 				}
@@ -152,7 +152,7 @@ func (s *StatSummaryScraper) Scrape() {
 							source.InstanceLabel:  nodeName,
 						},
 						float64(*container.Memory.WorkingSetBytes),
-						&container.Memory.Time.Time,
+						container.Memory.Time.Time,
 						nil,
 					)
 				}
@@ -165,7 +165,7 @@ func (s *StatSummaryScraper) Scrape() {
 							source.DeviceLabel:   "local",
 						},
 						float64(*container.Rootfs.UsedBytes),
-						&container.Rootfs.Time.Time,
+						container.Rootfs.Time.Time,
 						nil,
 					)
 				}

+ 9 - 9
modules/collector-source/pkg/scrape/statsummary_test.go

@@ -197,7 +197,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.ModeLabel:           "",
 					},
 					Value:     2,
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: NodeFSCapacityBytes,
@@ -206,7 +206,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.DeviceLabel:   "local",
 					},
 					Value:     float64(2 * util.GB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: ContainerNetworkReceiveBytesTotal,
@@ -216,7 +216,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     float64(1 * util.MB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: ContainerNetworkTransmitBytesTotal,
@@ -226,7 +226,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     float64(2 * util.MB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: KubeletVolumeStatsUsedBytes,
@@ -235,7 +235,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     float64(1 * util.GB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: ContainerCPUUsageSecondsTotal,
@@ -247,7 +247,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.InstanceLabel:  "node1",
 					},
 					Value:     1,
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: ContainerMemoryWorkingSetBytes,
@@ -259,7 +259,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.InstanceLabel:  "node1",
 					},
 					Value:     float64(5 * util.MB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 				{
 					MetricName: ContainerFSUsageBytes,
@@ -268,7 +268,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.DeviceLabel:   "local",
 					},
 					Value:     float64(1 * util.GB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 			},
 		},
@@ -330,7 +330,7 @@ func TestStatScraper_Scrape(t *testing.T) {
 						source.NamespaceLabel: "namespace1",
 					},
 					Value:     float64(1 * util.GB),
-					Timestamp: &start1,
+					Timestamp: start1,
 				},
 			},
 		},

+ 8 - 1
modules/collector-source/pkg/scrape/targetscraper.go

@@ -1,6 +1,8 @@
 package scrape
 
 import (
+	"time"
+
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/modules/collector-source/pkg/metric"
 	"github.com/opencost/opencost/modules/collector-source/pkg/scrape/parser"
@@ -30,6 +32,7 @@ func newTargetScrapper(provider target.TargetProvider, updater metric.MetricUpda
 func (s *TargetScraper) Scrape() {
 	targets := s.targetProvider.GetTargets()
 	for _, target := range targets {
+		now := time.Now().UTC()
 		f, err := target.Load()
 		if err != nil {
 			log.Errorf("failed to scrape target: %s", err.Error())
@@ -46,7 +49,11 @@ func (s *TargetScraper) Scrape() {
 			if _, ok := s.metricNames[result.Name]; ok != s.includeMetrics {
 				continue
 			}
-			s.metricUpdater.Update(result.Name, result.Labels, result.Value, result.Timestamp, nil)
+			timestamp := now
+			if result.Timestamp != nil {
+				timestamp = *result.Timestamp
+			}
+			s.metricUpdater.Update(result.Name, result.Labels, result.Value, timestamp, nil)
 		}
 	}
 }

+ 8 - 7
modules/collector-source/pkg/scrape/targetscraper_test.go

@@ -2,6 +2,7 @@ package scrape
 
 import (
 	"testing"
+	"time"
 
 	"github.com/opencost/opencost/modules/collector-source/pkg/metric"
 	"github.com/opencost/opencost/modules/collector-source/pkg/scrape/target"
@@ -96,11 +97,11 @@ const dcgmScrape = `
 DCGM_FI_PROF_GR_ENGINE_ACTIVE{gpu="0",UUID="GPU-1",pci_bus_id="00000000:00:0A.0",device="nvidia0",modelName="Tesla T4",Hostname="localhost"} 0.999999
 # HELP DCGM_FI_DEV_DEC_UTIL Decoder utilization (in %).
 # TYPE DCGM_FI_DEV_DEC_UTIL gauge
-DCGM_FI_DEV_DEC_UTIL{gpu="0",UUID="GPU-1",pci_bus_id="00000000:00:0A.0",device="nvidia0",modelName="Tesla T4",Hostname="localhost"} 0
+DCGM_FI_DEV_DEC_UTIL{gpu="0",UUID="GPU-1",pci_bus_id="00000000:00:0A.0",device="nvidia0",modelName="Tesla T4",Hostname="localhost"} 0 
 `
 
 func TestTargetScraper_Scrape(t *testing.T) {
-
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
 	tests := []struct {
 		name            string
 		scrapperFactory func(metric.MetricUpdater) *TargetScraper
@@ -126,7 +127,7 @@ func TestTargetScraper_Scrape(t *testing.T) {
 						"service":     "service1",
 					},
 					Value:     3127969647,
-					Timestamp: nil,
+					Timestamp: start1,
 				},
 				{
 					MetricName: KubecostPodNetworkEgressBytesTotal,
@@ -139,7 +140,7 @@ func TestTargetScraper_Scrape(t *testing.T) {
 						"service":     "",
 					},
 					Value:     335188219,
-					Timestamp: nil,
+					Timestamp: start1,
 				},
 				{
 					MetricName: KubecostPodNetworkIngressBytesTotal,
@@ -152,7 +153,7 @@ func TestTargetScraper_Scrape(t *testing.T) {
 						"service":     "service1",
 					},
 					Value:     17941460,
-					Timestamp: nil,
+					Timestamp: start1,
 				},
 				{
 					MetricName: KubecostPodNetworkIngressBytesTotal,
@@ -165,7 +166,7 @@ func TestTargetScraper_Scrape(t *testing.T) {
 						"service":     "",
 					},
 					Value:     13948766,
-					Timestamp: nil,
+					Timestamp: start1,
 				},
 			},
 		},
@@ -505,7 +506,7 @@ func TestTargetScraper_Scrape(t *testing.T) {
 
 			for i, expected := range tt.expected {
 				updateArg := updateRecorder.UpdateArgs[i]
-				err := expected.Equals(updateArg)
+				err := expected.ValueEquals(updateArg)
 				if err != nil {
 					t.Errorf("Result did not match expected at index %d: %s", i, err.Error())
 				}

+ 5 - 21
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -754,7 +754,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUsAllocated(start, end time.Time) *s
 }
 
 func (pds *PrometheusMetricsQuerier) QueryIsGPUShared(start, end time.Time) *source.Future[source.IsGPUSharedResult] {
-	const queryFmtIsGPUShared = `avg(avg_over_time(kube_pod_container_resource_requests{container!="", node != "", pod != "", container!= "", unit = "integer",  %s}[%s])) by (container, pod, namespace, node, resource)`
+	const queryFmtIsGPUShared = `avg(avg_over_time(kube_pod_container_resource_requests{container!="", node != "", pod != "", unit = "integer",  %s}[%s])) by (container, pod, namespace, node, resource, %s)`
 	// env.GetPromClusterFilter(), durStr
 
 	cfg := pds.promConfig
@@ -764,13 +764,13 @@ func (pds *PrometheusMetricsQuerier) QueryIsGPUShared(start, end time.Time) *sou
 		panic("failed to parse duration string passed to QueryIsGPUShared")
 	}
 
-	queryIsGPUShared := fmt.Sprintf(queryFmtIsGPUShared, cfg.ClusterFilter, durStr)
+	queryIsGPUShared := fmt.Sprintf(queryFmtIsGPUShared, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
 	return source.NewFuture(source.DecodeIsGPUSharedResult, ctx.QueryAtTime(queryIsGPUShared, end))
 }
 
 func (pds *PrometheusMetricsQuerier) QueryGPUInfo(start, end time.Time) *source.Future[source.GPUInfoResult] {
-	const queryFmtGetGPUInfo = `avg(avg_over_time(DCGM_FI_DEV_DEC_UTIL{container!="",%s}[%s])) by (container, pod, namespace, device, modelName, UUID)`
+	const queryFmtGetGPUInfo = `avg(avg_over_time(DCGM_FI_DEV_DEC_UTIL{container!="",%s}[%s])) by (container, pod, namespace, device, modelName, UUID, %s)`
 	// env.GetPromClusterFilter(), durStr
 
 	cfg := pds.promConfig
@@ -780,7 +780,7 @@ func (pds *PrometheusMetricsQuerier) QueryGPUInfo(start, end time.Time) *source.
 		panic("failed to parse duration string passed to QueryGPUInfo")
 	}
 
-	queryGetGPUInfo := fmt.Sprintf(queryFmtGetGPUInfo, cfg.ClusterFilter, durStr)
+	queryGetGPUInfo := fmt.Sprintf(queryFmtGetGPUInfo, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
 	return source.NewFuture(source.DecodeGPUInfoResult, ctx.QueryAtTime(queryGetGPUInfo, end))
 }
@@ -898,22 +898,6 @@ func (pds *PrometheusMetricsQuerier) QueryPVBytes(start, end time.Time) *source.
 	return source.NewFuture(source.DecodePVBytesResult, ctx.QueryAtTime(queryPVBytes, end))
 }
 
-func (pds *PrometheusMetricsQuerier) QueryPVCostPerGiBHour(start, end time.Time) *source.Future[source.PVPricePerGiBHourResult] {
-	const queryFmtPVCostPerGiBHour = `avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (volumename, %s)`
-	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-
-	cfg := pds.promConfig
-
-	durStr := timeutil.DurationString(end.Sub(start))
-	if durStr == "" {
-		panic("failed to parse duration string passed to QueryPVCostPerGiBHour")
-	}
-
-	queryPVCostPerGiBHour := fmt.Sprintf(queryFmtPVCostPerGiBHour, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
-	return source.NewFuture(source.DecodePVPricePerGiBHourResult, ctx.QueryAtTime(queryPVCostPerGiBHour, end))
-}
-
 func (pds *PrometheusMetricsQuerier) QueryPVInfo(start, end time.Time) *source.Future[source.PVInfoResult] {
 	const queryFmtPVMeta = `avg(avg_over_time(kubecost_pv_info{%s}[%s])) by (%s, storageclass, persistentvolume, provider_id)`
 	// env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
@@ -1378,7 +1362,7 @@ func (pds *PrometheusMetricsQuerier) durationStringFor(start, end time.Time, min
 	// time=01:00:00 will return, for a node running the entire time, 12
 	// timestamps where the first is 00:05:00 and the last is 01:00:00.
 	// However, OpenCost expects for there to be 13 timestamps where the first
-	// begins at 00:00:00. To achieve this, we must modify our query to 
+	// begins at 00:00:00. To achieve this, we must modify our query to
 	// avg(node_total_hourly_cost{}) by (node, provider_id)[65m:5m]
 	if pds.promConfig.IsOffsetResolution {
 		// increase the query time by the resolution