Browse source

Merge branch 'develop' into feature/resourcequotas

Niko Kovacevic, 6 months ago
Parent
Commit
43aff66d2f

+ 2 - 0
core/pkg/clustercache/clustercache.go

@@ -161,6 +161,7 @@ type ReplicaSet struct {
 }
 
 type ResourceQuota struct {
+	UID       types.UID
 	Name      string
 	Namespace string
 	Spec      v1.ResourceQuotaSpec
@@ -390,6 +391,7 @@ func TransformReplicaSet(input *appsv1.ReplicaSet) *ReplicaSet {
 
 func TransformResourceQuota(input *v1.ResourceQuota) *ResourceQuota {
 	return &ResourceQuota{
+		UID:       input.UID,
 		Name:      input.Name,
 		Namespace: input.Namespace,
 		Spec:      input.Spec,

+ 18 - 0
core/pkg/source/datasource.go

@@ -117,6 +117,24 @@ type MetricsQuerier interface {
 	QueryReplicaSetsWithoutOwners(start, end time.Time) *Future[ReplicaSetsWithoutOwnersResult]
 	QueryReplicaSetsWithRollout(start, end time.Time) *Future[ReplicaSetsWithRolloutResult]
 
+	// ResourceQuotas
+	QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *Future[ResourceQuotaSpecCPURequestAvgResult]
+	QueryResourceQuotaSpecCPURequestMax(start, end time.Time) *Future[ResourceQuotaSpecCPURequestMaxResult]
+	QueryResourceQuotaSpecRAMRequestAverage(start, end time.Time) *Future[ResourceQuotaSpecRAMRequestAvgResult]
+	QueryResourceQuotaSpecRAMRequestMax(start, end time.Time) *Future[ResourceQuotaSpecRAMRequestMaxResult]
+	QueryResourceQuotaSpecCPULimitAverage(start, end time.Time) *Future[ResourceQuotaSpecCPULimitAvgResult]
+	QueryResourceQuotaSpecCPULimitMax(start, end time.Time) *Future[ResourceQuotaSpecCPULimitMaxResult]
+	QueryResourceQuotaSpecRAMLimitAverage(start, end time.Time) *Future[ResourceQuotaSpecRAMLimitAvgResult]
+	QueryResourceQuotaSpecRAMLimitMax(start, end time.Time) *Future[ResourceQuotaSpecRAMLimitMaxResult]
+	QueryResourceQuotaStatusUsedCPURequestAverage(start, end time.Time) *Future[ResourceQuotaStatusUsedCPURequestAvgResult]
+	QueryResourceQuotaStatusUsedCPURequestMax(start, end time.Time) *Future[ResourceQuotaStatusUsedCPURequestMaxResult]
+	QueryResourceQuotaStatusUsedRAMRequestAverage(start, end time.Time) *Future[ResourceQuotaStatusUsedRAMRequestAvgResult]
+	QueryResourceQuotaStatusUsedRAMRequestMax(start, end time.Time) *Future[ResourceQuotaStatusUsedRAMRequestMaxResult]
+	QueryResourceQuotaStatusUsedCPULimitAverage(start, end time.Time) *Future[ResourceQuotaStatusUsedCPULimitAvgResult]
+	QueryResourceQuotaStatusUsedCPULimitMax(start, end time.Time) *Future[ResourceQuotaStatusUsedCPULimitMaxResult]
+	QueryResourceQuotaStatusUsedRAMLimitAverage(start, end time.Time) *Future[ResourceQuotaStatusUsedRAMLimitAvgResult]
+	QueryResourceQuotaStatusUsedRAMLimitMax(start, end time.Time) *Future[ResourceQuotaStatusUsedRAMLimitMaxResult]
+
 	// Data Coverage Query
 	QueryDataCoverage(limitDays int) (time.Time, time.Time, error)
 }

+ 123 - 0
core/pkg/source/decoders.go

@@ -32,6 +32,7 @@ const (
 	DeploymentLabel      = "deployment"
 	StatefulSetLabel     = "statefulSet"
 	ReplicaSetLabel      = "replicaset"
+	ResourceQuotaLabel   = "resourcequota"
 	OwnerNameLabel       = "owner_name"
 	OwnerKindLabel       = "owner_kind"
 	UnitLabel            = "unit"
@@ -1497,6 +1498,128 @@ func DecodeReplicaSetsWithRolloutResult(result *QueryResult) *ReplicaSetsWithRol
 	}
 }
 
+type ResourceQuotaMetricResult struct {
+	UID           string
+	Namespace     string
+	ResourceQuota string
+	Resource      string
+	Unit          string
+	Data          []*util.Vector
+}
+
+func DecodeResourceQuotaMetricResult(result *QueryResult) *ResourceQuotaMetricResult {
+	uid, _ := result.GetString(UIDLabel)
+	namespace, _ := result.GetNamespace()
+	resourceQuota, _ := result.GetString(ResourceQuotaLabel)
+	resource, _ := result.GetString(ResourceLabel)
+	unit, _ := result.GetString(UnitLabel)
+
+	return &ResourceQuotaMetricResult{
+		UID:           uid,
+		Namespace:     namespace,
+		ResourceQuota: resourceQuota,
+		Resource:      resource,
+		Unit:          unit,
+		Data:          result.Values,
+	}
+}
+
+type ResourceQuotaSpecCPURequestAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecCPURequestAvgResult(result *QueryResult) *ResourceQuotaSpecCPURequestAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecCPURequestMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecCPURequestMaxResult(result *QueryResult) *ResourceQuotaSpecCPURequestMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecRAMRequestAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecRAMRequestAvgResult(result *QueryResult) *ResourceQuotaSpecRAMRequestAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecRAMRequestMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecRAMRequestMaxResult(result *QueryResult) *ResourceQuotaSpecRAMRequestMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecCPULimitAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecCPULimitAvgResult(result *QueryResult) *ResourceQuotaSpecCPULimitAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecCPULimitMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecCPULimitMaxResult(result *QueryResult) *ResourceQuotaSpecCPULimitMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecRAMLimitAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecRAMLimitAvgResult(result *QueryResult) *ResourceQuotaSpecRAMLimitAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaSpecRAMLimitMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaSpecRAMLimitMaxResult(result *QueryResult) *ResourceQuotaSpecRAMLimitMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedCPURequestAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedCPURequestAvgResult(result *QueryResult) *ResourceQuotaStatusUsedCPURequestAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedCPURequestMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedCPURequestMaxResult(result *QueryResult) *ResourceQuotaStatusUsedCPURequestMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedRAMRequestAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedRAMRequestAvgResult(result *QueryResult) *ResourceQuotaStatusUsedRAMRequestAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedRAMRequestMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedRAMRequestMaxResult(result *QueryResult) *ResourceQuotaStatusUsedRAMRequestMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedCPULimitAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedCPULimitAvgResult(result *QueryResult) *ResourceQuotaStatusUsedCPULimitAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedCPULimitMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedCPULimitMaxResult(result *QueryResult) *ResourceQuotaStatusUsedCPULimitMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedRAMLimitAvgResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedRAMLimitAvgResult(result *QueryResult) *ResourceQuotaStatusUsedRAMLimitAvgResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
+type ResourceQuotaStatusUsedRAMLimitMaxResult = ResourceQuotaMetricResult
+
+func DecodeResourceQuotaStatusUsedRAMLimitMaxResult(result *QueryResult) *ResourceQuotaStatusUsedRAMLimitMaxResult {
+	return DecodeResourceQuotaMetricResult(result)
+}
+
 func DecodeAll[T any](results []*QueryResult, decode ResultDecoder[T]) []*T {
 	decoded := make([]*T, 0, len(results))
 	for _, result := range results {

+ 432 - 0
modules/collector-source/pkg/collector/collector.go

@@ -86,6 +86,22 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewPodsWithReplicaSetOwnerMetricCollector())
 	memStore.Register(NewReplicaSetsWithoutOwnersMetricCollector())
 	memStore.Register(NewReplicaSetsWithRolloutMetricCollector())
+	memStore.Register(NewResourceQuotaSpecCPURequestAverageMetricCollector())
+	memStore.Register(NewResourceQuotaSpecCPURequestMaxMetricCollector())
+	memStore.Register(NewResourceQuotaSpecRAMRequestAverageMetricCollector())
+	memStore.Register(NewResourceQuotaSpecRAMRequestMaxMetricCollector())
+	memStore.Register(NewResourceQuotaSpecCPULimitAverageMetricCollector())
+	memStore.Register(NewResourceQuotaSpecCPULimitMaxMetricCollector())
+	memStore.Register(NewResourceQuotaSpecRAMLimitAverageMetricCollector())
+	memStore.Register(NewResourceQuotaSpecRAMLimitMaxMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedCPURequestAverageMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedCPURequestMaxMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedRAMRequestAverageMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedRAMRequestMaxMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedCPULimitAverageMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedCPULimitMaxMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedRAMLimitAverageMetricCollector())
+	memStore.Register(NewResourceQuotaStatusUsedRAMLimitMaxMetricCollector())
 
 	return memStore
 }
@@ -1892,3 +1908,419 @@ func NewReplicaSetsWithRolloutMetricCollector() *metric.MetricCollector {
 		},
 	)
 }
+
+// avg(
+//	avg_over_time(
+//		resourcequota_spec_resource_requests{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecCPURequestAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecCPURequestAverageID,
+		metric.KubeResourceQuotaSpecResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_spec_resource_requests{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecCPURequestMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecCPURequestMaxID,
+		metric.KubeResourceQuotaSpecResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_spec_resource_requests{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecRAMRequestAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecRAMRequestAverageID,
+		metric.KubeResourceQuotaSpecResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_spec_resource_requests{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecRAMRequestMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecRAMRequestMaxID,
+		metric.KubeResourceQuotaSpecResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_spec_resource_limits{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecCPULimitAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecCPULimitAverageID,
+		metric.KubeResourceQuotaSpecResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_spec_resource_limits{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecCPULimitMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecCPULimitMaxID,
+		metric.KubeResourceQuotaSpecResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_spec_resource_limits{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecRAMLimitAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecRAMLimitAverageID,
+		metric.KubeResourceQuotaSpecResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_spec_resource_limits{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaSpecRAMLimitMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaSpecRAMLimitMaxID,
+		metric.KubeResourceQuotaSpecResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_status_used_resource_requests{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedCPURequestAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedCPURequestAverageID,
+		metric.KubeResourceQuotaStatusUsedResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_status_used_resource_requests{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedCPURequestMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedCPURequestMaxID,
+		metric.KubeResourceQuotaStatusUsedResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_status_used_resource_requests{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedRAMRequestAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedRAMRequestAverageID,
+		metric.KubeResourceQuotaStatusUsedResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_status_used_resource_requests{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedRAMRequestMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedRAMRequestMaxID,
+		metric.KubeResourceQuotaStatusUsedResourceRequests,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_status_used_resource_limits{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedCPULimitAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedCPULimitAverageID,
+		metric.KubeResourceQuotaStatusUsedResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_status_used_resource_limits{
+//			resource="cpu",
+//			unit="core",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedCPULimitMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedCPULimitMaxID,
+		metric.KubeResourceQuotaStatusUsedResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "cpu" && labels[source.UnitLabel] == "core"
+		},
+	)
+}
+
+// avg(
+//	avg_over_time(
+//		resourcequota_status_used_resource_limits{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedRAMLimitAverageMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedRAMLimitAverageID,
+		metric.KubeResourceQuotaStatusUsedResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.AverageOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}
+
+// max(
+//	max_over_time(
+//		resourcequota_status_used_resource_limits{
+//			resource="memory",
+//			unit="byte",
+//			<some_custom_filter>
+//		}[1h]
+//	)
+//) by (resourcequota, namespace, uid, cluster_id)
+
+func NewResourceQuotaStatusUsedRAMLimitMaxMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaStatusUsedRAMLimitMaxID,
+		metric.KubeResourceQuotaStatusUsedResourceLimits,
+		[]string{
+			source.NamespaceLabel,
+			source.ResourceQuotaLabel,
+			source.UIDLabel,
+		},
+		aggregator.MaxOverTime,
+		func(labels map[string]string) bool {
+			return labels[source.ResourceLabel] == "memory" && labels[source.UnitLabel] == "byte"
+		},
+	)
+}

+ 64 - 0
modules/collector-source/pkg/collector/metricsquerier.go

@@ -520,6 +520,70 @@ func (c *collectorMetricsQuerier) QueryReplicaSetsWithRollout(start, end time.Ti
 	return queryCollector(c, start, end, metric.ReplicaSetsWithRolloutID, source.DecodeReplicaSetsWithRolloutResult)
 }
 
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecCPURequestAverageID, source.DecodeResourceQuotaSpecCPURequestAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecCPURequestMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecCPURequestMaxID, source.DecodeResourceQuotaSpecCPURequestMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecRAMRequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMRequestAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecRAMRequestAverageID, source.DecodeResourceQuotaSpecRAMRequestAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecRAMRequestMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMRequestMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecRAMRequestMaxID, source.DecodeResourceQuotaSpecRAMRequestMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecCPULimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPULimitAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecCPULimitAverageID, source.DecodeResourceQuotaSpecCPULimitAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecCPULimitMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPULimitMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecCPULimitMaxID, source.DecodeResourceQuotaSpecCPULimitMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecRAMLimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMLimitAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecRAMLimitAverageID, source.DecodeResourceQuotaSpecRAMLimitAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaSpecRAMLimitMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMLimitMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaSpecRAMLimitMaxID, source.DecodeResourceQuotaSpecRAMLimitMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPURequestAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedCPURequestAverageID, source.DecodeResourceQuotaStatusUsedCPURequestAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPURequestMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedCPURequestMaxID, source.DecodeResourceQuotaStatusUsedCPURequestMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMRequestAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedRAMRequestAverageID, source.DecodeResourceQuotaStatusUsedRAMRequestAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMRequestMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedRAMRequestMaxID, source.DecodeResourceQuotaStatusUsedRAMRequestMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPULimitAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedCPULimitAverageID, source.DecodeResourceQuotaStatusUsedCPULimitAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPULimitMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedCPULimitMaxID, source.DecodeResourceQuotaStatusUsedCPULimitMaxResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMLimitAvgResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedRAMLimitAverageID, source.DecodeResourceQuotaStatusUsedRAMLimitAvgResult)
+}
+
+func (c *collectorMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMLimitMaxResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaStatusUsedRAMLimitMaxID, source.DecodeResourceQuotaStatusUsedRAMLimitMaxResult)
+}
+
 func (c *collectorMetricsQuerier) QueryDataCoverage(limitDays int) (time.Time, time.Time, error) {
 	return c.collectorProvider.GetDailyDataCoverage(limitDays)
 }

+ 10 - 9
modules/collector-source/pkg/event/scrape.go

@@ -9,15 +9,16 @@ const (
 )
 
 const (
-	NodeScraperType        = "nodes"
-	NamespaceScraperType   = "namespaces"
-	ReplicaSetScraperType  = "replicasets"
-	DeploymentScraperType  = "deployments"
-	StatefulSetScraperType = "statefulsets"
-	ServiceScraperType     = "services"
-	PodScraperType         = "pods"
-	PvScraperType          = "pvs"
-	PvcScraperType         = "pvcs"
+	NodeScraperType          = "nodes"
+	NamespaceScraperType     = "namespaces"
+	ReplicaSetScraperType    = "replicasets"
+	DeploymentScraperType    = "deployments"
+	StatefulSetScraperType   = "statefulsets"
+	ServiceScraperType       = "services"
+	PodScraperType           = "pods"
+	PvScraperType            = "pvs"
+	PvcScraperType           = "pvcs"
+	ResourceQuotaScraperType = "resourcequotas"
 )
 
 // ScrapeEvent is dispatched when a scrape is performed over a set of targets. It contains the name

+ 90 - 74
modules/collector-source/pkg/metric/collector.go

@@ -15,80 +15,96 @@ import (
 type MetricCollectorID string
 
 const (
-	PVPricePerGiBHourID             MetricCollectorID = "PVPricePerGiBHour"
-	PVUsedAverageID                 MetricCollectorID = "PVUsedAverage"
-	PVUsedMaxID                     MetricCollectorID = "PVUsedMax"
-	PVCInfoID                       MetricCollectorID = "PVCInfo"
-	PVActiveMinutesID               MetricCollectorID = "PVActiveMinutes"
-	LocalStorageUsedActiveMinutesID MetricCollectorID = "LocalStorageUsedCost"
-	LocalStorageUsedAverageID       MetricCollectorID = "LocalStorageUsedAverage"
-	LocalStorageUsedMaxID           MetricCollectorID = "LocalStorageUsedMax"
-	LocalStorageBytesID             MetricCollectorID = "LocalStorageBytesID"
-	LocalStorageActiveMinutesID     MetricCollectorID = "LocalStorageActiveMinutes"
-	NodeCPUCoresCapacityID          MetricCollectorID = "NodeCPUCoresCapacity"
-	NodeCPUCoresAllocatableID       MetricCollectorID = "NodeCPUCoresAllocatable"
-	NodeRAMBytesCapacityID          MetricCollectorID = "NodeRAMBytesCapacity"
-	NodeRAMBytesAllocatableID       MetricCollectorID = "NodeRAMBytesAllocatable"
-	NodeGPUCountID                  MetricCollectorID = "NodeGPUCount"
-	NodeLabelsID                    MetricCollectorID = "NodeLabels"
-	NodeActiveMinutesID             MetricCollectorID = "NodeActiveMinutes"
-	NodeCPUModeTotalID              MetricCollectorID = "NodeCPUModeTotal"
-	NodeRAMSystemUsageAverageID     MetricCollectorID = "NodeRAMSystemUsageAverage"
-	NodeRAMUserUsageAverageID       MetricCollectorID = "NodeRAMUserUsageAverage"
-	LBPricePerHourID                MetricCollectorID = "LBPricePerHour"
-	LBActiveMinutesID               MetricCollectorID = "LBActiveMinutes"
-	ClusterManagementDurationID     MetricCollectorID = "ClusterManagementDuration"
-	ClusterManagementPricePerHourID MetricCollectorID = "ClusterManagementPricePerHour"
-	PodActiveMinutesID              MetricCollectorID = "PodActiveMinutes"
-	RAMBytesAllocatedID             MetricCollectorID = "RAMBytesAllocated"
-	RAMRequestsID                   MetricCollectorID = "RAMRequests"
-	RAMLimitsID                     MetricCollectorID = "RAMLimits"
-	RAMUsageAverageID               MetricCollectorID = "RAMUsageAverage"
-	RAMUsageMaxID                   MetricCollectorID = "RAMUsageMax"
-	CPUCoresAllocatedID             MetricCollectorID = "CPUCoresAllocated"
-	CPURequestsID                   MetricCollectorID = "CPURequestsID"
-	CPULimitsID                     MetricCollectorID = "CPULimitsID"
-	CPUUsageAverageID               MetricCollectorID = "CPUUsageAverage"
-	CPUUsageMaxID                   MetricCollectorID = "CPUUsageMax"
-	GPUsRequestedID                 MetricCollectorID = "GPUsRequested"
-	GPUsUsageAverageID              MetricCollectorID = "GPUsUsageAverage"
-	GPUsUsageMaxID                  MetricCollectorID = "GPUsUsageMax"
-	GPUsAllocatedID                 MetricCollectorID = "GPUsAllocated"
-	IsGPUSharedID                   MetricCollectorID = "IsGPUShared"
-	GPUInfoID                       MetricCollectorID = "GPUInfo"
-	NodeCPUPricePerHourID           MetricCollectorID = "NodeCPUPricePerHour"
-	NodeRAMPricePerGiBHourID        MetricCollectorID = "NodeRAMPricePerGiBHour"
-	NodeGPUPricePerHourID           MetricCollectorID = "NodeGPUPricePerHour"
-	NodeIsSpotID                    MetricCollectorID = "NodeIsSpot"
-	PodPVCAllocationID              MetricCollectorID = "PodPVCAllocation"
-	PVCBytesRequestedID             MetricCollectorID = "PVCBytesRequested"
-	PVBytesID                       MetricCollectorID = "PVBytesID"
-	PVInfoID                        MetricCollectorID = "PVInfo"
-	NetZoneGiBID                    MetricCollectorID = "NetZoneGiB"
-	NetZonePricePerGiBID            MetricCollectorID = "NetZonePricePerGiB"
-	NetRegionGiBID                  MetricCollectorID = "NetRegionGiB"
-	NetRegionPricePerGiBID          MetricCollectorID = "NetRegionPricePerGiB"
-	NetInternetGiBID                MetricCollectorID = "NetInternetGiB"
-	NetInternetPricePerGiBID        MetricCollectorID = "NetInternetPricePerGiB"
-	NetInternetServiceGiBID         MetricCollectorID = "NetInternetServiceGiB"
-	NetTransferBytesID              MetricCollectorID = "NetTransferBytes"
-	NetZoneIngressGiBID             MetricCollectorID = "NetZoneIngressGiB"
-	NetRegionIngressGiBID           MetricCollectorID = "NetRegionIngressGiB"
-	NetInternetIngressGiBID         MetricCollectorID = "NetInternetIngressGiB"
-	NetInternetServiceIngressGiBID  MetricCollectorID = "NetInternetServiceIngressGiB"
-	NetReceiveBytesID               MetricCollectorID = "NetReceiveBytes"
-	NamespaceLabelsID               MetricCollectorID = "NamespaceLabels"
-	NamespaceAnnotationsID          MetricCollectorID = "NamespaceAnnotations"
-	PodLabelsID                     MetricCollectorID = "PodLabels"
-	PodAnnotationsID                MetricCollectorID = "PodAnnotations"
-	ServiceLabelsID                 MetricCollectorID = "ServiceLabels"
-	DeploymentLabelsID              MetricCollectorID = "DeploymentLabels"
-	StatefulSetLabelsID             MetricCollectorID = "StatefulSetLabels"
-	DaemonSetLabelsID               MetricCollectorID = "DaemonSetLabels"
-	JobLabelsID                     MetricCollectorID = "JobLabels"
-	PodsWithReplicaSetOwnerID       MetricCollectorID = "PodsWithReplicaSetOwner"
-	ReplicaSetsWithoutOwnersID      MetricCollectorID = "ReplicaSetsWithoutOwners"
-	ReplicaSetsWithRolloutID        MetricCollectorID = "ReplicaSetsWithRollout"
+	PVPricePerGiBHourID                        MetricCollectorID = "PVPricePerGiBHour"
+	PVUsedAverageID                            MetricCollectorID = "PVUsedAverage"
+	PVUsedMaxID                                MetricCollectorID = "PVUsedMax"
+	PVCInfoID                                  MetricCollectorID = "PVCInfo"
+	PVActiveMinutesID                          MetricCollectorID = "PVActiveMinutes"
+	LocalStorageUsedActiveMinutesID            MetricCollectorID = "LocalStorageUsedCost"
+	LocalStorageUsedAverageID                  MetricCollectorID = "LocalStorageUsedAverage"
+	LocalStorageUsedMaxID                      MetricCollectorID = "LocalStorageUsedMax"
+	LocalStorageBytesID                        MetricCollectorID = "LocalStorageBytesID"
+	LocalStorageActiveMinutesID                MetricCollectorID = "LocalStorageActiveMinutes"
+	NodeCPUCoresCapacityID                     MetricCollectorID = "NodeCPUCoresCapacity"
+	NodeCPUCoresAllocatableID                  MetricCollectorID = "NodeCPUCoresAllocatable"
+	NodeRAMBytesCapacityID                     MetricCollectorID = "NodeRAMBytesCapacity"
+	NodeRAMBytesAllocatableID                  MetricCollectorID = "NodeRAMBytesAllocatable"
+	NodeGPUCountID                             MetricCollectorID = "NodeGPUCount"
+	NodeLabelsID                               MetricCollectorID = "NodeLabels"
+	NodeActiveMinutesID                        MetricCollectorID = "NodeActiveMinutes"
+	NodeCPUModeTotalID                         MetricCollectorID = "NodeCPUModeTotal"
+	NodeRAMSystemUsageAverageID                MetricCollectorID = "NodeRAMSystemUsageAverage"
+	NodeRAMUserUsageAverageID                  MetricCollectorID = "NodeRAMUserUsageAverage"
+	LBPricePerHourID                           MetricCollectorID = "LBPricePerHour"
+	LBActiveMinutesID                          MetricCollectorID = "LBActiveMinutes"
+	ClusterManagementDurationID                MetricCollectorID = "ClusterManagementDuration"
+	ClusterManagementPricePerHourID            MetricCollectorID = "ClusterManagementPricePerHour"
+	PodActiveMinutesID                         MetricCollectorID = "PodActiveMinutes"
+	RAMBytesAllocatedID                        MetricCollectorID = "RAMBytesAllocated"
+	RAMRequestsID                              MetricCollectorID = "RAMRequests"
+	RAMLimitsID                                MetricCollectorID = "RAMLimits"
+	RAMUsageAverageID                          MetricCollectorID = "RAMUsageAverage"
+	RAMUsageMaxID                              MetricCollectorID = "RAMUsageMax"
+	CPUCoresAllocatedID                        MetricCollectorID = "CPUCoresAllocated"
+	CPURequestsID                              MetricCollectorID = "CPURequestsID"
+	CPULimitsID                                MetricCollectorID = "CPULimitsID"
+	CPUUsageAverageID                          MetricCollectorID = "CPUUsageAverage"
+	CPUUsageMaxID                              MetricCollectorID = "CPUUsageMax"
+	GPUsRequestedID                            MetricCollectorID = "GPUsRequested"
+	GPUsUsageAverageID                         MetricCollectorID = "GPUsUsageAverage"
+	GPUsUsageMaxID                             MetricCollectorID = "GPUsUsageMax"
+	GPUsAllocatedID                            MetricCollectorID = "GPUsAllocated"
+	IsGPUSharedID                              MetricCollectorID = "IsGPUShared"
+	GPUInfoID                                  MetricCollectorID = "GPUInfo"
+	NodeCPUPricePerHourID                      MetricCollectorID = "NodeCPUPricePerHour"
+	NodeRAMPricePerGiBHourID                   MetricCollectorID = "NodeRAMPricePerGiBHour"
+	NodeGPUPricePerHourID                      MetricCollectorID = "NodeGPUPricePerHour"
+	NodeIsSpotID                               MetricCollectorID = "NodeIsSpot"
+	PodPVCAllocationID                         MetricCollectorID = "PodPVCAllocation"
+	PVCBytesRequestedID                        MetricCollectorID = "PVCBytesRequested"
+	PVBytesID                                  MetricCollectorID = "PVBytesID"
+	PVInfoID                                   MetricCollectorID = "PVInfo"
+	NetZoneGiBID                               MetricCollectorID = "NetZoneGiB"
+	NetZonePricePerGiBID                       MetricCollectorID = "NetZonePricePerGiB"
+	NetRegionGiBID                             MetricCollectorID = "NetRegionGiB"
+	NetRegionPricePerGiBID                     MetricCollectorID = "NetRegionPricePerGiB"
+	NetInternetGiBID                           MetricCollectorID = "NetInternetGiB"
+	NetInternetPricePerGiBID                   MetricCollectorID = "NetInternetPricePerGiB"
+	NetInternetServiceGiBID                    MetricCollectorID = "NetInternetServiceGiB"
+	NetTransferBytesID                         MetricCollectorID = "NetTransferBytes"
+	NetZoneIngressGiBID                        MetricCollectorID = "NetZoneIngressGiB"
+	NetRegionIngressGiBID                      MetricCollectorID = "NetRegionIngressGiB"
+	NetInternetIngressGiBID                    MetricCollectorID = "NetInternetIngressGiB"
+	NetInternetServiceIngressGiBID             MetricCollectorID = "NetInternetServiceIngressGiB"
+	NetReceiveBytesID                          MetricCollectorID = "NetReceiveBytes"
+	NamespaceLabelsID                          MetricCollectorID = "NamespaceLabels"
+	NamespaceAnnotationsID                     MetricCollectorID = "NamespaceAnnotations"
+	PodLabelsID                                MetricCollectorID = "PodLabels"
+	PodAnnotationsID                           MetricCollectorID = "PodAnnotations"
+	ServiceLabelsID                            MetricCollectorID = "ServiceLabels"
+	DeploymentLabelsID                         MetricCollectorID = "DeploymentLabels"
+	StatefulSetLabelsID                        MetricCollectorID = "StatefulSetLabels"
+	DaemonSetLabelsID                          MetricCollectorID = "DaemonSetLabels"
+	JobLabelsID                                MetricCollectorID = "JobLabels"
+	PodsWithReplicaSetOwnerID                  MetricCollectorID = "PodsWithReplicaSetOwner"
+	ReplicaSetsWithoutOwnersID                 MetricCollectorID = "ReplicaSetsWithoutOwners"
+	ReplicaSetsWithRolloutID                   MetricCollectorID = "ReplicaSetsWithRollout"
+	ResourceQuotaSpecCPURequestAverageID       MetricCollectorID = "ResourceQuotaSpecCPURequestAverage"
+	ResourceQuotaSpecCPURequestMaxID           MetricCollectorID = "ResourceQuotaSpecCPURequestMax"
+	ResourceQuotaSpecRAMRequestAverageID       MetricCollectorID = "ResourceQuotaSpecRAMRequestAverage"
+	ResourceQuotaSpecRAMRequestMaxID           MetricCollectorID = "ResourceQuotaSpecRAMRequestMax"
+	ResourceQuotaSpecCPULimitAverageID         MetricCollectorID = "ResourceQuotaSpecCPULimitAverage"
+	ResourceQuotaSpecCPULimitMaxID             MetricCollectorID = "ResourceQuotaSpecCPULimitMax"
+	ResourceQuotaSpecRAMLimitAverageID         MetricCollectorID = "ResourceQuotaSpecRAMLimitAverage"
+	ResourceQuotaSpecRAMLimitMaxID             MetricCollectorID = "ResourceQuotaSpecRAMLimitMax"
+	ResourceQuotaStatusUsedCPURequestAverageID MetricCollectorID = "ResourceQuotaStatusUsedCPURequestAverage"
+	ResourceQuotaStatusUsedCPURequestMaxID     MetricCollectorID = "ResourceQuotaStatusUsedCPURequestMax"
+	ResourceQuotaStatusUsedRAMRequestAverageID MetricCollectorID = "ResourceQuotaStatusUsedRAMRequestAverage"
+	ResourceQuotaStatusUsedRAMRequestMaxID     MetricCollectorID = "ResourceQuotaStatusUsedRAMRequestMax"
+	ResourceQuotaStatusUsedCPULimitAverageID   MetricCollectorID = "ResourceQuotaStatusUsedCPULimitAverage"
+	ResourceQuotaStatusUsedCPULimitMaxID       MetricCollectorID = "ResourceQuotaStatusUsedCPULimitMax"
+	ResourceQuotaStatusUsedRAMLimitAverageID   MetricCollectorID = "ResourceQuotaStatusUsedRAMLimitAverage"
+	ResourceQuotaStatusUsedRAMLimitMaxID       MetricCollectorID = "ResourceQuotaStatusUsedRAMLimitMax"
 )
 
 // MetricCollector is a data structure that represents a specific MetricCollector metric instance that contains its own breakdown

+ 4 - 0
modules/collector-source/pkg/metric/metrics.go

@@ -23,6 +23,10 @@ const (
 	ServiceSelectorLabels                                 = "service_selector_labels"
 	StatefulSetMatchLabels                                = "statefulSet_match_labels"
 	KubeReplicasetOwner                                   = "kube_replicaset_owner"
+	KubeResourceQuotaSpecResourceRequests                 = "resourcequota_spec_resource_requests"
+	KubeResourceQuotaSpecResourceLimits                   = "resourcequota_spec_resource_limits"
+	KubeResourceQuotaStatusUsedResourceRequests           = "resourcequota_status_used_resource_requests"
+	KubeResourceQuotaStatusUsedResourceLimits             = "resourcequota_status_used_resource_limits"
 
 	// DCGM Metrics
 	DCGMFIPROFGRENGINEACTIVE = "DCGM_FI_PROF_GR_ENGINE_ACTIVE"

+ 80 - 0
modules/collector-source/pkg/scrape/clustercache.go

@@ -40,6 +40,7 @@ func (ccs *ClusterCacheScraper) Scrape() []metric.Update {
 		ccs.ScrapeServices,
 		ccs.ScrapeStatefulSets,
 		ccs.ScrapeReplicaSets,
+		ccs.ScrapeResourceQuotas,
 	}
 	return concurrentScrape(scrapeFuncs...)
 }
@@ -538,6 +539,85 @@ func (ccs *ClusterCacheScraper) scrapeReplicaSets(replicaSets []*clustercache.Re
 	return scrapeResults
 }
 
+func (ccs *ClusterCacheScraper) ScrapeResourceQuotas() []metric.Update {
+	resourceQuotas := ccs.clusterCache.GetAllResourceQuotas()
+	return ccs.scrapeResourceQuotas(resourceQuotas)
+}
+
+func (ccs *ClusterCacheScraper) scrapeResourceQuotas(resourceQuotas []*clustercache.ResourceQuota) []metric.Update {
+	var scrapeResults []metric.Update
+
+	processResource := func(baseLabels map[string]string, name v1.ResourceName, quantity resource.Quantity, metricName string) metric.Update {
+		resource, unit, value := toResourceUnitValue(name, quantity)
+
+		labels := maps.Clone(baseLabels)
+		labels[source.ResourceLabel] = resource
+		labels[source.UnitLabel] = unit
+
+		return metric.Update{
+			Name:   metricName,
+			Labels: labels,
+			Value:  value,
+		}
+	}
+
+	for _, resourceQuota := range resourceQuotas {
+		resourceQuotaInfo := map[string]string{
+			source.ResourceQuotaLabel: resourceQuota.Name,
+			source.NamespaceLabel:     resourceQuota.Namespace,
+			source.UIDLabel:           string(resourceQuota.UID),
+		}
+
+		if resourceQuota.Spec.Hard != nil {
+			// CPU/memory requests can also be aliased as "cpu" and "memory". For now, however, only scrape the complete names
+			// https://kubernetes.io/docs/concepts/policy/resource-quotas/#compute-resource-quota
+
+			if quantity, ok := resourceQuota.Spec.Hard[v1.ResourceRequestsCPU]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceCPU, quantity, metric.KubeResourceQuotaSpecResourceRequests))
+			}
+
+			if quantity, ok := resourceQuota.Spec.Hard[v1.ResourceRequestsMemory]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceMemory, quantity, metric.KubeResourceQuotaSpecResourceRequests))
+			}
+
+			if quantity, ok := resourceQuota.Spec.Hard[v1.ResourceLimitsCPU]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceCPU, quantity, metric.KubeResourceQuotaSpecResourceLimits))
+			}
+
+			if quantity, ok := resourceQuota.Spec.Hard[v1.ResourceLimitsMemory]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceMemory, quantity, metric.KubeResourceQuotaSpecResourceLimits))
+			}
+		}
+
+		if resourceQuota.Status.Used != nil {
+			if quantity, ok := resourceQuota.Status.Used[v1.ResourceRequestsCPU]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceCPU, quantity, metric.KubeResourceQuotaStatusUsedResourceRequests))
+			}
+
+			if quantity, ok := resourceQuota.Status.Used[v1.ResourceRequestsMemory]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceMemory, quantity, metric.KubeResourceQuotaStatusUsedResourceRequests))
+			}
+
+			if quantity, ok := resourceQuota.Status.Used[v1.ResourceLimitsCPU]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceCPU, quantity, metric.KubeResourceQuotaStatusUsedResourceLimits))
+			}
+
+			if quantity, ok := resourceQuota.Status.Used[v1.ResourceLimitsMemory]; ok {
+				scrapeResults = append(scrapeResults, processResource(resourceQuotaInfo, v1.ResourceMemory, quantity, metric.KubeResourceQuotaStatusUsedResourceLimits))
+			}
+		}
+	}
+
+	events.Dispatch(event.ScrapeEvent{
+		ScraperName: event.KubernetesClusterScraperName,
+		ScrapeType:  event.ResourceQuotaScraperType,
+		Targets:     len(resourceQuotas),
+		Errors:      nil,
+	})
+
+	return scrapeResults
+}
+
 // getPersistentVolumeClaimClass returns StorageClassName. If no storage class was
 // requested, it returns "".
 func getPersistentVolumeClaimClass(claim *clustercache.PersistentVolumeClaim) string {

+ 166 - 0
modules/collector-source/pkg/scrape/clustercache_test.go

@@ -912,3 +912,169 @@ func Test_kubernetesScraper_scrapeReplicaSets(t *testing.T) {
 		})
 	}
 }
+
+func Test_kubernetesScraper_scrapeResourceQuotas(t *testing.T) {
+	start1, _ := time.Parse(time.RFC3339, Start1Str)
+
+	type scrape struct {
+		ResourceQuotas []*clustercache.ResourceQuota
+		Timestamp      time.Time
+	}
+	tests := []struct {
+		name     string
+		scrapes  []scrape
+		expected []metric.Update
+	}{
+		{
+			name: "simple",
+			scrapes: []scrape{
+				{
+					ResourceQuotas: []*clustercache.ResourceQuota{
+						{
+							Name:      "resourceQuota1",
+							Namespace: "namespace1",
+							UID:       "uuid1",
+							Spec: v1.ResourceQuotaSpec{
+								Hard: v1.ResourceList{
+									v1.ResourceRequestsCPU:    resource.MustParse("1"),
+									v1.ResourceRequestsMemory: resource.MustParse("1024"),
+									v1.ResourceLimitsCPU:      resource.MustParse("2"),
+									v1.ResourceLimitsMemory:   resource.MustParse("2048"),
+								},
+							},
+							Status: v1.ResourceQuotaStatus{
+								Used: v1.ResourceList{
+									v1.ResourceRequestsCPU:    resource.MustParse("0.5"),
+									v1.ResourceRequestsMemory: resource.MustParse("512"),
+									v1.ResourceLimitsCPU:      resource.MustParse("1"),
+									v1.ResourceLimitsMemory:   resource.MustParse("1024"),
+								},
+							},
+						},
+					},
+					Timestamp: start1,
+				},
+			},
+			expected: []metric.Update{
+				{
+					Name: metric.KubeResourceQuotaSpecResourceRequests,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "cpu",
+						source.UnitLabel:          "core",
+					},
+					Value:          1,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaSpecResourceRequests,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "memory",
+						source.UnitLabel:          "byte",
+					},
+					Value:          1024,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaSpecResourceLimits,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "cpu",
+						source.UnitLabel:          "core",
+					},
+					Value:          2,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaSpecResourceLimits,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "memory",
+						source.UnitLabel:          "byte",
+					},
+					Value:          2048,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaStatusUsedResourceRequests,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "cpu",
+						source.UnitLabel:          "core",
+					},
+					Value:          0.5,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaStatusUsedResourceRequests,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "memory",
+						source.UnitLabel:          "byte",
+					},
+					Value:          512,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaStatusUsedResourceLimits,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "cpu",
+						source.UnitLabel:          "core",
+					},
+					Value:          1,
+					AdditionalInfo: nil,
+				},
+				{
+					Name: metric.KubeResourceQuotaStatusUsedResourceLimits,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+						source.ResourceLabel:      "memory",
+						source.UnitLabel:          "byte",
+					},
+					Value:          1024,
+					AdditionalInfo: nil,
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ks := &ClusterCacheScraper{}
+			var scrapeResults []metric.Update
+			for _, s := range tt.scrapes {
+				res := ks.scrapeResourceQuotas(s.ResourceQuotas)
+				scrapeResults = append(scrapeResults, res...)
+			}
+
+			if len(scrapeResults) != len(tt.expected) {
+				t.Errorf("Expected result length of %d, got %d", len(tt.expected), len(scrapeResults))
+			}
+
+			for i, expected := range tt.expected {
+				got := scrapeResults[i]
+				if !reflect.DeepEqual(expected, got) {
+					t.Errorf("Result did not match expected at index %d: got %v, want %v", i, got, expected)
+				}
+			}
+		})
+	}
+}

+ 290 - 0
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -1484,6 +1484,296 @@ func (pds *PrometheusMetricsQuerier) QueryReplicaSetsWithRollout(start, end time
 	return source.NewFuture(source.DecodeReplicaSetsWithRolloutResult, ctx.QueryAtTime(queryReplicaSetsWithRolloutOwner, end))
 }
 
+// Note: The ResourceQuota metrics are _not_ emitted at the moment. Leaving the query implementations here in case we add metric emission later on.
+
+// QueryResourceQuotaSpecCPURequestAverage returns a Future resolving to the average
+// ResourceQuota spec CPU request (resource="cpu", unit="core") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestAvgResult] {
+	const queryName = "QueryResourceQuotaSpecCPURequestAverage"
+	const queryFmtResourceQuotaSpecCPURequests = `avg(avg_over_time(resourcequota_spec_resource_requests{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecCPURequests := fmt.Sprintf(queryFmtResourceQuotaSpecCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPURequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecCPURequestAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecCPURequests, end))
+}
+
+// QueryResourceQuotaSpecCPURequestMax returns a Future resolving to the maximum
+// ResourceQuota spec CPU request (resource="cpu", unit="core") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPURequestMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestMaxResult] {
+	const queryName = "QueryResourceQuotaSpecCPURequestMax"
+	const queryFmtResourceQuotaSpecCPURequests = `max(max_over_time(resourcequota_spec_resource_requests{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecCPURequests := fmt.Sprintf(queryFmtResourceQuotaSpecCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPURequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecCPURequestMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecCPURequests, end))
+}
+
+// QueryResourceQuotaSpecRAMRequestAverage returns a Future resolving to the average
+// ResourceQuota spec memory request (resource="memory", unit="byte") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMRequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMRequestAvgResult] {
+	const queryName = "QueryResourceQuotaSpecRAMRequestAverage"
+	const queryFmtResourceQuotaSpecRAMRequests = `avg(avg_over_time(resourcequota_spec_resource_requests{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecRAMRequests := fmt.Sprintf(queryFmtResourceQuotaSpecRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMRequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecRAMRequestAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMRequests, end))
+}
+
+// QueryResourceQuotaSpecRAMRequestMax returns a Future resolving to the maximum
+// ResourceQuota spec memory request (resource="memory", unit="byte") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMRequestMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMRequestMaxResult] {
+	const queryName = "QueryResourceQuotaSpecRAMRequestMax"
+	const queryFmtResourceQuotaSpecRAMRequests = `max(max_over_time(resourcequota_spec_resource_requests{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecRAMRequests := fmt.Sprintf(queryFmtResourceQuotaSpecRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMRequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecRAMRequestMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMRequests, end))
+}
+
+// QueryResourceQuotaSpecCPULimitAverage returns a Future resolving to the average
+// ResourceQuota spec CPU limit (resource="cpu", unit="core") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPULimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPULimitAvgResult] {
+	const queryName = "QueryResourceQuotaSpecCPULimitAverage"
+	const queryFmtResourceQuotaSpecCPULimits = `avg(avg_over_time(resourcequota_spec_resource_limits{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecCPULimits := fmt.Sprintf(queryFmtResourceQuotaSpecCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPULimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecCPULimitAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecCPULimits, end))
+}
+
+// QueryResourceQuotaSpecCPULimitMax returns a Future resolving to the maximum
+// ResourceQuota spec CPU limit (resource="cpu", unit="core") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPULimitMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPULimitMaxResult] {
+	const queryName = "QueryResourceQuotaSpecCPULimitMax"
+	const queryFmtResourceQuotaSpecCPULimits = `max(max_over_time(resourcequota_spec_resource_limits{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecCPULimits := fmt.Sprintf(queryFmtResourceQuotaSpecCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPULimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecCPULimitMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecCPULimits, end))
+}
+
+// QueryResourceQuotaSpecRAMLimitAverage returns a Future resolving to the average
+// ResourceQuota spec memory limit (resource="memory", unit="byte") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMLimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMLimitAvgResult] {
+	const queryName = "QueryResourceQuotaSpecRAMLimitAverage"
+	const queryFmtResourceQuotaSpecRAMLimits = `avg(avg_over_time(resourcequota_spec_resource_limits{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecRAMLimits := fmt.Sprintf(queryFmtResourceQuotaSpecRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMLimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecRAMLimitAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMLimits, end))
+}
+
+// QueryResourceQuotaSpecRAMLimitMax returns a Future resolving to the maximum
+// ResourceQuota spec memory limit (resource="memory", unit="byte") over [start, end],
+// grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMLimitMax(start, end time.Time) *source.Future[source.ResourceQuotaSpecRAMLimitMaxResult] {
+	const queryName = "QueryResourceQuotaSpecRAMLimitMax"
+	const queryFmtResourceQuotaSpecRAMLimits = `max(max_over_time(resourcequota_spec_resource_limits{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaSpecRAMLimits := fmt.Sprintf(queryFmtResourceQuotaSpecRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMLimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaSpecRAMLimitMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMLimits, end))
+}
+
+// QueryResourceQuotaStatusUsedCPURequestAverage returns a Future resolving to the average
+// CPU request usage reported in ResourceQuota status (resource="cpu", unit="core") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPURequestAvgResult] {
+	const queryName = "QueryResourceQuotaStatusUsedCPURequestAverage"
+	const queryFmtResourceQuotaStatusUsedCPURequests = `avg(avg_over_time(resourcequota_status_used_resource_requests{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedCPURequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPURequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPURequestAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPURequests, end))
+}
+
+// QueryResourceQuotaStatusUsedCPURequestMax returns a Future resolving to the maximum
+// CPU request usage reported in ResourceQuota status (resource="cpu", unit="core") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPURequestMaxResult] {
+	const queryName = "QueryResourceQuotaStatusUsedCPURequestMax"
+	const queryFmtResourceQuotaStatusUsedCPURequests = `max(max_over_time(resourcequota_status_used_resource_requests{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedCPURequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPURequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPURequestMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPURequests, end))
+}
+
+// QueryResourceQuotaStatusUsedRAMRequestAverage returns a Future resolving to the average
+// memory request usage reported in ResourceQuota status (resource="memory", unit="byte") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMRequestAvgResult] {
+	const queryName = "QueryResourceQuotaStatusUsedRAMRequestAverage"
+	const queryFmtResourceQuotaStatusUsedRAMRequests = `avg(avg_over_time(resourcequota_status_used_resource_requests{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedRAMRequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMRequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMRequestAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMRequests, end))
+}
+
+// QueryResourceQuotaStatusUsedRAMRequestMax returns a Future resolving to the maximum
+// memory request usage reported in ResourceQuota status (resource="memory", unit="byte") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMRequestMaxResult] {
+	const queryName = "QueryResourceQuotaStatusUsedRAMRequestMax"
+	const queryFmtResourceQuotaStatusUsedRAMRequests = `max(max_over_time(resourcequota_status_used_resource_requests{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedRAMRequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMRequests)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMRequestMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMRequests, end))
+}
+
+// QueryResourceQuotaStatusUsedCPULimitAverage returns a Future resolving to the average
+// CPU limit usage reported in ResourceQuota status (resource="cpu", unit="core") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPULimitAvgResult] {
+	const queryName = "QueryResourceQuotaStatusUsedCPULimitAverage"
+	const queryFmtResourceQuotaStatusUsedCPULimits = `avg(avg_over_time(resourcequota_status_used_resource_limits{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedCPULimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPULimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPULimitAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPULimits, end))
+}
+
+// QueryResourceQuotaStatusUsedCPULimitMax returns a Future resolving to the maximum
+// CPU limit usage reported in ResourceQuota status (resource="cpu", unit="core") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedCPULimitMaxResult] {
+	const queryName = "QueryResourceQuotaStatusUsedCPULimitMax"
+	const queryFmtResourceQuotaStatusUsedCPULimits = `max(max_over_time(resourcequota_status_used_resource_limits{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedCPULimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPULimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPULimitMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPULimits, end))
+}
+
+// QueryResourceQuotaStatusUsedRAMLimitAverage returns a Future resolving to the average
+// memory limit usage reported in ResourceQuota status (resource="memory", unit="byte") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitAverage(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMLimitAvgResult] {
+	const queryName = "QueryResourceQuotaStatusUsedRAMLimitAverage"
+	const queryFmtResourceQuotaStatusUsedRAMLimits = `avg(avg_over_time(resourcequota_status_used_resource_limits{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedRAMLimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMLimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMLimitAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMLimits, end))
+}
+
+// QueryResourceQuotaStatusUsedRAMLimitMax returns a Future resolving to the maximum
+// memory limit usage reported in ResourceQuota status (resource="memory", unit="byte") over
+// [start, end], grouped by resourcequota, namespace, uid and the configured cluster label.
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitMax(start, end time.Time) *source.Future[source.ResourceQuotaStatusUsedRAMLimitMaxResult] {
+	const queryName = "QueryResourceQuotaStatusUsedRAMLimitMax"
+	const queryFmtResourceQuotaStatusUsedRAMLimits = `max(max_over_time(resourcequota_status_used_resource_limits{resource="memory",unit="byte", %s}[%s])) by (resourcequota, namespace, uid, %s)`
+
+	cfg := pds.promConfig
+
+	// An empty string means the window could not be rendered as a Prometheus duration;
+	// treated as a programmer error, hence the panic.
+	durStr := timeutil.DurationString(end.Sub(start))
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaStatusUsedRAMLimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMLimits)
+
+	// The range query is evaluated at the window end; the Future decodes the raw result.
+	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMLimitMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMLimits, end))
+}
+
 func (pds *PrometheusMetricsQuerier) QueryDataCoverage(limitDays int) (time.Time, time.Time, error) {
 	const (
 		queryName            = "QueryDataCoverage"

+ 92 - 76
modules/prometheus-source/pkg/prom/metricsquerier_test.go

@@ -96,82 +96,98 @@ func TestQueryLogs(t *testing.T) {
 	queryStart := queryEnd.Add(-24 * time.Hour)
 
 	tests := map[string]func(time.Time, time.Time){
-		"QueryPVActiveMinutes":              func(s, e time.Time) { querier.QueryPVActiveMinutes(s, e) },
-		"QueryPVUsedAverage":                func(s, e time.Time) { querier.QueryPVUsedAverage(s, e) },
-		"QueryPVUsedMax":                    func(s, e time.Time) { querier.QueryPVUsedMax(s, e) },
-		"QueryLocalStorageActiveMinutes":    func(s, e time.Time) { querier.QueryLocalStorageActiveMinutes(s, e) },
-		"QueryLocalStorageCost":             func(s, e time.Time) { querier.QueryLocalStorageCost(s, e) },
-		"QueryLocalStorageUsedCost":         func(s, e time.Time) { querier.QueryLocalStorageUsedCost(s, e) },
-		"QueryLocalStorageUsedAvg":          func(s, e time.Time) { querier.QueryLocalStorageUsedAvg(s, e) },
-		"QueryLocalStorageUsedMax":          func(s, e time.Time) { querier.QueryLocalStorageUsedMax(s, e) },
-		"QueryLocalStorageBytes":            func(s, e time.Time) { querier.QueryLocalStorageBytes(s, e) },
-		"QueryNodeActiveMinutes":            func(s, e time.Time) { querier.QueryNodeActiveMinutes(s, e) },
-		"QueryNodeCPUCoresCapacity":         func(s, e time.Time) { querier.QueryNodeCPUCoresCapacity(s, e) },
-		"QueryNodeCPUCoresAllocatable":      func(s, e time.Time) { querier.QueryNodeCPUCoresAllocatable(s, e) },
-		"QueryNodeRAMBytesCapacity":         func(s, e time.Time) { querier.QueryNodeRAMBytesCapacity(s, e) },
-		"QueryNodeRAMBytesAllocatable":      func(s, e time.Time) { querier.QueryNodeRAMBytesAllocatable(s, e) },
-		"QueryNodeGPUCount":                 func(s, e time.Time) { querier.QueryNodeGPUCount(s, e) },
-		"QueryNodeCPUModeTotal":             func(s, e time.Time) { querier.QueryNodeCPUModeTotal(s, e) },
-		"QueryNodeIsSpot":                   func(s, e time.Time) { querier.QueryNodeIsSpot(s, e) },
-		"QueryNodeRAMSystemPercent":         func(s, e time.Time) { querier.QueryNodeRAMSystemPercent(s, e) },
-		"QueryNodeRAMUserPercent":           func(s, e time.Time) { querier.QueryNodeRAMUserPercent(s, e) },
-		"QueryLBActiveMinutes":              func(s, e time.Time) { querier.QueryLBActiveMinutes(s, e) },
-		"QueryLBPricePerHr":                 func(s, e time.Time) { querier.QueryLBPricePerHr(s, e) },
-		"QueryClusterManagementDuration":    func(s, e time.Time) { querier.QueryClusterManagementDuration(s, e) },
-		"QueryClusterManagementPricePerHr":  func(s, e time.Time) { querier.QueryClusterManagementPricePerHr(s, e) },
-		"QueryPods":                         func(s, e time.Time) { querier.QueryPods(s, e) },
-		"QueryPodsUID":                      func(s, e time.Time) { querier.QueryPodsUID(s, e) },
-		"QueryRAMBytesAllocated":            func(s, e time.Time) { querier.QueryRAMBytesAllocated(s, e) },
-		"QueryRAMRequests":                  func(s, e time.Time) { querier.QueryRAMRequests(s, e) },
-		"QueryRAMLimits":                    func(s, e time.Time) { querier.QueryRAMLimits(s, e) },
-		"QueryRAMUsageAvg":                  func(s, e time.Time) { querier.QueryRAMUsageAvg(s, e) },
-		"QueryRAMUsageMax":                  func(s, e time.Time) { querier.QueryRAMUsageMax(s, e) },
-		"QueryNodeRAMPricePerGiBHr":         func(s, e time.Time) { querier.QueryNodeRAMPricePerGiBHr(s, e) },
-		"QueryCPUCoresAllocated":            func(s, e time.Time) { querier.QueryCPUCoresAllocated(s, e) },
-		"QueryCPURequests":                  func(s, e time.Time) { querier.QueryCPURequests(s, e) },
-		"QueryCPULimits":                    func(s, e time.Time) { querier.QueryCPULimits(s, e) },
-		"QueryCPUUsageAvg":                  func(s, e time.Time) { querier.QueryCPUUsageAvg(s, e) },
-		"QueryCPUUsageMax":                  func(s, e time.Time) { querier.QueryCPUUsageMax(s, e) },
-		"QueryNodeCPUPricePerHr":            func(s, e time.Time) { querier.QueryNodeCPUPricePerHr(s, e) },
-		"QueryGPUsAllocated":                func(s, e time.Time) { querier.QueryGPUsAllocated(s, e) },
-		"QueryGPUsRequested":                func(s, e time.Time) { querier.QueryGPUsRequested(s, e) },
-		"QueryGPUsUsageAvg":                 func(s, e time.Time) { querier.QueryGPUsUsageAvg(s, e) },
-		"QueryGPUsUsageMax":                 func(s, e time.Time) { querier.QueryGPUsUsageMax(s, e) },
-		"QueryNodeGPUPricePerHr":            func(s, e time.Time) { querier.QueryNodeGPUPricePerHr(s, e) },
-		"QueryGPUInfo":                      func(s, e time.Time) { querier.QueryGPUInfo(s, e) },
-		"QueryIsGPUShared":                  func(s, e time.Time) { querier.QueryIsGPUShared(s, e) },
-		"QueryPodPVCAllocation":             func(s, e time.Time) { querier.QueryPodPVCAllocation(s, e) },
-		"QueryPVCBytesRequested":            func(s, e time.Time) { querier.QueryPVCBytesRequested(s, e) },
-		"QueryPVCInfo":                      func(s, e time.Time) { querier.QueryPVCInfo(s, e) },
-		"QueryPVBytes":                      func(s, e time.Time) { querier.QueryPVBytes(s, e) },
-		"QueryPVPricePerGiBHour":            func(s, e time.Time) { querier.QueryPVPricePerGiBHour(s, e) },
-		"QueryPVInfo":                       func(s, e time.Time) { querier.QueryPVInfo(s, e) },
-		"QueryNetZoneGiB":                   func(s, e time.Time) { querier.QueryNetZoneGiB(s, e) },
-		"QueryNetZonePricePerGiB":           func(s, e time.Time) { querier.QueryNetZonePricePerGiB(s, e) },
-		"QueryNetRegionGiB":                 func(s, e time.Time) { querier.QueryNetRegionGiB(s, e) },
-		"QueryNetRegionPricePerGiB":         func(s, e time.Time) { querier.QueryNetRegionPricePerGiB(s, e) },
-		"QueryNetInternetGiB":               func(s, e time.Time) { querier.QueryNetInternetGiB(s, e) },
-		"QueryNetInternetPricePerGiB":       func(s, e time.Time) { querier.QueryNetInternetPricePerGiB(s, e) },
-		"QueryNetInternetServiceGiB":        func(s, e time.Time) { querier.QueryNetInternetServiceGiB(s, e) },
-		"QueryNetTransferBytes":             func(s, e time.Time) { querier.QueryNetTransferBytes(s, e) },
-		"QueryNetZoneIngressGiB":            func(s, e time.Time) { querier.QueryNetZoneIngressGiB(s, e) },
-		"QueryNetRegionIngressGiB":          func(s, e time.Time) { querier.QueryNetRegionIngressGiB(s, e) },
-		"QueryNetInternetIngressGiB":        func(s, e time.Time) { querier.QueryNetInternetIngressGiB(s, e) },
-		"QueryNetInternetServiceIngressGiB": func(s, e time.Time) { querier.QueryNetInternetServiceIngressGiB(s, e) },
-		"QueryNetReceiveBytes":              func(s, e time.Time) { querier.QueryNetReceiveBytes(s, e) },
-		"QueryNamespaceAnnotations":         func(s, e time.Time) { querier.QueryNamespaceAnnotations(s, e) },
-		"QueryPodAnnotations":               func(s, e time.Time) { querier.QueryPodAnnotations(s, e) },
-		"QueryNodeLabels":                   func(s, e time.Time) { querier.QueryNodeLabels(s, e) },
-		"QueryNamespaceLabels":              func(s, e time.Time) { querier.QueryNamespaceLabels(s, e) },
-		"QueryPodLabels":                    func(s, e time.Time) { querier.QueryPodLabels(s, e) },
-		"QueryServiceLabels":                func(s, e time.Time) { querier.QueryServiceLabels(s, e) },
-		"QueryDeploymentLabels":             func(s, e time.Time) { querier.QueryDeploymentLabels(s, e) },
-		"QueryStatefulSetLabels":            func(s, e time.Time) { querier.QueryStatefulSetLabels(s, e) },
-		"QueryDaemonSetLabels":              func(s, e time.Time) { querier.QueryDaemonSetLabels(s, e) },
-		"QueryJobLabels":                    func(s, e time.Time) { querier.QueryJobLabels(s, e) },
-		"QueryPodsWithReplicaSetOwner":      func(s, e time.Time) { querier.QueryPodsWithReplicaSetOwner(s, e) },
-		"QueryReplicaSetsWithoutOwners":     func(s, e time.Time) { querier.QueryReplicaSetsWithoutOwners(s, e) },
-		"QueryReplicaSetsWithRollout":       func(s, e time.Time) { querier.QueryReplicaSetsWithRollout(s, e) },
+		"QueryPVActiveMinutes":                          func(s, e time.Time) { querier.QueryPVActiveMinutes(s, e) },
+		"QueryPVUsedAverage":                            func(s, e time.Time) { querier.QueryPVUsedAverage(s, e) },
+		"QueryPVUsedMax":                                func(s, e time.Time) { querier.QueryPVUsedMax(s, e) },
+		"QueryLocalStorageActiveMinutes":                func(s, e time.Time) { querier.QueryLocalStorageActiveMinutes(s, e) },
+		"QueryLocalStorageCost":                         func(s, e time.Time) { querier.QueryLocalStorageCost(s, e) },
+		"QueryLocalStorageUsedCost":                     func(s, e time.Time) { querier.QueryLocalStorageUsedCost(s, e) },
+		"QueryLocalStorageUsedAvg":                      func(s, e time.Time) { querier.QueryLocalStorageUsedAvg(s, e) },
+		"QueryLocalStorageUsedMax":                      func(s, e time.Time) { querier.QueryLocalStorageUsedMax(s, e) },
+		"QueryLocalStorageBytes":                        func(s, e time.Time) { querier.QueryLocalStorageBytes(s, e) },
+		"QueryNodeActiveMinutes":                        func(s, e time.Time) { querier.QueryNodeActiveMinutes(s, e) },
+		"QueryNodeCPUCoresCapacity":                     func(s, e time.Time) { querier.QueryNodeCPUCoresCapacity(s, e) },
+		"QueryNodeCPUCoresAllocatable":                  func(s, e time.Time) { querier.QueryNodeCPUCoresAllocatable(s, e) },
+		"QueryNodeRAMBytesCapacity":                     func(s, e time.Time) { querier.QueryNodeRAMBytesCapacity(s, e) },
+		"QueryNodeRAMBytesAllocatable":                  func(s, e time.Time) { querier.QueryNodeRAMBytesAllocatable(s, e) },
+		"QueryNodeGPUCount":                             func(s, e time.Time) { querier.QueryNodeGPUCount(s, e) },
+		"QueryNodeCPUModeTotal":                         func(s, e time.Time) { querier.QueryNodeCPUModeTotal(s, e) },
+		"QueryNodeIsSpot":                               func(s, e time.Time) { querier.QueryNodeIsSpot(s, e) },
+		"QueryNodeRAMSystemPercent":                     func(s, e time.Time) { querier.QueryNodeRAMSystemPercent(s, e) },
+		"QueryNodeRAMUserPercent":                       func(s, e time.Time) { querier.QueryNodeRAMUserPercent(s, e) },
+		"QueryLBActiveMinutes":                          func(s, e time.Time) { querier.QueryLBActiveMinutes(s, e) },
+		"QueryLBPricePerHr":                             func(s, e time.Time) { querier.QueryLBPricePerHr(s, e) },
+		"QueryClusterManagementDuration":                func(s, e time.Time) { querier.QueryClusterManagementDuration(s, e) },
+		"QueryClusterManagementPricePerHr":              func(s, e time.Time) { querier.QueryClusterManagementPricePerHr(s, e) },
+		"QueryPods":                                     func(s, e time.Time) { querier.QueryPods(s, e) },
+		"QueryPodsUID":                                  func(s, e time.Time) { querier.QueryPodsUID(s, e) },
+		"QueryRAMBytesAllocated":                        func(s, e time.Time) { querier.QueryRAMBytesAllocated(s, e) },
+		"QueryRAMRequests":                              func(s, e time.Time) { querier.QueryRAMRequests(s, e) },
+		"QueryRAMLimits":                                func(s, e time.Time) { querier.QueryRAMLimits(s, e) },
+		"QueryRAMUsageAvg":                              func(s, e time.Time) { querier.QueryRAMUsageAvg(s, e) },
+		"QueryRAMUsageMax":                              func(s, e time.Time) { querier.QueryRAMUsageMax(s, e) },
+		"QueryNodeRAMPricePerGiBHr":                     func(s, e time.Time) { querier.QueryNodeRAMPricePerGiBHr(s, e) },
+		"QueryCPUCoresAllocated":                        func(s, e time.Time) { querier.QueryCPUCoresAllocated(s, e) },
+		"QueryCPURequests":                              func(s, e time.Time) { querier.QueryCPURequests(s, e) },
+		"QueryCPULimits":                                func(s, e time.Time) { querier.QueryCPULimits(s, e) },
+		"QueryCPUUsageAvg":                              func(s, e time.Time) { querier.QueryCPUUsageAvg(s, e) },
+		"QueryCPUUsageMax":                              func(s, e time.Time) { querier.QueryCPUUsageMax(s, e) },
+		"QueryNodeCPUPricePerHr":                        func(s, e time.Time) { querier.QueryNodeCPUPricePerHr(s, e) },
+		"QueryGPUsAllocated":                            func(s, e time.Time) { querier.QueryGPUsAllocated(s, e) },
+		"QueryGPUsRequested":                            func(s, e time.Time) { querier.QueryGPUsRequested(s, e) },
+		"QueryGPUsUsageAvg":                             func(s, e time.Time) { querier.QueryGPUsUsageAvg(s, e) },
+		"QueryGPUsUsageMax":                             func(s, e time.Time) { querier.QueryGPUsUsageMax(s, e) },
+		"QueryNodeGPUPricePerHr":                        func(s, e time.Time) { querier.QueryNodeGPUPricePerHr(s, e) },
+		"QueryGPUInfo":                                  func(s, e time.Time) { querier.QueryGPUInfo(s, e) },
+		"QueryIsGPUShared":                              func(s, e time.Time) { querier.QueryIsGPUShared(s, e) },
+		"QueryPodPVCAllocation":                         func(s, e time.Time) { querier.QueryPodPVCAllocation(s, e) },
+		"QueryPVCBytesRequested":                        func(s, e time.Time) { querier.QueryPVCBytesRequested(s, e) },
+		"QueryPVCInfo":                                  func(s, e time.Time) { querier.QueryPVCInfo(s, e) },
+		"QueryPVBytes":                                  func(s, e time.Time) { querier.QueryPVBytes(s, e) },
+		"QueryPVPricePerGiBHour":                        func(s, e time.Time) { querier.QueryPVPricePerGiBHour(s, e) },
+		"QueryPVInfo":                                   func(s, e time.Time) { querier.QueryPVInfo(s, e) },
+		"QueryNetZoneGiB":                               func(s, e time.Time) { querier.QueryNetZoneGiB(s, e) },
+		"QueryNetZonePricePerGiB":                       func(s, e time.Time) { querier.QueryNetZonePricePerGiB(s, e) },
+		"QueryNetRegionGiB":                             func(s, e time.Time) { querier.QueryNetRegionGiB(s, e) },
+		"QueryNetRegionPricePerGiB":                     func(s, e time.Time) { querier.QueryNetRegionPricePerGiB(s, e) },
+		"QueryNetInternetGiB":                           func(s, e time.Time) { querier.QueryNetInternetGiB(s, e) },
+		"QueryNetInternetPricePerGiB":                   func(s, e time.Time) { querier.QueryNetInternetPricePerGiB(s, e) },
+		"QueryNetInternetServiceGiB":                    func(s, e time.Time) { querier.QueryNetInternetServiceGiB(s, e) },
+		"QueryNetTransferBytes":                         func(s, e time.Time) { querier.QueryNetTransferBytes(s, e) },
+		"QueryNetZoneIngressGiB":                        func(s, e time.Time) { querier.QueryNetZoneIngressGiB(s, e) },
+		"QueryNetRegionIngressGiB":                      func(s, e time.Time) { querier.QueryNetRegionIngressGiB(s, e) },
+		"QueryNetInternetIngressGiB":                    func(s, e time.Time) { querier.QueryNetInternetIngressGiB(s, e) },
+		"QueryNetInternetServiceIngressGiB":             func(s, e time.Time) { querier.QueryNetInternetServiceIngressGiB(s, e) },
+		"QueryNetReceiveBytes":                          func(s, e time.Time) { querier.QueryNetReceiveBytes(s, e) },
+		"QueryNamespaceAnnotations":                     func(s, e time.Time) { querier.QueryNamespaceAnnotations(s, e) },
+		"QueryPodAnnotations":                           func(s, e time.Time) { querier.QueryPodAnnotations(s, e) },
+		"QueryNodeLabels":                               func(s, e time.Time) { querier.QueryNodeLabels(s, e) },
+		"QueryNamespaceLabels":                          func(s, e time.Time) { querier.QueryNamespaceLabels(s, e) },
+		"QueryPodLabels":                                func(s, e time.Time) { querier.QueryPodLabels(s, e) },
+		"QueryServiceLabels":                            func(s, e time.Time) { querier.QueryServiceLabels(s, e) },
+		"QueryDeploymentLabels":                         func(s, e time.Time) { querier.QueryDeploymentLabels(s, e) },
+		"QueryStatefulSetLabels":                        func(s, e time.Time) { querier.QueryStatefulSetLabels(s, e) },
+		"QueryDaemonSetLabels":                          func(s, e time.Time) { querier.QueryDaemonSetLabels(s, e) },
+		"QueryJobLabels":                                func(s, e time.Time) { querier.QueryJobLabels(s, e) },
+		"QueryPodsWithReplicaSetOwner":                  func(s, e time.Time) { querier.QueryPodsWithReplicaSetOwner(s, e) },
+		"QueryReplicaSetsWithoutOwners":                 func(s, e time.Time) { querier.QueryReplicaSetsWithoutOwners(s, e) },
+		"QueryReplicaSetsWithRollout":                   func(s, e time.Time) { querier.QueryReplicaSetsWithRollout(s, e) },
+		"QueryResourceQuotaSpecCPURequestAverage":       func(s, e time.Time) { querier.QueryResourceQuotaSpecCPURequestAverage(s, e) },
+		"QueryResourceQuotaSpecCPURequestMax":           func(s, e time.Time) { querier.QueryResourceQuotaSpecCPURequestMax(s, e) },
+		"QueryResourceQuotaSpecRAMRequestAverage":       func(s, e time.Time) { querier.QueryResourceQuotaSpecRAMRequestAverage(s, e) },
+		"QueryResourceQuotaSpecRAMRequestMax":           func(s, e time.Time) { querier.QueryResourceQuotaSpecRAMRequestMax(s, e) },
+		"QueryResourceQuotaSpecCPULimitAverage":         func(s, e time.Time) { querier.QueryResourceQuotaSpecCPULimitAverage(s, e) },
+		"QueryResourceQuotaSpecCPULimitMax":             func(s, e time.Time) { querier.QueryResourceQuotaSpecCPULimitMax(s, e) },
+		"QueryResourceQuotaSpecRAMLimitAverage":         func(s, e time.Time) { querier.QueryResourceQuotaSpecRAMLimitAverage(s, e) },
+		"QueryResourceQuotaSpecRAMLimitMax":             func(s, e time.Time) { querier.QueryResourceQuotaSpecRAMLimitMax(s, e) },
+		"QueryResourceQuotaStatusUsedCPURequestAverage": func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedCPURequestAverage(s, e) },
+		"QueryResourceQuotaStatusUsedCPURequestMax":     func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedCPURequestMax(s, e) },
+		"QueryResourceQuotaStatusUsedRAMRequestAverage": func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedRAMRequestAverage(s, e) },
+		"QueryResourceQuotaStatusUsedRAMRequestMax":     func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedRAMRequestMax(s, e) },
+		"QueryResourceQuotaStatusUsedCPULimitAverage":   func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedCPULimitAverage(s, e) },
+		"QueryResourceQuotaStatusUsedCPULimitMax":       func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedCPULimitMax(s, e) },
+		"QueryResourceQuotaStatusUsedRAMLimitAverage":   func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedRAMLimitAverage(s, e) },
+		"QueryResourceQuotaStatusUsedRAMLimitMax":       func(s, e time.Time) { querier.QueryResourceQuotaStatusUsedRAMLimitMax(s, e) },
 	}
 
 	for testName, queryFunc := range tests {

+ 18 - 4
pkg/cloud/aws/provider.go

@@ -70,6 +70,8 @@ var (
 	versionRx     = regexp.MustCompile(`^#Version: (\\d+)\\.\\d+$`)
 	regionRx      = regexp.MustCompile("([a-z]+-[a-z]+-[0-9])")
 
+	ErrNoAthenaBucket = errors.New("No Athena Bucket configured")
+
 	// StorageClassProvisionerDefaults specifies the default storage class types depending upon the provisioner
 	StorageClassProvisionerDefaults = map[string]string{
 		"kubernetes.io/aws-ebs": "gp2",
@@ -896,10 +898,18 @@ func (aws *AWS) DownloadPricingData() error {
 
 	// RIDataRunning establishes the existence of the goroutine. Since it's possible we
 	// run multiple downloads, we don't want to create multiple go routines if one already exists
+	//
+	// If athenaBucketName is unconfigured, the ReservedInstanceData and SavingsPlanData watchers
+	// are skipped. Note: These watchers are less commonly used. It is recommended to use the full
+	// CloudCosts feature via athenaintegration.go.
 	if !aws.RIDataRunning {
 		err = aws.GetReservationDataFromAthena() // Block until one run has completed.
 		if err != nil {
-			log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
+			if errors.Is(err, ErrNoAthenaBucket) {
+				log.Debugf("No \"athenaBucketName\" configured, ReservedInstanceData watcher will not run")
+			} else {
+				log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
+			}
 		} else { // If we make one successful run, check on new reservation data every hour
 			go func() {
 				defer errs.HandlePanic()
@@ -919,7 +929,11 @@ func (aws *AWS) DownloadPricingData() error {
 	if !aws.SavingsPlanDataRunning {
 		err = aws.GetSavingsPlanDataFromAthena()
 		if err != nil {
-			log.Errorf("Failed to lookup savings plan data: %s", err.Error())
+			if errors.Is(err, ErrNoAthenaBucket) {
+				log.Debugf("No \"athenaBucketName\" configured, SavingsPlanData watcher will not run")
+			} else {
+				log.Errorf("Failed to lookup savings plan data: %s", err.Error())
+			}
 		} else {
 			go func() {
 				defer errs.HandlePanic()
@@ -2039,7 +2053,7 @@ func (aws *AWS) GetSavingsPlanDataFromAthena() error {
 		return err
 	}
 	if cfg.AthenaBucketName == "" {
-		err = fmt.Errorf("No Athena Bucket configured")
+		err = ErrNoAthenaBucket
 		aws.RIPricingError = err
 		return err
 	}
@@ -2136,7 +2150,7 @@ func (aws *AWS) GetReservationDataFromAthena() error {
 		return err
 	}
 	if cfg.AthenaBucketName == "" {
-		err = fmt.Errorf("No Athena Bucket configured")
+		err = ErrNoAthenaBucket
 		aws.RIPricingError = err
 		return err
 	}

+ 82 - 0
pkg/cloud/azure/storageauthorizer.go

@@ -3,12 +3,28 @@ package azure
 import (
 	"encoding/json"
 	"fmt"
+	"net/http"
+	"time"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/opencost/opencost/core/pkg/storage"
 	"github.com/opencost/opencost/pkg/cloud"
 )
 
 const SharedKeyAuthorizerType = "AzureAccessKey"
+const StorageConnectionStringAuthorizerType = "AzureStorageConnectionString"
+
+var defaultHTTPConfig = storage.HTTPConfig{
+	IdleConnTimeout:       90 * time.Second,
+	ResponseHeaderTimeout: 2 * time.Minute,
+	TLSHandshakeTimeout:   10 * time.Second,
+	ExpectContinueTimeout: 1 * time.Second,
+	MaxIdleConns:          100,
+	MaxIdleConnsPerHost:   100,
+	MaxConnsPerHost:       0,
+	DisableCompression:    false,
+}
 
 // StorageAuthorizer is a service specific Authorizer for Azure Storage, it exists so that we can support existing Shared
 // Key configurations while allowing the Authorizer to have a service agnostic api
@@ -22,6 +38,10 @@ func SelectStorageAuthorizerByType(typeStr string) (StorageAuthorizer, error) {
 	switch typeStr {
 	case SharedKeyAuthorizerType:
 		return &SharedKeyCredential{}, nil
+	case StorageConnectionStringAuthorizerType:
+		return &StorageConnectionStringCredential{
+			HTTPConfig: defaultHTTPConfig,
+		}, nil
 	default:
 		authorizer, err := SelectAuthorizerByType(typeStr)
 		if err != nil {
@@ -127,3 +147,65 @@ func (ah *AuthorizerHolder) GetBlobClient(serviceURL string) (*azblob.Client, er
 func (ah *AuthorizerHolder) UnmarshalJSON(b []byte) error {
 	return json.Unmarshal(b, ah.Authorizer)
 }
+
+type StorageConnectionStringCredential struct {
+	StorageConnectionString string             `json:"storageConnectionString"`
+	HTTPConfig              storage.HTTPConfig `json:"httpConfig"`
+}
+
+func (s *StorageConnectionStringCredential) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[cloud.AuthorizerTypeProperty] = StorageConnectionStringAuthorizerType
+	fmap["storageConnectionString"] = s.StorageConnectionString
+	fmap["httpConfig"] = s.HTTPConfig
+	return json.Marshal(fmap)
+}
+
+func (s *StorageConnectionStringCredential) Validate() error {
+	if s.StorageConnectionString == "" {
+		return fmt.Errorf("StorageConnectionStringCredential: missing storage connection string")
+	}
+	return nil
+}
+
+func (s *StorageConnectionStringCredential) Equals(config cloud.Config) bool {
+	if config == nil {
+		return false
+	}
+
+	thatConfig, ok := config.(*StorageConnectionStringCredential)
+	if !ok {
+		return false
+	}
+
+	if s.HTTPConfig != thatConfig.HTTPConfig {
+		return false
+	}
+
+	if s.StorageConnectionString != thatConfig.StorageConnectionString {
+		return false
+	}
+
+	return true
+}
+
+func (s *StorageConnectionStringCredential) Sanitize() cloud.Config {
+	return &StorageConnectionStringCredential{
+		StorageConnectionString: cloud.Redacted,
+		HTTPConfig:              s.HTTPConfig,
+	}
+}
+
+func (s *StorageConnectionStringCredential) GetBlobClient(serviceURL string) (*azblob.Client, error) {
+	dt, err := s.HTTPConfig.GetHTTPTransport()
+	if err != nil {
+		return nil, fmt.Errorf("error creating transport: %w", err)
+	}
+	options := &azblob.ClientOptions{
+		ClientOptions: azcore.ClientOptions{
+			Transport: &http.Client{Transport: dt},
+		},
+	}
+	client, err := azblob.NewClientFromConnectionString(s.StorageConnectionString, options)
+	return client, err
+}

+ 246 - 10
pkg/cloud/azure/storageconfiguration_test.go

@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/storage"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud"
 )
@@ -122,6 +123,30 @@ func TestStorageConfiguration_Validate(t *testing.T) {
 			},
 			expected: nil,
 		},
+		"valid config StorageConnectionStringCredential": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+				},
+			},
+			expected: nil,
+		},
+		"missing storage connection string": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     &StorageConnectionStringCredential{},
+			},
+			expected: fmt.Errorf("StorageConnectionStringCredential: missing storage connection string"),
+		},
 	}
 
 	for name, testCase := range testCases {
@@ -426,6 +451,79 @@ func TestStorageConfiguration_Equals(t *testing.T) {
 			},
 			expected: false,
 		},
+		"matching config StorageConnectionStringCredential": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+				},
+			},
+			expected: true,
+		},
+		"different StorageConnectionString in StorageConnectionStringCredential": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString1",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString2",
+				},
+			},
+			expected: false,
+		},
+		"different HTTPConfig in StorageConnectionStringCredential": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+					HTTPConfig:              defaultHTTPConfig,
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+					HTTPConfig: storage.HTTPConfig{
+						InsecureSkipVerify: true,
+					},
+				},
+			},
+			expected: false,
+		},
 	}
 
 	for name, testCase := range testCases {
@@ -440,13 +538,19 @@ func TestStorageConfiguration_Equals(t *testing.T) {
 
 func TestStorageConfiguration_JSON(t *testing.T) {
 	testCases := map[string]struct {
-		config StorageConfiguration
+		input          map[string]interface{}
+		afterUnmarshal StorageConfiguration
 	}{
-		"Empty Config": {
-			config: StorageConfiguration{},
-		},
 		"Nil Authorizer": {
-			config: StorageConfiguration{
+			input: map[string]interface{}{
+				"subscriptionID": "subscriptionID",
+				"account":        "account",
+				"container":      "container",
+				"path":           "path",
+				"cloud":          "cloud",
+				"authorizer":     nil,
+			},
+			afterUnmarshal: StorageConfiguration{
 				SubscriptionID: "subscriptionID",
 				Account:        "account",
 				Container:      "container",
@@ -456,7 +560,19 @@ func TestStorageConfiguration_JSON(t *testing.T) {
 			},
 		},
 		"SharedKeyCredential Authorizer": {
-			config: StorageConfiguration{
+			input: map[string]interface{}{
+				"subscriptionID": "subscriptionID",
+				"account":        "account",
+				"container":      "container",
+				"path":           "path",
+				"cloud":          "cloud",
+				"authorizer": map[string]interface{}{
+					"authorizerType": "AzureAccessKey",
+					"accessKey":      "accessKey",
+					"account":        "account",
+				},
+			},
+			afterUnmarshal: StorageConfiguration{
 				SubscriptionID: "subscriptionID",
 				Account:        "account",
 				Container:      "container",
@@ -469,7 +585,17 @@ func TestStorageConfiguration_JSON(t *testing.T) {
 			},
 		},
 		"Default AuthorizerHolder Authorizer": {
-			config: StorageConfiguration{
+			input: map[string]interface{}{
+				"subscriptionID": "subscriptionID",
+				"account":        "account",
+				"container":      "container",
+				"path":           "path",
+				"cloud":          "cloud",
+				"authorizer": map[string]interface{}{
+					"authorizerType": "AzureDefaultCredential",
+				},
+			},
+			afterUnmarshal: StorageConfiguration{
 				SubscriptionID: "subscriptionID",
 				Account:        "account",
 				Container:      "container",
@@ -481,7 +607,20 @@ func TestStorageConfiguration_JSON(t *testing.T) {
 			},
 		},
 		"ClientSecretCredential Authorizer": {
-			config: StorageConfiguration{
+			input: map[string]interface{}{
+				"subscriptionID": "subscriptionID",
+				"account":        "account",
+				"container":      "container",
+				"path":           "path",
+				"cloud":          "cloud",
+				"authorizer": map[string]interface{}{
+					"authorizerType": "AzureClientSecretCredential",
+					"tenantID":       "tenantID",
+					"clientID":       "clientID",
+					"clientSecret":   "clientSecret",
+				},
+			},
+			afterUnmarshal: StorageConfiguration{
 				SubscriptionID: "subscriptionID",
 				Account:        "account",
 				Container:      "container",
@@ -496,12 +635,43 @@ func TestStorageConfiguration_JSON(t *testing.T) {
 				},
 			},
 		},
+		"StorageConnectionStringCredential Authorizer": {
+			input: map[string]interface{}{
+				"subscriptionID": "subscriptionID",
+				"account":        "account",
+				"container":      "container",
+				"path":           "path",
+				"cloud":          "cloud",
+				"authorizer": map[string]interface{}{
+					"authorizerType":          "AzureStorageConnectionString",
+					"storageConnectionString": "storageConnectionString",
+					"httpConfig": map[string]interface{}{
+						"insecureSkipVerify": true,
+					},
+				},
+			},
+			afterUnmarshal: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+					HTTPConfig: func() storage.HTTPConfig {
+						cfg := defaultHTTPConfig
+						cfg.InsecureSkipVerify = true
+						return cfg
+					}(),
+				},
+			},
+		},
 	}
 
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
 			// test JSON Marshalling
-			configJSON, err := json.Marshal(testCase.config)
+			configJSON, err := json.Marshal(testCase.input)
 			if err != nil {
 				t.Errorf("failed to marshal configuration: %s", err.Error())
 			}
@@ -512,9 +682,75 @@ func TestStorageConfiguration_JSON(t *testing.T) {
 				t.Errorf("failed to unmarshal configuration: %s", err.Error())
 			}
 
-			if !testCase.config.Equals(unmarshalledConfig) {
+			if !testCase.afterUnmarshal.Equals(unmarshalledConfig) {
 				t.Error("config does not equal unmarshalled config")
 			}
 		})
 	}
 }
+
+func TestStorageConfiguration_Sanitize(t *testing.T) {
+	testCases := map[string]struct {
+		config   StorageConfiguration
+		expected StorageConfiguration
+	}{
+		"Sanitize StorageConnectionStringCredential": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: "storageConnectionString",
+					HTTPConfig:              defaultHTTPConfig,
+				},
+			},
+			expected: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &StorageConnectionStringCredential{
+					StorageConnectionString: cloud.Redacted,
+					HTTPConfig:              defaultHTTPConfig,
+				},
+			},
+		},
+		"Sanitize SharedKeyCredential": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &SharedKeyCredential{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &SharedKeyCredential{
+					AccessKey: cloud.Redacted,
+					Account:   "account",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Sanitize()
+
+			if !testCase.expected.Equals(actual) {
+				t.Errorf("incorrect result: got %#v, want %#v", actual, testCase.expected)
+			}
+		})
+	}
+}

+ 35 - 0
pkg/cmd/costmodel/costmodel.go

@@ -236,6 +236,29 @@ func StartMCPServer(ctx context.Context, accesses *costmodel.Accesses, cloudCost
 		return nil, mcpResp, nil
 	}
 
+	handleEfficiency := func(ctx context.Context, req *mcp_sdk.CallToolRequest, args EfficiencyArgs) (*mcp_sdk.CallToolResult, interface{}, error) {
+		queryRequest := &opencost_mcp.OpenCostQueryRequest{
+			QueryType: opencost_mcp.EfficiencyQueryType,
+			Window:    args.Window,
+			EfficiencyParams: &opencost_mcp.EfficiencyQuery{
+				Aggregate:                  args.Aggregate,
+				Filter:                     args.Filter,
+				EfficiencyBufferMultiplier: args.BufferMultiplier,
+			},
+		}
+
+		mcpReq := &opencost_mcp.MCPRequest{
+			Query: queryRequest,
+		}
+
+		mcpResp, err := mcpServer.ProcessMCPRequest(mcpReq)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to process efficiency request: %w", err)
+		}
+
+		return nil, mcpResp, nil
+	}
+
 	// Register tools
 	mcp_sdk.AddTool(sdkServer, &mcp_sdk.Tool{
 		Name:        "get_allocation_costs",
@@ -252,6 +275,11 @@ func StartMCPServer(ctx context.Context, accesses *costmodel.Accesses, cloudCost
 		Description: "Retrieves cloud cost data.",
 	}, handleCloudCosts)
 
+	mcp_sdk.AddTool(sdkServer, &mcp_sdk.Tool{
+		Name:        "get_efficiency",
+		Description: "Retrieves resource efficiency metrics with rightsizing recommendations and cost savings analysis. Computes CPU and memory efficiency (usage/request ratio), provides recommended resource requests, and calculates potential cost savings. Optional buffer_multiplier parameter (default: 1.2 for 20% headroom) can be set to values like 1.4 for 40% headroom.",
+	}, handleEfficiency)
+
 	// Create HTTP handler
 	handler := mcp_sdk.NewStreamableHTTPHandler(func(r *http.Request) *mcp_sdk.Server {
 		return sdkServer
@@ -320,3 +348,10 @@ type CloudCostArgs struct {
 	Region     string `json:"region,omitempty"`
 	Account    string `json:"account,omitempty"`
 }
+
+type EfficiencyArgs struct {
+	Window           string   `json:"window"`                      // Time window (e.g., "today", "yesterday", "7d", "lastweek")
+	Aggregate        string   `json:"aggregate,omitempty"`         // Aggregation level (e.g., "pod", "namespace", "controller")
+	Filter           string   `json:"filter,omitempty"`            // Filter expression (same as allocation filters)
+	BufferMultiplier *float64 `json:"buffer_multiplier,omitempty"` // Buffer multiplier for recommendations (default: 1.2 for 20% headroom, e.g., 1.4 for 40%)
+}

+ 269 - 1
pkg/mcp/server.go

@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/go-playground/validator/v10"
@@ -26,6 +27,14 @@ const (
 	AllocationQueryType QueryType = "allocation"
 	AssetQueryType      QueryType = "asset"
 	CloudCostQueryType  QueryType = "cloudcost"
+	EfficiencyQueryType QueryType = "efficiency"
+)
+
+// Efficiency calculation constants
+const (
+	efficiencyBufferMultiplier = 1.2         // 20% headroom for stability
+	efficiencyMinCPU           = 0.001       // minimum CPU cores
+	efficiencyMinRAM           = 1024 * 1024 // 1 MB minimum RAM
 )
 
 // MCPRequest represents a single turn in a conversation with the OpenCost MCP server.
@@ -49,13 +58,14 @@ type QueryMetadata struct {
 
 // OpenCostQueryRequest provides a unified interface for all OpenCost query types.
 type OpenCostQueryRequest struct {
-	QueryType QueryType `json:"queryType" validate:"required,oneof=allocation asset cloudcost"`
+	QueryType QueryType `json:"queryType" validate:"required,oneof=allocation asset cloudcost efficiency"`
 
 	Window string `json:"window" validate:"required"`
 
 	AllocationParams *AllocationQuery `json:"allocationParams,omitempty"`
 	AssetParams      *AssetQuery      `json:"assetParams,omitempty"`
 	CloudCostParams  *CloudCostQuery  `json:"cloudCostParams,omitempty"`
+	EfficiencyParams *EfficiencyQuery `json:"efficiencyParams,omitempty"`
 }
 
 // AllocationQuery contains the parameters for an allocation query.
@@ -93,6 +103,13 @@ type CloudCostQuery struct {
 	Labels          map[string]string `json:"labels,omitempty"`          // Label filters (key->value)
 }
 
+// EfficiencyQuery contains the parameters for an efficiency query.
+type EfficiencyQuery struct {
+	Aggregate                  string   `json:"aggregate,omitempty"`                  // Aggregation properties (e.g., "pod", "namespace", "controller")
+	Filter                     string   `json:"filter,omitempty"`                     // Filter expression for allocations (same as AllocationQuery)
+	EfficiencyBufferMultiplier *float64 `json:"efficiencyBufferMultiplier,omitempty"` // Buffer multiplier for recommendations (default: 1.2 for 20% headroom)
+}
+
 // AllocationResponse represents the allocation data returned to the AI agent.
 type AllocationResponse struct {
 	// The allocation data, as a map of allocation sets.
@@ -301,6 +318,47 @@ type CostMetric struct {
 	KubernetesPercent float64 `json:"kubernetesPercent"`
 }
 
+// EfficiencyResponse represents the efficiency data returned to the AI agent.
+type EfficiencyResponse struct {
+	Efficiencies []*EfficiencyMetric `json:"efficiencies"`
+}
+
+// EfficiencyMetric represents efficiency data for a single pod/workload.
+type EfficiencyMetric struct {
+	Name string `json:"name"` // Pod/namespace/controller name based on aggregation
+
+	// Current state
+	CPUEfficiency    float64 `json:"cpuEfficiency"`    // Usage / Request ratio (0-1+)
+	MemoryEfficiency float64 `json:"memoryEfficiency"` // Usage / Request ratio (0-1+)
+
+	// Current requests and usage
+	CPUCoresRequested float64 `json:"cpuCoresRequested"`
+	CPUCoresUsed      float64 `json:"cpuCoresUsed"`
+	RAMBytesRequested float64 `json:"ramBytesRequested"`
+	RAMBytesUsed      float64 `json:"ramBytesUsed"`
+
+	// Recommendations (based on actual usage with buffer)
+	RecommendedCPURequest float64 `json:"recommendedCpuRequest"` // Recommended CPU cores
+	RecommendedRAMRequest float64 `json:"recommendedRamRequest"` // Recommended RAM bytes
+
+	// Resulting efficiency after applying recommendations
+	ResultingCPUEfficiency    float64 `json:"resultingCpuEfficiency"`
+	ResultingMemoryEfficiency float64 `json:"resultingMemoryEfficiency"`
+
+	// Cost analysis
+	CurrentTotalCost   float64 `json:"currentTotalCost"`   // Current total cost
+	RecommendedCost    float64 `json:"recommendedCost"`    // Estimated cost with recommendations
+	CostSavings        float64 `json:"costSavings"`        // Potential savings
+	CostSavingsPercent float64 `json:"costSavingsPercent"` // Savings as percentage
+
+	// Buffer multiplier used for recommendations
+	EfficiencyBufferMultiplier float64 `json:"efficiencyBufferMultiplier"` // Buffer multiplier applied (e.g., 1.2 for 20% headroom)
+
+	// Time window
+	Start time.Time `json:"start"`
+	End   time.Time `json:"end"`
+}
+
 // MCPServer holds the dependencies for the MCP API server.
 type MCPServer struct {
 	costModel    *costmodel.CostModel
@@ -338,6 +396,8 @@ func (s *MCPServer) ProcessMCPRequest(request *MCPRequest) (*MCPResponse, error)
 		data, err = s.QueryAssets(request.Query)
 	case CloudCostQueryType:
 		data, err = s.QueryCloudCosts(request.Query)
+	case EfficiencyQueryType:
+		data, err = s.QueryEfficiency(request.Query)
 	default:
 		return nil, fmt.Errorf("unsupported query type: %s", request.Query.QueryType)
 	}
@@ -918,3 +978,211 @@ func transformCloudCostSetRange(ccsr *opencost.CloudCostSetRange) *CloudCostResp
 		Summary:    summary,
 	}
 }
+
+// QueryEfficiency queries allocation data and computes efficiency metrics with recommendations.
+func (s *MCPServer) QueryEfficiency(query *OpenCostQueryRequest) (*EfficiencyResponse, error) {
+	// 1. Parse Window
+	window, err := opencost.ParseWindowWithOffset(query.Window, 0)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse window '%s': %w", query.Window, err)
+	}
+
+	// 2. Set default parameters
+	var aggregateBy []string
+	var filterString string
+	var bufferMultiplier float64 = efficiencyBufferMultiplier // Default to 1.2 (20% headroom)
+
+	// 3. Parse efficiency parameters if provided
+	if query.EfficiencyParams != nil {
+		// Parse aggregation properties (default to pod if not specified)
+		if query.EfficiencyParams.Aggregate != "" {
+			aggregateBy = strings.Split(query.EfficiencyParams.Aggregate, ",")
+		} else {
+			aggregateBy = []string{"pod"}
+		}
+
+		// Set filter string
+		filterString = query.EfficiencyParams.Filter
+
+		// Validate filter string if provided
+		if filterString != "" {
+			parser := allocation.NewAllocationFilterParser()
+			_, err := parser.Parse(filterString)
+			if err != nil {
+				return nil, fmt.Errorf("invalid allocation filter '%s': %w", filterString, err)
+			}
+		}
+
+		// Set buffer multiplier if provided, otherwise use default
+		if query.EfficiencyParams.EfficiencyBufferMultiplier != nil {
+			bufferMultiplier = *query.EfficiencyParams.EfficiencyBufferMultiplier
+		}
+	} else {
+		// Default to pod-level aggregation
+		aggregateBy = []string{"pod"}
+		filterString = ""
+	}
+
+	// 4. Query allocations with the specified parameters
+	// Use the entire window as step to get aggregated data
+	step := window.Duration()
+	asr, err := s.costModel.QueryAllocation(
+		window,
+		step,
+		aggregateBy,
+		false, // includeIdle
+		false, // idleByNode
+		false, // includeProportionalAssetResourceCosts
+		false, // includeAggregatedMetadata
+		false, // sharedLoadBalancer
+		opencost.AccumulateOptionNone,
+		false, // shareIdle
+		filterString,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query allocations: %w", err)
+	}
+
+	// 5. Handle empty results
+	if asr == nil || len(asr.Allocations) == 0 {
+		return &EfficiencyResponse{
+			Efficiencies: []*EfficiencyMetric{},
+		}, nil
+	}
+
+	// 6. Compute efficiency metrics from allocations using concurrent processing
+	var (
+		mu           sync.Mutex
+		wg           sync.WaitGroup
+		efficiencies = make([]*EfficiencyMetric, 0)
+	)
+
+	// Process each allocation set (typically one per time window) concurrently
+	for _, allocSet := range asr.Allocations {
+		if allocSet == nil {
+			continue
+		}
+
+		// Process this allocation set in a goroutine
+		wg.Add(1)
+		go func(allocSet *opencost.AllocationSet) {
+			defer wg.Done()
+
+			// Compute metrics for all allocations in this set
+			localMetrics := make([]*EfficiencyMetric, 0, len(allocSet.Allocations))
+			for _, alloc := range allocSet.Allocations {
+				if metric := computeEfficiencyMetric(alloc, bufferMultiplier); metric != nil {
+					localMetrics = append(localMetrics, metric)
+				}
+			}
+
+			// Append results to shared slice (thread-safe)
+			if len(localMetrics) > 0 {
+				mu.Lock()
+				efficiencies = append(efficiencies, localMetrics...)
+				mu.Unlock()
+			}
+		}(allocSet)
+	}
+
+	// Wait for all goroutines to complete
+	wg.Wait()
+
+	return &EfficiencyResponse{
+		Efficiencies: efficiencies,
+	}, nil
+}
+
+// safeDiv performs division and returns 0 if denominator is 0.
+func safeDiv(numerator, denominator float64) float64 {
+	if denominator == 0 {
+		return 0
+	}
+	return numerator / denominator
+}
+
+// computeEfficiencyMetric calculates efficiency metrics for a single allocation.
+func computeEfficiencyMetric(alloc *opencost.Allocation, bufferMultiplier float64) *EfficiencyMetric {
+	if alloc == nil {
+		return nil
+	}
+
+	// Calculate time duration in hours
+	hours := alloc.Minutes() / 60.0
+	if hours <= 0 {
+		return nil
+	}
+
+	// Get current usage (average over the period)
+	cpuCoresUsed := alloc.CPUCoreHours / hours
+	ramBytesUsed := alloc.RAMByteHours / hours
+
+	// Get requested amounts
+	cpuCoresRequested := alloc.CPUCoreRequestAverage
+	ramBytesRequested := alloc.RAMBytesRequestAverage
+
+	// Calculate current efficiency (will be 0 if no requests are set)
+	cpuEfficiency := safeDiv(cpuCoresUsed, cpuCoresRequested)
+	memoryEfficiency := safeDiv(ramBytesUsed, ramBytesRequested)
+
+	// Calculate recommendations with buffer for headroom
+	recommendedCPU := cpuCoresUsed * bufferMultiplier
+	recommendedRAM := ramBytesUsed * bufferMultiplier
+
+	// Ensure recommendations meet minimum thresholds
+	if recommendedCPU < efficiencyMinCPU {
+		recommendedCPU = efficiencyMinCPU
+	}
+	if recommendedRAM < efficiencyMinRAM {
+		recommendedRAM = efficiencyMinRAM
+	}
+
+	// Calculate resulting efficiency after applying recommendations
+	resultingCPUEff := safeDiv(cpuCoresUsed, recommendedCPU)
+	resultingMemEff := safeDiv(ramBytesUsed, recommendedRAM)
+
+	// Calculate cost per unit based on REQUESTED amounts (not used amounts)
+	// This gives us the cost per core-hour or byte-hour that the cluster charges
+	cpuCostPerCoreHour := safeDiv(alloc.CPUCost, cpuCoresRequested*hours)
+	ramCostPerByteHour := safeDiv(alloc.RAMCost, ramBytesRequested*hours)
+
+	// Current total cost
+	currentTotalCost := alloc.TotalCost()
+
+	// Estimate recommended cost based on recommended requests
+	recommendedCPUCost := recommendedCPU * hours * cpuCostPerCoreHour
+	recommendedRAMCost := recommendedRAM * hours * ramCostPerByteHour
+	// Keep other costs the same (PV, network, shared, external, GPU)
+	otherCosts := alloc.PVCost() + alloc.NetworkCost + alloc.SharedCost + alloc.ExternalCost + alloc.GPUCost
+	recommendedTotalCost := recommendedCPUCost + recommendedRAMCost + otherCosts
+
+	// Clamp recommended cost to avoid rounding issues making it higher than current
+	if recommendedTotalCost > currentTotalCost && (recommendedTotalCost-currentTotalCost) < 0.0001 {
+		recommendedTotalCost = currentTotalCost
+	}
+
+	// Calculate savings
+	costSavings := currentTotalCost - recommendedTotalCost
+	costSavingsPercent := safeDiv(costSavings, currentTotalCost) * 100
+
+	return &EfficiencyMetric{
+		Name:                       alloc.Name,
+		CPUEfficiency:              cpuEfficiency,
+		MemoryEfficiency:           memoryEfficiency,
+		CPUCoresRequested:          cpuCoresRequested,
+		CPUCoresUsed:               cpuCoresUsed,
+		RAMBytesRequested:          ramBytesRequested,
+		RAMBytesUsed:               ramBytesUsed,
+		RecommendedCPURequest:      recommendedCPU,
+		RecommendedRAMRequest:      recommendedRAM,
+		ResultingCPUEfficiency:     resultingCPUEff,
+		ResultingMemoryEfficiency:  resultingMemEff,
+		CurrentTotalCost:           currentTotalCost,
+		RecommendedCost:            recommendedTotalCost,
+		CostSavings:                costSavings,
+		CostSavingsPercent:         costSavingsPercent,
+		EfficiencyBufferMultiplier: bufferMultiplier,
+		Start:                      alloc.Start,
+		End:                        alloc.End,
+	}
+}

+ 450 - 1
pkg/mcp/server_test.go

@@ -873,7 +873,6 @@ func TestQueryAllocations_InvalidWindow(t *testing.T) {
 	assert.Contains(t, err.Error(), "failed to parse window")
 }
 
-
 func TestProcessMCPRequest_ResponseMetadata(t *testing.T) {
 	dq := &dummyQuerier{}
 	s := &MCPServer{cloudQuerier: dq}
@@ -910,3 +909,453 @@ func TestCloudCostQuery_NewFields(t *testing.T) {
 	assert.Equal(t, "prod", query.Labels["environment"])
 	assert.Equal(t, "platform", query.Labels["team"])
 }
+
+// ---- Tests for Efficiency Tool ----
+
+func TestEfficiencyQueryStruct(t *testing.T) {
+	bufferMultiplier := 1.4
+	query := EfficiencyQuery{
+		Aggregate:                  "pod",
+		Filter:                     "namespace:production",
+		EfficiencyBufferMultiplier: &bufferMultiplier,
+	}
+
+	assert.Equal(t, "pod", query.Aggregate)
+	assert.Equal(t, "namespace:production", query.Filter)
+	assert.NotNil(t, query.EfficiencyBufferMultiplier)
+	assert.Equal(t, 1.4, *query.EfficiencyBufferMultiplier)
+}
+
+func TestEfficiencyQueryDefaultValues(t *testing.T) {
+	query := EfficiencyQuery{}
+
+	assert.Empty(t, query.Aggregate)
+	assert.Empty(t, query.Filter)
+	assert.Nil(t, query.EfficiencyBufferMultiplier)
+}
+
+func TestEfficiencyMetricStruct(t *testing.T) {
+	now := time.Now()
+	metric := EfficiencyMetric{
+		Name:                       "test-pod",
+		CPUEfficiency:              0.5,
+		MemoryEfficiency:           0.6,
+		CPUCoresRequested:          2.0,
+		CPUCoresUsed:               1.0,
+		RAMBytesRequested:          2147483648, // 2GB
+		RAMBytesUsed:               1288490188, // ~1.2GB
+		RecommendedCPURequest:      1.2,
+		RecommendedRAMRequest:      1546188226, // ~1.44GB
+		ResultingCPUEfficiency:     0.833,
+		ResultingMemoryEfficiency:  0.833,
+		CurrentTotalCost:           10.0,
+		RecommendedCost:            6.0,
+		CostSavings:                4.0,
+		CostSavingsPercent:         40.0,
+		EfficiencyBufferMultiplier: 1.2,
+		Start:                      now.Add(-24 * time.Hour),
+		End:                        now,
+	}
+
+	assert.Equal(t, "test-pod", metric.Name)
+	assert.Equal(t, 0.5, metric.CPUEfficiency)
+	assert.Equal(t, 0.6, metric.MemoryEfficiency)
+	assert.Equal(t, 2.0, metric.CPUCoresRequested)
+	assert.Equal(t, 1.0, metric.CPUCoresUsed)
+	assert.Equal(t, 2147483648.0, metric.RAMBytesRequested)
+	assert.Equal(t, 1288490188.0, metric.RAMBytesUsed)
+	assert.Equal(t, 1.2, metric.RecommendedCPURequest)
+	assert.Equal(t, 1546188226.0, metric.RecommendedRAMRequest)
+	assert.Equal(t, 0.833, metric.ResultingCPUEfficiency)
+	assert.Equal(t, 0.833, metric.ResultingMemoryEfficiency)
+	assert.Equal(t, 10.0, metric.CurrentTotalCost)
+	assert.Equal(t, 6.0, metric.RecommendedCost)
+	assert.Equal(t, 4.0, metric.CostSavings)
+	assert.Equal(t, 40.0, metric.CostSavingsPercent)
+	assert.Equal(t, 1.2, metric.EfficiencyBufferMultiplier)
+	assert.True(t, metric.Start.Before(metric.End))
+}
+
+func TestEfficiencyResponseStruct(t *testing.T) {
+	now := time.Now()
+	metric1 := &EfficiencyMetric{
+		Name:             "pod-1",
+		CPUEfficiency:    0.5,
+		MemoryEfficiency: 0.6,
+		Start:            now.Add(-24 * time.Hour),
+		End:              now,
+	}
+	metric2 := &EfficiencyMetric{
+		Name:             "pod-2",
+		CPUEfficiency:    0.7,
+		MemoryEfficiency: 0.8,
+		Start:            now.Add(-24 * time.Hour),
+		End:              now,
+	}
+
+	response := EfficiencyResponse{
+		Efficiencies: []*EfficiencyMetric{metric1, metric2},
+	}
+
+	require.NotNil(t, response.Efficiencies)
+	assert.Len(t, response.Efficiencies, 2)
+	assert.Equal(t, "pod-1", response.Efficiencies[0].Name)
+	assert.Equal(t, "pod-2", response.Efficiencies[1].Name)
+}
+
+func TestSafeDiv(t *testing.T) {
+	tests := []struct {
+		name        string
+		numerator   float64
+		denominator float64
+		expected    float64
+	}{
+		{"normal division", 10.0, 2.0, 5.0},
+		{"zero denominator", 10.0, 0.0, 0.0},
+		{"zero numerator", 0.0, 2.0, 0.0},
+		{"both zero", 0.0, 0.0, 0.0},
+		{"negative values", -10.0, 2.0, -5.0},
+		{"fractional result", 5.0, 2.0, 2.5},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := safeDiv(tt.numerator, tt.denominator)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestComputeEfficiencyMetric_NilAllocation(t *testing.T) {
+	result := computeEfficiencyMetric(nil, 1.2)
+	assert.Nil(t, result)
+}
+
+func TestComputeEfficiencyMetric_ZeroMinutes(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:  "test-pod",
+		Start: now,
+		End:   now, // Same time, so 0 minutes
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+	assert.Nil(t, result)
+}
+
+func TestComputeEfficiencyMetric_ValidAllocation(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:  "test-pod",
+		Start: now.Add(-24 * time.Hour),
+		End:   now,
+		// 24 hours = 1440 minutes
+		CPUCoreHours:           24.0,   // 1 core for 24 hours
+		RAMByteHours:           24.0e9, // ~1GB for 24 hours
+		CPUCoreRequestAverage:  2.0,    // Requested 2 cores
+		RAMBytesRequestAverage: 2.0e9,  // Requested 2GB
+		CPUCost:                10.0,
+		RAMCost:                5.0,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+	assert.Equal(t, "test-pod", result.Name)
+	assert.Equal(t, 2.0, result.CPUCoresRequested)
+	assert.Equal(t, 2.0e9, result.RAMBytesRequested)
+	assert.Equal(t, 1.0, result.CPUCoresUsed)            // 24 core-hours / 24 hours = 1 core
+	assert.Equal(t, 1.0e9, result.RAMBytesUsed)          // 24GB-hours / 24 hours = 1GB
+	assert.Equal(t, 0.5, result.CPUEfficiency)           // 1 / 2 = 0.5
+	assert.Equal(t, 0.5, result.MemoryEfficiency)        // 1GB / 2GB = 0.5
+	assert.Equal(t, 1.2, result.RecommendedCPURequest)   // 1 * 1.2 = 1.2
+	assert.Equal(t, 1.2e9, result.RecommendedRAMRequest) // 1GB * 1.2 = 1.2GB
+	assert.Equal(t, 1.2, result.EfficiencyBufferMultiplier)
+	assert.Greater(t, result.CostSavings, 0.0)
+}
+
+func TestComputeEfficiencyMetric_CustomBufferMultiplier(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           24.0,
+		RAMByteHours:           24.0e9,
+		CPUCoreRequestAverage:  2.0,
+		RAMBytesRequestAverage: 2.0e9,
+		CPUCost:                10.0,
+		RAMCost:                5.0,
+	}
+
+	// Test with 1.4 buffer multiplier (40% headroom)
+	result := computeEfficiencyMetric(alloc, 1.4)
+
+	require.NotNil(t, result)
+	assert.Equal(t, 1.4, result.RecommendedCPURequest)   // 1 * 1.4 = 1.4
+	assert.Equal(t, 1.4e9, result.RecommendedRAMRequest) // 1GB * 1.4 = 1.4GB
+	assert.Equal(t, 1.4, result.EfficiencyBufferMultiplier)
+
+	// Resulting efficiency should be usage / recommended
+	expectedCPUEff := 1.0 / 1.4
+	expectedMemEff := 1.0e9 / 1.4e9
+	assert.InDelta(t, expectedCPUEff, result.ResultingCPUEfficiency, 0.001)
+	assert.InDelta(t, expectedMemEff, result.ResultingMemoryEfficiency, 0.001)
+}
+
+func TestComputeEfficiencyMetric_MinimumThresholds(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:  "test-pod",
+		Start: now.Add(-24 * time.Hour),
+		End:   now,
+		// Very small usage
+		CPUCoreHours:           0.00001, // 0.000000417 cores average
+		RAMByteHours:           100,     // ~4 bytes average
+		CPUCoreRequestAverage:  0.1,
+		RAMBytesRequestAverage: 1000,
+		CPUCost:                0.001,
+		RAMCost:                0.001,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+	// Should enforce minimum CPU (0.001 cores)
+	assert.Equal(t, efficiencyMinCPU, result.RecommendedCPURequest)
+	// Should enforce minimum RAM (1MB)
+	assert.Equal(t, float64(efficiencyMinRAM), result.RecommendedRAMRequest)
+}
+
+func TestComputeEfficiencyMetric_NoRequests(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           24.0,
+		RAMByteHours:           24.0e9,
+		CPUCoreRequestAverage:  0.0, // No requests set
+		RAMBytesRequestAverage: 0.0, // No requests set
+		CPUCost:                10.0,
+		RAMCost:                5.0,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+	// Efficiency should be 0 when no requests are set
+	assert.Equal(t, 0.0, result.CPUEfficiency)
+	assert.Equal(t, 0.0, result.MemoryEfficiency)
+	// Recommendations should still be calculated based on usage
+	assert.Equal(t, 1.2, result.RecommendedCPURequest)
+	assert.Equal(t, 1.2e9, result.RecommendedRAMRequest)
+}
+
+func TestComputeEfficiencyMetric_OverProvisioned(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           12.0,   // 0.5 cores average
+		RAMByteHours:           12.0e9, // 0.5GB average
+		CPUCoreRequestAverage:  4.0,    // Requested 4 cores (over-provisioned)
+		RAMBytesRequestAverage: 8.0e9,  // Requested 8GB (over-provisioned)
+		CPUCost:                40.0,
+		RAMCost:                20.0,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+	// Low efficiency due to over-provisioning
+	assert.Equal(t, 0.125, result.CPUEfficiency)     // 0.5 / 4 = 0.125
+	assert.Equal(t, 0.0625, result.MemoryEfficiency) // 0.5GB / 8GB = 0.0625
+	// Recommendations should be much lower
+	assert.Equal(t, 0.6, result.RecommendedCPURequest)   // 0.5 * 1.2 = 0.6
+	assert.Equal(t, 0.6e9, result.RecommendedRAMRequest) // 0.5GB * 1.2 = 0.6GB
+	// Should have significant cost savings
+	assert.Greater(t, result.CostSavings, 0.0)
+	assert.Greater(t, result.CostSavingsPercent, 50.0)
+}
+
+func TestComputeEfficiencyMetric_UnderProvisioned(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           48.0,   // 2 cores average
+		RAMByteHours:           48.0e9, // 2GB average
+		CPUCoreRequestAverage:  1.0,    // Requested 1 core (under-provisioned)
+		RAMBytesRequestAverage: 1.0e9,  // Requested 1GB (under-provisioned)
+		CPUCost:                10.0,
+		RAMCost:                5.0,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+	// High efficiency (>100%) due to under-provisioning
+	assert.Equal(t, 2.0, result.CPUEfficiency)    // 2 / 1 = 2.0
+	assert.Equal(t, 2.0, result.MemoryEfficiency) // 2GB / 1GB = 2.0
+	// Recommendations should be higher than current requests
+	assert.Equal(t, 2.4, result.RecommendedCPURequest)   // 2 * 1.2 = 2.4
+	assert.Equal(t, 2.4e9, result.RecommendedRAMRequest) // 2GB * 1.2 = 2.4GB
+}
+
+func TestComputeEfficiencyMetric_CostCalculations(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           24.0,
+		RAMByteHours:           24.0e9,
+		CPUCoreRequestAverage:  2.0,
+		RAMBytesRequestAverage: 2.0e9,
+		CPUCost:                10.0, // $10 for CPU
+		RAMCost:                5.0,  // $5 for RAM
+		NetworkCost:            1.0,  // $1 for network
+		SharedCost:             0.5,  // $0.5 shared
+		ExternalCost:           0.5,  // $0.5 external
+		GPUCost:                1.0,  // $1 for GPU
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+
+	// Current total cost should include all costs
+	expectedCurrentCost := 10.0 + 5.0 + 1.0 + 0.5 + 0.5 + 1.0 // = 18.0
+	assert.Equal(t, expectedCurrentCost, result.CurrentTotalCost)
+
+	// Recommended cost should be lower due to right-sizing
+	assert.Less(t, result.RecommendedCost, result.CurrentTotalCost)
+
+	// Cost savings should be positive
+	assert.Greater(t, result.CostSavings, 0.0)
+	assert.Equal(t, result.CurrentTotalCost-result.RecommendedCost, result.CostSavings)
+
+	// Cost savings percent should be calculated correctly
+	expectedPercent := (result.CostSavings / result.CurrentTotalCost) * 100
+	assert.InDelta(t, expectedPercent, result.CostSavingsPercent, 0.001)
+}
+
+func TestComputeEfficiencyMetric_OtherCostsPreserved(t *testing.T) {
+	now := time.Now()
+	alloc := &opencost.Allocation{
+		Name:                   "test-pod",
+		Start:                  now.Add(-24 * time.Hour),
+		End:                    now,
+		CPUCoreHours:           24.0,
+		RAMByteHours:           24.0e9,
+		CPUCoreRequestAverage:  2.0,
+		RAMBytesRequestAverage: 2.0e9,
+		CPUCost:                10.0,
+		RAMCost:                5.0,
+		NetworkCost:            2.0, // Fixed cost
+		SharedCost:             1.0, // Fixed cost
+		ExternalCost:           1.0, // Fixed cost
+		GPUCost:                0.0,
+	}
+
+	result := computeEfficiencyMetric(alloc, 1.2)
+
+	require.NotNil(t, result)
+
+	// The "other costs" (Network, Shared, External, GPU) should be preserved
+	// in the recommended cost calculation
+	otherCosts := 2.0 + 1.0 + 1.0 + 0.0 // = 4.0
+
+	// CPU and RAM costs should be reduced based on right-sizing
+	// Original: 10.0 + 5.0 = 15.0
+	// Usage: 1 core + 1GB
+	// Recommended: 1.2 cores + 1.2GB
+	// Cost is calculated based on REQUESTED amounts (2 cores, 2GB)
+	cpuCostPerCoreHour := 10.0 / (2.0 * 24.0)  // CPU cost / (requested cores * hours)
+	ramCostPerByteHour := 5.0 / (2.0e9 * 24.0) // RAM cost / (requested bytes * hours)
+	expectedRecommendedCPUCost := 1.2 * 24.0 * cpuCostPerCoreHour
+	expectedRecommendedRAMCost := 1.2e9 * 24.0 * ramCostPerByteHour
+	expectedRecommendedTotal := expectedRecommendedCPUCost + expectedRecommendedRAMCost + otherCosts
+
+	assert.InDelta(t, expectedRecommendedTotal, result.RecommendedCost, 0.01)
+}
+
+func TestQueryEfficiency_InvalidWindow(t *testing.T) {
+	s := &MCPServer{}
+
+	req := &OpenCostQueryRequest{
+		QueryType: EfficiencyQueryType,
+		Window:    "invalid-window",
+	}
+
+	_, err := s.QueryEfficiency(req)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "failed to parse window")
+}
+
+func TestQueryEfficiency_DefaultBufferMultiplier(t *testing.T) {
+	// Test that default buffer multiplier is 1.2 when not specified
+	req := &OpenCostQueryRequest{
+		QueryType:        EfficiencyQueryType,
+		Window:           "24h",
+		EfficiencyParams: &EfficiencyQuery{
+			// EfficiencyBufferMultiplier not set - should default to 1.2
+		},
+	}
+
+	assert.Nil(t, req.EfficiencyParams.EfficiencyBufferMultiplier)
+}
+
+func TestQueryEfficiency_CustomBufferMultiplier(t *testing.T) {
+	bufferMultiplier := 1.4
+	req := &OpenCostQueryRequest{
+		QueryType: EfficiencyQueryType,
+		Window:    "24h",
+		EfficiencyParams: &EfficiencyQuery{
+			EfficiencyBufferMultiplier: &bufferMultiplier,
+		},
+	}
+
+	assert.NotNil(t, req.EfficiencyParams.EfficiencyBufferMultiplier)
+	assert.Equal(t, 1.4, *req.EfficiencyParams.EfficiencyBufferMultiplier)
+}
+
+func TestQueryEfficiency_WithFilter(t *testing.T) {
+	req := &OpenCostQueryRequest{
+		QueryType: EfficiencyQueryType,
+		Window:    "7d",
+		EfficiencyParams: &EfficiencyQuery{
+			Aggregate: "pod",
+			Filter:    "namespace:production",
+		},
+	}
+
+	assert.Equal(t, "pod", req.EfficiencyParams.Aggregate)
+	assert.Equal(t, "namespace:production", req.EfficiencyParams.Filter)
+}
+
+func TestQueryEfficiency_WithAggregation(t *testing.T) {
+	req := &OpenCostQueryRequest{
+		QueryType: EfficiencyQueryType,
+		Window:    "7d",
+		EfficiencyParams: &EfficiencyQuery{
+			Aggregate: "namespace,controller",
+		},
+	}
+
+	assert.Equal(t, "namespace,controller", req.EfficiencyParams.Aggregate)
+}
+
+func TestEfficiencyConstants(t *testing.T) {
+	// Test that efficiency constants are defined correctly
+	assert.Equal(t, 1.2, efficiencyBufferMultiplier)
+	assert.Equal(t, 0.001, efficiencyMinCPU)
+	assert.Equal(t, 1024*1024, efficiencyMinRAM)
+}
+
+func TestEfficiencyQueryType(t *testing.T) {
+	assert.Equal(t, QueryType("efficiency"), EfficiencyQueryType)
+}