فهرست منبع

Move all query results to concrete result types

Signed-off-by: Matt Bolt <mbolt35@gmail.com>
Matt Bolt 1 سال پیش
والد
کامیت
cf668959ab

+ 94 - 94
core/pkg/source/datasource.go

@@ -9,115 +9,115 @@ import (
 
 type ClusterMetricsQuerier interface {
 	// Cluster Disks
-	QueryPVActiveMinutes(start, end time.Time) QueryResultsChan
-	QueryPVUsedAverage(start, end time.Time) QueryResultsChan
-	QueryPVUsedMax(start, end time.Time) QueryResultsChan
+	QueryPVActiveMinutes(start, end time.Time) *Future[PVActiveMinutesResult]
+	QueryPVUsedAverage(start, end time.Time) *Future[PVUsedAvgResult]
+	QueryPVUsedMax(start, end time.Time) *Future[PVUsedMaxResult]
 
 	// Local Cluster Disks
-	QueryLocalStorageActiveMinutes(start, end time.Time) QueryResultsChan
-	QueryLocalStorageCost(start, end time.Time) QueryResultsChan
-	QueryLocalStorageUsedCost(start, end time.Time) QueryResultsChan
-	QueryLocalStorageUsedAvg(start, end time.Time) QueryResultsChan
-	QueryLocalStorageUsedMax(start, end time.Time) QueryResultsChan
-	QueryLocalStorageBytes(start, end time.Time) QueryResultsChan
-	QueryLocalStorageBytesByProvider(provider string, start, end time.Time) QueryResultsChan
-	QueryLocalStorageUsedByProvider(provider string, start, end time.Time) QueryResultsChan
+	QueryLocalStorageActiveMinutes(start, end time.Time) *Future[LocalStorageActiveMinutesResult]
+	QueryLocalStorageCost(start, end time.Time) *Future[LocalStorageCostResult]
+	QueryLocalStorageUsedCost(start, end time.Time) *Future[LocalStorageUsedCostResult]
+	QueryLocalStorageUsedAvg(start, end time.Time) *Future[LocalStorageUsedAvgResult]
+	QueryLocalStorageUsedMax(start, end time.Time) *Future[LocalStorageUsedMaxResult]
+	QueryLocalStorageBytes(start, end time.Time) *Future[LocalStorageBytesResult]
+	QueryLocalStorageBytesByProvider(provider string, start, end time.Time) *Future[LocalStorageBytesByProviderResult]
+	QueryLocalStorageUsedByProvider(provider string, start, end time.Time) *Future[LocalStorageUsedByProviderResult]
 
 	// Nodes
-	QueryNodeActiveMinutes(start, end time.Time) QueryResultsChan
-	QueryNodeCPUCoresCapacity(start, end time.Time) QueryResultsChan
-	QueryNodeCPUCoresAllocatable(start, end time.Time) QueryResultsChan
-	QueryNodeRAMBytesCapacity(start, end time.Time) QueryResultsChan
-	QueryNodeRAMBytesAllocatable(start, end time.Time) QueryResultsChan
-	QueryNodeGPUCount(start, end time.Time) QueryResultsChan
-	QueryNodeCPUModeTotal(start, end time.Time) QueryResultsChan
-	QueryNodeIsSpot(start, end time.Time) QueryResultsChan
-	QueryNodeCPUModePercent(start, end time.Time) QueryResultsChan
-	QueryNodeRAMSystemPercent(start, end time.Time) QueryResultsChan
-	QueryNodeRAMUserPercent(start, end time.Time) QueryResultsChan
+	QueryNodeActiveMinutes(start, end time.Time) *Future[NodeActiveMinutesResult]
+	QueryNodeCPUCoresCapacity(start, end time.Time) *Future[NodeCPUCoresCapacityResult]
+	QueryNodeCPUCoresAllocatable(start, end time.Time) *Future[NodeCPUCoresAllocatableResult]
+	QueryNodeRAMBytesCapacity(start, end time.Time) *Future[NodeRAMBytesCapacityResult]
+	QueryNodeRAMBytesAllocatable(start, end time.Time) *Future[NodeRAMBytesAllocatableResult]
+	QueryNodeGPUCount(start, end time.Time) *Future[NodeGPUCountResult]
+	QueryNodeCPUModeTotal(start, end time.Time) *Future[NodeCPUModeTotalResult]
+	QueryNodeIsSpot(start, end time.Time) *Future[NodeIsSpotResult]
+	QueryNodeCPUModePercent(start, end time.Time) *Future[NodeCPUModePercentResult]
+	QueryNodeRAMSystemPercent(start, end time.Time) *Future[NodeRAMSystemPercentResult]
+	QueryNodeRAMUserPercent(start, end time.Time) *Future[NodeRAMUserPercentResult]
 
 	// Load Balancers
-	QueryLBActiveMinutes(start, end time.Time) QueryResultsChan
-	QueryLBPricePerHr(start, end time.Time) QueryResultsChan
+	QueryLBActiveMinutes(start, end time.Time) *Future[LBActiveMinutesResult]
+	QueryLBPricePerHr(start, end time.Time) *Future[LBPricePerHrResult]
 
 	// Cluster Management
-	QueryClusterManagementDuration(start, end time.Time) QueryResultsChan
-	QueryClusterManagementPricePerHr(start, end time.Time) QueryResultsChan
+	QueryClusterManagementDuration(start, end time.Time) *Future[ClusterManagementDurationResult]
+	QueryClusterManagementPricePerHr(start, end time.Time) *Future[ClusterManagementPricePerHrResult]
 
 	// Cluster Costs
-	QueryDataCount(start, end time.Time) QueryResultsChan
-	QueryTotalGPU(start, end time.Time) QueryResultsChan
-	QueryTotalCPU(start, end time.Time) QueryResultsChan
-	QueryTotalRAM(start, end time.Time) QueryResultsChan
-	QueryTotalStorage(start, end time.Time) QueryResultsChan
+	QueryDataCount(start, end time.Time) *Future[DataCountResult]
+	QueryTotalGPU(start, end time.Time) *Future[TotalGPUResult]
+	QueryTotalCPU(start, end time.Time) *Future[TotalCPUResult]
+	QueryTotalRAM(start, end time.Time) *Future[TotalRAMResult]
+	QueryTotalStorage(start, end time.Time) *Future[TotalStorageResult]
 
 	// Cluster Costs
-	QueryClusterCores(start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterRAM(start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterStorage(start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterStorageByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterTotal(start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterTotalByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterNodes(start, end time.Time, step time.Duration) QueryResultsChan
-	QueryClusterNodesByProvider(provider string, start, end time.Time, step time.Duration) QueryResultsChan
+	QueryClusterCores(start, end time.Time, step time.Duration) *Future[ClusterCoresResult]
+	QueryClusterRAM(start, end time.Time, step time.Duration) *Future[ClusterRAMResult]
+	QueryClusterStorage(start, end time.Time, step time.Duration) *Future[ClusterStorageResult]
+	QueryClusterStorageByProvider(provider string, start, end time.Time, step time.Duration) *Future[ClusterStorageResult]
+	QueryClusterTotal(start, end time.Time, step time.Duration) *Future[ClusterTotalResult]
+	QueryClusterTotalByProvider(provider string, start, end time.Time, step time.Duration) *Future[ClusterTotalResult]
+	QueryClusterNodes(start, end time.Time, step time.Duration) *Future[ClusterNodesResult]
+	QueryClusterNodesByProvider(provider string, start, end time.Time, step time.Duration) *Future[ClusterNodesResult]
 }
 
 type AllocationMetricsQuerier interface {
-	QueryPods(start, end time.Time) QueryResultsChan
-	QueryPodsUID(start, end time.Time) QueryResultsChan
-
-	QueryRAMBytesAllocated(start, end time.Time) QueryResultsChan
-	QueryRAMRequests(start, end time.Time) QueryResultsChan
-	QueryRAMUsageAvg(start, end time.Time) QueryResultsChan
-	QueryRAMUsageMax(start, end time.Time) QueryResultsChan
-	QueryNodeRAMPricePerGiBHr(start, end time.Time) QueryResultsChan
-
-	QueryCPUCoresAllocated(start, end time.Time) QueryResultsChan
-	QueryCPURequests(start, end time.Time) QueryResultsChan
-	QueryCPUUsageAvg(start, end time.Time) QueryResultsChan
-	QueryCPUUsageMax(start, end time.Time) QueryResultsChan
-	QueryNodeCPUPricePerHr(start, end time.Time) QueryResultsChan
-
-	QueryGPUsAllocated(start, end time.Time) QueryResultsChan
-	QueryGPUsRequested(start, end time.Time) QueryResultsChan
-	QueryGPUsUsageAvg(start, end time.Time) QueryResultsChan
-	QueryGPUsUsageMax(start, end time.Time) QueryResultsChan
-	QueryNodeGPUPricePerHr(start, end time.Time) QueryResultsChan
-	QueryGPUInfo(start, end time.Time) QueryResultsChan
-	QueryIsGPUShared(start, end time.Time) QueryResultsChan
-
-	QueryPodPVCAllocation(start, end time.Time) QueryResultsChan
-	QueryPVCBytesRequested(start, end time.Time) QueryResultsChan
-	QueryPVCInfo(start, end time.Time) QueryResultsChan
-
-	QueryPVBytes(start, end time.Time) QueryResultsChan
-	QueryPVPricePerGiBHour(start, end time.Time) QueryResultsChan
-	QueryPVInfo(start, end time.Time) QueryResultsChan
-
-	QueryNetZoneGiB(start, end time.Time) QueryResultsChan
-	QueryNetZoneCostPerGiB(start, end time.Time) QueryResultsChan
-	QueryNetRegionGiB(start, end time.Time) QueryResultsChan
-	QueryNetRegionCostPerGiB(start, end time.Time) QueryResultsChan
-	QueryNetInternetGiB(start, end time.Time) QueryResultsChan
-	QueryNetInternetCostPerGiB(start, end time.Time) QueryResultsChan
-	QueryNetReceiveBytes(start, end time.Time) QueryResultsChan
-	QueryNetTransferBytes(start, end time.Time) QueryResultsChan
-
-	QueryNamespaceAnnotations(start, end time.Time) QueryResultsChan
-	QueryPodAnnotations(start, end time.Time) QueryResultsChan
-
-	QueryNodeLabels(start, end time.Time) QueryResultsChan
-	QueryNamespaceLabels(start, end time.Time) QueryResultsChan
-	QueryPodLabels(start, end time.Time) QueryResultsChan
-	QueryServiceLabels(start, end time.Time) QueryResultsChan
-	QueryDeploymentLabels(start, end time.Time) QueryResultsChan
-	QueryStatefulSetLabels(start, end time.Time) QueryResultsChan
-	QueryDaemonSetLabels(start, end time.Time) QueryResultsChan
-	QueryJobLabels(start, end time.Time) QueryResultsChan
-
-	QueryPodsWithReplicaSetOwner(start, end time.Time) QueryResultsChan
-	QueryReplicaSetsWithoutOwners(start, end time.Time) QueryResultsChan
-	QueryReplicaSetsWithRollout(start, end time.Time) QueryResultsChan
+	QueryPods(start, end time.Time) *Future[PodsResult]
+	QueryPodsUID(start, end time.Time) *Future[PodsResult]
+
+	QueryRAMBytesAllocated(start, end time.Time) *Future[RAMBytesAllocatedResult]
+	QueryRAMRequests(start, end time.Time) *Future[RAMRequestsResult]
+	QueryRAMUsageAvg(start, end time.Time) *Future[RAMUsageAvgResult]
+	QueryRAMUsageMax(start, end time.Time) *Future[RAMUsageMaxResult]
+	QueryNodeRAMPricePerGiBHr(start, end time.Time) *Future[NodeRAMPricePerGiBHrResult]
+
+	QueryCPUCoresAllocated(start, end time.Time) *Future[CPUCoresAllocatedResult]
+	QueryCPURequests(start, end time.Time) *Future[CPURequestsResult]
+	QueryCPUUsageAvg(start, end time.Time) *Future[CPUUsageAvgResult]
+	QueryCPUUsageMax(start, end time.Time) *Future[CPUUsageMaxResult]
+	QueryNodeCPUPricePerHr(start, end time.Time) *Future[NodeCPUPricePerHrResult]
+
+	QueryGPUsAllocated(start, end time.Time) *Future[GPUsAllocatedResult]
+	QueryGPUsRequested(start, end time.Time) *Future[GPUsRequestedResult]
+	QueryGPUsUsageAvg(start, end time.Time) *Future[GPUsUsageAvgResult]
+	QueryGPUsUsageMax(start, end time.Time) *Future[GPUsUsageMaxResult]
+	QueryNodeGPUPricePerHr(start, end time.Time) *Future[NodeGPUPricePerHrResult]
+	QueryGPUInfo(start, end time.Time) *Future[GPUInfoResult]
+	QueryIsGPUShared(start, end time.Time) *Future[IsGPUSharedResult]
+
+	QueryPodPVCAllocation(start, end time.Time) *Future[PodPVCAllocationResult]
+	QueryPVCBytesRequested(start, end time.Time) *Future[PVCBytesRequestedResult]
+	QueryPVCInfo(start, end time.Time) *Future[PVCInfoResult]
+
+	QueryPVBytes(start, end time.Time) *Future[PVBytesResult]
+	QueryPVPricePerGiBHour(start, end time.Time) *Future[PVPricePerGiBHourResult]
+	QueryPVInfo(start, end time.Time) *Future[PVInfoResult]
+
+	QueryNetZoneGiB(start, end time.Time) *Future[NetZoneGiBResult]
+	QueryNetZonePricePerGiB(start, end time.Time) *Future[NetZonePricePerGiBResult]
+	QueryNetRegionGiB(start, end time.Time) *Future[NetRegionGiBResult]
+	QueryNetRegionPricePerGiB(start, end time.Time) *Future[NetRegionPricePerGiBResult]
+	QueryNetInternetGiB(start, end time.Time) *Future[NetInternetGiBResult]
+	QueryNetInternetPricePerGiB(start, end time.Time) *Future[NetInternetPricePerGiBResult]
+	QueryNetReceiveBytes(start, end time.Time) *Future[NetReceiveBytesResult]
+	QueryNetTransferBytes(start, end time.Time) *Future[NetTransferBytesResult]
+
+	QueryNamespaceAnnotations(start, end time.Time) *Future[NamespaceAnnotationsResult]
+	QueryPodAnnotations(start, end time.Time) *Future[PodAnnotationsResult]
+
+	QueryNodeLabels(start, end time.Time) *Future[NodeLabelsResult]
+	QueryNamespaceLabels(start, end time.Time) *Future[NamespaceLabelsResult]
+	QueryPodLabels(start, end time.Time) *Future[PodLabelsResult]
+	QueryServiceLabels(start, end time.Time) *Future[ServiceLabelsResult]
+	QueryDeploymentLabels(start, end time.Time) *Future[DeploymentLabelsResult]
+	QueryStatefulSetLabels(start, end time.Time) *Future[StatefulSetLabelsResult]
+	QueryDaemonSetLabels(start, end time.Time) *Future[DaemonSetLabelsResult]
+	QueryJobLabels(start, end time.Time) *Future[JobLabelsResult]
+
+	QueryPodsWithReplicaSetOwner(start, end time.Time) *Future[PodsWithReplicaSetOwnerResult]
+	QueryReplicaSetsWithoutOwners(start, end time.Time) *Future[ReplicaSetsWithoutOwnersResult]
+	QueryReplicaSetsWithRollout(start, end time.Time) *Future[ReplicaSetsWithRolloutResult]
 
 	QueryDataCoverage(limitDays int) (time.Time, time.Time, error)
 }

+ 1389 - 0
core/pkg/source/decoders.go

@@ -0,0 +1,1389 @@
+package source
+
+import (
+	"github.com/opencost/opencost/core/pkg/util"
+)
+
+type PVResult struct {
+	Cluster          string
+	PersistentVolume string
+}
+
+type PVUsedAvgResult struct {
+	Cluster               string
+	Namespace             string
+	PersistentVolumeClaim string
+
+	Data []*util.Vector
+}
+
+func DecodePVUsedAvgResult(result *QueryResult) *PVUsedAvgResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pvc, _ := result.GetString("persistentvolumeclaim")
+
+	return &PVUsedAvgResult{
+		Cluster:               cluster,
+		Namespace:             namespace,
+		PersistentVolumeClaim: pvc,
+		Data:                  result.Values,
+	}
+}
+
+type PVActiveMinutesResult struct {
+	Cluster          string
+	PersistentVolume string
+
+	Data []*util.Vector
+}
+
+func DecodePVActiveMinutesResult(result *QueryResult) *PVActiveMinutesResult {
+	cluster, _ := result.GetCluster()
+	pv, _ := result.GetString("persistentvolume")
+
+	return &PVActiveMinutesResult{
+		Cluster:          cluster,
+		PersistentVolume: pv,
+		Data:             result.Values,
+	}
+}
+
+type PVUsedMaxResult struct {
+	Cluster               string
+	Namespace             string
+	PersistentVolumeClaim string
+	Data                  []*util.Vector
+}
+
+func DecodePVUsedMaxResult(result *QueryResult) *PVUsedMaxResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pvc, _ := result.GetString("persistentvolumeclaim")
+
+	return &PVUsedMaxResult{
+		Cluster:               cluster,
+		Namespace:             namespace,
+		PersistentVolumeClaim: pvc,
+		Data:                  result.Values,
+	}
+}
+
+type LocalStorageActiveMinutesResult struct {
+	Cluster    string
+	Node       string
+	ProviderID string
+
+	Data []*util.Vector
+}
+
+func DecodeLocalStorageActiveMinutesResult(result *QueryResult) *LocalStorageActiveMinutesResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	providerId, _ := result.GetProviderID()
+
+	return &LocalStorageActiveMinutesResult{
+		Cluster:    cluster,
+		Node:       node,
+		ProviderID: providerId,
+		Data:       result.Values,
+	}
+}
+
+type LocalStorageCostResult struct {
+	Cluster  string
+	Instance string
+	Device   string
+
+	Data []*util.Vector
+}
+
+func DecodeLocalStorageCostResult(result *QueryResult) *LocalStorageCostResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+	device, _ := result.GetDevice()
+
+	return &LocalStorageCostResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Device:   device,
+		Data:     result.Values,
+	}
+}
+
+type LocalStorageUsedCostResult struct {
+	Cluster  string
+	Instance string
+	Device   string
+	Data     []*util.Vector
+}
+
+func DecodeLocalStorageUsedCostResult(result *QueryResult) *LocalStorageUsedCostResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+	device, _ := result.GetDevice()
+
+	return &LocalStorageUsedCostResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Device:   device,
+		Data:     result.Values,
+	}
+}
+
+type LocalStorageUsedAvgResult struct {
+	Cluster  string
+	Instance string
+	Device   string
+	Data     []*util.Vector
+}
+
+func DecodeLocalStorageUsedAvgResult(result *QueryResult) *LocalStorageUsedAvgResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+	device, _ := result.GetDevice()
+
+	return &LocalStorageUsedAvgResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Device:   device,
+		Data:     result.Values,
+	}
+}
+
+type LocalStorageUsedMaxResult struct {
+	Cluster  string
+	Instance string
+	Device   string
+	Data     []*util.Vector
+}
+
+func DecodeLocalStorageUsedMaxResult(result *QueryResult) *LocalStorageUsedMaxResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+	device, _ := result.GetDevice()
+
+	return &LocalStorageUsedMaxResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Device:   device,
+		Data:     result.Values,
+	}
+}
+
+type LocalStorageBytesResult struct {
+	Cluster  string
+	Instance string
+	Device   string
+	Data     []*util.Vector
+}
+
+func DecodeLocalStorageBytesResult(result *QueryResult) *LocalStorageBytesResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+	device, _ := result.GetDevice()
+
+	return &LocalStorageBytesResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Device:   device,
+		Data:     result.Values,
+	}
+}
+
+type LocalStorageBytesByProviderResult = TotalStorageResult
+
+func DecodeLocalStorageBytesByProviderResult(result *QueryResult) *LocalStorageBytesByProviderResult {
+	return DecodeTotalStorageResult(result)
+}
+
+type LocalStorageUsedByProviderResult = TotalStorageResult
+
+func DecodeLocalStorageUsedByProviderResult(result *QueryResult) *LocalStorageUsedByProviderResult {
+	return DecodeTotalStorageResult(result)
+}
+
+type NodeActiveMinutesResult struct {
+	Cluster    string
+	Node       string
+	ProviderID string
+	Data       []*util.Vector
+}
+
+func DecodeNodeActiveMinutesResult(result *QueryResult) *NodeActiveMinutesResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeActiveMinutesResult{
+		Cluster:    cluster,
+		Node:       node,
+		ProviderID: providerId,
+		Data:       result.Values,
+	}
+}
+
+type NodeCPUCoresCapacityResult struct {
+	Cluster string
+	Node    string
+	Data    []*util.Vector
+}
+
+func DecodeNodeCPUCoresCapacityResult(result *QueryResult) *NodeCPUCoresCapacityResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+
+	return &NodeCPUCoresCapacityResult{
+		Cluster: cluster,
+		Node:    node,
+		Data:    result.Values,
+	}
+}
+
+type NodeCPUCoresAllocatableResult = NodeCPUCoresCapacityResult
+
+func DecodeNodeCPUCoresAllocatableResult(result *QueryResult) *NodeCPUCoresAllocatableResult {
+	return DecodeNodeCPUCoresCapacityResult(result)
+}
+
+type NodeRAMBytesCapacityResult struct {
+	Cluster string
+	Node    string
+	Data    []*util.Vector
+}
+
+func DecodeNodeRAMBytesCapacityResult(result *QueryResult) *NodeRAMBytesCapacityResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+
+	return &NodeRAMBytesCapacityResult{
+		Cluster: cluster,
+		Node:    node,
+		Data:    result.Values,
+	}
+}
+
+type NodeRAMBytesAllocatableResult = NodeRAMBytesCapacityResult
+
+func DecodeNodeRAMBytesAllocatableResult(result *QueryResult) *NodeRAMBytesAllocatableResult {
+	return DecodeNodeRAMBytesCapacityResult(result)
+}
+
+type NodeGPUCountResult struct {
+	Cluster    string
+	Node       string
+	ProviderID string
+
+	Data []*util.Vector
+}
+
+func DecodeNodeGPUCountResult(result *QueryResult) *NodeGPUCountResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeGPUCountResult{
+		Cluster:    cluster,
+		Node:       node,
+		ProviderID: providerId,
+		Data:       result.Values,
+	}
+}
+
+type NodeCPUModeTotalResult struct {
+	Cluster string
+	Node    string
+	Mode    string
+	Data    []*util.Vector
+}
+
+func DecodeNodeCPUModeTotalResult(result *QueryResult) *NodeCPUModeTotalResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetString("kubernetes_node")
+	mode, _ := result.GetString("mode")
+
+	return &NodeCPUModeTotalResult{
+		Cluster: cluster,
+		Node:    node,
+		Mode:    mode,
+		Data:    result.Values,
+	}
+}
+
+type NodeIsSpotResult struct {
+	Cluster    string
+	Node       string
+	ProviderID string
+	Data       []*util.Vector
+}
+
+func DecodeNodeIsSpotResult(result *QueryResult) *NodeIsSpotResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeIsSpotResult{
+		Cluster:    cluster,
+		Node:       node,
+		ProviderID: providerId,
+		Data:       result.Values,
+	}
+}
+
+type NodeCPUModePercentResult struct {
+	Cluster string
+	Node    string
+	Mode    string
+	Data    []*util.Vector
+}
+
+func DecodeNodeCPUModePercentResult(result *QueryResult) *NodeCPUModePercentResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetString("kubernetes_node")
+	mode, _ := result.GetString("mode")
+
+	return &NodeCPUModePercentResult{
+		Cluster: cluster,
+		Node:    node,
+		Mode:    mode,
+		Data:    result.Values,
+	}
+}
+
+type NodeRAMSystemPercentResult struct {
+	Cluster  string
+	Instance string
+	Data     []*util.Vector
+}
+
+func DecodeNodeRAMSystemPercentResult(result *QueryResult) *NodeRAMSystemPercentResult {
+	cluster, _ := result.GetCluster()
+	instance, _ := result.GetInstance()
+
+	return &NodeRAMSystemPercentResult{
+		Cluster:  cluster,
+		Instance: instance,
+		Data:     result.Values,
+	}
+}
+
+type NodeRAMUserPercentResult = NodeRAMSystemPercentResult
+
+func DecodeNodeRAMUserPercentResult(result *QueryResult) *NodeRAMUserPercentResult {
+	return DecodeNodeRAMSystemPercentResult(result)
+}
+
+type LBActiveMinutesResult struct {
+	Cluster   string
+	Namespace string
+	Service   string
+	IngressIP string
+
+	Data []*util.Vector
+}
+
+func DecodeLBActiveMinutesResult(result *QueryResult) *LBActiveMinutesResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	service, _ := result.GetString("service_name")
+	ingressIp, _ := result.GetString("ingress_ip")
+
+	return &LBActiveMinutesResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Service:   service,
+		IngressIP: ingressIp,
+		Data:      result.Values,
+	}
+}
+
+type LBPricePerHrResult = LBActiveMinutesResult
+
+func DecodeLBPricePerHrResult(result *QueryResult) *LBPricePerHrResult {
+	return DecodeLBActiveMinutesResult(result)
+}
+
+type ClusterManagementDurationResult struct {
+	Cluster     string
+	Provisioner string
+	Data        []*util.Vector
+}
+
+func DecodeClusterManagementDurationResult(result *QueryResult) *ClusterManagementDurationResult {
+	cluster, _ := result.GetCluster()
+	provisioner, _ := result.GetString("provisioner_name")
+
+	return &ClusterManagementDurationResult{
+		Cluster:     cluster,
+		Provisioner: provisioner,
+		Data:        result.Values,
+	}
+}
+
+type ClusterManagementPricePerHrResult = ClusterManagementDurationResult
+
+func DecodeClusterManagementPricePerHrResult(result *QueryResult) *ClusterManagementPricePerHrResult {
+	return DecodeClusterManagementDurationResult(result)
+}
+
+type DataCountResult struct {
+	Cluster string
+	Data    []*util.Vector
+}
+
+func DecodeDataCountResult(result *QueryResult) *DataCountResult {
+	cluster, _ := result.GetCluster()
+
+	return &DataCountResult{
+		Cluster: cluster,
+		Data:    result.Values,
+	}
+}
+
+type TotalResult struct {
+	Cluster string
+
+	Data []*util.Vector
+}
+
+func DecodeTotalResult(result *QueryResult) *TotalResult {
+	cluster, _ := result.GetCluster()
+	return &TotalResult{
+		Cluster: cluster,
+		Data:    result.Values,
+	}
+}
+
+type TotalCPUResult = TotalResult
+
+func DecodeTotalCPUResult(result *QueryResult) *TotalCPUResult {
+	return DecodeTotalResult(result)
+}
+
+type TotalRAMResult = TotalResult
+
+func DecodeTotalRAMResult(result *QueryResult) *TotalRAMResult {
+	return DecodeTotalResult(result)
+}
+
+type TotalGPUResult = TotalResult
+
+func DecodeTotalGPUResult(result *QueryResult) *TotalGPUResult {
+	return DecodeTotalResult(result)
+}
+
+type TotalStorageResult = TotalResult
+
+func DecodeTotalStorageResult(result *QueryResult) *TotalStorageResult {
+	return DecodeTotalResult(result)
+}
+
+type ClusterResult struct {
+	Data []*util.Vector
+}
+
+func DecodeClusterResult(result *QueryResult) *ClusterResult {
+	return &ClusterResult{
+		Data: result.Values,
+	}
+}
+
+type ClusterCoresResult = ClusterResult
+
+func DecodeClusterCoresResult(result *QueryResult) *ClusterCoresResult {
+	return DecodeClusterResult(result)
+}
+
+type ClusterRAMResult = ClusterResult
+
+func DecodeClusterRAMResult(result *QueryResult) *ClusterRAMResult {
+	return DecodeClusterResult(result)
+}
+
+type ClusterStorageResult = ClusterResult
+
+func DecodeClusterStorageResult(result *QueryResult) *ClusterStorageResult {
+	return DecodeClusterResult(result)
+}
+
+type ClusterTotalResult = ClusterResult
+
+func DecodeClusterTotalResult(result *QueryResult) *ClusterTotalResult {
+	return DecodeClusterResult(result)
+}
+
+type ClusterNodesResult = ClusterResult
+
+func DecodeClusterNodesResult(result *QueryResult) *ClusterNodesResult {
+	return DecodeClusterResult(result)
+}
+
+type ClusterNodesByProviderResult struct {
+	Data []*util.Vector
+}
+
+func DecodeClusterNodesByProviderResult(result *QueryResult) *ClusterNodesByProviderResult {
+	return &ClusterNodesByProviderResult{
+		Data: result.Values,
+	}
+}
+
+type PodsResult struct {
+	UID       string
+	Cluster   string
+	Namespace string
+	Pod       string
+
+	Data []*util.Vector
+}
+
+func DecodePodsResult(result *QueryResult) *PodsResult {
+	uid, _ := result.GetString("uid")
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+
+	return &PodsResult{
+		UID:       uid,
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Data:      result.Values,
+	}
+}
+
+type ContainerMetricResult struct {
+	Cluster   string
+	Node      string
+	Namespace string
+	Pod       string
+	Container string
+
+	Data []*util.Vector
+}
+
+func DecodeContainerMetricResult(result *QueryResult) *ContainerMetricResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &ContainerMetricResult{
+		Cluster:   cluster,
+		Node:      node,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type RAMBytesAllocatedResult = ContainerMetricResult
+
+func DecodeRAMBytesAllocatedResult(result *QueryResult) *RAMBytesAllocatedResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type RAMRequestsResult = ContainerMetricResult
+
+func DecodeRAMRequestsResult(result *QueryResult) *RAMRequestsResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type RAMUsageAvgResult = ContainerMetricResult
+
+func DecodeRAMUsageAvgResult(result *QueryResult) *RAMUsageAvgResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type RAMUsageMaxResult = ContainerMetricResult
+
+func DecodeRAMUsageMaxResult(result *QueryResult) *RAMUsageMaxResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type NodeRAMPricePerGiBHrResult struct {
+	Cluster      string
+	Node         string
+	InstanceType string
+	ProviderID   string
+	Data         []*util.Vector
+}
+
+func DecodeNodeRAMPricePerGiBHrResult(result *QueryResult) *NodeRAMPricePerGiBHrResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	instanceType, _ := result.GetInstanceType()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeRAMPricePerGiBHrResult{
+		Cluster:      cluster,
+		Node:         node,
+		InstanceType: instanceType,
+		ProviderID:   providerId,
+		Data:         result.Values,
+	}
+}
+
+type CPUCoresAllocatedResult = ContainerMetricResult
+
+func DecodeCPUCoresAllocatedResult(result *QueryResult) *CPUCoresAllocatedResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type CPURequestsResult = ContainerMetricResult
+
+func DecodeCPURequestsResult(result *QueryResult) *CPURequestsResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type CPUUsageAvgResult = ContainerMetricResult
+
+func DecodeCPUUsageAvgResult(result *QueryResult) *CPUUsageAvgResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type CPUUsageMaxResult = ContainerMetricResult
+
+func DecodeCPUUsageMaxResult(result *QueryResult) *CPUUsageMaxResult {
+	return DecodeContainerMetricResult(result)
+}
+
+type NodeCPUPricePerHrResult struct {
+	Cluster      string
+	Node         string
+	InstanceType string
+	ProviderID   string
+	Data         []*util.Vector
+}
+
+func DecodeNodeCPUPricePerHrResult(result *QueryResult) *NodeCPUPricePerHrResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	instanceType, _ := result.GetInstanceType()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeCPUPricePerHrResult{
+		Cluster:      cluster,
+		Node:         node,
+		InstanceType: instanceType,
+		ProviderID:   providerId,
+		Data:         result.Values,
+	}
+}
+
+// type alias requested result to allocated result, as you can only request a full GPU
+type GPUsRequestedResult = GPUsAllocatedResult
+
+func DecodeGPUsRequestedResult(result *QueryResult) *GPUsRequestedResult {
+	return DecodeGPUsAllocatedResult(result)
+}
+
+type GPUsAllocatedResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+	Data      []*util.Vector
+}
+
+func DecodeGPUsAllocatedResult(result *QueryResult) *GPUsAllocatedResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &GPUsAllocatedResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type GPUsUsageAvgResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+
+	Data []*util.Vector
+}
+
+func DecodeGPUsUsageAvgResult(result *QueryResult) *GPUsUsageAvgResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &GPUsUsageAvgResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type GPUsUsageMaxResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+	Data      []*util.Vector
+}
+
+func DecodeGPUsUsageMaxResult(result *QueryResult) *GPUsUsageMaxResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &GPUsUsageMaxResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type NodeGPUPricePerHrResult struct {
+	Cluster      string
+	Node         string
+	InstanceType string
+	ProviderID   string
+	Data         []*util.Vector
+}
+
+func DecodeNodeGPUPricePerHrResult(result *QueryResult) *NodeGPUPricePerHrResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	instanceType, _ := result.GetInstanceType()
+	providerId, _ := result.GetProviderID()
+
+	return &NodeGPUPricePerHrResult{
+		Cluster:      cluster,
+		Node:         node,
+		InstanceType: instanceType,
+		ProviderID:   providerId,
+		Data:         result.Values,
+	}
+}
+
+type GPUInfoResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+	Device    string
+	ModelName string
+	UUID      string
+	Data      []*util.Vector
+}
+
+func DecodeGPUInfoResult(result *QueryResult) *GPUInfoResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+	device, _ := result.GetString("device")
+	modelName, _ := result.GetString("modelName")
+	uuid, _ := result.GetString("UUID")
+
+	return &GPUInfoResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Device:    device,
+		ModelName: modelName,
+		UUID:      uuid,
+		Data:      result.Values,
+	}
+}
+
+type IsGPUSharedResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+	Resource  string
+	Data      []*util.Vector
+}
+
+func DecodeIsGPUSharedResult(result *QueryResult) *IsGPUSharedResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+	resource, _ := result.GetString("resource")
+
+	return &IsGPUSharedResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Resource:  resource,
+		Data:      result.Values,
+	}
+}
+
+type PodPVCAllocationResult struct {
+	Cluster               string
+	Namespace             string
+	Pod                   string
+	PersistentVolume      string
+	PersistentVolumeClaim string
+	Data                  []*util.Vector
+}
+
+func DecodePodPVCAllocationResult(result *QueryResult) *PodPVCAllocationResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	pv, _ := result.GetString("persistentvolume")
+	pvc, _ := result.GetString("persistentvolumeclaim")
+
+	return &PodPVCAllocationResult{
+		Cluster:               cluster,
+		Namespace:             namespace,
+		Pod:                   pod,
+		PersistentVolume:      pv,
+		PersistentVolumeClaim: pvc,
+		Data:                  result.Values,
+	}
+}
+
+type PVCBytesRequestedResult struct {
+	Cluster               string
+	Namespace             string
+	PersistentVolumeClaim string
+
+	Data []*util.Vector
+}
+
+func DecodePVCBytesRequestedResult(result *QueryResult) *PVCBytesRequestedResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pvc, _ := result.GetString("persistentvolumeclaim")
+
+	return &PVCBytesRequestedResult{
+		Cluster:               cluster,
+		Namespace:             namespace,
+		PersistentVolumeClaim: pvc,
+		Data:                  result.Values,
+	}
+}
+
+type PVCInfoResult struct {
+	Cluster               string
+	Namespace             string
+	VolumeName            string
+	PersistentVolumeClaim string
+	StorageClass          string
+
+	Data []*util.Vector
+}
+
+func DecodePVCInfoResult(result *QueryResult) *PVCInfoResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	volumeName, _ := result.GetString("volumename")
+	pvc, _ := result.GetString("persistentvolumeclaim")
+	storageClass, _ := result.GetString("storageclass")
+
+	return &PVCInfoResult{
+		Cluster:               cluster,
+		Namespace:             namespace,
+		VolumeName:            volumeName,
+		PersistentVolumeClaim: pvc,
+		StorageClass:          storageClass,
+		Data:                  result.Values,
+	}
+}
+
+type PVBytesResult struct {
+	Cluster          string
+	PersistentVolume string
+
+	Data []*util.Vector
+}
+
+func DecodePVBytesResult(result *QueryResult) *PVBytesResult {
+	cluster, _ := result.GetCluster()
+	pv, _ := result.GetString("persistentvolume")
+
+	return &PVBytesResult{
+		Cluster:          cluster,
+		PersistentVolume: pv,
+		Data:             result.Values,
+	}
+}
+
+type PVPricePerGiBHourResult struct {
+	Cluster          string
+	VolumeName       string
+	PersistentVolume string
+	ProviderID       string
+
+	Data []*util.Vector
+}
+
+func DecodePVPricePerGiBHourResult(result *QueryResult) *PVPricePerGiBHourResult {
+	cluster, _ := result.GetCluster()
+	volumeName, _ := result.GetString("volumename")
+	pv, _ := result.GetString("persistentvolume")
+	providerId, _ := result.GetProviderID()
+
+	return &PVPricePerGiBHourResult{
+		Cluster:          cluster,
+		VolumeName:       volumeName,
+		PersistentVolume: pv,
+		ProviderID:       providerId,
+
+		Data: result.Values,
+	}
+}
+
+type PVInfoResult struct {
+	Cluster          string
+	PersistentVolume string
+	StorageClass     string
+	ProviderID       string
+
+	Data []*util.Vector
+}
+
+func DecodePVInfoResult(result *QueryResult) *PVInfoResult {
+	cluster, _ := result.GetCluster()
+	storageClass, _ := result.GetString("storageclass")
+	providerId, _ := result.GetProviderID()
+	pv, _ := result.GetString("persistentvolume")
+
+	return &PVInfoResult{
+		Cluster:          cluster,
+		PersistentVolume: pv,
+		StorageClass:     storageClass,
+		ProviderID:       providerId,
+		Data:             result.Values,
+	}
+}
+
+// Base type for network usage results
+type NetworkGiBResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+
+	Data []*util.Vector
+}
+
+func DecodeNetworkGiBResult(result *QueryResult) *NetworkGiBResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+
+	return &NetworkGiBResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Data:      result.Values,
+	}
+}
+
+// Base type for network price results
+type NetworkPricePerGiBResult struct {
+	Cluster string
+
+	Data []*util.Vector
+}
+
+func DecodeNetworkPricePerGiBResult(result *QueryResult) *NetworkPricePerGiBResult {
+	cluster, _ := result.GetCluster()
+
+	return &NetworkPricePerGiBResult{
+		Cluster: cluster,
+		Data:    result.Values,
+	}
+}
+
+// Type alias the specific network subclassification results AND price results
+type NetZoneGiBResult = NetworkGiBResult
+type NetZonePricePerGiBResult = NetworkPricePerGiBResult
+
+type NetRegionGiBResult = NetworkGiBResult
+type NetRegionPricePerGiBResult = NetworkPricePerGiBResult
+
+type NetInternetGiBResult = NetworkGiBResult
+type NetInternetPricePerGiBResult = NetworkPricePerGiBResult
+
+func DecodeNetZoneGiBResult(result *QueryResult) *NetZoneGiBResult {
+	return DecodeNetworkGiBResult(result)
+}
+
+func DecodeNetZonePricePerGiBResult(result *QueryResult) *NetZonePricePerGiBResult {
+	return DecodeNetworkPricePerGiBResult(result)
+}
+
+func DecodeNetRegionGiBResult(result *QueryResult) *NetRegionGiBResult {
+	return DecodeNetworkGiBResult(result)
+}
+
+func DecodeNetRegionPricePerGiBResult(result *QueryResult) *NetRegionPricePerGiBResult {
+	return DecodeNetworkPricePerGiBResult(result)
+}
+
+func DecodeNetInternetGiBResult(result *QueryResult) *NetInternetGiBResult {
+	return DecodeNetworkGiBResult(result)
+}
+
+func DecodeNetInternetPricePerGiBResult(result *QueryResult) *NetInternetPricePerGiBResult {
+	return DecodeNetworkPricePerGiBResult(result)
+}
+
+type NetReceiveBytesResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+	Data      []*util.Vector
+}
+
+func DecodeNetReceiveBytesResult(result *QueryResult) *NetReceiveBytesResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &NetReceiveBytesResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type NetTransferBytesResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Container string
+
+	Data []*util.Vector
+}
+
+func DecodeNetTransferBytesResult(result *QueryResult) *NetTransferBytesResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	container, _ := result.GetContainer()
+
+	return &NetTransferBytesResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Container: container,
+		Data:      result.Values,
+	}
+}
+
+type NamespaceAnnotationsResult struct {
+	Namespace   string
+	Annotations map[string]string
+
+	Data []*util.Vector
+}
+
+func DecodeNamespaceAnnotationsResult(result *QueryResult) *NamespaceAnnotationsResult {
+	namespace, _ := result.GetNamespace()
+	annotations := result.GetAnnotations()
+
+	return &NamespaceAnnotationsResult{
+		Namespace:   namespace,
+		Annotations: annotations,
+		Data:        result.Values,
+	}
+}
+
+type PodAnnotationsResult struct {
+	Cluster     string
+	Namespace   string
+	Pod         string
+	Annotations map[string]string
+
+	Data []*util.Vector
+}
+
+func DecodePodAnnotationsResult(result *QueryResult) *PodAnnotationsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	annotations := result.GetAnnotations()
+
+	return &PodAnnotationsResult{
+		Cluster:     cluster,
+		Namespace:   namespace,
+		Pod:         pod,
+		Annotations: annotations,
+		Data:        result.Values,
+	}
+}
+
+type NodeLabelsResult struct {
+	Cluster string
+	Node    string
+	Labels  map[string]string
+	Data    []*util.Vector
+}
+
+func DecodeNodeLabelsResult(result *QueryResult) *NodeLabelsResult {
+	cluster, _ := result.GetCluster()
+	node, _ := result.GetNode()
+	labels := result.GetLabels()
+
+	return &NodeLabelsResult{
+		Cluster: cluster,
+		Node:    node,
+		Labels:  labels,
+		Data:    result.Values,
+	}
+}
+
+type NamespaceLabelsResult struct {
+	Cluster   string
+	Namespace string
+	Labels    map[string]string
+	Data      []*util.Vector
+}
+
+func DecodeNamespaceLabelsResult(result *QueryResult) *NamespaceLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	labels := result.GetLabels()
+
+	return &NamespaceLabelsResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Labels:    labels,
+		Data:      result.Values,
+	}
+}
+
+type PodLabelsResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Labels    map[string]string
+	Data      []*util.Vector
+}
+
+func DecodePodLabelsResult(result *QueryResult) *PodLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	labels := result.GetLabels()
+
+	return &PodLabelsResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Labels:    labels,
+		Data:      result.Values,
+	}
+}
+
+type ServiceLabelsResult struct {
+	Cluster   string
+	Namespace string
+	Service   string
+	Labels    map[string]string
+
+	Data []*util.Vector
+}
+
+func DecodeServiceLabelsResult(result *QueryResult) *ServiceLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	service, _ := result.GetString("service")
+	labels := result.GetLabels()
+
+	return &ServiceLabelsResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Service:   service,
+		Labels:    labels,
+		Data:      result.Values,
+	}
+}
+
+type DeploymentLabelsResult struct {
+	Cluster    string
+	Namespace  string
+	Deployment string
+	Labels     map[string]string
+	Data       []*util.Vector
+}
+
+func DecodeDeploymentLabelsResult(result *QueryResult) *DeploymentLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	deployment, _ := result.GetString("deployment")
+	labels := result.GetLabels()
+
+	return &DeploymentLabelsResult{
+		Cluster:    cluster,
+		Namespace:  namespace,
+		Deployment: deployment,
+		Labels:     labels,
+		Data:       result.Values,
+	}
+}
+
+type StatefulSetLabelsResult struct {
+	Cluster     string
+	Namespace   string
+	StatefulSet string
+	Labels      map[string]string
+	Data        []*util.Vector
+}
+
+func DecodeStatefulSetLabelsResult(result *QueryResult) *StatefulSetLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	statefulSet, _ := result.GetString("statefulSet")
+	labels := result.GetLabels()
+
+	return &StatefulSetLabelsResult{
+		Cluster:     cluster,
+		Namespace:   namespace,
+		StatefulSet: statefulSet,
+		Labels:      labels,
+		Data:        result.Values,
+	}
+}
+
+type DaemonSetLabelsResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	DaemonSet string
+	Labels    map[string]string
+	Data      []*util.Vector
+}
+
+func DecodeDaemonSetLabelsResult(result *QueryResult) *DaemonSetLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	daemonSet, _ := result.GetString("owner_name")
+	labels := result.GetLabels()
+
+	return &DaemonSetLabelsResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		DaemonSet: daemonSet,
+		Labels:    labels,
+		Data:      result.Values,
+	}
+}
+
+type JobLabelsResult struct {
+	Cluster   string
+	Namespace string
+	Pod       string
+	Job       string
+	Labels    map[string]string
+	Data      []*util.Vector
+}
+
+func DecodeJobLabelsResult(result *QueryResult) *JobLabelsResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	pod, _ := result.GetPod()
+	job, _ := result.GetString("owner_name")
+	labels := result.GetLabels()
+
+	return &JobLabelsResult{
+		Cluster:   cluster,
+		Namespace: namespace,
+		Pod:       pod,
+		Job:       job,
+		Labels:    labels,
+		Data:      result.Values,
+	}
+}
+
+type PodsWithReplicaSetOwnerResult struct {
+	Cluster    string
+	Namespace  string
+	Pod        string
+	ReplicaSet string
+
+	Data []*util.Vector
+}
+
+func DecodePodsWithReplicaSetOwnerResult(result *QueryResult) *PodsWithReplicaSetOwnerResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	replicaSet, _ := result.GetString("owner_name")
+	pod, _ := result.GetPod()
+
+	return &PodsWithReplicaSetOwnerResult{
+		Cluster:    cluster,
+		Namespace:  namespace,
+		Pod:        pod,
+		ReplicaSet: replicaSet,
+		Data:       result.Values,
+	}
+}
+
+type ReplicaSetsWithoutOwnersResult struct {
+	Cluster    string
+	Namespace  string
+	ReplicaSet string
+
+	Data []*util.Vector
+}
+
+func DecodeReplicaSetsWithoutOwnersResult(result *QueryResult) *ReplicaSetsWithoutOwnersResult {
+	return &ReplicaSetsWithoutOwnersResult{
+		Data: result.Values,
+	}
+}
+
+type ReplicaSetsWithRolloutResult struct {
+	Cluster    string
+	Namespace  string
+	ReplicaSet string
+	OwnerName  string
+	OwnerKind  string
+	Data       []*util.Vector
+}
+
+func DecodeReplicaSetsWithRolloutResult(result *QueryResult) *ReplicaSetsWithRolloutResult {
+	cluster, _ := result.GetCluster()
+	namespace, _ := result.GetNamespace()
+	replicaSet, _ := result.GetString("replicaset")
+	ownerName, _ := result.GetString("owner_name")
+	ownerKind, _ := result.GetString("owner_kind")
+
+	return &ReplicaSetsWithRolloutResult{
+		Cluster:    cluster,
+		Namespace:  namespace,
+		ReplicaSet: replicaSet,
+		OwnerName:  ownerName,
+		OwnerKind:  ownerKind,
+		Data:       result.Values,
+	}
+}
+
+func DecodeAll[T any](results []*QueryResult, decode ResultDecoder[T]) []*T {
+	decoded := make([]*T, 0, len(results))
+	for _, result := range results {
+		decoded = append(decoded, decode(result))
+	}
+
+	return decoded
+}

+ 42 - 0
core/pkg/source/future.go

@@ -0,0 +1,42 @@
+package source
+
+type ResultDecoder[T any] func(*QueryResult) *T
+
+type Future[T any] struct {
+	decoder     ResultDecoder[T]
+	resultsChan QueryResultsChan
+}
+
+func NewFuture[T any](decoder ResultDecoder[T], resultsChan QueryResultsChan) *Future[T] {
+	return &Future[T]{
+		decoder:     decoder,
+		resultsChan: resultsChan,
+	}
+}
+
+// awaitWith allows internal callers to pass an error collector for grouping futures
+func (f *Future[T]) awaitWith(errorCollector *QueryErrorCollector) ([]*T, error) {
+	defer close(f.resultsChan)
+	result := <-f.resultsChan
+
+	q := result.Query
+	err := result.Error
+
+	if err != nil {
+		errorCollector.AppendError(&QueryError{Query: q, Error: err})
+		return nil, err
+	}
+
+	decoded := DecodeAll(result.Results, f.decoder)
+	return decoded, nil
+}
+
+func (f *Future[T]) Await() ([]*T, error) {
+	results, err := f.resultsChan.Await()
+	if err != nil {
+		return nil, err
+	}
+
+	decoded := DecodeAll(results, f.decoder)
+	return decoded, nil
+}

+ 18 - 0
core/pkg/source/querygroup.go

@@ -9,6 +9,22 @@ type QueryGroupAsyncResult struct {
 	resultsChan    QueryResultsChan
 }
 
+type QueryGroupFuture[T any] struct {
+	errorCollector *QueryErrorCollector
+	future         *Future[T]
+}
+
+func WithGroup[T any](g *QueryGroup, f *Future[T]) *QueryGroupFuture[T] {
+	return &QueryGroupFuture[T]{
+		errorCollector: g.errorCollector,
+		future:         f,
+	}
+}
+
+func (qgf *QueryGroupFuture[T]) Await() ([]*T, error) {
+	return qgf.future.awaitWith(qgf.errorCollector)
+}
+
 func NewQueryGroup() *QueryGroup {
 	var errorCollector QueryErrorCollector
 
@@ -55,6 +71,7 @@ func (qgar *QueryGroupAsyncResult) Await() ([]*QueryResult, error) {
 	return result.Results, nil
 }
 
+/*
 type QueryResultCollection []*QueryResults
 
 func (qrc *QueryResultCollection) HasErrors() bool {
@@ -93,3 +110,4 @@ func (qrc *QueryResultCollection) Error() error {
 
 	return &errCollection
 }
+*/

تفاوت فایلی نمایش داده نمی شود زیرا این فایل بسیار بزرگ است
+ 152 - 172
modules/prometheus-source/pkg/prom/datasource.go


+ 60 - 67
pkg/costmodel/allocation.go

@@ -12,13 +12,6 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 )
 
-// Constants for Network Cost Subtype
-const (
-	networkCrossZoneCost   = "NetworkCrossZoneCost"
-	networkCrossRegionCost = "NetworkCrossRegionCost"
-	networkInternetCost    = "NetworkInternetCost"
-)
-
 // CanCompute should return true if CostModel can act as a valid source for the
 // given time range. In the case of CostModel we want to attempt to compute as
 // long as the range starts in the past. If the CostModel ends up not having
@@ -286,15 +279,15 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	grp := source.NewQueryGroup()
 	ds := cm.DataSource
 
-	resChRAMBytesAllocated := grp.With(ds.QueryRAMBytesAllocated(start, end))
-	resChRAMRequests := grp.With(ds.QueryRAMRequests(start, end))
-	resChRAMUsageAvg := grp.With(ds.QueryRAMUsageAvg(start, end))
-	resChRAMUsageMax := grp.With(ds.QueryRAMUsageMax(start, end))
+	resChRAMBytesAllocated := source.WithGroup(grp, ds.QueryRAMBytesAllocated(start, end))
+	resChRAMRequests := source.WithGroup(grp, ds.QueryRAMRequests(start, end))
+	resChRAMUsageAvg := source.WithGroup(grp, ds.QueryRAMUsageAvg(start, end))
+	resChRAMUsageMax := source.WithGroup(grp, ds.QueryRAMUsageMax(start, end))
 
-	resChCPUCoresAllocated := grp.With(ds.QueryCPUCoresAllocated(start, end))
-	resChCPURequests := grp.With(ds.QueryCPURequests(start, end))
-	resChCPUUsageAvg := grp.With(ds.QueryCPUUsageAvg(start, end))
-	resChCPUUsageMax := grp.With(ds.QueryCPUUsageMax(start, end))
+	resChCPUCoresAllocated := source.WithGroup(grp, ds.QueryCPUCoresAllocated(start, end))
+	resChCPURequests := source.WithGroup(grp, ds.QueryCPURequests(start, end))
+	resChCPUUsageAvg := source.WithGroup(grp, ds.QueryCPUUsageAvg(start, end))
+	resChCPUUsageMax := source.WithGroup(grp, ds.QueryCPUUsageMax(start, end))
 	resCPUUsageMax, _ := resChCPUUsageMax.Await()
 	// This avoids logspam if there is no data for either metric (e.g. if
 	// the Prometheus didn't exist in the queried window of time).
@@ -303,63 +296,63 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	}
 
 	// GPU Queries
-	resChIsGpuShared := grp.With(ds.QueryIsGPUShared(start, end))
-	resChGPUsAllocated := grp.With(ds.QueryGPUsAllocated(start, end))
-	resChGPUsRequested := grp.With(ds.QueryGPUsRequested(start, end))
-	resChGPUsUsageAvg := grp.With(ds.QueryGPUsUsageAvg(start, end))
-	resChGPUsUsageMax := grp.With(ds.QueryGPUsUsageMax(start, end))
-	resChGetGPUInfo := grp.With(ds.QueryGPUInfo(start, end))
+	resChIsGpuShared := source.WithGroup(grp, ds.QueryIsGPUShared(start, end))
+	resChGPUsAllocated := source.WithGroup(grp, ds.QueryGPUsAllocated(start, end))
+	resChGPUsRequested := source.WithGroup(grp, ds.QueryGPUsRequested(start, end))
+	resChGPUsUsageAvg := source.WithGroup(grp, ds.QueryGPUsUsageAvg(start, end))
+	resChGPUsUsageMax := source.WithGroup(grp, ds.QueryGPUsUsageMax(start, end))
+	resChGetGPUInfo := source.WithGroup(grp, ds.QueryGPUInfo(start, end))
 
-	resChNodeCostPerCPUHr := grp.With(ds.QueryNodeCPUPricePerHr(start, end))
-	resChNodeCostPerRAMGiBHr := grp.With(ds.QueryNodeRAMPricePerGiBHr(start, end))
-	resChNodeCostPerGPUHr := grp.With(ds.QueryNodeGPUPricePerHr(start, end))
+	resChNodeCostPerCPUHr := source.WithGroup(grp, ds.QueryNodeCPUPricePerHr(start, end))
+	resChNodeCostPerRAMGiBHr := source.WithGroup(grp, ds.QueryNodeRAMPricePerGiBHr(start, end))
+	resChNodeCostPerGPUHr := source.WithGroup(grp, ds.QueryNodeGPUPricePerHr(start, end))
 
-	resChNodeIsSpot := grp.With(ds.QueryNodeIsSpot(start, end))
-	resChPVCInfo := grp.With(ds.QueryPVCInfo(start, end))
+	resChNodeIsSpot := source.WithGroup(grp, ds.QueryNodeIsSpot(start, end))
+	resChPVCInfo := source.WithGroup(grp, ds.QueryPVCInfo(start, end))
 
-	resChPodPVCAllocation := grp.With(ds.QueryPodPVCAllocation(start, end))
-	resChPVCBytesRequested := grp.With(ds.QueryPVCBytesRequested(start, end))
-	resChPVActiveMins := grp.With(ds.QueryPVActiveMinutes(start, end))
-	resChPVBytes := grp.With(ds.QueryPVBytes(start, end))
-	resChPVCostPerGiBHour := grp.With(ds.QueryPVPricePerGiBHour(start, end))
-	resChPVMeta := grp.With(ds.QueryPVInfo(start, end))
+	resChPodPVCAllocation := source.WithGroup(grp, ds.QueryPodPVCAllocation(start, end))
+	resChPVCBytesRequested := source.WithGroup(grp, ds.QueryPVCBytesRequested(start, end))
+	resChPVActiveMins := source.WithGroup(grp, ds.QueryPVActiveMinutes(start, end))
+	resChPVBytes := source.WithGroup(grp, ds.QueryPVBytes(start, end))
+	resChPVCostPerGiBHour := source.WithGroup(grp, ds.QueryPVPricePerGiBHour(start, end))
+	resChPVMeta := source.WithGroup(grp, ds.QueryPVInfo(start, end))
 
-	resChNetTransferBytes := grp.With(ds.QueryNetTransferBytes(start, end))
-	resChNetReceiveBytes := grp.With(ds.QueryNetReceiveBytes(start, end))
+	resChNetTransferBytes := source.WithGroup(grp, ds.QueryNetTransferBytes(start, end))
+	resChNetReceiveBytes := source.WithGroup(grp, ds.QueryNetReceiveBytes(start, end))
 
-	resChNetZoneGiB := grp.With(ds.QueryNetZoneGiB(start, end))
-	resChNetZoneCostPerGiB := grp.With(ds.QueryNetZoneCostPerGiB(start, end))
+	resChNetZoneGiB := source.WithGroup(grp, ds.QueryNetZoneGiB(start, end))
+	resChNetZonePricePerGiB := source.WithGroup(grp, ds.QueryNetZonePricePerGiB(start, end))
 
-	resChNetRegionGiB := grp.With(ds.QueryNetRegionGiB(start, end))
-	resChNetRegionCostPerGiB := grp.With(ds.QueryNetRegionCostPerGiB(start, end))
+	resChNetRegionGiB := source.WithGroup(grp, ds.QueryNetRegionGiB(start, end))
+	resChNetRegionPricePerGiB := source.WithGroup(grp, ds.QueryNetRegionPricePerGiB(start, end))
 
-	resChNetInternetGiB := grp.With(ds.QueryNetInternetGiB(start, end))
-	resChNetInternetCostPerGiB := grp.With(ds.QueryNetInternetCostPerGiB(start, end))
+	resChNetInternetGiB := source.WithGroup(grp, ds.QueryNetInternetGiB(start, end))
+	resChNetInternetPricePerGiB := source.WithGroup(grp, ds.QueryNetInternetPricePerGiB(start, end))
 
-	var resChNodeLabels *source.QueryGroupAsyncResult
+	var resChNodeLabels *source.QueryGroupFuture[source.NodeLabelsResult]
 	if env.GetAllocationNodeLabelsEnabled() {
-		resChNodeLabels = grp.With(ds.QueryNodeLabels(start, end))
+		resChNodeLabels = source.WithGroup(grp, ds.QueryNodeLabels(start, end))
 	}
 
-	resChNamespaceLabels := grp.With(ds.QueryNamespaceLabels(start, end))
-	resChNamespaceAnnotations := grp.With(ds.QueryNamespaceAnnotations(start, end))
+	resChNamespaceLabels := source.WithGroup(grp, ds.QueryNamespaceLabels(start, end))
+	resChNamespaceAnnotations := source.WithGroup(grp, ds.QueryNamespaceAnnotations(start, end))
 
-	resChPodLabels := grp.With(ds.QueryPodLabels(start, end))
-	resChPodAnnotations := grp.With(ds.QueryPodAnnotations(start, end))
+	resChPodLabels := source.WithGroup(grp, ds.QueryPodLabels(start, end))
+	resChPodAnnotations := source.WithGroup(grp, ds.QueryPodAnnotations(start, end))
 
-	resChServiceLabels := grp.With(ds.QueryServiceLabels(start, end))
-	resChDeploymentLabels := grp.With(ds.QueryDeploymentLabels(start, end))
-	resChStatefulSetLabels := grp.With(ds.QueryStatefulSetLabels(start, end))
-	resChDaemonSetLabels := grp.With(ds.QueryDaemonSetLabels(start, end))
+	resChServiceLabels := source.WithGroup(grp, ds.QueryServiceLabels(start, end))
+	resChDeploymentLabels := source.WithGroup(grp, ds.QueryDeploymentLabels(start, end))
+	resChStatefulSetLabels := source.WithGroup(grp, ds.QueryStatefulSetLabels(start, end))
+	resChDaemonSetLabels := source.WithGroup(grp, ds.QueryDaemonSetLabels(start, end))
 
-	resChPodsWithReplicaSetOwner := grp.With(ds.QueryPodsWithReplicaSetOwner(start, end))
-	resChReplicaSetsWithoutOwners := grp.With(ds.QueryReplicaSetsWithoutOwners(start, end))
-	resChReplicaSetsWithRolloutOwner := grp.With(ds.QueryReplicaSetsWithRollout(start, end))
+	resChPodsWithReplicaSetOwner := source.WithGroup(grp, ds.QueryPodsWithReplicaSetOwner(start, end))
+	resChReplicaSetsWithoutOwners := source.WithGroup(grp, ds.QueryReplicaSetsWithoutOwners(start, end))
+	resChReplicaSetsWithRolloutOwner := source.WithGroup(grp, ds.QueryReplicaSetsWithRollout(start, end))
 
-	resChJobLabels := grp.With(ds.QueryJobLabels(start, end))
+	resChJobLabels := source.WithGroup(grp, ds.QueryJobLabels(start, end))
 
-	resChLBCostPerHr := grp.With(ds.QueryLBPricePerHr(start, end))
-	resChLBActiveMins := grp.With(ds.QueryLBActiveMinutes(start, end))
+	resChLBCostPerHr := source.WithGroup(grp, ds.QueryLBPricePerHr(start, end))
+	resChLBActiveMins := source.WithGroup(grp, ds.QueryLBActiveMinutes(start, end))
 
 	resCPUCoresAllocated, _ := resChCPUCoresAllocated.Await()
 	resCPURequests, _ := resChCPURequests.Await()
@@ -394,13 +387,13 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	resNetTransferBytes, _ := resChNetTransferBytes.Await()
 	resNetReceiveBytes, _ := resChNetReceiveBytes.Await()
 	resNetZoneGiB, _ := resChNetZoneGiB.Await()
-	resNetZoneCostPerGiB, _ := resChNetZoneCostPerGiB.Await()
+	resNetZonePricePerGiB, _ := resChNetZonePricePerGiB.Await()
 	resNetRegionGiB, _ := resChNetRegionGiB.Await()
-	resNetRegionCostPerGiB, _ := resChNetRegionCostPerGiB.Await()
+	resNetRegionPricePerGiB, _ := resChNetRegionPricePerGiB.Await()
 	resNetInternetGiB, _ := resChNetInternetGiB.Await()
-	resNetInternetCostPerGiB, _ := resChNetInternetCostPerGiB.Await()
+	resNetInternetPricePerGiB, _ := resChNetInternetPricePerGiB.Await()
 
-	var resNodeLabels []*source.QueryResult
+	var resNodeLabels []*source.NodeLabelsResult
 	if env.GetAllocationNodeLabelsEnabled() {
 		resNodeLabels, _ = resChNodeLabels.Await()
 	}
@@ -438,15 +431,15 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	applyRAMBytesRequested(podMap, resRAMRequests, podUIDKeyMap)
 	applyRAMBytesUsedAvg(podMap, resRAMUsageAvg, podUIDKeyMap)
 	applyRAMBytesUsedMax(podMap, resRAMUsageMax, podUIDKeyMap)
-	applyGPUUsage(podMap, resGPUsUsageAvg, podUIDKeyMap, GpuUsageAverageMode)
-	applyGPUUsage(podMap, resGPUsUsageMax, podUIDKeyMap, GpuUsageMaxMode)
-	applyGPUUsage(podMap, resIsGpuShared, podUIDKeyMap, GpuIsSharedMode)
-	applyGPUUsage(podMap, resGetGPUInfo, podUIDKeyMap, GpuInfoMode)
+	applyGPUUsageAvg(podMap, resGPUsUsageAvg, podUIDKeyMap)
+	applyGPUUsageMax(podMap, resGPUsUsageMax, podUIDKeyMap)
+	applyGPUUsageShared(podMap, resIsGpuShared, podUIDKeyMap)
+	applyGPUInfo(podMap, resGetGPUInfo, podUIDKeyMap)
 	applyGPUsAllocated(podMap, resGPUsRequested, resGPUsAllocated, podUIDKeyMap)
 	applyNetworkTotals(podMap, resNetTransferBytes, resNetReceiveBytes, podUIDKeyMap)
-	applyNetworkAllocation(podMap, resNetZoneGiB, resNetZoneCostPerGiB, podUIDKeyMap, networkCrossZoneCost)
-	applyNetworkAllocation(podMap, resNetRegionGiB, resNetRegionCostPerGiB, podUIDKeyMap, networkCrossRegionCost)
-	applyNetworkAllocation(podMap, resNetInternetGiB, resNetInternetCostPerGiB, podUIDKeyMap, networkInternetCost)
+	applyNetworkAllocation(podMap, resNetZoneGiB, resNetZonePricePerGiB, podUIDKeyMap, applyCrossZoneNetworkAllocation)
+	applyNetworkAllocation(podMap, resNetRegionGiB, resNetRegionPricePerGiB, podUIDKeyMap, applyCrossRegionNetworkAllocation)
+	applyNetworkAllocation(podMap, resNetInternetGiB, resNetInternetPricePerGiB, podUIDKeyMap, applyInternetNetworkAllocation)
 
 	// In the case that a two pods with the same name had different containers,
 	// we will double-count the containers. There is no way to associate each

تفاوت فایلی نمایش داده نمی شود زیرا این فایل بسیار بزرگ است
+ 307 - 194
pkg/costmodel/allocation_helpers.go


+ 6 - 2
pkg/costmodel/allocation_helpers_test.go

@@ -362,7 +362,11 @@ func TestBuildPVMap(t *testing.T) {
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
 			pvMap := make(map[pvKey]*pv)
-			buildPVMap(testCase.resolution, pvMap, testCase.resultsPVCostPerGiBHour, testCase.resultsActiveMinutes, []*source.QueryResult{}, window)
+			pvCostResults := source.DecodeAll(testCase.resultsPVCostPerGiBHour, source.DecodePVPricePerGiBHourResult)
+			pvActiveMinsResults := source.DecodeAll(testCase.resultsActiveMinutes, source.DecodePVActiveMinutesResult)
+			pvInfoResult := []*source.PVInfoResult{}
+
+			buildPVMap(testCase.resolution, pvMap, pvCostResults, pvActiveMinsResults, pvInfoResult, window)
 			if len(pvMap) != len(testCase.expected) {
 				t.Errorf("pv map does not have the expected length %d : %d", len(pvMap), len(testCase.expected))
 			}
@@ -579,7 +583,7 @@ func TestCalculateStartAndEnd(t *testing.T) {
 
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
-			start, end := calculateStartAndEnd(testCase.result, testCase.resolution, window)
+			start, end := calculateStartAndEnd(testCase.result.Values, testCase.resolution, window)
 			if !start.Equal(testCase.expectedStart) {
 				t.Errorf("start does not match: expected %v; got %v", testCase.expectedStart, start)
 			}

+ 208 - 197
pkg/costmodel/cluster.go

@@ -132,13 +132,13 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 
 	grp := source.NewQueryGroup()
 
-	resChPVCost := grp.With(dataSource.QueryPVPricePerGiBHour(start, end))
-	resChPVSize := grp.With(dataSource.QueryPVBytes(start, end))
-	resChActiveMins := grp.With(dataSource.QueryPVActiveMinutes(start, end))
-	resChPVStorageClass := grp.With(dataSource.QueryPVInfo(start, end))
-	resChPVUsedAvg := grp.With(dataSource.QueryPVUsedAverage(start, end))
-	resChPVUsedMax := grp.With(dataSource.QueryPVUsedMax(start, end))
-	resChPVCInfo := grp.With(dataSource.QueryPVCInfo(start, end))
+	resChPVCost := source.WithGroup(grp, dataSource.QueryPVPricePerGiBHour(start, end))
+	resChPVSize := source.WithGroup(grp, dataSource.QueryPVBytes(start, end))
+	resChActiveMins := source.WithGroup(grp, dataSource.QueryPVActiveMinutes(start, end))
+	resChPVStorageClass := source.WithGroup(grp, dataSource.QueryPVInfo(start, end))
+	resChPVUsedAvg := source.WithGroup(grp, dataSource.QueryPVUsedAverage(start, end))
+	resChPVUsedMax := source.WithGroup(grp, dataSource.QueryPVUsedMax(start, end))
+	resChPVCInfo := source.WithGroup(grp, dataSource.QueryPVCInfo(start, end))
 
 	resPVCost, _ := resChPVCost.Await()
 	resPVSize, _ := resChPVSize.Await()
@@ -157,20 +157,20 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/RootDeviceStorage.html
 	// https://learn.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview#temporary-disk
 	// https://cloud.google.com/compute/docs/disks/local-ssd
-	resLocalStorageCost := []*source.QueryResult{}
-	resLocalStorageUsedCost := []*source.QueryResult{}
-	resLocalStorageUsedAvg := []*source.QueryResult{}
-	resLocalStorageUsedMax := []*source.QueryResult{}
-	resLocalStorageBytes := []*source.QueryResult{}
-	resLocalActiveMins := []*source.QueryResult{}
+	resLocalStorageCost := []*source.LocalStorageCostResult{}
+	resLocalStorageUsedCost := []*source.LocalStorageUsedCostResult{}
+	resLocalStorageUsedAvg := []*source.LocalStorageUsedAvgResult{}
+	resLocalStorageUsedMax := []*source.LocalStorageUsedMaxResult{}
+	resLocalStorageBytes := []*source.LocalStorageBytesResult{}
+	resLocalActiveMins := []*source.LocalStorageActiveMinutesResult{}
 
 	if env.GetAssetIncludeLocalDiskCost() {
-		resChLocalStorageCost := grp.With(dataSource.QueryLocalStorageCost(start, end))
-		resChLocalStorageUsedCost := grp.With(dataSource.QueryLocalStorageUsedCost(start, end))
-		resChLocalStoreageUsedAvg := grp.With(dataSource.QueryLocalStorageUsedAvg(start, end))
-		resChLocalStoreageUsedMax := grp.With(dataSource.QueryLocalStorageUsedMax(start, end))
-		resChLocalStorageBytes := grp.With(dataSource.QueryLocalStorageBytes(start, end))
-		resChLocalActiveMins := grp.With(dataSource.QueryLocalStorageActiveMinutes(start, end))
+		resChLocalStorageCost := source.WithGroup(grp, dataSource.QueryLocalStorageCost(start, end))
+		resChLocalStorageUsedCost := source.WithGroup(grp, dataSource.QueryLocalStorageUsedCost(start, end))
+		resChLocalStoreageUsedAvg := source.WithGroup(grp, dataSource.QueryLocalStorageUsedAvg(start, end))
+		resChLocalStoreageUsedMax := source.WithGroup(grp, dataSource.QueryLocalStorageUsedMax(start, end))
+		resChLocalStorageBytes := source.WithGroup(grp, dataSource.QueryLocalStorageBytes(start, end))
+		resChLocalActiveMins := source.WithGroup(grp, dataSource.QueryLocalStorageActiveMinutes(start, end))
 
 		resLocalStorageCost, _ = resChLocalStorageCost.Await()
 		resLocalStorageUsedCost, _ = resChLocalStorageUsedCost.Await()
@@ -198,24 +198,24 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	// Start with local storage bytes so that the device with the largest size which has passed the
 	// query filters can be determined
 	for _, result := range resLocalStorageBytes {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetDevice()
-		if err != nil {
+		device := result.Device
+		if device == "" {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
-		bytes := result.Values[0].Value
+		bytes := result.Data[0].Value
 		// Ignore disks that are larger than the max size
 		if bytes > MAX_LOCAL_STORAGE_SIZE {
 			continue
@@ -240,24 +240,24 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	}
 
 	for _, result := range resLocalStorageCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetDevice()
-		if err != nil {
+		device := result.Device
+		if device == "" {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
-		cost := result.Values[0].Value
+		cost := result.Data[0].Value
 		key := DiskIdentifier{cluster, name}
 		ls, ok := localStorageDisks[key]
 		if !ok || ls.device != device {
@@ -268,24 +268,24 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	}
 
 	for _, result := range resLocalStorageUsedCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
-			log.Warnf("ClusterDisks: local storage usage data missing instance")
+		name := result.Instance
+		if name == "" {
+			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetDevice()
-		if err != nil {
+		device := result.Device
+		if device == "" {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
-		cost := result.Values[0].Value
+		cost := result.Data[0].Value
 		key := DiskIdentifier{cluster, name}
 		ls, ok := localStorageDisks[key]
 		if !ok || ls.device != device {
@@ -295,24 +295,24 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	}
 
 	for _, result := range resLocalStorageUsedAvg {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetDevice()
-		if err != nil {
+		device := result.Device
+		if device == "" {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
-		bytesAvg := result.Values[0].Value
+		bytesAvg := result.Data[0].Value
 		key := DiskIdentifier{cluster, name}
 		ls, ok := localStorageDisks[key]
 		if !ok || ls.device != device {
@@ -322,24 +322,24 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	}
 
 	for _, result := range resLocalStorageUsedMax {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterDisks: local storage data missing instance")
 			continue
 		}
 
-		device, err := result.GetDevice()
-		if err != nil {
+		device := result.Device
+		if device == "" {
 			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
-		bytesMax := result.Values[0].Value
+		bytesMax := result.Data[0].Value
 		key := DiskIdentifier{cluster, name}
 		ls, ok := localStorageDisks[key]
 		if !ok || ls.device != device {
@@ -349,19 +349,19 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	}
 
 	for _, result := range resLocalActiveMins {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.DedupedWarningf(5, "ClusterDisks: local active mins data missing instance")
 			continue
 		}
 
-		providerID, err := result.GetProviderID()
-		if err != nil {
+		providerID := result.ProviderID
+		if providerID == "" {
 			log.DedupedWarningf(5, "ClusterDisks: local active mins data missing instance")
 			continue
 		}
@@ -374,12 +374,12 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 
 		ls.disk.ProviderID = provider.ParseLocalDiskID(providerID)
 
-		if len(result.Values) == 0 {
+		if len(result.Data) == 0 {
 			continue
 		}
 
-		s := time.Unix(int64(result.Values[0].Timestamp), 0)
-		e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0)
+		s := time.Unix(int64(result.Data[0].Timestamp), 0)
+		e := time.Unix(int64(result.Data[len(result.Data)-1].Timestamp), 0)
 		mins := e.Sub(s).Minutes()
 
 		// TODO niko/assets if mins >= threshold, interpolate for missing data?
@@ -397,13 +397,12 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	var unTracedDiskLogData []DiskIdentifier
 	//Iterating through Persistent Volume given by custom metrics kubecost_pv_info and assign the storage class if known and __unknown__ if not populated.
 	for _, result := range resPVStorageClass {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, _ := result.GetString("persistentvolume")
-
+		name := result.PersistentVolume
 		key := DiskIdentifier{cluster, name}
 		if _, ok := diskMap[key]; !ok {
 			if !slices.Contains(unTracedDiskLogData, key) {
@@ -412,13 +411,12 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 			continue
 		}
 
-		if len(result.Values) == 0 {
+		if len(result.Data) == 0 {
 			continue
 		}
 
-		storageClass, err := result.GetString("storageclass")
-
-		if err != nil {
+		storageClass := result.StorageClass
+		if storageClass == "" {
 			diskMap[key].StorageClass = opencost.UnknownStorageClass
 		} else {
 			diskMap[key].StorageClass = storageClass
@@ -542,22 +540,22 @@ func ClusterNodes(dataSource source.OpenCostDataSource, cp models.Provider, star
 	optionalGrp := source.NewQueryGroup()
 
 	// return errors if these fail
-	resChNodeCPUHourlyCost := requiredGrp.With(dataSource.QueryNodeCPUPricePerHr(start, end))
-	resChNodeCPUCoresCapacity := requiredGrp.With(dataSource.QueryNodeCPUCoresCapacity(start, end))
-	resChNodeCPUCoresAllocatable := requiredGrp.With(dataSource.QueryNodeCPUCoresAllocatable(start, end))
-	resChNodeRAMHourlyCost := requiredGrp.With(dataSource.QueryNodeRAMPricePerGiBHr(start, end))
-	resChNodeRAMBytesCapacity := requiredGrp.With(dataSource.QueryNodeRAMBytesCapacity(start, end))
-	resChNodeRAMBytesAllocatable := requiredGrp.With(dataSource.QueryNodeRAMBytesAllocatable(start, end))
-	resChNodeGPUCount := requiredGrp.With(dataSource.QueryNodeGPUCount(start, end))
-	resChNodeGPUHourlyPrice := requiredGrp.With(dataSource.QueryNodeGPUPricePerHr(start, end))
-	resChActiveMins := requiredGrp.With(dataSource.QueryNodeActiveMinutes(start, end))
-	resChIsSpot := requiredGrp.With(dataSource.QueryNodeIsSpot(start, end))
+	resChNodeCPUHourlyCost := source.WithGroup(requiredGrp, dataSource.QueryNodeCPUPricePerHr(start, end))
+	resChNodeCPUCoresCapacity := source.WithGroup(requiredGrp, dataSource.QueryNodeCPUCoresCapacity(start, end))
+	resChNodeCPUCoresAllocatable := source.WithGroup(requiredGrp, dataSource.QueryNodeCPUCoresAllocatable(start, end))
+	resChNodeRAMHourlyCost := source.WithGroup(requiredGrp, dataSource.QueryNodeRAMPricePerGiBHr(start, end))
+	resChNodeRAMBytesCapacity := source.WithGroup(requiredGrp, dataSource.QueryNodeRAMBytesCapacity(start, end))
+	resChNodeRAMBytesAllocatable := source.WithGroup(requiredGrp, dataSource.QueryNodeRAMBytesAllocatable(start, end))
+	resChNodeGPUCount := source.WithGroup(requiredGrp, dataSource.QueryNodeGPUCount(start, end))
+	resChNodeGPUHourlyPrice := source.WithGroup(requiredGrp, dataSource.QueryNodeGPUPricePerHr(start, end))
+	resChActiveMins := source.WithGroup(requiredGrp, dataSource.QueryNodeActiveMinutes(start, end))
+	resChIsSpot := source.WithGroup(requiredGrp, dataSource.QueryNodeIsSpot(start, end))
 
 	// Do not return errors if these fail, but log warnings
-	resChNodeCPUModeTotal := optionalGrp.With(dataSource.QueryNodeCPUModeTotal(start, end))
-	resChNodeRAMSystemPct := optionalGrp.With(dataSource.QueryNodeRAMSystemPercent(start, end))
-	resChNodeRAMUserPct := optionalGrp.With(dataSource.QueryNodeRAMUserPercent(start, end))
-	resChLabels := optionalGrp.With(dataSource.QueryNodeLabels(start, end))
+	resChNodeCPUModeTotal := source.WithGroup(optionalGrp, dataSource.QueryNodeCPUModeTotal(start, end))
+	resChNodeRAMSystemPct := source.WithGroup(optionalGrp, dataSource.QueryNodeRAMSystemPercent(start, end))
+	resChNodeRAMUserPct := source.WithGroup(optionalGrp, dataSource.QueryNodeRAMUserPercent(start, end))
+	resChLabels := source.WithGroup(optionalGrp, dataSource.QueryNodeLabels(start, end))
 
 	resNodeCPUHourlyCost, _ := resChNodeCPUHourlyCost.Await()
 	resNodeCPUCoresCapacity, _ := resChNodeCPUCoresCapacity.Await()
@@ -587,7 +585,7 @@ func ClusterNodes(dataSource source.OpenCostDataSource, cp models.Provider, star
 		return nil, requiredGrp.Error()
 	}
 
-	activeDataMap := buildActiveDataMap(resActiveMins, nodeKeyGen, resolution, opencost.NewClosedWindow(start, end))
+	activeDataMap := buildActiveDataMap(resActiveMins, nodeKeyGen, nodeValues, resolution, opencost.NewClosedWindow(start, end))
 
 	gpuCountMap := buildGPUCountMap(resNodeGPUCount)
 	preemptibleMap := buildPreemptibleMap(resIsSpot)
@@ -681,8 +679,8 @@ func ClusterLoadBalancers(dataSource source.OpenCostDataSource, start, end time.
 
 	grp := source.NewQueryGroup()
 
-	resChLBCost := grp.With(dataSource.QueryLBPricePerHr(start, end))
-	resChActiveMins := grp.With(dataSource.QueryLBActiveMinutes(start, end))
+	resChLBCost := source.WithGroup(grp, dataSource.QueryLBPricePerHr(start, end))
+	resChActiveMins := source.WithGroup(grp, dataSource.QueryLBActiveMinutes(start, end))
 
 	resLBCost, _ := resChLBCost.Await()
 	resActiveMins, _ := resChActiveMins.Await()
@@ -692,7 +690,7 @@ func ClusterLoadBalancers(dataSource source.OpenCostDataSource, start, end time.
 	}
 
 	loadBalancerMap := make(map[LoadBalancerIdentifier]*LoadBalancer, len(resActiveMins))
-	activeMap := buildActiveDataMap(resActiveMins, loadBalancerKeyGen, resolution, opencost.NewClosedWindow(start, end))
+	activeMap := buildActiveDataMap(resActiveMins, loadBalancerKeyGen, lbValues, resolution, opencost.NewClosedWindow(start, end))
 
 	for _, result := range resLBCost {
 		key, ok := loadBalancerKeyGen(result)
@@ -700,7 +698,7 @@ func ClusterLoadBalancers(dataSource source.OpenCostDataSource, start, end time.
 			continue
 		}
 
-		lbPricePerHr := result.Values[0].Value
+		lbPricePerHr := result.Data[0].Value
 
 		lb := &LoadBalancer{
 			Cluster:   key.Cluster,
@@ -734,8 +732,8 @@ func ClusterManagement(dataSource source.OpenCostDataSource, start, end time.Tim
 
 	grp := source.NewQueryGroup()
 
-	resChCMPrice := grp.With(dataSource.QueryClusterManagementPricePerHr(start, end))
-	resChCMDur := grp.With(dataSource.QueryClusterManagementDuration(start, end))
+	resChCMPrice := source.WithGroup(grp, dataSource.QueryClusterManagementPricePerHr(start, end))
+	resChCMDur := source.WithGroup(grp, dataSource.QueryClusterManagementDuration(start, end))
 
 	resCMPrice, _ := resChCMPrice.Await()
 	resCMDur, _ := resChCMDur.Await()
@@ -745,7 +743,7 @@ func ClusterManagement(dataSource source.OpenCostDataSource, start, end time.Tim
 	}
 
 	clusterManagementPriceMap := make(map[ClusterManagementIdentifier]*ClusterManagementCost, len(resCMDur))
-	activeMap := buildActiveDataMap(resCMDur, clusterManagementKeyGen, resolution, opencost.NewClosedWindow(start, end))
+	activeMap := buildActiveDataMap(resCMDur, clusterManagementKeyGen, clusterManagementValues, resolution, opencost.NewClosedWindow(start, end))
 
 	for _, result := range resCMPrice {
 		key, ok := clusterManagementKeyGen(result)
@@ -753,7 +751,7 @@ func ClusterManagement(dataSource source.OpenCostDataSource, start, end time.Tim
 			continue
 		}
 
-		cmPricePerHr := result.Values[0].Value
+		cmPricePerHr := result.Data[0].Value
 		cm := &ClusterManagementCost{
 			Cluster:     key.Cluster,
 			Provisioner: key.Provisioner,
@@ -798,31 +796,30 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 
 	grp := source.NewQueryGroup()
 
-	resChs := []*source.QueryGroupAsyncResult{}
+	queryDataCount := source.WithGroup(grp, dataSource.QueryDataCount(start, end))
+	queryTotalGPU := source.WithGroup(grp, dataSource.QueryTotalGPU(start, end))
+	queryTotalCPU := source.WithGroup(grp, dataSource.QueryTotalCPU(start, end))
+	queryTotalRAM := source.WithGroup(grp, dataSource.QueryTotalRAM(start, end))
+	queryTotalStorage := source.WithGroup(grp, dataSource.QueryTotalStorage(start, end))
+	queryTotalLocalStorage := source.WithGroup(grp, dataSource.QueryLocalStorageBytesByProvider(providerName, start, end))
 
-	queryDataCount := grp.With(dataSource.QueryDataCount(start, end))
-	queryTotalGPU := grp.With(dataSource.QueryTotalGPU(start, end))
-	queryTotalCPU := grp.With(dataSource.QueryTotalCPU(start, end))
-	queryTotalRAM := grp.With(dataSource.QueryTotalRAM(start, end))
-	queryTotalStorage := grp.With(dataSource.QueryTotalStorage(start, end))
-	queryTotalLocalStorage := grp.With(dataSource.QueryLocalStorageBytesByProvider(providerName, start, end))
-
-	resChs = append(resChs, queryDataCount, queryTotalGPU, queryTotalCPU, queryTotalRAM, queryTotalStorage, queryTotalLocalStorage)
+	var queryCPUModePct *source.QueryGroupFuture[source.NodeCPUModePercentResult]
+	var queryRAMSystemPct *source.QueryGroupFuture[source.NodeRAMSystemPercentResult]
+	var queryRAMUserPct *source.QueryGroupFuture[source.NodeRAMUserPercentResult]
+	var queryUsedLocalStorage *source.QueryGroupFuture[source.LocalStorageUsedByProviderResult]
 
 	if withBreakdown {
-		queryCPUModePct := grp.With(dataSource.QueryNodeCPUModePercent(start, end))
-		queryRAMSystemPct := grp.With(dataSource.QueryNodeRAMSystemPercent(start, end))
-		queryRAMUserPct := grp.With(dataSource.QueryNodeRAMUserPercent(start, end))
-		queryUsedLocalStorage := grp.With(dataSource.QueryLocalStorageUsedByProvider(providerName, start, end))
-
-		resChs = append(resChs, queryCPUModePct, queryRAMSystemPct, queryRAMUserPct, queryUsedLocalStorage)
+		queryCPUModePct = source.WithGroup(grp, dataSource.QueryNodeCPUModePercent(start, end))
+		queryRAMSystemPct = source.WithGroup(grp, dataSource.QueryNodeRAMSystemPercent(start, end))
+		queryRAMUserPct = source.WithGroup(grp, dataSource.QueryNodeRAMUserPercent(start, end))
+		queryUsedLocalStorage = source.WithGroup(grp, dataSource.QueryLocalStorageUsedByProvider(providerName, start, end))
 	}
 
-	resDataCount, _ := resChs[0].Await()
-	resTotalGPU, _ := resChs[1].Await()
-	resTotalCPU, _ := resChs[2].Await()
-	resTotalRAM, _ := resChs[3].Await()
-	resTotalStorage, _ := resChs[4].Await()
+	resDataCount, _ := queryDataCount.Await()
+	resTotalGPU, _ := queryTotalGPU.Await()
+	resTotalCPU, _ := queryTotalCPU.Await()
+	resTotalRAM, _ := queryTotalRAM.Await()
+	resTotalStorage, _ := queryTotalStorage.Await()
 
 	if grp.HasErrors() {
 		return nil, grp.Error()
@@ -832,13 +829,14 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 
 	dataMinsByCluster := map[string]float64{}
 	for _, result := range resDataCount {
-		clusterID, _ := result.GetCluster()
+		clusterID := result.Cluster
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
+
 		dataMins := mins
-		if len(result.Values) > 0 {
-			dataMins = result.Values[0].Value
+		if len(result.Data) > 0 {
+			dataMins = result.Data[0].Value
 		} else {
 			log.Warnf("Cluster cost data count returned no results for cluster %s", clusterID)
 		}
@@ -864,18 +862,20 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 
 	// Helper function to iterate over Prom query results, parsing the raw values into
 	// the intermediate costData structure.
-	setCostsFromResults := func(costData map[string]map[string]float64, results []*source.QueryResult, name string, discount float64, customDiscount float64) {
+	setCostsFromResults := func(costData map[string]map[string]float64, results []*source.TotalResult, name string, discount float64, customDiscount float64) {
 		for _, result := range results {
-			clusterID, _ := result.GetCluster()
+			clusterID := result.Cluster
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
+
 			if _, ok := costData[clusterID]; !ok {
 				costData[clusterID] = map[string]float64{}
 			}
-			if len(result.Values) > 0 {
-				costData[clusterID][name] += result.Values[0].Value * (1.0 - discount) * (1.0 - customDiscount)
-				costData[clusterID]["total"] += result.Values[0].Value * (1.0 - discount) * (1.0 - customDiscount)
+
+			if len(result.Data) > 0 {
+				costData[clusterID][name] += result.Data[0].Value * (1.0 - discount) * (1.0 - customDiscount)
+				costData[clusterID]["total"] += result.Data[0].Value * (1.0 - discount) * (1.0 - customDiscount)
 			}
 		}
 	}
@@ -886,7 +886,7 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 	setCostsFromResults(costData, resTotalGPU, "gpu", 0.0, customDiscount)
 	setCostsFromResults(costData, resTotalStorage, "storage", 0.0, customDiscount)
 
-	resTotalLocalStorage, err := resChs[5].Await()
+	resTotalLocalStorage, err := queryTotalLocalStorage.Await()
 	if err != nil {
 		return nil, err
 	}
@@ -899,16 +899,16 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 	ramBreakdownMap := map[string]*ClusterCostsBreakdown{}
 	pvUsedCostMap := map[string]float64{}
 	if withBreakdown {
-		resCPUModePct, _ := resChs[6].Await()
-		resRAMSystemPct, _ := resChs[7].Await()
-		resRAMUserPct, _ := resChs[8].Await()
+		resCPUModePct, _ := queryCPUModePct.Await()
+		resRAMSystemPct, _ := queryRAMSystemPct.Await()
+		resRAMUserPct, _ := queryRAMUserPct.Await()
 
 		if grp.HasErrors() {
 			return nil, grp.Error()
 		}
 
 		for _, result := range resCPUModePct {
-			clusterID, _ := result.GetCluster()
+			clusterID := result.Cluster
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -917,26 +917,26 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 			}
 			cpuBD := cpuBreakdownMap[clusterID]
 
-			mode, err := result.GetString("mode")
-			if err != nil {
+			mode := result.Mode
+			if mode == "" {
 				log.Warnf("ComputeClusterCosts: unable to read CPU mode: %s", err)
 				mode = "other"
 			}
 
 			switch mode {
 			case "idle":
-				cpuBD.Idle += result.Values[0].Value
+				cpuBD.Idle += result.Data[0].Value
 			case "system":
-				cpuBD.System += result.Values[0].Value
+				cpuBD.System += result.Data[0].Value
 			case "user":
-				cpuBD.User += result.Values[0].Value
+				cpuBD.User += result.Data[0].Value
 			default:
-				cpuBD.Other += result.Values[0].Value
+				cpuBD.Other += result.Data[0].Value
 			}
 		}
 
 		for _, result := range resRAMSystemPct {
-			clusterID, _ := result.GetCluster()
+			clusterID := result.Cluster
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -944,10 +944,10 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 				ramBreakdownMap[clusterID] = &ClusterCostsBreakdown{}
 			}
 			ramBD := ramBreakdownMap[clusterID]
-			ramBD.System += result.Values[0].Value
+			ramBD.System += result.Data[0].Value
 		}
 		for _, result := range resRAMUserPct {
-			clusterID, _ := result.GetCluster()
+			clusterID := result.Cluster
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
@@ -955,7 +955,7 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 				ramBreakdownMap[clusterID] = &ClusterCostsBreakdown{}
 			}
 			ramBD := ramBreakdownMap[clusterID]
-			ramBD.User += result.Values[0].Value
+			ramBD.User += result.Data[0].Value
 		}
 		for _, ramBD := range ramBreakdownMap {
 			remaining := 1.0
@@ -965,17 +965,17 @@ func (a *Accesses) ComputeClusterCosts(dataSource source.OpenCostDataSource, pro
 			ramBD.Idle = remaining
 		}
 
-		resUsedLocalStorage, err := resChs[9].Await()
+		resUsedLocalStorage, err := queryUsedLocalStorage.Await()
 		if err != nil {
 			return nil, err
 		}
 
 		for _, result := range resUsedLocalStorage {
-			clusterID, _ := result.GetCluster()
+			clusterID := result.Cluster
 			if clusterID == "" {
 				clusterID = defaultClusterID
 			}
-			pvUsedCostMap[clusterID] += result.Values[0].Value
+			pvUsedCostMap[clusterID] += result.Data[0].Value
 		}
 	}
 
@@ -1025,14 +1025,14 @@ type Totals struct {
 	StorageCost [][]string `json:"storageCost"`
 }
 
-func resultToTotals(qrs []*source.QueryResult) ([][]string, error) {
+func resultToTotals(qrs []*source.ClusterResult) ([][]string, error) {
 	if len(qrs) == 0 {
 		return [][]string{}, fmt.Errorf("not enough data available in the selected time range")
 	}
 
 	result := qrs[0]
 	totals := [][]string{}
-	for _, value := range result.Values {
+	for _, value := range result.Data {
 		d0 := fmt.Sprintf("%f", value.Timestamp)
 		d1 := fmt.Sprintf("%f", value.Value)
 		toAppend := []string{
@@ -1054,10 +1054,10 @@ func ClusterCostsOverTime(dataSource source.OpenCostDataSource, provider models.
 
 	grp := source.NewQueryGroup()
 
-	qCores := grp.With(dataSource.QueryClusterCores(start, end, window))
-	qRAM := grp.With(dataSource.QueryClusterRAM(start, end, window))
-	qStorage := grp.With(dataSource.QueryClusterStorageByProvider(providerName, start, end, window))
-	qTotal := grp.With(dataSource.QueryClusterTotalByProvider(providerName, start, end, window))
+	qCores := source.WithGroup(grp, dataSource.QueryClusterCores(start, end, window))
+	qRAM := source.WithGroup(grp, dataSource.QueryClusterRAM(start, end, window))
+	qStorage := source.WithGroup(grp, dataSource.QueryClusterStorageByProvider(providerName, start, end, window))
+	qTotal := source.WithGroup(grp, dataSource.QueryClusterTotalByProvider(providerName, start, end, window))
 
 	resultClusterCores, _ := qCores.Await()
 	resultClusterRAM, _ := qRAM.Await()
@@ -1090,7 +1090,7 @@ func ClusterCostsOverTime(dataSource source.OpenCostDataSource, provider models.
 		// If clusterTotal query failed, it's likely because there are no PVs, which
 		// causes the qTotal query to return no data. Instead, query only node costs.
 		// If that fails, return an error because something is actually wrong.
-		qNodes := grp.With(dataSource.QueryClusterNodesByProvider(providerName, start, end, window))
+		qNodes := source.WithGroup(grp, dataSource.QueryClusterNodesByProvider(providerName, start, end, window))
 
 		resultNodes, err := qNodes.Await()
 		if err != nil {
@@ -1112,20 +1112,31 @@ func ClusterCostsOverTime(dataSource source.OpenCostDataSource, provider models.
 	}, nil
 }
 
-func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo []*source.QueryResult, cp models.Provider, window opencost.Window) {
+func pvCosts(
+	diskMap map[DiskIdentifier]*Disk,
+	resolution time.Duration,
+	resActiveMins []*source.PVActiveMinutesResult,
+	resPVSize []*source.PVBytesResult,
+	resPVCost []*source.PVPricePerGiBHourResult,
+	resPVUsedAvg []*source.PVUsedAvgResult,
+	resPVUsedMax []*source.PVUsedMaxResult,
+	resPVCInfo []*source.PVCInfoResult,
+	cp models.Provider,
+	window opencost.Window,
+) {
 	for _, result := range resActiveMins {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("persistentvolume")
-		if err != nil {
+		name := result.PersistentVolume
+		if name == "" {
 			log.Warnf("ClusterDisks: active mins missing pv name")
 			continue
 		}
 
-		if len(result.Values) == 0 {
+		if len(result.Data) == 0 {
 			continue
 		}
 
@@ -1138,7 +1149,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			}
 		}
 
-		s, e := calculateStartAndEnd(result, resolution, window)
+		s, e := calculateStartAndEnd(result.Data, resolution, window)
 		mins := e.Sub(s).Minutes()
 
 		diskMap[key].End = e
@@ -1147,20 +1158,20 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVSize {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("persistentvolume")
-		if err != nil {
+		name := result.PersistentVolume
+		if name == "" {
 			log.Warnf("ClusterDisks: PV size data missing persistentvolume")
 			continue
 		}
 
 		// TODO niko/assets storage class
 
-		bytes := result.Values[0].Value
+		bytes := result.Data[0].Value
 		key := DiskIdentifier{cluster, name}
 		if _, ok := diskMap[key]; !ok {
 			diskMap[key] = &Disk{
@@ -1179,13 +1190,13 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetString("persistentvolume")
-		if err != nil {
+		name := result.PersistentVolume
+		if name == "" {
 			log.Warnf("ClusterDisks: PV cost data missing persistentvolume")
 			continue
 		}
@@ -1203,7 +1214,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 			cost = customPVCost
 		} else {
-			cost = result.Values[0].Value
+			cost = result.Data[0].Value
 		}
 
 		key := DiskIdentifier{cluster, name}
@@ -1216,25 +1227,26 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		}
 
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
-		providerID, _ := result.GetProviderID() // just put the providerID set up here, it's the simplest query.
+		providerID := result.ProviderID // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {
 			diskMap[key].ProviderID = provider.ParsePVID(providerID)
 		}
 	}
 
 	for _, result := range resPVUsedAvg {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		claimName, err := result.GetString("persistentvolumeclaim")
-		if err != nil {
+		claimName := result.PersistentVolumeClaim
+		if claimName == "" {
 			log.Debugf("ClusterDisks: pv usage data missing persistentvolumeclaim")
 			continue
 		}
-		claimNamespace, err := result.GetNamespace()
-		if err != nil {
+
+		claimNamespace := result.Namespace
+		if claimNamespace == "" {
 			log.Debugf("ClusterDisks: pv usage data missing namespace")
 			continue
 		}
@@ -1243,25 +1255,25 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 
 		for _, thatRes := range resPVCInfo {
 
-			thatCluster, err := thatRes.GetCluster()
-			if err != nil {
+			thatCluster := thatRes.Cluster
+			if thatCluster == "" {
 				thatCluster = env.GetClusterID()
 			}
 
-			thatVolumeName, err := thatRes.GetString("volumename")
-			if err != nil {
+			thatVolumeName := thatRes.VolumeName
+			if thatVolumeName == "" {
 				log.Debugf("ClusterDisks: pv claim data missing volumename")
 				continue
 			}
 
-			thatClaimName, err := thatRes.GetString("persistentvolumeclaim")
-			if err != nil {
+			thatClaimName := thatRes.PersistentVolumeClaim
+			if thatClaimName == "" {
 				log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 				continue
 			}
 
-			thatClaimNamespace, err := thatRes.GetNamespace()
-			if err != nil {
+			thatClaimNamespace := thatRes.Namespace
+			if thatClaimNamespace == "" {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
 				continue
 			}
@@ -1271,7 +1283,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			}
 		}
 
-		usage := result.Values[0].Value
+		usage := result.Data[0].Value
 
 		key := DiskIdentifier{cluster, volumeName}
 
@@ -1286,19 +1298,19 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 	}
 
 	for _, result := range resPVUsedMax {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		claimName, err := result.GetString("persistentvolumeclaim")
-		if err != nil {
+		claimName := result.PersistentVolumeClaim
+		if claimName == "" {
 			log.Debugf("ClusterDisks: pv usage data missing persistentvolumeclaim")
 			continue
 		}
 
-		claimNamespace, err := result.GetNamespace()
-		if err != nil {
+		claimNamespace := result.Namespace
+		if claimNamespace == "" {
 			log.Debugf("ClusterDisks: pv usage data missing namespace")
 			continue
 		}
@@ -1306,26 +1318,25 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		var volumeName string
 
 		for _, thatRes := range resPVCInfo {
-
-			thatCluster, err := thatRes.GetCluster()
-			if err != nil {
+			thatCluster := thatRes.Cluster
+			if thatCluster == "" {
 				thatCluster = env.GetClusterID()
 			}
 
-			thatVolumeName, err := thatRes.GetString("volumename")
-			if err != nil {
+			thatVolumeName := thatRes.VolumeName
+			if thatVolumeName == "" {
 				log.Debugf("ClusterDisks: pv claim data missing volumename")
 				continue
 			}
 
-			thatClaimName, err := thatRes.GetString("persistentvolumeclaim")
-			if err != nil {
+			thatClaimName := thatRes.PersistentVolumeClaim
+			if thatClaimName == "" {
 				log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 				continue
 			}
 
-			thatClaimNamespace, err := thatRes.GetNamespace()
-			if err != nil {
+			thatClaimNamespace := thatRes.Namespace
+			if thatClaimNamespace == "" {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
 				continue
 			}
@@ -1335,7 +1346,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			}
 		}
 
-		usage := result.Values[0].Value
+		usage := result.Data[0].Value
 
 		key := DiskIdentifier{cluster, volumeName}
 

+ 136 - 113
pkg/costmodel/cluster_helpers.go

@@ -10,6 +10,7 @@ import (
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
+	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/pkg/env"
 )
 
@@ -31,7 +32,7 @@ func mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2 map[nodeIdentifi
 }
 
 func buildCPUCostMap(
-	resNodeCPUCost []*source.QueryResult,
+	resNodeCPUCost []*source.NodeCPUPricePerHrResult,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
 ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
@@ -45,19 +46,19 @@ func buildCPUCostMap(
 	}
 
 	for _, result := range resNodeCPUCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: CPU cost data missing node")
 			continue
 		}
 
-		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetProviderID()
+		nodeType := result.InstanceType
+		providerID := result.ProviderID
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -87,9 +88,7 @@ func buildCPUCostMap(
 			cpuCost = customCPUCost
 
 		} else {
-
-			cpuCost = result.Values[0].Value
-
+			cpuCost = result.Data[0].Value
 		}
 
 		clusterAndNameToType[keyNon] = nodeType
@@ -101,7 +100,7 @@ func buildCPUCostMap(
 }
 
 func buildRAMCostMap(
-	resNodeRAMCost []*source.QueryResult,
+	resNodeRAMCost []*source.NodeRAMPricePerGiBHrResult,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
 ) (map[NodeIdentifier]float64, map[nodeIdentifierNoProviderID]string) {
@@ -115,19 +114,19 @@ func buildRAMCostMap(
 	}
 
 	for _, result := range resNodeRAMCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: RAM cost data missing node")
 			continue
 		}
 
-		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetProviderID()
+		nodeType := result.InstanceType
+		providerID := result.ProviderID
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -157,9 +156,7 @@ func buildRAMCostMap(
 			ramCost = customRAMCost
 
 		} else {
-
-			ramCost = result.Values[0].Value
-
+			ramCost = result.Data[0].Value
 		}
 
 		clusterAndNameToType[keyNon] = nodeType
@@ -172,7 +169,7 @@ func buildRAMCostMap(
 }
 
 func buildGPUCostMap(
-	resNodeGPUCost []*source.QueryResult,
+	resNodeGPUCost []*source.NodeGPUPricePerHrResult,
 	gpuCountMap map[NodeIdentifier]float64,
 	cp models.Provider,
 	preemptible map[NodeIdentifier]bool,
@@ -188,19 +185,19 @@ func buildGPUCostMap(
 	}
 
 	for _, result := range resNodeGPUCost {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: GPU cost data missing node")
 			continue
 		}
 
-		nodeType, _ := result.GetString("instance_type")
-		providerID, _ := result.GetProviderID()
+		nodeType := result.InstanceType
+		providerID := result.ProviderID
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -230,9 +227,7 @@ func buildGPUCostMap(
 			gpuCost = customGPUCost
 
 		} else {
-
-			gpuCost = result.Values[0].Value
-
+			gpuCost = result.Data[0].Value
 		}
 
 		clusterAndNameToType[keyNon] = nodeType
@@ -249,23 +244,23 @@ func buildGPUCostMap(
 	return gpuCostMap, clusterAndNameToType
 }
 
-func buildGPUCountMap(resNodeGPUCount []*source.QueryResult) map[NodeIdentifier]float64 {
+func buildGPUCountMap(resNodeGPUCount []*source.NodeGPUCountResult) map[NodeIdentifier]float64 {
 	gpuCountMap := make(map[NodeIdentifier]float64)
 
 	for _, result := range resNodeGPUCount {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: GPU count data missing node")
 			continue
 		}
 
-		gpuCount := result.Values[0].Value
-		providerID, _ := result.GetProviderID()
+		gpuCount := result.Data[0].Value
+		providerID := result.ProviderID
 
 		key := NodeIdentifier{
 			Cluster:    cluster,
@@ -278,22 +273,22 @@ func buildGPUCountMap(resNodeGPUCount []*source.QueryResult) map[NodeIdentifier]
 	return gpuCountMap
 }
 
-func buildCPUCoresMap(resNodeCPUCores []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildCPUCoresMap(resNodeCPUCores []*source.NodeCPUCoresCapacityResult) map[nodeIdentifierNoProviderID]float64 {
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeCPUCores {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: CPU cores data missing node")
 			continue
 		}
 
-		cpuCores := result.Values[0].Value
+		cpuCores := result.Data[0].Value
 
 		key := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -305,22 +300,22 @@ func buildCPUCoresMap(resNodeCPUCores []*source.QueryResult) map[nodeIdentifierN
 	return m
 }
 
-func buildRAMBytesMap(resNodeRAMBytes []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildRAMBytesMap(resNodeRAMBytes []*source.NodeRAMBytesCapacityResult) map[nodeIdentifierNoProviderID]float64 {
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMBytes {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetNode()
-		if err != nil {
+		name := result.Node
+		if name == "" {
 			log.Warnf("ClusterNodes: RAM bytes data missing node")
 			continue
 		}
 
-		ramBytes := result.Values[0].Value
+		ramBytes := result.Data[0].Value
 
 		key := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -333,7 +328,7 @@ func buildRAMBytesMap(resNodeRAMBytes []*source.QueryResult) map[nodeIdentifierN
 }
 
 // Mapping of cluster/node=cpu for computing resource efficiency
-func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.QueryResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
+func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.NodeCPUModeTotalResult) map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown {
 	cpuBreakdownMap := make(map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown)
 
 	// Mapping of cluster/node=cpu for computing resource efficiency
@@ -344,20 +339,20 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.QueryResult) map[nodeIde
 	// Build intermediate structures for CPU usage by (cluster, node) and by
 	// (cluster, node, mode) for computing resouce efficiency
 	for _, result := range resNodeCPUModeTotal {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		node, err := result.GetString("kubernetes_node")
-		if err != nil {
+		node := result.Node
+		if node == "" {
 			log.DedupedWarningf(5, "ClusterNodes: CPU mode data missing node")
 			continue
 		}
 
-		mode, err := result.GetString("mode")
-		if err != nil {
-			log.Warnf("ClusterNodes: unable to read CPU mode: %s", err)
+		mode := result.Mode
+		if mode == "" {
+			log.Warnf("ClusterNodes: unable to read CPU mode data.")
 			mode = "other"
 		}
 
@@ -366,7 +361,7 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.QueryResult) map[nodeIde
 			Name:    node,
 		}
 
-		total := result.Values[0].Value
+		total := result.Data[0].Value
 
 		// Increment total
 		clusterNodeCPUTotal[key] += total
@@ -446,23 +441,22 @@ func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoPro
 	return m
 }
 
-func buildRAMUserPctMap(resNodeRAMUserPct []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
-
+func buildRAMUserPctMap(resNodeRAMUserPct []*source.NodeRAMUserPercentResult) map[nodeIdentifierNoProviderID]float64 {
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMUserPct {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterNodes: RAM user percent missing node")
 			continue
 		}
 
-		pct := result.Values[0].Value
+		pct := result.Data[0].Value
 
 		key := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -475,23 +469,23 @@ func buildRAMUserPctMap(resNodeRAMUserPct []*source.QueryResult) map[nodeIdentif
 	return m
 }
 
-func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.QueryResult) map[nodeIdentifierNoProviderID]float64 {
+func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.NodeRAMSystemPercentResult) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 
 	for _, result := range resNodeRAMSystemPct {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		name, err := result.GetInstance()
-		if err != nil {
+		name := result.Instance
+		if name == "" {
 			log.Warnf("ClusterNodes: RAM system percent missing node")
 			continue
 		}
 
-		pct := result.Values[0].Value
+		pct := result.Data[0].Value
 
 		key := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -511,34 +505,38 @@ type activeData struct {
 }
 
 // cluster management key gen
-func clusterManagementKeyGen(result *source.QueryResult) (ClusterManagementIdentifier, bool) {
-	cluster, err := result.GetCluster()
-	if err != nil {
+func clusterManagementKeyGen(result *source.ClusterManagementDurationResult) (ClusterManagementIdentifier, bool) {
+	cluster := result.Cluster
+	if cluster == "" {
 		cluster = env.GetClusterID()
 	}
 
-	provisionerName, _ := result.GetString("provisioner_name")
+	provisionerName := result.Provisioner
+
 	return ClusterManagementIdentifier{
 		Cluster:     cluster,
 		Provisioner: provisionerName,
 	}, true
 }
 
+func clusterManagementValues(result *source.ClusterManagementDurationResult) []*util.Vector {
+	return result.Data
+}
+
 // node key gen
-func nodeKeyGen(result *source.QueryResult) (NodeIdentifier, bool) {
-	cluster, err := result.GetCluster()
-	if err != nil {
+func nodeKeyGen(result *source.NodeActiveMinutesResult) (NodeIdentifier, bool) {
+	cluster := result.Cluster
+	if cluster == "" {
 		cluster = env.GetClusterID()
 	}
 
-	name, err := result.GetNode()
-	if err != nil {
+	name := result.Node
+	if name == "" {
 		log.Warnf("ClusterNodes: active mins missing node")
 		return NodeIdentifier{}, false
 	}
 
-	providerID, _ := result.GetProviderID()
-
+	providerID := result.ProviderID
 	return NodeIdentifier{
 		Cluster:    cluster,
 		Name:       name,
@@ -546,26 +544,30 @@ func nodeKeyGen(result *source.QueryResult) (NodeIdentifier, bool) {
 	}, true
 }
 
-func loadBalancerKeyGen(result *source.QueryResult) (LoadBalancerIdentifier, bool) {
-	cluster, err := result.GetCluster()
-	if err != nil {
+func nodeValues(result *source.NodeActiveMinutesResult) []*util.Vector {
+	return result.Data
+}
+
+func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdentifier, bool) {
+	cluster := result.Cluster
+	if cluster == "" {
 		cluster = env.GetClusterID()
 	}
 
-	namespace, err := result.GetNamespace()
-	if err != nil {
+	namespace := result.Namespace
+	if namespace == "" {
 		log.Warnf("ClusterLoadBalancers: LB cost data missing namespace")
 		return LoadBalancerIdentifier{}, false
 	}
 
-	name, err := result.GetString("service_name")
-	if err != nil {
+	name := result.Service
+	if name == "" {
 		log.Warnf("ClusterLoadBalancers: LB cost data missing service_name")
 		return LoadBalancerIdentifier{}, false
 	}
 
-	ingressIp, err := result.GetString("ingress_ip")
-	if err != nil {
+	ingressIp := result.IngressIP
+	if ingressIp == "" {
 		log.DedupedWarningf(5, "ClusterLoadBalancers: LB cost data missing ingress_ip")
 		// only update asset cost when an actual IP was returned
 		return LoadBalancerIdentifier{}, false
@@ -579,16 +581,22 @@ func loadBalancerKeyGen(result *source.QueryResult) (LoadBalancerIdentifier, boo
 	}, true
 }
 
-func buildActiveDataMap[T comparable](results []*source.QueryResult, keyGen func(*source.QueryResult) (T, bool), resolution time.Duration, window opencost.Window) map[T]activeData {
+func lbValues(result *source.LBActiveMinutesResult) []*util.Vector {
+	return result.Data
+}
+
+func buildActiveDataMap[T comparable, U any](results []*U, keyGen func(*U) (T, bool), valuesFunc func(*U) []*util.Vector, resolution time.Duration, window opencost.Window) map[T]activeData {
 	m := make(map[T]activeData)
 
 	for _, result := range results {
 		key, ok := keyGen(result)
-		if !ok || len(result.Values) == 0 {
+		values := valuesFunc(result)
+
+		if !ok || len(values) == 0 {
 			continue
 		}
 
-		s, e := calculateStartAndEnd(result, resolution, window)
+		s, e := calculateStartAndEnd(values, resolution, window)
 		mins := e.Sub(s).Minutes()
 
 		m[key] = activeData{
@@ -604,19 +612,32 @@ func buildActiveDataMap[T comparable](results []*source.QueryResult, keyGen func
 // Determine preemptibility with node labels
 // node id -> is preemptible?
 func buildPreemptibleMap(
-	resIsSpot []*source.QueryResult,
+	resIsSpot []*source.NodeIsSpotResult,
 ) map[NodeIdentifier]bool {
 
 	m := make(map[NodeIdentifier]bool)
 
 	for _, result := range resIsSpot {
-		key, ok := nodeKeyGen(result)
-		if !ok {
+		cluster := result.Cluster
+		if cluster == "" {
+			cluster = env.GetClusterID()
+		}
+
+		name := result.Node
+		if name == "" {
+			log.Warnf("ClusterNodes: active mins missing node")
 			continue
 		}
 
+		providerID := result.ProviderID
+		key := NodeIdentifier{
+			Cluster:    cluster,
+			Name:       name,
+			ProviderID: provider.ParseID(providerID),
+		}
+
 		// GCP preemptible label
-		pre := result.Values[0].Value
+		pre := result.Data[0].Value
 
 		// TODO(michaelmdresser): check this condition at merge time?
 		// if node, ok := nodeMap[key]; pre > 0.0 && ok {
@@ -632,27 +653,28 @@ func buildPreemptibleMap(
 	return m
 }
 
-func buildAssetsPVCMap(resPVCInfo []*source.QueryResult) map[DiskIdentifier]*Disk {
+func buildAssetsPVCMap(resPVCInfo []*source.PVCInfoResult) map[DiskIdentifier]*Disk {
 	diskMap := map[DiskIdentifier]*Disk{}
 
 	for _, result := range resPVCInfo {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
 
-		volumeName, err := result.GetString("volumename")
-		if err != nil {
+		volumeName := result.VolumeName
+		if volumeName == "" {
 			log.Debugf("ClusterDisks: pv claim data missing volumename")
 			continue
 		}
-		claimName, err := result.GetString("persistentvolumeclaim")
-		if err != nil {
+		claimName := result.PersistentVolumeClaim
+		if claimName == "" {
 			log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 			continue
 		}
-		claimNamespace, err := result.GetNamespace()
-		if err != nil {
+
+		claimNamespace := result.Namespace
+		if claimNamespace == "" {
 			log.Debugf("ClusterDisks: pv claim data missing namespace")
 			continue
 		}
@@ -675,19 +697,20 @@ func buildAssetsPVCMap(resPVCInfo []*source.QueryResult) map[DiskIdentifier]*Dis
 }
 
 func buildLabelsMap(
-	resLabels []*source.QueryResult,
+	resLabels []*source.NodeLabelsResult,
 ) map[nodeIdentifierNoProviderID]map[string]string {
 
 	m := make(map[nodeIdentifierNoProviderID]map[string]string)
 
 	// Copy labels into node
 	for _, result := range resLabels {
-		cluster, err := result.GetCluster()
-		if err != nil {
+		cluster := result.Cluster
+		if cluster == "" {
 			cluster = env.GetClusterID()
 		}
-		node, err := result.GetNode()
-		if err != nil {
+
+		node := result.Node
+		if node == "" {
 			log.DedupedWarningf(5, "ClusterNodes: label data missing node")
 			continue
 		}
@@ -703,7 +726,7 @@ func buildLabelsMap(
 		if _, ok := m[key]; !ok {
 			m[key] = map[string]string{}
 		}
-		for k, l := range result.GetLabels() {
+		for k, l := range result.Labels {
 			m[key][k] = l
 		}
 	}

+ 19 - 6
pkg/costmodel/cluster_helpers_test.go

@@ -884,7 +884,8 @@ func TestBuildGPUCostMap(t *testing.T) {
 				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
 			}
 			testPreemptible := make(map[NodeIdentifier]bool)
-			result, _ := buildGPUCostMap(testCase.promResult, testCase.countMap, testProvider, testPreemptible)
+			gpuPrices := source.DecodeAll(testCase.promResult, source.DecodeNodeGPUPricePerHrResult)
+			result, _ := buildGPUCostMap(gpuPrices, testCase.countMap, testProvider, testPreemptible)
 			if !reflect.DeepEqual(result, testCase.expected) {
 				t.Errorf("buildGPUCostMap case %s failed. Got %+v but expected %+v", testCase.name, result, testCase.expected)
 			}
@@ -1088,16 +1089,27 @@ func TestAssetCustompricing(t *testing.T) {
 			testProvider.UpdateConfigFromConfigMap(testCase.customPricingMap)
 
 			testPreemptible := make(map[NodeIdentifier]bool)
-			cpuMap, _ := buildCPUCostMap(nodePromResult, testProvider, testPreemptible)
-			ramMap, _ := buildRAMCostMap(nodePromResult, testProvider, testPreemptible)
-			gpuMap, _ := buildGPUCostMap(nodePromResult, gpuCountMap, testProvider, testPreemptible)
+			nodeCpuResult := source.DecodeAll(nodePromResult, source.DecodeNodeCPUPricePerHrResult)
+			nodeRamResult := source.DecodeAll(nodePromResult, source.DecodeNodeRAMPricePerGiBHrResult)
+			nodeGpuResult := source.DecodeAll(nodePromResult, source.DecodeNodeGPUPricePerHrResult)
+
+			cpuMap, _ := buildCPUCostMap(nodeCpuResult, testProvider, testPreemptible)
+			ramMap, _ := buildRAMCostMap(nodeRamResult, testProvider, testPreemptible)
+			gpuMap, _ := buildGPUCostMap(nodeGpuResult, gpuCountMap, testProvider, testPreemptible)
 
 			cpuResult := cpuMap[nodeKey]
 			ramResult := ramMap[nodeKey]
 			gpuResult := gpuMap[nodeKey]
 
 			diskMap := map[DiskIdentifier]*Disk{}
-			pvCosts(diskMap, time.Hour, pvMinsPromResult, pvSizePromResult, pvCostPromResult, pvAvgUsagePromResult, pvMaxUsagePromResult, pvInfoPromResult, testProvider, window)
+			pvMinsResult := source.DecodeAll(pvMinsPromResult, source.DecodePVActiveMinutesResult)
+			pvSizeResult := source.DecodeAll(pvSizePromResult, source.DecodePVBytesResult)
+			pvCostResult := source.DecodeAll(pvCostPromResult, source.DecodePVPricePerGiBHourResult)
+			pvUsedAvgResult := source.DecodeAll(pvAvgUsagePromResult, source.DecodePVUsedAvgResult)
+			pvMaxUsageResult := source.DecodeAll(pvMaxUsagePromResult, source.DecodePVUsedMaxResult)
+			pvcInfoResult := source.DecodeAll(pvInfoPromResult, source.DecodePVCInfoResult)
+
+			pvCosts(diskMap, time.Hour, pvMinsResult, pvSizeResult, pvCostResult, pvUsedAvgResult, pvMaxUsageResult, pvcInfoResult, testProvider, window)
 
 			diskResult := diskMap[DiskIdentifier{"cluster1", "pvc1"}].Cost
 
@@ -1167,7 +1179,8 @@ func TestBuildLabelsMap(t *testing.T) {
 		),
 	}
 
-	nodeLabelMap := buildLabelsMap(nodePromResult)
+	nodeLabelsResult := source.DecodeAll(nodePromResult, source.DecodeNodeLabelsResult)
+	nodeLabelMap := buildLabelsMap(nodeLabelsResult)
 	// Test that for all nodes and all label keys in the map there isn't a key with the label_ prefix.
 	for _, labelMap := range nodeLabelMap {
 		for key, value := range labelMap {

+ 38 - 0
pkg/costmodel/containerkeys.go

@@ -194,3 +194,41 @@ func NewContainerMetricFromResult(result *source.QueryResult, defaultClusterID s
 		key:           containerMetricKey(namespace, podName, containerName, nodeName, clusterID),
 	}, nil
 }
+
+func NewContainerMetricFrom(result *source.ContainerMetricResult, defaultClusterID string) (*ContainerMetric, error) {
+	containerName := result.Container
+	if containerName == "" {
+		return nil, ErrNoContainer
+	}
+
+	podName := result.Pod
+	if podName == "" {
+		return nil, ErrNoPodName
+	}
+
+	namespace := result.Namespace
+	if namespace == "" {
+		return nil, ErrNoNamespaceName
+	}
+
+	nodeName := result.Node
+	if nodeName == "" {
+		log.Debugf("metric vector does not have node name")
+		nodeName = ""
+	}
+
+	clusterID := result.Cluster
+	if clusterID == "" {
+		log.Debugf("metric vector does not have cluster id")
+		clusterID = defaultClusterID
+	}
+
+	return &ContainerMetric{
+		ContainerName: containerName,
+		PodName:       podName,
+		Namespace:     namespace,
+		NodeName:      nodeName,
+		ClusterID:     clusterID,
+		key:           containerMetricKey(namespace, podName, containerName, nodeName, clusterID),
+	}, nil
+}

+ 11 - 11
pkg/costmodel/costmodel.go

@@ -139,11 +139,11 @@ func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData
 
 	grp := source.NewQueryGroup()
 
-	resChRAMUsage := grp.With(ds.QueryRAMUsageAvg(start, end))
-	resChCPUUsage := grp.With(ds.QueryCPUUsageAvg(start, end))
-	resChNetZoneRequests := grp.With(ds.QueryNetZoneGiB(start, end))
-	resChNetRegionRequests := grp.With(ds.QueryNetRegionGiB(start, end))
-	resChNetInternetRequests := grp.With(ds.QueryNetInternetGiB(start, end))
+	resChRAMUsage := source.WithGroup(grp, ds.QueryRAMUsageAvg(start, end))
+	resChCPUUsage := source.WithGroup(grp, ds.QueryCPUUsageAvg(start, end))
+	resChNetZoneRequests := source.WithGroup(grp, ds.QueryNetZoneGiB(start, end))
+	resChNetRegionRequests := source.WithGroup(grp, ds.QueryNetRegionGiB(start, end))
+	resChNetInternetRequests := source.WithGroup(grp, ds.QueryNetInternetGiB(start, end))
 
 	// Pull pod information from k8s API
 	podlist := cm.Cache.GetAllPods()
@@ -643,9 +643,9 @@ func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[
 
 		grp := source.NewQueryGroup()
 
-		cpuCostResCh := grp.With(dataSource.QueryNodeCPUPricePerHr(start, end))
-		ramCostResCh := grp.With(dataSource.QueryNodeRAMPricePerGiBHr(start, end))
-		gpuCostResCh := grp.With(dataSource.QueryNodeGPUPricePerHr(start, end))
+		cpuCostResCh := source.WithGroup(grp, dataSource.QueryNodeCPUPricePerHr(start, end))
+		ramCostResCh := source.WithGroup(grp, dataSource.QueryNodeRAMPricePerGiBHr(start, end))
+		gpuCostResCh := source.WithGroup(grp, dataSource.QueryNodeGPUPricePerHr(start, end))
 
 		cpuCostRes, _ := cpuCostResCh.Await()
 		ramCostRes, _ := ramCostResCh.Await()
@@ -655,15 +655,15 @@ func findDeletedNodeInfo(dataSource source.OpenCostDataSource, missingNodes map[
 			return grp.Error()
 		}
 
-		cpuCosts, err := getCost(cpuCostRes)
+		cpuCosts, err := getCost(cpuCostRes, cpuCostNode, cpuCostData)
 		if err != nil {
 			return err
 		}
-		ramCosts, err := getCost(ramCostRes)
+		ramCosts, err := getCost(ramCostRes, ramCostNode, ramCostData)
 		if err != nil {
 			return err
 		}
-		gpuCosts, err := getCost(gpuCostRes)
+		gpuCosts, err := getCost(gpuCostRes, gpuCostNode, gpuCostData)
 		if err != nil {
 			return err
 		}

+ 87 - 106
pkg/costmodel/key.go

@@ -8,60 +8,20 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 )
 
-type containerKey struct {
-	Cluster   string
-	Namespace string
-	Pod       string
-	Container string
-}
-
-func (k containerKey) String() string {
-	return fmt.Sprintf("%s/%s/%s/%s", k.Cluster, k.Namespace, k.Pod, k.Container)
-}
-
-func newContainerKey(cluster, namespace, pod, container string) containerKey {
-	return containerKey{
-		Cluster:   cluster,
-		Namespace: namespace,
-		Pod:       pod,
-		Container: container,
-	}
-}
-
-// resultContainerKey converts a Prometheus query result to a containerKey by
-// looking up values associated with the given label names. For example,
-// passing "cluster_id" for clusterLabel will use the value of the label
-// "cluster_id" as the containerKey's Cluster field. If a given field does not
-// exist on the result, an error is returned. (The only exception to that is
-// clusterLabel, which we expect may not exist, but has a default value.)
-func resultContainerKey(res *source.QueryResult) (containerKey, error) {
-	key := containerKey{}
-
-	cluster, err := res.GetCluster()
-	if err != nil {
+func newResultPodKey(cluster string, namespace string, pod string) (podKey, error) {
+	if cluster == "" {
 		cluster = env.GetClusterID()
 	}
-	key.Cluster = cluster
 
-	namespace, err := res.GetNamespace()
-	if err != nil {
-		return key, err
-	}
-	key.Namespace = namespace
-
-	pod, err := res.GetPod()
-	if err != nil {
-		return key, err
+	if namespace == "" {
+		return podKey{}, fmt.Errorf("namespace is required")
 	}
-	key.Pod = pod
 
-	container, err := res.GetContainer()
-	if err != nil {
-		return key, err
+	if pod == "" {
+		return podKey{}, fmt.Errorf("pod is required")
 	}
-	key.Container = container
 
-	return key, nil
+	return newPodKey(cluster, namespace, pod), nil
 }
 
 type podKey struct {
@@ -158,6 +118,18 @@ func resultNamespaceKey(res *source.QueryResult) (namespaceKey, error) {
 	return key, nil
 }
 
+func newResultNamespaceKey(cluster string, namespace string) (namespaceKey, error) {
+	if cluster == "" {
+		cluster = env.GetClusterID()
+	}
+
+	if namespace == "" {
+		return namespaceKey{}, fmt.Errorf("namespace is required")
+	}
+
+	return newNamespaceKey(cluster, namespace), nil
+}
+
 type controllerKey struct {
 	Cluster        string
 	Namespace      string
@@ -178,72 +150,20 @@ func newControllerKey(cluster, namespace, controllerKind, controller string) con
 	}
 }
 
-// resultControllerKey converts a Prometheus query result to a controllerKey by
-// looking up values associated with the given label names. For example,
-// passing "cluster_id" for clusterLabel will use the value of the label
-// "cluster_id" as the controllerKey's Cluster field. If a given field does not
-// exist on the result, an error is returned. (The only exception to that is
-// clusterLabel, which we expect may not exist, but has a default value.)
-func resultControllerKey(controllerKind string, res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	key := controllerKey{}
-
-	cluster, err := res.GetCluster()
-	if err != nil {
+func newResultControllerKey(cluster, namespace, controller, controllerKind string) (controllerKey, error) {
+	if cluster == "" {
 		cluster = env.GetClusterID()
 	}
-	key.Cluster = cluster
 
-	namespace, err := res.GetNamespace()
-	if err != nil {
-		return key, err
+	if namespace == "" {
+		return controllerKey{}, fmt.Errorf("namespace is required")
 	}
-	key.Namespace = namespace
 
-	controller, err := res.GetString(controllerLabel)
-	if err != nil {
-		return key, err
+	if controller == "" {
+		return controllerKey{}, fmt.Errorf("controller is required")
 	}
-	key.Controller = controller
-
-	key.ControllerKind = controllerKind
-
-	return key, nil
-}
-
-// resultDeploymentKey creates a controllerKey for a Deployment.
-// (See resultControllerKey for more.)
-func resultDeploymentKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("deployment", res, controllerLabel)
-}
-
-// resultStatefulSetKey creates a controllerKey for a StatefulSet.
-// (See resultControllerKey for more.)
-func resultStatefulSetKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("statefulset", res, controllerLabel)
-}
-
-// resultDaemonSetKey creates a controllerKey for a DaemonSet.
-// (See resultControllerKey for more.)
-func resultDaemonSetKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("daemonset", res, controllerLabel)
-}
-
-// resultJobKey creates a controllerKey for a Job.
-// (See resultControllerKey for more.)
-func resultJobKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("job", res, controllerLabel)
-}
 
-// resultReplicaSetKey creates a controllerKey for a Job.
-// (See resultControllerKey for more.)
-func resultReplicaSetKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("replicaset", res, controllerLabel)
-}
-
-// resultReplicaSetRolloutKey creates a controllerKey for a Job.
-// (See resultControllerKey for more.)
-func resultReplicaSetRolloutKey(res *source.QueryResult, controllerLabel string) (controllerKey, error) {
-	return resultControllerKey("rollout", res, controllerLabel)
+	return newControllerKey(cluster, namespace, controllerKind, controller), nil
 }
 
 type serviceKey struct {
@@ -294,6 +214,22 @@ func resultServiceKey(res *source.QueryResult, serviceLabel string) (serviceKey,
 	return key, nil
 }
 
+func newResultServiceKey(cluster, namespace, service string) (serviceKey, error) {
+	if cluster == "" {
+		cluster = env.GetClusterID()
+	}
+
+	if namespace == "" {
+		return serviceKey{}, fmt.Errorf("namespace is required")
+	}
+
+	if service == "" {
+		return serviceKey{}, fmt.Errorf("service is required")
+	}
+
+	return newServiceKey(cluster, namespace, service), nil
+}
+
 type nodeKey struct {
 	Cluster string
 	Node    string
@@ -334,6 +270,18 @@ func resultNodeKey(res *source.QueryResult) (nodeKey, error) {
 	return key, nil
 }
 
+func newResultNodeKey(cluster string, node string) (nodeKey, error) {
+	if cluster == "" {
+		cluster = env.GetClusterID()
+	}
+
+	if node == "" {
+		return nodeKey{}, fmt.Errorf("node is required")
+	}
+
+	return newNodeKey(cluster, node), nil
+}
+
 type pvcKey struct {
 	Cluster               string
 	Namespace             string
@@ -382,6 +330,28 @@ func resultPVCKey(res *source.QueryResult, pvcLabel string) (pvcKey, error) {
 	return key, nil
 }
 
+// newResultPVCKey builds a pvcKey from the given cluster, namespace, and
+// persistent volume claim names. If cluster is empty, it falls back to the
+// configured default cluster ID (env.GetClusterID()); namespace and pvc are
+// required, and an error is returned when either is missing.
+func newResultPVCKey(cluster, namespace, pvc string) (pvcKey, error) {
+	if cluster == "" {
+		cluster = env.GetClusterID()
+	}
+
+	if namespace == "" {
+		return pvcKey{}, fmt.Errorf("namespace is required")
+	}
+
+	if pvc == "" {
+		return pvcKey{}, fmt.Errorf("persistentvolumeclaim is required")
+	}
+
+	return newPVCKey(cluster, namespace, pvc), nil
+}
+
 type pvKey struct {
 	Cluster          string
 	PersistentVolume string
@@ -421,3 +391,14 @@ func resultPVKey(res *source.QueryResult, persistentVolumeLabel string) (pvKey,
 
 	return key, nil
 }
+
+func newResultPVKey(cluster, pv string) (pvKey, error) {
+	if cluster == "" {
+		cluster = env.GetClusterID()
+	}
+	if pv == "" {
+		return pvKey{}, fmt.Errorf("persistentvolume is required")
+	}
+
+	return newPVKey(cluster, pv), nil
+}

+ 12 - 12
pkg/costmodel/networkcosts.go

@@ -1,7 +1,8 @@
 package costmodel
 
 import (
-	"github.com/opencost/opencost/core/pkg/log"
+	"fmt"
+
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
@@ -27,7 +28,7 @@ type NetworkUsageVector struct {
 
 // GetNetworkUsageData performs a join of the results of zone, region, and internet usage queries to return a single
 // map containing network costs for each namespace+pod
-func GetNetworkUsageData(zr []*source.QueryResult, rr []*source.QueryResult, ir []*source.QueryResult, defaultClusterID string) (map[string]*NetworkUsageData, error) {
+func GetNetworkUsageData(zr []*source.NetZoneGiBResult, rr []*source.NetRegionGiBResult, ir []*source.NetInternetGiBResult, defaultClusterID string) (map[string]*NetworkUsageData, error) {
 	zoneNetworkMap, err := getNetworkUsage(zr, defaultClusterID)
 	if err != nil {
 		return nil, err
@@ -137,23 +138,22 @@ func GetNetworkCost(usage *NetworkUsageData, cloud costAnalyzerCloud.Provider) (
 	return results, nil
 }
 
-func getNetworkUsage(qrs []*source.QueryResult, defaultClusterID string) (map[string]*NetworkUsageVector, error) {
+func getNetworkUsage(qrs []*source.NetworkGiBResult, defaultClusterID string) (map[string]*NetworkUsageVector, error) {
 	ncdmap := make(map[string]*NetworkUsageVector)
 
 	for _, val := range qrs {
-		podName, err := val.GetString("pod_name")
-		if err != nil {
-			return nil, err
+		podName := val.Pod
+		if podName == "" {
+			return nil, fmt.Errorf("network vector does not contain 'pod' or 'pod_name' field")
 		}
 
-		namespace, err := val.GetNamespace()
-		if err != nil {
-			return nil, err
+		namespace := val.Namespace
+		if namespace == "" {
+			return nil, fmt.Errorf("network vector does not contain 'namespace' field")
 		}
 
-		clusterID, err := val.GetCluster()
+		clusterID := val.Cluster
 		if clusterID == "" {
-			log.Debugf("Prometheus vector does not have cluster id")
 			clusterID = defaultClusterID
 		}
 
@@ -162,7 +162,7 @@ func getNetworkUsage(qrs []*source.QueryResult, defaultClusterID string) (map[st
 			ClusterID: clusterID,
 			Namespace: namespace,
 			PodName:   podName,
-			Values:    val.Values,
+			Values:    val.Data,
 		}
 	}
 	return ncdmap, nil

+ 40 - 16
pkg/costmodel/resultparsers.go

@@ -425,27 +425,27 @@ func GetServiceSelectorLabelsMetrics(qrs []*source.QueryResult, defaultClusterID
 	return toReturn, nil
 }
 
-func GetContainerMetricVector(qrs []*source.QueryResult, defaultClusterID string) (map[string][]*util.Vector, error) {
+func GetContainerMetricVector(qrs []*source.ContainerMetricResult, defaultClusterID string) (map[string][]*util.Vector, error) {
 	containerData := make(map[string][]*util.Vector)
 	for _, val := range qrs {
-		containerMetric, err := NewContainerMetricFromResult(val, defaultClusterID)
+		containerMetric, err := NewContainerMetricFrom(val, defaultClusterID)
 		if err != nil {
 			return nil, err
 		}
 
-		containerData[containerMetric.Key()] = val.Values
+		containerData[containerMetric.Key()] = val.Data
 	}
 	return containerData, nil
 }
 
-func GetContainerMetricVectors(qrs []*source.QueryResult, defaultClusterID string) (map[string][]*util.Vector, error) {
+func GetContainerMetricVectors(qrs []*source.ContainerMetricResult, defaultClusterID string) (map[string][]*util.Vector, error) {
 	containerData := make(map[string][]*util.Vector)
 	for _, val := range qrs {
-		containerMetric, err := NewContainerMetricFromResult(val, defaultClusterID)
+		containerMetric, err := NewContainerMetricFrom(val, defaultClusterID)
 		if err != nil {
 			return nil, err
 		}
-		containerData[containerMetric.Key()] = val.Values
+		containerData[containerMetric.Key()] = val.Data
 	}
 	return containerData, nil
 }
@@ -462,35 +462,59 @@ func GetNormalizedContainerMetricVectors(qrs []*source.QueryResult, normalizatio
 	return containerData, nil
 }
 
-func getCost(qrs []*source.QueryResult) (map[string][]*util.Vector, error) {
+func getCost[T any](qrs []*T, nodeFunc func(*T) string, dataFunc func(*T) []*util.Vector) (map[string][]*util.Vector, error) {
 	toReturn := make(map[string][]*util.Vector)
 
 	for _, val := range qrs {
-		instance, err := val.GetNode()
-		if err != nil {
-			return toReturn, err
+		instance := nodeFunc(val)
+		if instance == "" {
+			return toReturn, fmt.Errorf("missing node field")
 		}
 
-		toReturn[instance] = val.Values
+		toReturn[instance] = dataFunc(val)
 	}
 
 	return toReturn, nil
 }
 
-func parsePodLabels(qrs []*source.QueryResult) (map[string]map[string]string, error) {
+func cpuCostNode(res *source.NodeCPUPricePerHrResult) string {
+	return res.Node
+}
+
+func cpuCostData(res *source.NodeCPUPricePerHrResult) []*util.Vector {
+	return res.Data
+}
+
+func ramCostNode(res *source.NodeRAMPricePerGiBHrResult) string {
+	return res.Node
+}
+
+func ramCostData(res *source.NodeRAMPricePerGiBHrResult) []*util.Vector {
+	return res.Data
+}
+
+func gpuCostNode(res *source.NodeGPUPricePerHrResult) string {
+	return res.Node
+}
+
+func gpuCostData(res *source.NodeGPUPricePerHrResult) []*util.Vector {
+	return res.Data
+}
+
+func parsePodLabels(qrs []*source.PodLabelsResult) (map[string]map[string]string, error) {
 	podLabels := map[string]map[string]string{}
 
 	for _, result := range qrs {
-		pod, err := result.GetPod()
-		if err != nil {
+		pod := result.Pod
+		if pod == "" {
 			return podLabels, errors.New("missing pod field")
 		}
 
 		if _, ok := podLabels[pod]; ok {
-			podLabels[pod] = result.GetLabels()
+			podLabels[pod] = result.Labels
 		} else {
 			podLabels[pod] = map[string]string{}
-			podLabels[pod] = result.GetLabels()
+			podLabels[pod] = result.Labels
 		}
 	}
 

برخی فایل ها در این مقایسه diff نمایش داده نمی شوند زیرا تعداد فایل ها بسیار زیاد است