Просмотр исходного кода

Merge pull request #786 from kubecost/sean/gpu-hours

Sean/gpu hours
Sean Holcomb 5 лет назад
Родитель
Commit
f40f20eb67

+ 42 - 16
pkg/costmodel/allocation.go

@@ -18,7 +18,7 @@ import (
 
 const (
 	queryFmtPods              = `avg(kube_pod_container_status_running{}) by (pod, namespace, cluster_id)[%s:%s]%s`
-	queryFmtRAMBytesAllocated = `avg(avg_over_time(container_memory_allocation_bytes{container!="", container!="POD", node!=""}[%s]%s)) by (container, pod, namespace, node, cluster_id)`
+	queryFmtRAMBytesAllocated = `avg(avg_over_time(container_memory_allocation_bytes{container!="", container!="POD", node!=""}[%s]%s)) by (container, pod, namespace, node, cluster_id, provider_id)`
 	queryFmtRAMRequests       = `avg(avg_over_time(kube_pod_container_resource_requests_memory_bytes{container!="", container!="POD", node!=""}[%s]%s)) by (container, pod, namespace, node, cluster_id)`
 	queryFmtRAMUsageAvg       = `avg(avg_over_time(container_memory_working_set_bytes{container_name!="", container_name!="POD", instance!=""}[%s]%s)) by (container_name, pod_name, namespace, instance, cluster_id)`
 	queryFmtRAMUsageMax       = `max(max_over_time(container_memory_working_set_bytes{container_name!="", container_name!="POD", instance!=""}[%s]%s)) by (container_name, pod_name, namespace, instance, cluster_id)`
@@ -36,9 +36,9 @@ const (
 	// https://prometheus.io/blog/2019/01/28/subquery-support/#examples
 	queryFmtCPUUsageMax           = `max(max_over_time(kubecost_savings_container_cpu_usage_seconds[%s]%s)) by (container_name, pod_name, namespace, instance, cluster_id)`
 	queryFmtGPUsRequested         = `avg(avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!=""}[%s]%s)) by (container, pod, namespace, node, cluster_id)`
-	queryFmtNodeCostPerCPUHr      = `avg(avg_over_time(node_cpu_hourly_cost[%s]%s)) by (node, cluster_id, instance_type)`
-	queryFmtNodeCostPerRAMGiBHr   = `avg(avg_over_time(node_ram_hourly_cost[%s]%s)) by (node, cluster_id, instance_type)`
-	queryFmtNodeCostPerGPUHr      = `avg(avg_over_time(node_gpu_hourly_cost[%s]%s)) by (node, cluster_id, instance_type)`
+	queryFmtNodeCostPerCPUHr      = `avg(avg_over_time(node_cpu_hourly_cost[%s]%s)) by (node, cluster_id, instance_type, provider_id)`
+	queryFmtNodeCostPerRAMGiBHr   = `avg(avg_over_time(node_ram_hourly_cost[%s]%s)) by (node, cluster_id, instance_type, provider_id)`
+	queryFmtNodeCostPerGPUHr      = `avg(avg_over_time(node_gpu_hourly_cost[%s]%s)) by (node, cluster_id, instance_type, provider_id)`
 	queryFmtNodeIsSpot            = `avg_over_time(kubecost_node_is_spot[%s]%s)`
 	queryFmtPVCInfo               = `avg(kube_persistentvolumeclaim_info{volumename != ""}) by (persistentvolumeclaim, storageclass, volumename, namespace, cluster_id)[%s:%s]%s`
 	queryFmtPVBytes               = `avg(avg_over_time(kube_persistentvolume_capacity_bytes[%s]%s)) by (persistentvolume, cluster_id)`
@@ -308,9 +308,9 @@ func (cm *CostModel) ComputeAllocation(start, end time.Time, resolution time.Dur
 	// for converting resource allocation data to cumulative costs.
 	nodeMap := map[nodeKey]*NodePricing{}
 
-	applyNodeCostPerCPUHr(nodeMap, resNodeCostPerCPUHr)
-	applyNodeCostPerRAMGiBHr(nodeMap, resNodeCostPerRAMGiBHr)
-	applyNodeCostPerGPUHr(nodeMap, resNodeCostPerGPUHr)
+	applyNodeCostPerCPUHr(nodeMap, resNodeCostPerCPUHr, cm.Provider.ParseID)
+	applyNodeCostPerRAMGiBHr(nodeMap, resNodeCostPerRAMGiBHr, cm.Provider.ParseID)
+	applyNodeCostPerGPUHr(nodeMap, resNodeCostPerGPUHr, cm.Provider.ParseID)
 	applyNodeSpot(nodeMap, resNodeIsSpot)
 	applyNodeDiscount(nodeMap, cm)
 
@@ -357,6 +357,7 @@ func (cm *CostModel) ComputeAllocation(start, end time.Time, resolution time.Dur
 			nodeKey := newNodeKey(cluster, nodeName)
 
 			node := cm.getNodePricing(nodeMap, nodeKey)
+			alloc.Properties.ProviderID = node.ProviderID
 			alloc.CPUCost = alloc.CPUCoreHours * node.CostPerCPUHr
 			alloc.RAMCost = (alloc.RAMByteHours / 1024 / 1024 / 1024) * node.CostPerRAMGiBHr
 			alloc.GPUCost = alloc.GPUHours * node.CostPerGPUHr
@@ -1282,7 +1283,8 @@ func applyControllersToPods(podMap map[podKey]*Pod, podControllerMap map[podKey]
 	}
 }
 
-func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerCPUHr []*prom.QueryResult) {
+func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerCPUHr []*prom.QueryResult,
+	providerIDParser func(string) string) {
 	for _, res := range resNodeCostPerCPUHr {
 		cluster, err := res.GetString("cluster_id")
 		if err != nil {
@@ -1301,11 +1303,18 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerCPUHr
 			continue
 		}
 
+		providerID, err := res.GetString("provider_id")
+		if err != nil {
+			log.Warningf("CostModel.ComputeAllocation: Node CPU cost query result missing field: %s", err)
+			continue
+		}
+
 		key := newNodeKey(cluster, node)
 		if _, ok := nodeMap[key]; !ok {
 			nodeMap[key] = &NodePricing{
-				Name:     node,
-				NodeType: instanceType,
+				Name:       node,
+				NodeType:   instanceType,
+				ProviderID: providerIDParser(providerID),
 			}
 		}
 
@@ -1313,7 +1322,8 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerCPUHr
 	}
 }
 
-func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerRAMGiBHr []*prom.QueryResult) {
+func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerRAMGiBHr []*prom.QueryResult,
+	providerIDParser func(string) string) {
 	for _, res := range resNodeCostPerRAMGiBHr {
 		cluster, err := res.GetString("cluster_id")
 		if err != nil {
@@ -1332,11 +1342,18 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerRA
 			continue
 		}
 
+		providerID, err := res.GetString("provider_id")
+		if err != nil {
+			log.Warningf("CostModel.ComputeAllocation: Node RAM cost query result missing field: %s", err)
+			continue
+		}
+
 		key := newNodeKey(cluster, node)
 		if _, ok := nodeMap[key]; !ok {
 			nodeMap[key] = &NodePricing{
-				Name:     node,
-				NodeType: instanceType,
+				Name:       node,
+				NodeType:   instanceType,
+				ProviderID: providerIDParser(providerID),
 			}
 		}
 
@@ -1344,7 +1361,8 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerRA
 	}
 }
 
-func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerGPUHr []*prom.QueryResult) {
+func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerGPUHr []*prom.QueryResult,
+	providerIDParser func(string) string) {
 	for _, res := range resNodeCostPerGPUHr {
 		cluster, err := res.GetString("cluster_id")
 		if err != nil {
@@ -1363,11 +1381,18 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*NodePricing, resNodeCostPerGPUHr
 			continue
 		}
 
+		providerID, err := res.GetString("provider_id")
+		if err != nil {
+			log.Warningf("CostModel.ComputeAllocation: Node GPU cost query result missing field: %s", err)
+			continue
+		}
+
 		key := newNodeKey(cluster, node)
 		if _, ok := nodeMap[key]; !ok {
 			nodeMap[key] = &NodePricing{
-				Name:     node,
-				NodeType: instanceType,
+				Name:       node,
+				NodeType:   instanceType,
+				ProviderID: providerIDParser(providerID),
 			}
 		}
 
@@ -1885,6 +1910,7 @@ func (cm *CostModel) getCustomNodePricing(spot bool) *NodePricing {
 type NodePricing struct {
 	Name            string
 	NodeType        string
+	ProviderID      string
 	Preemptible     bool
 	CostPerCPUHr    float64
 	CostPerRAMGiBHr float64

+ 2 - 2
pkg/costmodel/costmodel.go

@@ -1057,7 +1057,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 
 			cpuToRAMRatio := defaultCPU / defaultRAM
 			if math.IsNaN(cpuToRAMRatio) {
-				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRam: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
+				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
 				cpuToRAMRatio = 0
 			}
 
@@ -1137,7 +1137,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 
 			cpuToRAMRatio := defaultCPU / defaultRAM
 			if math.IsNaN(cpuToRAMRatio) {
-				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRam: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
+				klog.V(1).Infof("[Warning] cpuToRAMRatio[defaultCPU: %f / defaultRAM: %f] is NaN. Setting to 0.", defaultCPU, defaultRAM)
 				cpuToRAMRatio = 0
 			}
 

+ 155 - 20
pkg/kubecost/allocation.go

@@ -59,8 +59,10 @@ type Allocation struct {
 	CPUCoreRequestAverage  float64               `json:"cpuCoreRequestAverage"`
 	CPUCoreUsageAverage    float64               `json:"cpuCoreUsageAverage"`
 	CPUCost                float64               `json:"cpuCost"`
+	CPUCostAdjustment      float64               `json:"cpuCostAdjustment"`
 	GPUHours               float64               `json:"gpuHours"`
 	GPUCost                float64               `json:"gpuCost"`
+	GPUCostAdjustment      float64               `json:"gpuCostAdjustment"`
 	NetworkCost            float64               `json:"networkCost"`
 	LoadBalancerCost       float64               `json:"loadBalancerCost"`
 	PVByteHours            float64               `json:"pvByteHours"`
@@ -69,6 +71,7 @@ type Allocation struct {
 	RAMBytesRequestAverage float64               `json:"ramByteRequestAverage"`
 	RAMBytesUsageAverage   float64               `json:"ramByteUsageAverage"`
 	RAMCost                float64               `json:"ramCost"`
+	RAMCostAdjustment      float64               `json:"ramCostAdjustment"`
 	SharedCost             float64               `json:"sharedCost"`
 	ExternalCost           float64               `json:"externalCost"`
 
@@ -144,8 +147,10 @@ func (a *Allocation) Clone() *Allocation {
 		CPUCoreRequestAverage:  a.CPUCoreRequestAverage,
 		CPUCoreUsageAverage:    a.CPUCoreUsageAverage,
 		CPUCost:                a.CPUCost,
+		CPUCostAdjustment:      a.CPUCostAdjustment,
 		GPUHours:               a.GPUHours,
 		GPUCost:                a.GPUCost,
+		GPUCostAdjustment:      a.GPUCostAdjustment,
 		NetworkCost:            a.NetworkCost,
 		LoadBalancerCost:       a.LoadBalancerCost,
 		PVByteHours:            a.PVByteHours,
@@ -154,6 +159,7 @@ func (a *Allocation) Clone() *Allocation {
 		RAMBytesRequestAverage: a.RAMBytesRequestAverage,
 		RAMBytesUsageAverage:   a.RAMBytesUsageAverage,
 		RAMCost:                a.RAMCost,
+		RAMCostAdjustment:      a.RAMCostAdjustment,
 		SharedCost:             a.SharedCost,
 		ExternalCost:           a.ExternalCost,
 		RawAllocationOnly:      a.RawAllocationOnly.Clone(),
@@ -202,12 +208,18 @@ func (a *Allocation) Equal(that *Allocation) bool {
 	if !util.IsApproximately(a.CPUCost, that.CPUCost) {
 		return false
 	}
+	if !util.IsApproximately(a.CPUCostAdjustment, that.CPUCostAdjustment) {
+		return false
+	}
 	if !util.IsApproximately(a.GPUHours, that.GPUHours) {
 		return false
 	}
 	if !util.IsApproximately(a.GPUCost, that.GPUCost) {
 		return false
 	}
+	if !util.IsApproximately(a.GPUCostAdjustment, that.GPUCostAdjustment) {
+		return false
+	}
 	if !util.IsApproximately(a.NetworkCost, that.NetworkCost) {
 		return false
 	}
@@ -226,6 +238,9 @@ func (a *Allocation) Equal(that *Allocation) bool {
 	if !util.IsApproximately(a.RAMCost, that.RAMCost) {
 		return false
 	}
+	if !util.IsApproximately(a.RAMCostAdjustment, that.RAMCostAdjustment) {
+		return false
+	}
 	if !util.IsApproximately(a.SharedCost, that.SharedCost) {
 		return false
 	}
@@ -254,7 +269,19 @@ func (a *Allocation) Equal(that *Allocation) bool {
 
 // TotalCost is the total cost of the Allocation
 func (a *Allocation) TotalCost() float64 {
-	return a.CPUCost + a.GPUCost + a.RAMCost + a.PVCost + a.NetworkCost + a.SharedCost + a.ExternalCost + a.LoadBalancerCost
+	return a.CPUTotalCost() + a.GPUTotalCost() + a.RAMTotalCost() + a.PVCost + a.NetworkCost + a.SharedCost + a.ExternalCost + a.LoadBalancerCost
+}
+
+func (a *Allocation) CPUTotalCost() float64 {
+	return a.CPUCost + a.CPUCostAdjustment
+}
+
+func (a *Allocation) GPUTotalCost() float64 {
+	return a.GPUCost + a.GPUCostAdjustment
+}
+
+func (a *Allocation) RAMTotalCost() float64 {
+	return a.RAMCost + a.RAMCostAdjustment
 }
 
 // CPUEfficiency is the ratio of usage to request. If there is no request and
@@ -290,10 +317,10 @@ func (a *Allocation) RAMEfficiency() float64 {
 // TotalEfficiency is the cost-weighted average of CPU and RAM efficiency. If
 // there is no cost at all, then efficiency is zero.
 func (a *Allocation) TotalEfficiency() float64 {
-	if a.CPUCost+a.RAMCost > 0 {
-		ramCostEff := a.RAMEfficiency() * a.RAMCost
-		cpuCostEff := a.CPUEfficiency() * a.CPUCost
-		return (ramCostEff + cpuCostEff) / (a.CPUCost + a.RAMCost)
+	if a.RAMTotalCost()+a.CPUTotalCost() > 0 {
+		ramCostEff := a.RAMEfficiency() * a.RAMTotalCost()
+		cpuCostEff := a.CPUEfficiency() * a.CPUTotalCost()
+		return (ramCostEff + cpuCostEff) / (a.CPUTotalCost() + a.RAMTotalCost())
 	}
 
 	return 0.0
@@ -315,6 +342,14 @@ func (a *Allocation) RAMBytes() float64 {
 	return a.RAMByteHours / (a.Minutes() / 60.0)
 }
 
+// GPUs converts the Allocation's GPUHours into average GPUs
+func (a *Allocation) GPUs() float64 {
+	if a.Minutes() <= 0.0 {
+		return 0.0
+	}
+	return a.GPUHours / (a.Minutes() / 60.0)
+}
+
 // PVBytes converts the Allocation's PVByteHours into average PVBytes
 func (a *Allocation) PVBytes() float64 {
 	if a.Minutes() <= 0.0 {
@@ -337,9 +372,12 @@ func (a *Allocation) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "cpuCoreUsageAverage", a.CPUCoreUsageAverage, ",")
 	jsonEncodeFloat64(buffer, "cpuCoreHours", a.CPUCoreHours, ",")
 	jsonEncodeFloat64(buffer, "cpuCost", a.CPUCost, ",")
+	jsonEncodeFloat64(buffer, "cpuCostAdjustment", a.CPUCostAdjustment, ",")
 	jsonEncodeFloat64(buffer, "cpuEfficiency", a.CPUEfficiency(), ",")
+	jsonEncodeFloat64(buffer, "gpuCount", a.GPUs(), ",")
 	jsonEncodeFloat64(buffer, "gpuHours", a.GPUHours, ",")
 	jsonEncodeFloat64(buffer, "gpuCost", a.GPUCost, ",")
+	jsonEncodeFloat64(buffer, "gpuCostAdjustment", a.GPUCostAdjustment, ",")
 	jsonEncodeFloat64(buffer, "networkCost", a.NetworkCost, ",")
 	jsonEncodeFloat64(buffer, "loadBalancerCost", a.LoadBalancerCost, ",")
 	jsonEncodeFloat64(buffer, "pvBytes", a.PVBytes(), ",")
@@ -350,6 +388,7 @@ func (a *Allocation) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "ramByteUsageAverage", a.RAMBytesUsageAverage, ",")
 	jsonEncodeFloat64(buffer, "ramByteHours", a.RAMByteHours, ",")
 	jsonEncodeFloat64(buffer, "ramCost", a.RAMCost, ",")
+	jsonEncodeFloat64(buffer, "ramCostAdjustment", a.RAMCostAdjustment, ",")
 	jsonEncodeFloat64(buffer, "ramEfficiency", a.RAMEfficiency(), ",")
 	jsonEncodeFloat64(buffer, "sharedCost", a.SharedCost, ",")
 	jsonEncodeFloat64(buffer, "externalCost", a.ExternalCost, ",")
@@ -479,6 +518,11 @@ func (a *Allocation) add(that *Allocation) {
 	a.SharedCost += that.SharedCost
 	a.ExternalCost += that.ExternalCost
 
+	// Sum all cumulative adjustment fields
+	a.CPUCostAdjustment += that.CPUCostAdjustment
+	a.RAMCostAdjustment += that.RAMCostAdjustment
+	a.GPUCostAdjustment += that.GPUCostAdjustment
+
 	// Any data that is in a "raw allocation only" is not valid in any
 	// sort of cumulative Allocation (like one that is added).
 	a.RawAllocationOnly = nil
@@ -1095,13 +1139,13 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 				totals[clusterID][r] += 1.0
 			}
 		} else {
-			coeffs[clusterID][name]["cpu"] += alloc.CPUCost
-			coeffs[clusterID][name]["gpu"] += alloc.GPUCost
-			coeffs[clusterID][name]["ram"] += alloc.RAMCost
+			coeffs[clusterID][name]["cpu"] += alloc.CPUTotalCost()
+			coeffs[clusterID][name]["gpu"] += alloc.GPUTotalCost()
+			coeffs[clusterID][name]["ram"] += alloc.RAMTotalCost()
 
-			totals[clusterID]["cpu"] += alloc.CPUCost
-			totals[clusterID]["gpu"] += alloc.GPUCost
-			totals[clusterID]["ram"] += alloc.RAMCost
+			totals[clusterID]["cpu"] += alloc.CPUTotalCost()
+			totals[clusterID]["gpu"] += alloc.GPUTotalCost()
+			totals[clusterID]["ram"] += alloc.RAMTotalCost()
 		}
 	}
 
@@ -1142,13 +1186,13 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 				totals[clusterID][r] += 1.0
 			}
 		} else {
-			coeffs[clusterID][name]["cpu"] += alloc.CPUCost
-			coeffs[clusterID][name]["gpu"] += alloc.GPUCost
-			coeffs[clusterID][name]["ram"] += alloc.RAMCost
+			coeffs[clusterID][name]["cpu"] += alloc.CPUTotalCost()
+			coeffs[clusterID][name]["gpu"] += alloc.GPUTotalCost()
+			coeffs[clusterID][name]["ram"] += alloc.RAMTotalCost()
 
-			totals[clusterID]["cpu"] += alloc.CPUCost
-			totals[clusterID]["gpu"] += alloc.GPUCost
-			totals[clusterID]["ram"] += alloc.RAMCost
+			totals[clusterID]["cpu"] += alloc.CPUTotalCost()
+			totals[clusterID]["gpu"] += alloc.GPUTotalCost()
+			totals[clusterID]["ram"] += alloc.RAMTotalCost()
 		}
 	}
 
@@ -1418,9 +1462,9 @@ func (as *AllocationSet) ComputeIdleAllocations(assetSet *AssetSet) (map[string]
 			clusterEnds[cluster] = a.End
 		}
 
-		assetClusterResourceCosts[cluster]["cpu"] -= a.CPUCost
-		assetClusterResourceCosts[cluster]["gpu"] -= a.GPUCost
-		assetClusterResourceCosts[cluster]["ram"] -= a.RAMCost
+		assetClusterResourceCosts[cluster]["cpu"] -= a.CPUTotalCost()
+		assetClusterResourceCosts[cluster]["gpu"] -= a.GPUTotalCost()
+		assetClusterResourceCosts[cluster]["ram"] -= a.RAMTotalCost()
 	})
 
 	// Turn remaining un-allocated asset costs into idle allocations
@@ -1460,6 +1504,97 @@ func (as *AllocationSet) ComputeIdleAllocations(assetSet *AssetSet) (map[string]
 	return idleAllocs, nil
 }
 
+// Reconcile calculates the exact cost of each Allocation by resource (cpu, ram, gpu, etc.) based on
+// the Asset(s) on which the Allocation depends.
+func (as *AllocationSet) Reconcile(assetSet *AssetSet) error {
+	if as == nil {
+		return fmt.Errorf("cannot reconcile allocation for nil AllocationSet")
+	}
+
+	if assetSet == nil {
+		return fmt.Errorf("cannot reconcile allocation with nil AssetSet")
+	}
+
+	if !as.Window.Equal(assetSet.Window) {
+		return fmt.Errorf("cannot reconcile allocation for sets with mismatched windows: %s != %s", as.Window, assetSet.Window)
+	}
+
+	// Build map of Assets with type Node by their ProviderID so that they can be matched to Allocations to determine
+	// proper CPU, GPU, and RAM prices
+	nodeByProviderID := map[string]*Node{}
+	assetSet.Each(func(key string, a Asset) {
+		if node, ok := a.(*Node); ok && node.properties.ProviderID != "" {
+			nodeByProviderID[node.properties.ProviderID] = node
+		}
+	})
+
+	// Match Assets against allocations and adjust allocation cost based on the proportion of the asset that they used
+	as.Each(func(name string, a *Allocation) {
+		providerId := a.Properties.ProviderID
+
+		// Reconcile with node Assets
+		node, ok := nodeByProviderID[providerId]
+		if !ok || providerId == "" {
+			// Failed to find node for allocation
+			return
+		}
+
+		// adjustmentRate is used to scale resource costs proportionally
+		// by the adjustment. This is necessary because we only get one
+		// adjustment per Node, not one per-resource-per-Node.
+		//
+		// e.g. total cost = $90, adjustment = -$10 => 0.9
+		// e.g. total cost = $150, adjustment = -$300 => 0.3333
+		// e.g. total cost = $150, adjustment = $50 => 1.5
+		adjustmentRate := 1.0
+		if node.TotalCost()-node.Adjustment() == 0 {
+			// If (totalCost - adjustment) is 0.0 then adjustment cancels
+			// the entire node cost and we should make everything 0
+			// without dividing by 0.
+			adjustmentRate = 0.0
+		} else if node.Adjustment() != 0.0 {
+			// adjustmentRate is the ratio of cost-with-adjustment (i.e. TotalCost)
+			// to cost-without-adjustment (i.e. TotalCost - Adjustment).
+			adjustmentRate = node.TotalCost() / (node.TotalCost() - node.Adjustment())
+		}
+
+		// Find total cost of each node resource for the window
+		cpuCost := node.CPUCost * (1.0 - node.Discount) * adjustmentRate
+		ramCost := node.RAMCost * (1.0 - node.Discount) * adjustmentRate
+		gpuCost := node.GPUCost * adjustmentRate
+
+		// Find the proportion of resource hours used by the allocation, checking for 0 denominators
+		cpuUsageProportion := 0.0
+		if node.CPUCoreHours != 0 {
+			cpuUsageProportion = a.CPUCoreHours / node.CPUCoreHours
+		} else {
+			log.Warningf("Missing CPU Hours for node Provider ID: %s", providerId)
+		}
+		ramUsageProportion := 0.0
+		if node.RAMByteHours != 0 {
+			ramUsageProportion = a.RAMByteHours / node.RAMByteHours
+		} else {
+			log.Warningf("Missing RAM Byte Hours for node Provider ID: %s", providerId)
+		}
+		gpuUsageProportion := 0.0
+		if node.GPUHours != 0 {
+			gpuUsageProportion = a.GPUHours / node.GPUHours
+		}
+		// No log for GPU because not all nodes have GPU
+
+		// Calculate the allocation's resource costs by the proportion of resources used and total costs
+		allocCPUCost := cpuUsageProportion * cpuCost
+		allocRAMCost := ramUsageProportion * ramCost
+		allocGPUCost := gpuUsageProportion * gpuCost
+
+		a.CPUCostAdjustment = allocCPUCost - a.CPUCost
+		a.RAMCostAdjustment = allocRAMCost - a.RAMCost
+		a.GPUCostAdjustment = allocGPUCost - a.GPUCost
+	})
+
+	return nil
+}
+
 // Delete removes the allocation with the given name from the set
 func (as *AllocationSet) Delete(name string) {
 	if as == nil {

+ 523 - 225
pkg/kubecost/allocation_test.go

@@ -112,14 +112,17 @@ func TestAllocation_Add(t *testing.T) {
 		CPUCoreRequestAverage:  2.0,
 		CPUCoreUsageAverage:    1.0,
 		CPUCost:                2.0 * hrs1 * cpuPrice,
+		CPUCostAdjustment:      3.0,
 		GPUHours:               1.0 * hrs1,
 		GPUCost:                1.0 * hrs1 * gpuPrice,
+		GPUCostAdjustment:      2.0,
 		PVByteHours:            100.0 * gib * hrs1,
 		PVCost:                 100.0 * hrs1 * pvPrice,
 		RAMByteHours:           8.0 * gib * hrs1,
 		RAMBytesRequestAverage: 8.0 * gib,
 		RAMBytesUsageAverage:   4.0 * gib,
 		RAMCost:                8.0 * hrs1 * ramPrice,
+		RAMCostAdjustment:      1.0,
 		SharedCost:             2.00,
 		ExternalCost:           1.00,
 		RawAllocationOnly:      &RawAllocationOnlyData{},
@@ -173,12 +176,21 @@ func TestAllocation_Add(t *testing.T) {
 	if !util.IsApproximately(a1.CPUCost+a2.CPUCost, act.CPUCost) {
 		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.CPUCost+a2.CPUCost, act.CPUCost)
 	}
+	if !util.IsApproximately(a1.CPUCostAdjustment+a2.CPUCostAdjustment, act.CPUCostAdjustment) {
+		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.CPUCostAdjustment+a2.CPUCostAdjustment, act.CPUCostAdjustment)
+	}
 	if !util.IsApproximately(a1.GPUCost+a2.GPUCost, act.GPUCost) {
 		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.GPUCost+a2.GPUCost, act.GPUCost)
 	}
+	if !util.IsApproximately(a1.GPUCostAdjustment+a2.GPUCostAdjustment, act.GPUCostAdjustment) {
+		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.GPUCostAdjustment+a2.GPUCostAdjustment, act.GPUCostAdjustment)
+	}
 	if !util.IsApproximately(a1.RAMCost+a2.RAMCost, act.RAMCost) {
 		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.RAMCost+a2.RAMCost, act.RAMCost)
 	}
+	if !util.IsApproximately(a1.RAMCostAdjustment+a2.RAMCostAdjustment, act.RAMCostAdjustment) {
+		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.RAMCostAdjustment+a2.RAMCostAdjustment, act.RAMCostAdjustment)
+	}
 	if !util.IsApproximately(a1.PVCost+a2.PVCost, act.PVCost) {
 		t.Fatalf("Allocation.Add: expected %f; actual %f", a1.PVCost+a2.PVCost, act.PVCost)
 	}
@@ -242,8 +254,8 @@ func TestAllocation_Add(t *testing.T) {
 	if !util.IsApproximately(2.0000000, act.RAMEfficiency()) {
 		t.Fatalf("Allocation.Add: expected %f; actual %f", 2.0000000, act.RAMEfficiency())
 	}
-	if !util.IsApproximately(1.6493506, act.TotalEfficiency()) {
-		t.Fatalf("Allocation.Add: expected %f; actual %f", 1.6493506, act.TotalEfficiency())
+	if !util.IsApproximately(1.279690, act.TotalEfficiency()) {
+		t.Fatalf("Allocation.Add: expected %f; actual %f", 1.279690, act.TotalEfficiency())
 	}
 
 	if act.RawAllocationOnly != nil {
@@ -269,14 +281,17 @@ func TestAllocation_Share(t *testing.T) {
 		CPUCoreRequestAverage:  2.0,
 		CPUCoreUsageAverage:    1.0,
 		CPUCost:                2.0 * hrs1 * cpuPrice,
+		CPUCostAdjustment:      3.0,
 		GPUHours:               1.0 * hrs1,
 		GPUCost:                1.0 * hrs1 * gpuPrice,
+		GPUCostAdjustment:      2.0,
 		PVByteHours:            100.0 * gib * hrs1,
 		PVCost:                 100.0 * hrs1 * pvPrice,
 		RAMByteHours:           8.0 * gib * hrs1,
 		RAMBytesRequestAverage: 8.0 * gib,
 		RAMBytesUsageAverage:   4.0 * gib,
 		RAMCost:                8.0 * hrs1 * ramPrice,
+		RAMCostAdjustment:      1.0,
 		SharedCost:             2.00,
 		ExternalCost:           1.00,
 	}
@@ -330,14 +345,14 @@ func TestAllocation_Share(t *testing.T) {
 	}
 
 	// Costs should match before (except TotalCost and SharedCost)
-	if !util.IsApproximately(a1.CPUCost, act.CPUCost) {
-		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.CPUCost, act.CPUCost)
+	if !util.IsApproximately(a1.CPUTotalCost(), act.CPUTotalCost()) {
+		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.CPUTotalCost(), act.CPUTotalCost())
 	}
-	if !util.IsApproximately(a1.GPUCost, act.GPUCost) {
-		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.GPUCost, act.GPUCost)
+	if !util.IsApproximately(a1.GPUTotalCost(), act.GPUTotalCost()) {
+		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.GPUTotalCost(), act.GPUTotalCost())
 	}
-	if !util.IsApproximately(a1.RAMCost, act.RAMCost) {
-		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.RAMCost, act.RAMCost)
+	if !util.IsApproximately(a1.RAMTotalCost(), act.RAMTotalCost()) {
+		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.RAMTotalCost(), act.RAMTotalCost())
 	}
 	if !util.IsApproximately(a1.PVCost, act.PVCost) {
 		t.Fatalf("Allocation.Share: expected %f; actual %f", a1.PVCost, act.PVCost)
@@ -425,8 +440,10 @@ func TestAllocation_MarshalJSON(t *testing.T) {
 		CPUCoreRequestAverage:  2.0,
 		CPUCoreUsageAverage:    1.0,
 		CPUCost:                2.0 * hrs * cpuPrice,
+		CPUCostAdjustment:      3.0,
 		GPUHours:               1.0 * hrs,
 		GPUCost:                1.0 * hrs * gpuPrice,
+		GPUCostAdjustment:      2.0,
 		NetworkCost:            0.05,
 		LoadBalancerCost:       0.02,
 		PVByteHours:            100.0 * gib * hrs,
@@ -435,6 +452,7 @@ func TestAllocation_MarshalJSON(t *testing.T) {
 		RAMBytesRequestAverage: 8.0 * gib,
 		RAMBytesUsageAverage:   4.0 * gib,
 		RAMCost:                8.0 * hrs * ramPrice,
+		RAMCostAdjustment:      1.0,
 		SharedCost:             2.00,
 		ExternalCost:           1.00,
 		RawAllocationOnly:      &RawAllocationOnlyData{},
@@ -519,8 +537,9 @@ func TestNewAllocationSet(t *testing.T) {
 func generateAllocationSet(start time.Time) *AllocationSet {
 	// Idle allocations
 	a1i := NewUnitAllocation(fmt.Sprintf("cluster1/%s", IdleSuffix), start, day, &AllocationProperties{
-		Cluster: "cluster1",
-		Node:    "node1",
+		Cluster:    "cluster1",
+		Node:       "node1",
+		ProviderID: "c1nodes",
 	})
 	a1i.CPUCost = 5.0
 	a1i.RAMCost = 15.0
@@ -535,88 +554,100 @@ func generateAllocationSet(start time.Time) *AllocationSet {
 
 	// Active allocations
 	a1111 := NewUnitAllocation("cluster1/namespace1/pod1/container1", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace1",
-		Pod:       "pod1",
-		Container: "container1",
+		Cluster:    "cluster1",
+		Namespace:  "namespace1",
+		Pod:        "pod1",
+		Container:  "container1",
+		ProviderID: "c1nodes",
 	})
 	a1111.RAMCost = 11.00
 
 	a11abc2 := NewUnitAllocation("cluster1/namespace1/pod-abc/container2", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace1",
-		Pod:       "pod-abc",
-		Container: "container2",
+		Cluster:    "cluster1",
+		Namespace:  "namespace1",
+		Pod:        "pod-abc",
+		Container:  "container2",
+		ProviderID: "c1nodes",
 	})
 
 	a11def3 := NewUnitAllocation("cluster1/namespace1/pod-def/container3", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace1",
-		Pod:       "pod-def",
-		Container: "container3",
+		Cluster:    "cluster1",
+		Namespace:  "namespace1",
+		Pod:        "pod-def",
+		Container:  "container3",
+		ProviderID: "c1nodes",
 	})
 
 	a12ghi4 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container4", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace2",
-		Pod:       "pod-ghi",
-		Container: "container4",
+		Cluster:    "cluster1",
+		Namespace:  "namespace2",
+		Pod:        "pod-ghi",
+		Container:  "container4",
+		ProviderID: "c1nodes",
 	})
 
 	a12ghi5 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container5", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace2",
-		Pod:       "pod-ghi",
-		Container: "container5",
+		Cluster:    "cluster1",
+		Namespace:  "namespace2",
+		Pod:        "pod-ghi",
+		Container:  "container5",
+		ProviderID: "c1nodes",
 	})
 
 	a12jkl6 := NewUnitAllocation("cluster1/namespace2/pod-jkl/container6", start, day, &AllocationProperties{
-		Cluster:   "cluster1",
-		Namespace: "namespace2",
-		Pod:       "pod-jkl",
-		Container: "container6",
+		Cluster:    "cluster1",
+		Namespace:  "namespace2",
+		Pod:        "pod-jkl",
+		Container:  "container6",
+		ProviderID: "c1nodes",
 	})
 
 	a22mno4 := NewUnitAllocation("cluster2/namespace2/pod-mno/container4", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace2",
-		Pod:       "pod-mno",
-		Container: "container4",
+		Cluster:    "cluster2",
+		Namespace:  "namespace2",
+		Pod:        "pod-mno",
+		Container:  "container4",
+		ProviderID: "node1",
 	})
 
 	a22mno5 := NewUnitAllocation("cluster2/namespace2/pod-mno/container5", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace2",
-		Pod:       "pod-mno",
-		Container: "container5",
+		Cluster:    "cluster2",
+		Namespace:  "namespace2",
+		Pod:        "pod-mno",
+		Container:  "container5",
+		ProviderID: "node1",
 	})
 
 	a22pqr6 := NewUnitAllocation("cluster2/namespace2/pod-pqr/container6", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace2",
-		Pod:       "pod-pqr",
-		Container: "container6",
+		Cluster:    "cluster2",
+		Namespace:  "namespace2",
+		Pod:        "pod-pqr",
+		Container:  "container6",
+		ProviderID: "node2",
 	})
 
 	a23stu7 := NewUnitAllocation("cluster2/namespace3/pod-stu/container7", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace3",
-		Pod:       "pod-stu",
-		Container: "container7",
+		Cluster:    "cluster2",
+		Namespace:  "namespace3",
+		Pod:        "pod-stu",
+		Container:  "container7",
+		ProviderID: "node2",
 	})
 
 	a23vwx8 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container8", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace3",
-		Pod:       "pod-vwx",
-		Container: "container8",
+		Cluster:    "cluster2",
+		Namespace:  "namespace3",
+		Pod:        "pod-vwx",
+		Container:  "container8",
+		ProviderID: "node3",
 	})
 
 	a23vwx9 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container9", start, day, &AllocationProperties{
-		Cluster:   "cluster2",
-		Namespace: "namespace3",
-		Pod:       "pod-vwx",
-		Container: "container9",
+		Cluster:    "cluster2",
+		Namespace:  "namespace3",
+		Pod:        "pod-vwx",
+		Container:  "container9",
+		ProviderID: "node3",
 	})
 
 	// Controllers
@@ -680,6 +711,149 @@ func generateAllocationSet(start time.Time) *AllocationSet {
 	)
 }
 
+func generateAssetSets(start, end time.Time) []*AssetSet {
+	var assetSets []*AssetSet
+
+	// Create an AssetSet representing cluster costs for two clusters (cluster1
+	// and cluster2). Include Nodes and Disks for both, even though only
+	// Nodes will be counted. Whereas in practice, Assets should be aggregated
+	// by type, here we will provide multiple Nodes for one of the clusters to
+	// make sure the function still holds.
+
+	// NOTE: we're re-using generateAllocationSet so this has to line up with
+	// the allocated node costs from that function. See table above.
+
+	// | Hierarchy                               | Cost |  CPU |  RAM |  GPU | Adjustment |
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1:
+	//     nodes                                  100.00  55.00  44.00  11.00      -10.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 subtotal (adjusted)             100.00  50.00  40.00  10.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 allocated                        48.00   6.00  16.00   6.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 idle                             72.00  44.00  24.00   4.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2:
+	//     node1                                   35.00  20.00  15.00   0.00        0.00
+	//     node2                                   35.00  20.00  15.00   0.00        0.00
+	//     node3                                   30.00  10.00  10.00  10.00        0.00
+	//     (disks should not matter for idle)
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 subtotal                        100.00  50.00  40.00  10.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 allocated                        28.00   6.00   6.00   6.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 idle                             82.00  44.00  34.00   4.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+
+	cluster1Nodes := NewNode("", "cluster1", "c1nodes", start, end, NewWindow(&start, &end))
+	cluster1Nodes.CPUCost = 55.0
+	cluster1Nodes.RAMCost = 44.0
+	cluster1Nodes.GPUCost = 11.0
+	cluster1Nodes.adjustment = -10.00
+	cluster1Nodes.CPUCoreHours = 8
+	cluster1Nodes.RAMByteHours = 6
+	cluster1Nodes.GPUHours = 24
+
+	cluster2Node1 := NewNode("node1", "cluster2", "node1", start, end, NewWindow(&start, &end))
+	cluster2Node1.CPUCost = 20.0
+	cluster2Node1.RAMCost = 15.0
+	cluster2Node1.GPUCost = 0.0
+	cluster2Node1.CPUCoreHours = 4
+	cluster2Node1.RAMByteHours = 3
+	cluster2Node1.GPUHours = 0
+
+	cluster2Node2 := NewNode("node2", "cluster2", "node2", start, end, NewWindow(&start, &end))
+	cluster2Node2.CPUCost = 20.0
+	cluster2Node2.RAMCost = 15.0
+	cluster2Node2.GPUCost = 0.0
+	cluster2Node2.CPUCoreHours = 3
+	cluster2Node2.RAMByteHours = 2
+	cluster2Node2.GPUHours = 0
+
+	cluster2Node3 := NewNode("node3", "cluster2", "node3", start, end, NewWindow(&start, &end))
+	cluster2Node3.CPUCost = 10.0
+	cluster2Node3.RAMCost = 10.0
+	cluster2Node3.GPUCost = 10.0
+	cluster2Node3.CPUCoreHours = 2
+	cluster2Node3.RAMByteHours = 2
+	cluster2Node3.GPUHours = 24
+
+	cluster2Disk1 := NewDisk("disk1", "cluster2", "disk1", start, end, NewWindow(&start, &end))
+	cluster2Disk1.Cost = 5.0
+
+	assetSet1 := NewAssetSet(start, end, cluster1Nodes, cluster2Node1, cluster2Node2, cluster2Node3, cluster2Disk1)
+	assetSets = append(assetSets, assetSet1)
+
+	// NOTE: we're re-using generateAllocationSet so this has to line up with
+	// the allocated node costs from that function. See table above.
+
+	// | Hierarchy                               | Cost |  CPU |  RAM |  GPU | Adjustment |
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1:
+	//     nodes                                  100.00   5.00   4.00   1.00       90.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 subtotal (adjusted)             100.00  50.00  40.00  10.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 allocated                        48.00   6.00  16.00   6.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster1 idle                             72.00  44.00  24.00   4.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2:
+	//     node1                                   35.00  20.00  15.00   0.00        0.00
+	//     node2                                   35.00  20.00  15.00   0.00        0.00
+	//     node3                                   30.00  10.00  10.00  10.00        0.00
+	//     (disks should not matter for idle)
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 subtotal                        100.00  50.00  40.00  10.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 allocated                        28.00   6.00   6.00   6.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+	//   cluster2 idle                             82.00  44.00  34.00   4.00        0.00
+	// +-----------------------------------------+------+------+------+------+------------+
+
+	cluster1Nodes = NewNode("", "cluster1", "c1nodes", start, end, NewWindow(&start, &end))
+	cluster1Nodes.CPUCost = 5.0
+	cluster1Nodes.RAMCost = 4.0
+	cluster1Nodes.GPUCost = 1.0
+	cluster1Nodes.adjustment = 90.00
+	cluster1Nodes.CPUCoreHours = 8
+	cluster1Nodes.RAMByteHours = 6
+	cluster1Nodes.GPUHours = 24
+
+	cluster2Node1 = NewNode("node1", "cluster2", "node1", start, end, NewWindow(&start, &end))
+	cluster2Node1.CPUCost = 20.0
+	cluster2Node1.RAMCost = 15.0
+	cluster2Node1.GPUCost = 0.0
+	cluster2Node1.CPUCoreHours = 4
+	cluster2Node1.RAMByteHours = 3
+	cluster2Node1.GPUHours = 0
+
+	cluster2Node2 = NewNode("node2", "cluster2", "node2", start, end, NewWindow(&start, &end))
+	cluster2Node2.CPUCost = 20.0
+	cluster2Node2.RAMCost = 15.0
+	cluster2Node2.GPUCost = 0.0
+	cluster2Node2.CPUCoreHours = 3
+	cluster2Node2.RAMByteHours = 2
+	cluster2Node2.GPUHours = 0
+
+	cluster2Node3 = NewNode("node3", "cluster2", "node3", start, end, NewWindow(&start, &end))
+	cluster2Node3.CPUCost = 10.0
+	cluster2Node3.RAMCost = 10.0
+	cluster2Node3.GPUCost = 10.0
+	cluster2Node3.CPUCoreHours = 2
+	cluster2Node3.RAMByteHours = 2
+	cluster2Node3.GPUHours = 24
+
+	cluster2Disk1 = NewDisk("disk1", "cluster2", "disk1", start, end, NewWindow(&start, &end))
+	cluster2Disk1.Cost = 5.0
+
+	assetSet2 := NewAssetSet(start, end, cluster1Nodes, cluster2Node1, cluster2Node2, cluster2Node3, cluster2Disk1)
+	assetSets = append(assetSets, assetSet2)
+	return assetSets
+}
+
 func assertAllocationSetTotals(t *testing.T, as *AllocationSet, msg string, err error, length int, totalCost float64) {
 	if err != nil {
 		t.Fatalf("AllocationSet.AggregateBy[%s]: unexpected error: %s", msg, err)
@@ -1491,187 +1665,311 @@ func TestAllocationSet_ComputeIdleAllocations(t *testing.T) {
 		as.Delete(key)
 	}
 
-	// Create an AssetSet representing cluster costs for two clusters (cluster1
-	// and cluster2). Include Nodes and Disks for both, even though only
-	// Nodes will be counted. Whereas in practice, Assets should be aggregated
-	// by type, here we will provide multiple Nodes for one of the clusters to
-	// make sure the function still holds.
-
-	// NOTE: we're re-using generateAllocationSet so this has to line up with
-	// the allocated node costs from that function. See table above.
-
-	// | Hierarchy                               | Cost |  CPU |  RAM |  GPU | Adjustment |
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1:
-	//     nodes                                  100.00  55.00  44.00  11.00      -10.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 subtotal (adjusted)             100.00  50.00  40.00  10.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 allocated                        48.00   6.00  16.00   6.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 idle                             72.00  44.00  24.00   4.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2:
-	//     node1                                   35.00  20.00  15.00   0.00        0.00
-	//     node2                                   35.00  20.00  15.00   0.00        0.00
-	//     node3                                   30.00  10.00  10.00  10.00        0.00
-	//     (disks should not matter for idle)
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 subtotal                        100.00  50.00  40.00  10.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 allocated                        28.00   6.00   6.00   6.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 idle                             82.00  44.00  34.00   4.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-
-	cluster1Nodes := NewNode("", "cluster1", "", start, end, NewWindow(&start, &end))
-	cluster1Nodes.CPUCost = 55.0
-	cluster1Nodes.RAMCost = 44.0
-	cluster1Nodes.GPUCost = 11.0
-	cluster1Nodes.adjustment = -10.00
-
-	cluster2Node1 := NewNode("node1", "cluster2", "node1", start, end, NewWindow(&start, &end))
-	cluster2Node1.CPUCost = 20.0
-	cluster2Node1.RAMCost = 15.0
-	cluster2Node1.GPUCost = 0.0
-
-	cluster2Node2 := NewNode("node2", "cluster2", "node2", start, end, NewWindow(&start, &end))
-	cluster2Node2.CPUCost = 20.0
-	cluster2Node2.RAMCost = 15.0
-	cluster2Node2.GPUCost = 0.0
-
-	cluster2Node3 := NewNode("node3", "cluster2", "node3", start, end, NewWindow(&start, &end))
-	cluster2Node3.CPUCost = 10.0
-	cluster2Node3.RAMCost = 10.0
-	cluster2Node3.GPUCost = 10.0
-
-	cluster2Disk1 := NewDisk("disk1", "cluster2", "disk1", start, end, NewWindow(&start, &end))
-	cluster2Disk1.Cost = 5.0
-
-	assetSet := NewAssetSet(start, end, cluster1Nodes, cluster2Node1, cluster2Node2, cluster2Node3, cluster2Disk1)
-
-	idles, err = as.ComputeIdleAllocations(assetSet)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
+	assetSets := generateAssetSets(start, end)
+
+	cases := map[string]struct {
+		allocationSet *AllocationSet
+		assetSet      *AssetSet
+		clusters      map[string]Allocation
+	}{
+		"1a": {
+			allocationSet: as,
+			assetSet:      assetSets[0],
+			clusters: map[string]Allocation{
+				"cluster1": {
+					CPUCost: 44.0,
+					RAMCost: 24.0,
+					GPUCost: 4.0,
+				},
+				"cluster2": {
+					CPUCost: 44.0,
+					RAMCost: 34.0,
+					GPUCost: 4.0,
+				},
+			},
+		},
+		"1b": {
+			allocationSet: as,
+			assetSet:      assetSets[1],
+			clusters: map[string]Allocation{
+				"cluster1": {
+					CPUCost: 44.0,
+					RAMCost: 24.0,
+					GPUCost: 4.0,
+				},
+				"cluster2": {
+					CPUCost: 44.0,
+					RAMCost: 34.0,
+					GPUCost: 4.0,
+				},
+			},
+		},
 	}
 
-	if len(idles) != 2 {
-		t.Fatalf("idles: expected length %d; got length %d", 2, len(idles))
-	}
+	for name, testcase := range cases {
+		t.Run(name, func(t *testing.T) {
+			idles, err = as.ComputeIdleAllocations(testcase.assetSet)
+			if err != nil {
+				t.Fatalf("unexpected error: %s", err)
+			}
 
-	if idle, ok := idles["cluster1"]; !ok {
-		t.Fatalf("expected idle cost for %s", "cluster1")
-	} else {
-		if !util.IsApproximately(idle.TotalCost(), 72.0) {
-			t.Fatalf("%s idle: expected total cost %f; got total cost %f", "cluster1", 72.0, idle.TotalCost())
-		}
-	}
-	if !util.IsApproximately(idles["cluster1"].CPUCost, 44.0) {
-		t.Fatalf("expected idle CPU cost for %s to be %.2f; got %.2f", "cluster1", 44.0, idles["cluster1"].CPUCost)
-	}
-	if !util.IsApproximately(idles["cluster1"].RAMCost, 24.0) {
-		t.Fatalf("expected idle RAM cost for %s to be %.2f; got %.2f", "cluster1", 24.0, idles["cluster1"].RAMCost)
-	}
-	if !util.IsApproximately(idles["cluster1"].GPUCost, 4.0) {
-		t.Fatalf("expected idle GPU cost for %s to be %.2f; got %.2f", "cluster1", 4.0, idles["cluster1"].GPUCost)
-	}
+			if len(idles) != len(testcase.clusters) {
+				t.Fatalf("idles: expected length %d; got length %d", len(testcase.clusters), len(idles))
+			}
 
-	if idle, ok := idles["cluster2"]; !ok {
-		t.Fatalf("expected idle cost for %s", "cluster2")
-	} else {
-		if !util.IsApproximately(idle.TotalCost(), 82.0) {
-			t.Fatalf("%s idle: expected total cost %f; got total cost %f", "cluster2", 82.0, idle.TotalCost())
-		}
+			for clusterName, cluster := range testcase.clusters {
+				if idle, ok := idles[clusterName]; !ok {
+					t.Fatalf("expected idle cost for %s", clusterName)
+				} else {
+					if !util.IsApproximately(idle.TotalCost(), cluster.TotalCost()) {
+						t.Fatalf("%s idle: expected total cost %f; got total cost %f", clusterName, cluster.TotalCost(), idle.TotalCost())
+					}
+				}
+				if !util.IsApproximately(idles[clusterName].CPUCost, cluster.CPUCost) {
+					t.Fatalf("expected idle CPU cost for %s to be %.2f; got %.2f", clusterName, cluster.CPUCost, idles[clusterName].CPUCost)
+				}
+				if !util.IsApproximately(idles[clusterName].RAMCost, cluster.RAMCost) {
+					t.Fatalf("expected idle RAM cost for %s to be %.2f; got %.2f", clusterName, cluster.RAMCost, idles[clusterName].RAMCost)
+				}
+				if !util.IsApproximately(idles[clusterName].GPUCost, cluster.GPUCost) {
+					t.Fatalf("expected idle GPU cost for %s to be %.2f; got %.2f", clusterName, cluster.GPUCost, idles[clusterName].GPUCost)
+				}
+			}
+		})
 	}
+}
 
-	// NOTE: we're re-using generateAllocationSet so this has to line up with
-	// the allocated node costs from that function. See table above.
-
-	// | Hierarchy                               | Cost |  CPU |  RAM |  GPU | Adjustment |
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1:
-	//     nodes                                  100.00   5.00   4.00   1.00       90.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 subtotal (adjusted)             100.00  50.00  40.00  10.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 allocated                        48.00   6.00  16.00   6.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster1 idle                             72.00  44.00  24.00   4.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2:
-	//     node1                                   35.00  20.00  15.00   0.00        0.00
-	//     node2                                   35.00  20.00  15.00   0.00        0.00
-	//     node3                                   30.00  10.00  10.00  10.00        0.00
-	//     (disks should not matter for idle)
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 subtotal                        100.00  50.00  40.00  10.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 allocated                        28.00   6.00   6.00   6.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-	//   cluster2 idle                             82.00  44.00  34.00   4.00        0.00
-	// +-----------------------------------------+------+------+------+------+------------+
-
-	cluster1Nodes = NewNode("", "cluster1", "", start, end, NewWindow(&start, &end))
-	cluster1Nodes.CPUCost = 5.0
-	cluster1Nodes.RAMCost = 4.0
-	cluster1Nodes.GPUCost = 1.0
-	cluster1Nodes.adjustment = 90.00
-
-	cluster2Node1 = NewNode("node1", "cluster2", "node1", start, end, NewWindow(&start, &end))
-	cluster2Node1.CPUCost = 20.0
-	cluster2Node1.RAMCost = 15.0
-	cluster2Node1.GPUCost = 0.0
-
-	cluster2Node2 = NewNode("node2", "cluster2", "node2", start, end, NewWindow(&start, &end))
-	cluster2Node2.CPUCost = 20.0
-	cluster2Node2.RAMCost = 15.0
-	cluster2Node2.GPUCost = 0.0
-
-	cluster2Node3 = NewNode("node3", "cluster2", "node3", start, end, NewWindow(&start, &end))
-	cluster2Node3.CPUCost = 10.0
-	cluster2Node3.RAMCost = 10.0
-	cluster2Node3.GPUCost = 10.0
-
-	cluster2Disk1 = NewDisk("disk1", "cluster2", "disk1", start, end, NewWindow(&start, &end))
-	cluster2Disk1.Cost = 5.0
+func TestAllocationSet_ReconcileAllocations(t *testing.T) {
+	var as *AllocationSet
+	var err error
 
-	assetSet = NewAssetSet(start, end, cluster1Nodes, cluster2Node1, cluster2Node2, cluster2Node3, cluster2Disk1)
+	end := time.Now().UTC().Truncate(day)
+	start := end.Add(-day)
 
-	idles, err = as.ComputeIdleAllocations(assetSet)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
+	// Generate AllocationSet and strip out any existing idle allocations
+	as = generateAllocationSet(start)
+	for key := range as.idleKeys {
+		as.Delete(key)
 	}
 
-	if len(idles) != 2 {
-		t.Fatalf("idles: expected length %d; got length %d", 2, len(idles))
+	assetSets := generateAssetSets(start, end)
+
+	cases := map[string]struct {
+		allocationSet *AllocationSet
+		assetSet      *AssetSet
+		allocations   map[string]Allocation
+	}{
+		"1a": {
+			allocationSet: as,
+			assetSet:      assetSets[0],
+			allocations: map[string]Allocation{
+				// Allocation adjustments are found with the formula:
+				// ADJUSTMENT_RATE * NODE_COST * (ALLOC_HOURS / NODE_HOURS) - ALLOC_COST
+				// ADJUSTMENT_RATE: 0.90909090909
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    55	    |	  8	     |     1 	  |	  1
+				// RAM	|    44	    |	  6	     |     11 	  |	  1
+				// GPU	|    11	    |	 24      |     1 	  |	  1
+				"cluster1/namespace1/pod1/container1": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: -4.333333,
+					GPUCostAdjustment: -0.583333,
+				},
+				// ADJUSTMENT_RATE: 0.90909090909
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    55	    |	  8	     |     1 	  |	  1
+				// RAM	|    44	    |	  6	     |     1 	  |	  1
+				// GPU	|    11	    |	 24      |     1 	  |	  1
+				"cluster1/namespace1/pod-abc/container2": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace1/pod-def/container3": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-ghi/container4": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-ghi/container5": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-jkl/container6": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    20	    |	  4	     |     1 	  |	  1
+				// RAM	|    15	    |	  3	     |     1 	  |	  1
+				// GPU	|    0	    |	  0      |     1 	  |	  1
+				"cluster2/namespace2/pod-mno/container4": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -1.0,
+				},
+				"cluster2/namespace2/pod-mno/container5": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -1.0,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    20	    |	  3	     |     1 	  |	  1
+				// RAM	|    15	    |	  2	     |     1 	  |	  1
+				// GPU	|    0	    |	  0      |     1 	  |	  1
+				"cluster2/namespace2/pod-pqr/container6": {
+					CPUCostAdjustment: 5.666667,
+					RAMCostAdjustment: 6.5,
+					GPUCostAdjustment: -1.0,
+				},
+				"cluster2/namespace3/pod-stu/container7": {
+					CPUCostAdjustment: 5.666667,
+					RAMCostAdjustment: 6.5,
+					GPUCostAdjustment: -1.0,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    10	    |	  2	     |     1 	  |	  1
+				// RAM	|    10	    |	  2	     |     1 	  |	  1
+				// GPU	|    10	    |	 24      |     1 	  |	  1
+				"cluster2/namespace3/pod-vwx/container8": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster2/namespace3/pod-vwx/container9": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -0.583333,
+				},
+			},
+		},
+		"1b": {
+			allocationSet: as,
+			assetSet:      assetSets[1],
+			allocations: map[string]Allocation{
+				// ADJUSTMENT_RATE: 10
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|     5	    |	  8	     |     1 	  |	  1
+				// RAM	|     4	    |	  6	     |    11 	  |	  1
+				// GPU	|     1	    |	 24      |     1 	  |	  1
+				"cluster1/namespace1/pod1/container1": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: -4.333333,
+					GPUCostAdjustment: -0.583333,
+				},
+				// ADJUSTMENT_RATE: 10
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|     5	    |	  8	     |     1 	  |	  1
+				// RAM	|     4	    |	  6	     |     1 	  |	  1
+				// GPU	|     1	    |	 24      |     1 	  |	  1
+				"cluster1/namespace1/pod-abc/container2": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.6666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace1/pod-def/container3": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.6666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-ghi/container4": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.6666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-ghi/container5": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.6666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster1/namespace2/pod-jkl/container6": {
+					CPUCostAdjustment: 5.25,
+					RAMCostAdjustment: 5.6666667,
+					GPUCostAdjustment: -0.583333,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    20	    |	  4	     |     1 	  |	  1
+				// RAM	|    15	    |	  3	     |     1 	  |	  1
+				// GPU	|    0	    |	  0      |     1 	  |	  1
+				"cluster2/namespace2/pod-mno/container4": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -1.0,
+				},
+				"cluster2/namespace2/pod-mno/container5": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -1.0,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    20	    |	  3	     |     1 	  |	  1
+				// RAM	|    15	    |	  2	     |     1 	  |	  1
+				// GPU	|    0	    |	  0      |     1 	  |	  1
+				"cluster2/namespace2/pod-pqr/container6": {
+					CPUCostAdjustment: 5.666667,
+					RAMCostAdjustment: 6.5,
+					GPUCostAdjustment: -1.0,
+				},
+				"cluster2/namespace3/pod-stu/container7": {
+					CPUCostAdjustment: 5.666667,
+					RAMCostAdjustment: 6.5,
+					GPUCostAdjustment: -1.0,
+				},
+				// ADJUSTMENT_RATE: 1.0
+				// Type | NODE_COST | NODE_HOURs | ALLOC_COST | ALLOC_HOURS
+				// CPU	|    10	    |	  2	     |     1 	  |	  1
+				// RAM	|    10	    |	  2	     |     1 	  |	  1
+				// GPU	|    10	    |	 24      |     1 	  |	  1
+				"cluster2/namespace3/pod-vwx/container8": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -0.583333,
+				},
+				"cluster2/namespace3/pod-vwx/container9": {
+					CPUCostAdjustment: 4.0,
+					RAMCostAdjustment: 4.0,
+					GPUCostAdjustment: -0.583333,
+				},
+			},
+		},
 	}
 
-	if idle, ok := idles["cluster1"]; !ok {
-		t.Fatalf("expected idle cost for %s", "cluster1")
-	} else {
-		if !util.IsApproximately(idle.TotalCost(), 72.0) {
-			t.Fatalf("%s idle: expected total cost %f; got total cost %f", "cluster1", 72.0, idle.TotalCost())
-		}
-	}
-	if !util.IsApproximately(idles["cluster1"].CPUCost, 44.0) {
-		t.Fatalf("expected idle CPU cost for %s to be %.2f; got %.2f", "cluster1", 44.0, idles["cluster1"].CPUCost)
-	}
-	if !util.IsApproximately(idles["cluster1"].RAMCost, 24.0) {
-		t.Fatalf("expected idle RAM cost for %s to be %.2f; got %.2f", "cluster1", 24.0, idles["cluster1"].RAMCost)
-	}
-	if !util.IsApproximately(idles["cluster1"].GPUCost, 4.0) {
-		t.Fatalf("expected idle GPU cost for %s to be %.2f; got %.2f", "cluster1", 4.0, idles["cluster1"].GPUCost)
-	}
+	for name, testcase := range cases {
+		t.Run(name, func(t *testing.T) {
+			err = as.Reconcile(testcase.assetSet)
+			reconAllocs := as.allocations
+			if err != nil {
+				t.Fatalf("unexpected error: %s", err)
+			}
 
-	if idle, ok := idles["cluster2"]; !ok {
-		t.Fatalf("expected idle cost for %s", "cluster2")
-	} else {
-		if !util.IsApproximately(idle.TotalCost(), 82.0) {
-			t.Fatalf("%s idle: expected total cost %f; got total cost %f", "cluster2", 82.0, idle.TotalCost())
-		}
+			for allocationName, testAlloc := range testcase.allocations {
+				if _, ok := reconAllocs[allocationName]; !ok {
+					t.Fatalf("expected allocation %s", allocationName)
+				}
+
+				if !util.IsApproximately(reconAllocs[allocationName].CPUCostAdjustment, testAlloc.CPUCostAdjustment) {
+					t.Fatalf("expected CPU Adjustment for %s to be %f; got %f", allocationName, testAlloc.CPUCostAdjustment, reconAllocs[allocationName].CPUCostAdjustment)
+				}
+				if !util.IsApproximately(reconAllocs[allocationName].RAMCostAdjustment, testAlloc.RAMCostAdjustment) {
+					t.Fatalf("expected RAM Adjustment for %s to be %f; got %f", allocationName, testAlloc.RAMCostAdjustment, reconAllocs[allocationName].RAMCostAdjustment)
+				}
+				if !util.IsApproximately(reconAllocs[allocationName].GPUCostAdjustment, testAlloc.GPUCostAdjustment) {
+					t.Fatalf("expected GPU Adjustment for %s to be %f; got %f", allocationName, testAlloc.GPUCostAdjustment, reconAllocs[allocationName].GPUCostAdjustment)
+				}
+			}
+		})
 	}
-
-	// TODO assert value of each resource cost precisely
 }
 
 // TODO niko/etl

+ 26 - 4
pkg/kubecost/asset.go

@@ -1663,6 +1663,7 @@ type Node struct {
 	NodeType     string
 	CPUCoreHours float64
 	RAMByteHours float64
+	GPUHours     float64
 	CPUBreakdown *Breakdown
 	RAMBreakdown *Breakdown
 	CPUCost      float64
@@ -1889,6 +1890,7 @@ func (n *Node) add(that *Node) {
 
 	n.CPUCoreHours += that.CPUCoreHours
 	n.RAMByteHours += that.RAMByteHours
+	n.GPUHours += that.GPUHours
 
 	n.CPUCost += that.CPUCost
 	n.GPUCost += that.GPUCost
@@ -1912,6 +1914,7 @@ func (n *Node) Clone() Asset {
 		NodeType:     n.NodeType,
 		CPUCoreHours: n.CPUCoreHours,
 		RAMByteHours: n.RAMByteHours,
+		GPUHours:     n.GPUHours,
 		CPUBreakdown: n.CPUBreakdown.Clone(),
 		RAMBreakdown: n.RAMBreakdown.Clone(),
 		CPUCost:      n.CPUCost,
@@ -1960,6 +1963,9 @@ func (n *Node) Equal(a Asset) bool {
 	if n.RAMByteHours != that.RAMByteHours {
 		return false
 	}
+	if n.GPUHours != that.GPUHours {
+		return false
+	}
 	if !n.CPUBreakdown.Equal(that.CPUBreakdown) {
 		return false
 	}
@@ -2000,13 +2006,14 @@ func (n *Node) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "ramBytes", n.RAMBytes(), ",")
 	jsonEncodeFloat64(buffer, "cpuCoreHours", n.CPUCoreHours, ",")
 	jsonEncodeFloat64(buffer, "ramByteHours", n.RAMByteHours, ",")
+	jsonEncodeFloat64(buffer, "GPUHours", n.GPUHours, ",")
 	jsonEncode(buffer, "cpuBreakdown", n.CPUBreakdown, ",")
 	jsonEncode(buffer, "ramBreakdown", n.RAMBreakdown, ",")
 	jsonEncodeFloat64(buffer, "preemptible", n.Preemptible, ",")
 	jsonEncodeFloat64(buffer, "discount", n.Discount, ",")
 	jsonEncodeFloat64(buffer, "cpuCost", n.CPUCost, ",")
 	jsonEncodeFloat64(buffer, "gpuCost", n.GPUCost, ",")
-	jsonEncodeFloat64(buffer, "gpuCount", n.GPUCount, ",")
+	jsonEncodeFloat64(buffer, "gpuCount", n.GPUs(), ",")
 	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
 	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment(), ",")
 	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
@@ -2047,15 +2054,30 @@ func (n *Node) CPUCores() float64 {
 // and a 16GiB-RAM node running for the last 20 hours of the same 24-hour window
 // would produce:
 //   (12*10 + 16*20) / 24 = 18.333GiB RAM
-// However, any number of cores running for the full span of a window will
-// report the actual number of cores of the static node; e.g. the above
+// However, any number of bytes running for the full span of a window will
+// report the actual number of bytes of the static node; e.g. the above
 // scenario for one entire 24-hour window:
-//   (12*24 + 16*24) / 24 = (12 + 16) = 28 cores
+//   (12*24 + 16*24) / 24 = (12 + 16) = 28GiB RAM
 func (n *Node) RAMBytes() float64 {
 	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
 	return n.RAMByteHours * (60.0 / n.Minutes())
 }
 
+// GPUs returns the amount of GPUs belonging to the node. This could be
+// fractional because it's the number of gpu*hours divided by the number of
+// hours running; e.g. the sum of a 2 gpu node running for the first 10 hours
+// and a 1 gpu node running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (2*10 + 1*20) / 24 = 1.667 GPUs
+// However, any number of GPUs running for the full span of a window will
+// report the actual number of GPUs of the static node; e.g. the above
+// scenario for one entire 24-hour window:
+//   (2*24 + 1*24) / 24 = (2 + 1) = 3 GPUs
+func (n *Node) GPUs() float64 {
+	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
+	return n.GPUHours * (60.0 / n.Minutes())
+}
+
 // LoadBalancer is an Asset representing a single load balancer in a cluster
 // TODO: add GB of ingress processed, numForwardingRules once we start recording those to prometheus metric
 type LoadBalancer struct {

+ 16 - 0
pkg/kubecost/asset_test.go

@@ -65,6 +65,7 @@ func generateAssetSet(start time.Time) *AssetSet {
 	node1.Discount = 0.5
 	node1.CPUCoreHours = 2.0 * hours
 	node1.RAMByteHours = 4.0 * gb * hours
+	node1.GPUHours = 1.0 * hours
 	node1.SetAdjustment(1.0)
 	node1.SetLabels(map[string]string{"test": "test"})
 
@@ -75,6 +76,7 @@ func generateAssetSet(start time.Time) *AssetSet {
 	node2.Discount = 0.5
 	node2.CPUCoreHours = 2.0 * hours
 	node2.RAMByteHours = 4.0 * gb * hours
+	node2.GPUHours = 0.0 * hours
 	node2.SetAdjustment(1.5)
 
 	node3 := NewNode("node3", "cluster1", "gcp-node3", *window.Clone().start, *window.Clone().end, window.Clone())
@@ -84,6 +86,7 @@ func generateAssetSet(start time.Time) *AssetSet {
 	node3.Discount = 0.5
 	node3.CPUCoreHours = 2.0 * hours
 	node3.RAMByteHours = 4.0 * gb * hours
+	node3.GPUHours = 2.0 * hours
 	node3.SetAdjustment(-0.5)
 
 	node4 := NewNode("node4", "cluster2", "gcp-node4", *window.Clone().start, *window.Clone().end, window.Clone())
@@ -93,6 +96,7 @@ func generateAssetSet(start time.Time) *AssetSet {
 	node4.Discount = 0.25
 	node4.CPUCoreHours = 4.0 * hours
 	node4.RAMByteHours = 12.0 * gb * hours
+	node4.GPUHours = 0.0 * hours
 	node4.SetAdjustment(-1.0)
 
 	node5 := NewNode("node5", "cluster3", "aws-node5", *window.Clone().start, *window.Clone().end, window.Clone())
@@ -102,6 +106,7 @@ func generateAssetSet(start time.Time) *AssetSet {
 	node5.Discount = 0.0
 	node5.CPUCoreHours = 8.0 * hours
 	node5.RAMByteHours = 24.0 * gb * hours
+	node5.GPUHours = 0.0 * hours
 	node5.SetAdjustment(2.0)
 
 	disk1 := NewDisk("disk1", "cluster1", "gcp-disk1", *window.Clone().start, *window.Clone().end, window.Clone())
@@ -481,6 +486,7 @@ func TestNode_Add(t *testing.T) {
 	node1 := NewNode("node1", "cluster1", "node1", *windows[0].start, *windows[0].end, windows[0])
 	node1.CPUCoreHours = 1.0 * hours
 	node1.RAMByteHours = 2.0 * gb * hours
+	node1.GPUHours = 0.0 * hours
 	node1.GPUCost = 0.0
 	node1.CPUCost = 8.0
 	node1.RAMCost = 4.0
@@ -502,6 +508,7 @@ func TestNode_Add(t *testing.T) {
 	node2 := NewNode("node2", "cluster1", "node2", *windows[0].start, *windows[0].end, windows[0])
 	node2.CPUCoreHours = 1.0 * hours
 	node2.RAMByteHours = 2.0 * gb * hours
+	node2.GPUHours = 0.0 * hours
 	node2.GPUCost = 0.0
 	node2.CPUCost = 3.0
 	node2.RAMCost = 1.0
@@ -566,6 +573,7 @@ func TestNode_Add(t *testing.T) {
 	node3 := NewNode("node3", "cluster1", "node3", *windows[0].start, *windows[0].end, windows[0])
 	node3.CPUCoreHours = 0 * hours
 	node3.RAMByteHours = 0 * hours
+	node3.GPUHours = 0.0 * hours
 	node3.GPUCost = 0
 	node3.CPUCost = 0.0
 	node3.RAMCost = 0.0
@@ -575,6 +583,7 @@ func TestNode_Add(t *testing.T) {
 	node4 := NewNode("node4", "cluster1", "node4", *windows[0].start, *windows[0].end, windows[0])
 	node4.CPUCoreHours = 0 * hours
 	node4.RAMByteHours = 0 * hours
+	node4.GPUHours = 0.0 * hours
 	node4.GPUCost = 0
 	node4.CPUCost = 0.0
 	node4.RAMCost = 0.0
@@ -595,6 +604,7 @@ func TestNode_Add(t *testing.T) {
 	nodeA1 := NewNode("nodeA1", "cluster1", "nodeA1", *windows[0].start, *windows[0].end, windows[0])
 	nodeA1.CPUCoreHours = 1.0 * hours
 	nodeA1.RAMByteHours = 2.0 * gb * hours
+	nodeA1.GPUHours = 0.0 * hours
 	nodeA1.GPUCost = 0.0
 	nodeA1.CPUCost = 8.0
 	nodeA1.RAMCost = 4.0
@@ -604,6 +614,7 @@ func TestNode_Add(t *testing.T) {
 	nodeA2 := NewNode("nodeA2", "cluster1", "nodeA2", *windows[1].start, *windows[1].end, windows[1])
 	nodeA2.CPUCoreHours = 1.0 * hours
 	nodeA2.RAMByteHours = 2.0 * gb * hours
+	nodeA2.GPUHours = 0.0 * hours
 	nodeA2.GPUCost = 0.0
 	nodeA2.CPUCost = 3.0
 	nodeA2.RAMCost = 1.0
@@ -637,6 +648,9 @@ func TestNode_Add(t *testing.T) {
 	if nodeAT.RAMBytes() != 2.0*gb {
 		t.Fatalf("Node.Add: expected %f; got %f", 2.0*gb, nodeAT.RAMBytes())
 	}
+	if nodeAT.GPUs() != 0.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 0.0, nodeAT.GPUs())
+	}
 
 	// Check that the original assets are unchanged
 	if !util.IsApproximately(nodeA1.TotalCost(), 10.0) {
@@ -664,8 +678,10 @@ func TestNode_MarshalJSON(t *testing.T) {
 	})
 	node.CPUCost = 9.0
 	node.RAMCost = 0.0
+	node.GPUCost = 21.0
 	node.CPUCoreHours = 123.0
 	node.RAMByteHours = 13323.0
+	node.GPUHours = 123.0
 	node.SetAdjustment(1.0)
 
 	_, err := json.Marshal(node)

+ 1 - 1
pkg/kubecost/bingen.go

@@ -26,4 +26,4 @@ package kubecost
 // @bingen:generate:AllocationAnnotations
 // @bingen:generate:RawAllocationOnlyData
 
-//go:generate bingen -package=kubecost -version=11 -buffer=github.com/kubecost/cost-model/pkg/util
+//go:generate bingen -package=kubecost -version=12 -buffer=github.com/kubecost/cost-model/pkg/util

+ 52 - 36
pkg/kubecost/kubecost_codecs.go

@@ -25,7 +25,7 @@ const (
 	GeneratorPackageName string = "kubecost"
 
 	// CodecVersion is the version passed into the generator
-	CodecVersion uint8 = 11
+	CodecVersion uint8 = 12
 )
 
 //--------------------------------------------------------------------------
@@ -164,8 +164,10 @@ func (target *Allocation) MarshalBinary() (data []byte, err error) {
 	buff.WriteFloat64(target.CPUCoreRequestAverage)  // write float64
 	buff.WriteFloat64(target.CPUCoreUsageAverage)    // write float64
 	buff.WriteFloat64(target.CPUCost)                // write float64
+	buff.WriteFloat64(target.CPUCostAdjustment)      // write float64
 	buff.WriteFloat64(target.GPUHours)               // write float64
 	buff.WriteFloat64(target.GPUCost)                // write float64
+	buff.WriteFloat64(target.GPUCostAdjustment)      // write float64
 	buff.WriteFloat64(target.NetworkCost)            // write float64
 	buff.WriteFloat64(target.LoadBalancerCost)       // write float64
 	buff.WriteFloat64(target.PVByteHours)            // write float64
@@ -174,6 +176,7 @@ func (target *Allocation) MarshalBinary() (data []byte, err error) {
 	buff.WriteFloat64(target.RAMBytesRequestAverage) // write float64
 	buff.WriteFloat64(target.RAMBytesUsageAverage)   // write float64
 	buff.WriteFloat64(target.RAMCost)                // write float64
+	buff.WriteFloat64(target.RAMCostAdjustment)      // write float64
 	buff.WriteFloat64(target.SharedCost)             // write float64
 	buff.WriteFloat64(target.ExternalCost)           // write float64
 	if target.RawAllocationOnly == nil {
@@ -282,53 +285,62 @@ func (target *Allocation) UnmarshalBinary(data []byte) (err error) {
 	target.CPUCost = s
 
 	t := buff.ReadFloat64() // read float64
-	target.GPUHours = t
+	target.CPUCostAdjustment = t
 
 	u := buff.ReadFloat64() // read float64
-	target.GPUCost = u
+	target.GPUHours = u
 
 	w := buff.ReadFloat64() // read float64
-	target.NetworkCost = w
+	target.GPUCost = w
 
 	x := buff.ReadFloat64() // read float64
-	target.LoadBalancerCost = x
+	target.GPUCostAdjustment = x
 
 	y := buff.ReadFloat64() // read float64
-	target.PVByteHours = y
+	target.NetworkCost = y
 
 	aa := buff.ReadFloat64() // read float64
-	target.PVCost = aa
+	target.LoadBalancerCost = aa
 
 	bb := buff.ReadFloat64() // read float64
-	target.RAMByteHours = bb
+	target.PVByteHours = bb
 
 	cc := buff.ReadFloat64() // read float64
-	target.RAMBytesRequestAverage = cc
+	target.PVCost = cc
 
 	dd := buff.ReadFloat64() // read float64
-	target.RAMBytesUsageAverage = dd
+	target.RAMByteHours = dd
 
 	ee := buff.ReadFloat64() // read float64
-	target.RAMCost = ee
+	target.RAMBytesRequestAverage = ee
 
 	ff := buff.ReadFloat64() // read float64
-	target.SharedCost = ff
+	target.RAMBytesUsageAverage = ff
 
 	gg := buff.ReadFloat64() // read float64
-	target.ExternalCost = gg
+	target.RAMCost = gg
+
+	hh := buff.ReadFloat64() // read float64
+	target.RAMCostAdjustment = hh
+
+	kk := buff.ReadFloat64() // read float64
+	target.SharedCost = kk
+
+	ll := buff.ReadFloat64() // read float64
+	target.ExternalCost = ll
 
 	if buff.ReadUInt8() == uint8(0) {
 		target.RawAllocationOnly = nil
 	} else {
 		// --- [begin][read][struct](RawAllocationOnlyData) ---
-		hh := &RawAllocationOnlyData{}
-		kk := buff.ReadInt()     // byte array length
-		ll := buff.ReadBytes(kk) // byte array
-		errE := hh.UnmarshalBinary(ll)
+		mm := &RawAllocationOnlyData{}
+		nn := buff.ReadInt()     // byte array length
+		oo := buff.ReadBytes(nn) // byte array
+		errE := mm.UnmarshalBinary(oo)
 		if errE != nil {
 			return errE
 		}
-		target.RawAllocationOnly = hh
+		target.RawAllocationOnly = mm
 		// --- [end][read][struct](RawAllocationOnlyData) ---
 
 	}
@@ -2658,6 +2670,7 @@ func (target *Node) MarshalBinary() (data []byte, err error) {
 	buff.WriteString(target.NodeType)      // write string
 	buff.WriteFloat64(target.CPUCoreHours) // write float64
 	buff.WriteFloat64(target.RAMByteHours) // write float64
+	buff.WriteFloat64(target.GPUHours)     // write float64
 	if target.CPUBreakdown == nil {
 		buff.WriteUInt8(uint8(0)) // write nil byte
 	} else {
@@ -2807,18 +2820,21 @@ func (target *Node) UnmarshalBinary(data []byte) (err error) {
 	x := buff.ReadFloat64() // read float64
 	target.RAMByteHours = x
 
+	y := buff.ReadFloat64() // read float64
+	target.GPUHours = y
+
 	if buff.ReadUInt8() == uint8(0) {
 		target.CPUBreakdown = nil
 	} else {
 		// --- [begin][read][struct](Breakdown) ---
-		y := &Breakdown{}
-		aa := buff.ReadInt()     // byte array length
-		bb := buff.ReadBytes(aa) // byte array
-		errE := y.UnmarshalBinary(bb)
+		aa := &Breakdown{}
+		bb := buff.ReadInt()     // byte array length
+		cc := buff.ReadBytes(bb) // byte array
+		errE := aa.UnmarshalBinary(cc)
 		if errE != nil {
 			return errE
 		}
-		target.CPUBreakdown = y
+		target.CPUBreakdown = aa
 		// --- [end][read][struct](Breakdown) ---
 
 	}
@@ -2826,34 +2842,34 @@ func (target *Node) UnmarshalBinary(data []byte) (err error) {
 		target.RAMBreakdown = nil
 	} else {
 		// --- [begin][read][struct](Breakdown) ---
-		cc := &Breakdown{}
-		dd := buff.ReadInt()     // byte array length
-		ee := buff.ReadBytes(dd) // byte array
-		errF := cc.UnmarshalBinary(ee)
+		dd := &Breakdown{}
+		ee := buff.ReadInt()     // byte array length
+		ff := buff.ReadBytes(ee) // byte array
+		errF := dd.UnmarshalBinary(ff)
 		if errF != nil {
 			return errF
 		}
-		target.RAMBreakdown = cc
+		target.RAMBreakdown = dd
 		// --- [end][read][struct](Breakdown) ---
 
 	}
-	ff := buff.ReadFloat64() // read float64
-	target.CPUCost = ff
-
 	gg := buff.ReadFloat64() // read float64
-	target.GPUCost = gg
+	target.CPUCost = gg
 
 	hh := buff.ReadFloat64() // read float64
-	target.GPUCount = hh
+	target.GPUCost = hh
 
 	kk := buff.ReadFloat64() // read float64
-	target.RAMCost = kk
+	target.GPUCount = kk
 
 	ll := buff.ReadFloat64() // read float64
-	target.Discount = ll
+	target.RAMCost = ll
 
 	mm := buff.ReadFloat64() // read float64
-	target.Preemptible = mm
+	target.Discount = mm
+
+	nn := buff.ReadFloat64() // read float64
+	target.Preemptible = nn
 
 	return nil
 }

+ 4 - 2
pkg/util/json/json.go

@@ -1,12 +1,14 @@
 package json
 
 import (
-    "encoding/json"
+	"encoding/json"
 
-    jsoniter "github.com/json-iterator/go"
+	jsoniter "github.com/json-iterator/go"
 )
 
 var Marshal = jsoniter.ConfigCompatibleWithStandardLibrary.Marshal
 var Unmarshal = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal
+
 type Marshaler json.Marshaler
+
 var NewDecoder = json.NewDecoder