فهرست منبع

Merge pull request #1104 from kubecost/etl

Changes to support ETL restructuring
Niko Kovacevic 4 سال پیش
والد
کامیت
df2cbdef64
9 فایل‌های تغییر یافته به همراه 624 افزوده شده و 682 حذف شده
  1. 183 357
      pkg/kubecost/allocation.go
  2. 45 273
      pkg/kubecost/allocation_test.go
  3. 17 0
      pkg/kubecost/asset.go
  4. 12 0
      pkg/kubecost/mock.go
  5. 159 0
      pkg/kubecost/query.go
  6. 0 1
      pkg/kubecost/status.go
  7. 115 39
      pkg/kubecost/summaryallocation.go
  8. 83 12
      pkg/kubecost/totals.go
  9. 10 0
      pkg/kubecost/window.go

+ 183 - 357
pkg/kubecost/allocation.go

@@ -403,7 +403,13 @@ func (a *Allocation) NetworkTotalCost() float64 {
 }
 
 // LBTotalCost calculates total LB cost of Allocation including adjustment
+// TODO deprecate
 func (a *Allocation) LBTotalCost() float64 {
+	return a.LoadBalancerTotalCost()
+}
+
+// LoadBalancerTotalCost calculates total LB cost of Allocation including adjustment
+func (a *Allocation) LoadBalancerTotalCost() float64 {
 	if a == nil {
 		return 0.0
 	}
@@ -828,6 +834,8 @@ type AllocationAggregationOptions struct {
 	IdleByNode            bool
 	LabelConfig           *LabelConfig
 	MergeUnallocated      bool
+	Reconcile             bool
+	ReconcileNetwork      bool
 	ShareFuncs            []AllocationMatchFunc
 	ShareIdle             string
 	ShareSplit            string
@@ -872,14 +880,14 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	//     the output (i.e. they can be used to generate a valid key for
 	//     the given properties) then aggregate; otherwise... ignore them?
 	//
-	// 10. If the merge idle option is enabled, merge any remaining idle
+	// 10. Distribute any undistributed idle, in the case that idle
+	//     coefficients end up being zero and some idle is not shared.
+	//
+	// 11. If the merge idle option is enabled, merge any remaining idle
 	//     allocations into a single idle allocation. If there was any idle
 	//	   whose costs were not distributed because there was no usage of a
 	//     specific resource type, re-add the idle to the aggregation with
 	//     only that type.
-	//
-	// 11. Distribute any undistributed idle, in the case that idle
-	//     coefficients end up being zero and some idle is not shared.
 
 	if as.IsEmpty() {
 		return nil
@@ -893,7 +901,12 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		options.LabelConfig = NewLabelConfig()
 	}
 
-	var undistributedIdleMap map[string]bool
+	// idleFiltrationCoefficients relies on this being explicitly set
+	if options.ShareIdle != ShareWeighted {
+		options.ShareIdle = ShareNone
+	}
+
+	var allocatedTotalsMap map[string]map[string]float64
 
 	// If aggregateBy is nil, we don't aggregate anything. On the other hand,
 	// an empty slice implies that we should aggregate everything. See
@@ -1018,7 +1031,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// the shared allocations).
 	var idleCoefficients map[string]map[string]map[string]float64
 	if idleSet.Length() > 0 && options.ShareIdle != ShareNone {
-		idleCoefficients, undistributedIdleMap, err = computeIdleCoeffs(options, as, shareSet)
+		idleCoefficients, allocatedTotalsMap, err = computeIdleCoeffs(options, as, shareSet)
 		if err != nil {
 			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
 			return fmt.Errorf("error computing idle coefficients: %s", err)
@@ -1264,6 +1277,8 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 				idleAlloc.CPUCoreHours *= resourceCoeffs["cpu"]
 				idleAlloc.RAMCost *= resourceCoeffs["ram"]
 				idleAlloc.RAMByteHours *= resourceCoeffs["ram"]
+				idleAlloc.GPUCost *= resourceCoeffs["gpu"]
+				idleAlloc.GPUHours *= resourceCoeffs["gpu"]
 			}
 		}
 	}
@@ -1304,16 +1319,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// (10) Combine all idle allocations into a single "__idle__" allocation
-	if !options.SplitIdle {
-		for _, idleAlloc := range aggSet.IdleAllocations() {
-			aggSet.Delete(idleAlloc.Name)
-			idleAlloc.Name = IdleSuffix
-			aggSet.Insert(idleAlloc)
-		}
-	}
-
-	// (11) In the edge case that some idle has not been distributed because
+	// (10) In the edge case that some idle has not been distributed because
 	// there is no usage of that resource type, add idle back to
 	// aggregations with only that cost applied.
 
@@ -1332,9 +1338,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// Name		CPU		GPU		RAM
 	// __idle__ $0      $12     $0
 	// kubecost $12     $0      $7
-
-	hasUndistributedIdle := undistributedIdleMap["cpu"] || undistributedIdleMap["gpu"] || undistributedIdleMap["ram"]
-	if idleSet.Length() > 0 && hasUndistributedIdle {
+	if idleSet.Length() > 0 {
 		for _, idleAlloc := range idleSet.allocations {
 			// if the idle does not apply to the non-filtered values, skip it
 			skip := false
@@ -1348,25 +1352,97 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 				continue
 			}
 
-			// if the idle doesn't have a cost to be shared, also skip it
-			if idleAlloc.CPUCost != 0 && idleAlloc.GPUCost != 0 && idleAlloc.RAMCost != 0 {
-				// artificially set the already shared costs to zero
-				if !undistributedIdleMap["cpu"] {
-					idleAlloc.CPUCost = 0
-				}
-				if !undistributedIdleMap["gpu"] {
-					idleAlloc.GPUCost = 0
-				}
-				if !undistributedIdleMap["ram"] {
-					idleAlloc.RAMCost = 0
-				}
+			idleId, err := idleAlloc.getIdleId(options)
+			if err != nil {
+				log.Errorf("AllocationSet.AggregateBy: idle allocation is missing idleId %s", idleAlloc.Name)
+				continue
+			}
 
-				idleAlloc.Name = IdleSuffix
+			hasUndistributableCost := false
+
+			if idleAlloc.CPUCost > 0 && allocatedTotalsMap[idleId]["cpu"] == 0 {
+				hasUndistributableCost = true
+			} else {
+				idleAlloc.CPUCost = 0
+			}
+
+			if idleAlloc.GPUCost > 0 && allocatedTotalsMap[idleId]["gpu"] == 0 {
+				hasUndistributableCost = true
+			} else {
+				idleAlloc.GPUCost = 0
+			}
+
+			if idleAlloc.RAMCost > 0 && allocatedTotalsMap[idleId]["ram"] == 0 {
+				hasUndistributableCost = true
+			} else {
+				idleAlloc.RAMCost = 0
+			}
+
+			if hasUndistributableCost {
+				idleAlloc.Name = fmt.Sprintf("%s/%s", idleId, IdleSuffix)
 				aggSet.Insert(idleAlloc)
 			}
 		}
 	}
 
+	// (11) Combine all idle allocations into a single "__idle__" allocation
+	if !options.SplitIdle {
+		for _, idleAlloc := range aggSet.IdleAllocations() {
+			aggSet.Delete(idleAlloc.Name)
+			idleAlloc.Name = IdleSuffix
+			aggSet.Insert(idleAlloc)
+		}
+	}
+
+	// TODO revisit this (ideally we just remove sharing from this function!)
+	// If filters and shared resources and shared idle are all enabled then
+	// we will over-count idle by exactly the portion that gets shared with the
+	// filtered allocations -- and idle filtration will miss this because it
+	// only filters the non-idle filtered costs.
+	//
+	// Consider the following example, from unit tests:
+	// - namespace1     28.000
+	// - namespace2     36.000
+	// - namespace3     18.000
+	// - cluster1/idle  20.000
+	// - cluster2/idle  10.000
+	//
+	// Now, we want to share namespace1, filter namespace2, and share idle:
+	//
+	// 1. Distribute idle
+	//                 ns1     ns2     ns3
+	//    non-idle  28.000  36.000  18.000
+	//        idle  14.688  10.312   5.000
+	//
+	// 2. Share namespace1
+	//
+	//                        ns2     ns3
+	//           non-idle  36.000  18.000
+	//               idle  10.312   5.000
+	//    shared non-idle  18.667   9.333
+	//    shared     idle   9.792   4.896 (***)
+	//
+	// 3. Filter out all but namespace2
+	//
+	//    ns2 = 36.000 + 10.312 + 18.667 + 9.792 = 74.771
+	//
+	// So, if we had NOT shared idle, we would expect something like this:
+	//
+	//    ns2 = 36.000 + 18.667 = 54.667
+	//   idle = 10.312 + 9.792  = 20.104
+	//
+	// But we will instead get this:
+	//
+	//    ns2 = 36.000 + 18.667 = 54.667
+	//   idle = 10.312 + 14.688 = 25.000
+	//
+	// Which over-shoots idle by 4.896 (***), i.e. precisely the amount of idle
+	// cost corresponding to namespace1 AND shared with namespace3. Phew.
+	//
+	// I originally wanted to fix this, but after 2 days, I'm punting with the
+	// recommendation that we rewrite this function soon. Too difficult.
+	// - Niko
+
 	as.allocations = aggSet.allocations
 
 	return nil
@@ -1423,8 +1499,8 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		} else {
 			// Both are additive for weighted distribution, where each
 			// cumulative coefficient will be divided by the total.
-			coeffs[name] += alloc.TotalCost()
-			total += alloc.TotalCost()
+			coeffs[name] += alloc.TotalCost() - alloc.SharedCost
+			total += alloc.TotalCost() - alloc.SharedCost
 		}
 	}
 
@@ -1441,24 +1517,17 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 	return coeffs, nil
 }
 
-func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet, shareSet *AllocationSet) (map[string]map[string]map[string]float64, map[string]bool, error) {
+func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet, shareSet *AllocationSet) (map[string]map[string]map[string]float64, map[string]map[string]float64, error) {
 	types := []string{"cpu", "gpu", "ram"}
-	undistributedIdleMap := map[string]bool{
-		"cpu": true,
-		"gpu": true,
-		"ram": true,
-	}
 
 	// Compute idle coefficients, then save them in AllocationAggregationOptions
+	// [idle_id][allocation name][resource] = [coeff]
 	coeffs := map[string]map[string]map[string]float64{}
 
 	// Compute totals per resource for CPU, GPU, RAM, and PV
+	// [idle_id][resource] = [total]
 	totals := map[string]map[string]float64{}
 
-	// ShareEven counts each allocation with even weight, whereas ShareWeighted
-	// counts each allocation proportionally to its respective costs
-	shareType := options.ShareIdle
-
 	// Record allocation values first, then normalize by totals to get percentages
 	for _, alloc := range as.allocations {
 		if alloc.IsIdle() {
@@ -1486,24 +1555,13 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 			coeffs[idleId][name] = map[string]float64{}
 		}
 
-		if shareType == ShareEven {
-			for _, r := range types {
-				// Not additive - hard set to 1.0
-				coeffs[idleId][name][r] = 1.0
-
-				// totals are additive
-				totals[idleId][r] += 1.0
-			}
-		} else {
-			coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
-			coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
-			coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
-
-			totals[idleId]["cpu"] += alloc.CPUTotalCost()
-			totals[idleId]["gpu"] += alloc.GPUTotalCost()
-			totals[idleId]["ram"] += alloc.RAMTotalCost()
-		}
+		coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
+		coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
+		coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
 
+		totals[idleId]["cpu"] += alloc.CPUTotalCost()
+		totals[idleId]["gpu"] += alloc.GPUTotalCost()
+		totals[idleId]["ram"] += alloc.RAMTotalCost()
 	}
 
 	// Do the same for shared allocations
@@ -1534,38 +1592,27 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 			coeffs[idleId][name] = map[string]float64{}
 		}
 
-		if shareType == ShareEven {
-			for _, r := range types {
-				// Not additive - hard set to 1.0
-				coeffs[idleId][name][r] = 1.0
-
-				// totals are additive
-				totals[idleId][r] += 1.0
-			}
-		} else {
-			coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
-			coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
-			coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
+		coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
+		coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
+		coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
 
-			totals[idleId]["cpu"] += alloc.CPUTotalCost()
-			totals[idleId]["gpu"] += alloc.GPUTotalCost()
-			totals[idleId]["ram"] += alloc.RAMTotalCost()
-		}
+		totals[idleId]["cpu"] += alloc.CPUTotalCost()
+		totals[idleId]["gpu"] += alloc.GPUTotalCost()
+		totals[idleId]["ram"] += alloc.RAMTotalCost()
 	}
 
 	// Normalize coefficients by totals
-	for c := range coeffs {
-		for a := range coeffs[c] {
+	for id := range coeffs {
+		for a := range coeffs[id] {
 			for _, r := range types {
-				if coeffs[c][a][r] > 0 && totals[c][r] > 0 {
-					coeffs[c][a][r] /= totals[c][r]
-					undistributedIdleMap[r] = false
+				if coeffs[id][a][r] > 0 && totals[id][r] > 0 {
+					coeffs[id][a][r] /= totals[id][r]
 				}
 			}
 		}
 	}
 
-	return coeffs, undistributedIdleMap, nil
+	return coeffs, totals, nil
 }
 
 // getIdleId returns the providerId or cluster of an Allocation depending on the IdleByNode
@@ -1574,7 +1621,7 @@ func (a *Allocation) getIdleId(options *AllocationAggregationOptions) (string, e
 	var idleId string
 	if options.IdleByNode {
 		// Key allocations to ProviderId to match against node
-		idleId = a.Properties.ProviderID
+		idleId = fmt.Sprintf("%s/%s", a.Properties.Cluster, a.Properties.Node)
 		if idleId == "" {
 			return idleId, fmt.Errorf("ProviderId is not set")
 		}
@@ -1629,279 +1676,6 @@ func (as *AllocationSet) Clone() *AllocationSet {
 	}
 }
 
-// ComputeIdleAllocations computes the idle allocations for the AllocationSet,
-// given a set of Assets. Ideally, assetSet should contain only Nodes, but if
-// it contains other Assets, they will be ignored; only CPU, GPU and RAM are
-// considered for idle allocation. If the Nodes have adjustments, then apply
-// the adjustments proportionally to each of the resources so that total
-// allocation with idle reflects the adjusted node costs. One idle allocation
-// per-cluster will be computed and returned, keyed by cluster_id.
-func (as *AllocationSet) ComputeIdleAllocations(assetSet *AssetSet) (map[string]*Allocation, error) {
-	if as == nil {
-		return nil, fmt.Errorf("cannot compute idle allocation for nil AllocationSet")
-	}
-
-	if assetSet == nil {
-		return nil, fmt.Errorf("cannot compute idle allocation with nil AssetSet")
-	}
-
-	if !as.Window.Equal(assetSet.Window) {
-		return nil, fmt.Errorf("cannot compute idle allocation for sets with mismatched windows: %s != %s", as.Window, assetSet.Window)
-	}
-
-	window := as.Window
-
-	// Build a map of cumulative cluster asset costs, per resource; i.e.
-	// cluster-to-{cpu|gpu|ram}-to-cost.
-	assetClusterResourceCosts := map[string]map[string]float64{}
-	assetSet.Each(func(key string, a Asset) {
-		if node, ok := a.(*Node); ok {
-			if _, ok := assetClusterResourceCosts[node.Properties().Cluster]; !ok {
-				assetClusterResourceCosts[node.Properties().Cluster] = map[string]float64{}
-			}
-
-			// adjustmentRate is used to scale resource costs proportionally
-			// by the adjustment. This is necessary because we only get one
-			// adjustment per Node, not one per-resource-per-Node.
-			//
-			// e.g. total cost = $90, adjustment = -$10 => 0.9
-			// e.g. total cost = $150, adjustment = -$300 => 0.3333
-			// e.g. total cost = $150, adjustment = $50 => 1.5
-			adjustmentRate := 1.0
-			if node.TotalCost()-node.Adjustment() == 0 {
-				// If (totalCost - adjustment) is 0.0 then adjustment cancels
-				// the entire node cost and we should make everything 0
-				// without dividing by 0.
-				adjustmentRate = 0.0
-				log.DedupedWarningf(5, "Compute Idle Allocations: Node Cost Adjusted to $0.00 for %s", node.properties.Name)
-			} else if node.Adjustment() != 0.0 {
-				// adjustmentRate is the ratio of cost-with-adjustment (i.e. TotalCost)
-				// to cost-without-adjustment (i.e. TotalCost - Adjustment).
-				adjustmentRate = node.TotalCost() / (node.TotalCost() - node.Adjustment())
-			}
-
-			cpuCost := node.CPUCost * (1.0 - node.Discount) * adjustmentRate
-			ramCost := node.RAMCost * (1.0 - node.Discount) * adjustmentRate
-			gpuCost := node.GPUCost * (1.0) * adjustmentRate
-
-			assetClusterResourceCosts[node.Properties().Cluster]["cpu"] += cpuCost
-			assetClusterResourceCosts[node.Properties().Cluster]["ram"] += ramCost
-			assetClusterResourceCosts[node.Properties().Cluster]["gpu"] += gpuCost
-		}
-	})
-
-	// Determine start, end on a per-cluster basis
-	clusterStarts := map[string]time.Time{}
-	clusterEnds := map[string]time.Time{}
-
-	// Subtract allocated costs from asset costs, leaving only the remaining
-	// idle costs.
-	as.Each(func(name string, a *Allocation) {
-		cluster := a.Properties.Cluster
-		if cluster == "" {
-			// Failed to find allocation's cluster
-			return
-		}
-
-		if _, ok := assetClusterResourceCosts[cluster]; !ok {
-			// Failed to find assets for allocation's cluster
-			return
-		}
-
-		// Set cluster (start, end) if they are either not currently set,
-		// or if the detected (start, end) of the current allocation falls
-		// before or after, respectively, the current values.
-		if s, ok := clusterStarts[cluster]; !ok || a.Start.Before(s) {
-			clusterStarts[cluster] = a.Start
-		}
-		if e, ok := clusterEnds[cluster]; !ok || a.End.After(e) {
-			clusterEnds[cluster] = a.End
-		}
-
-		assetClusterResourceCosts[cluster]["cpu"] -= a.CPUTotalCost()
-		assetClusterResourceCosts[cluster]["gpu"] -= a.GPUTotalCost()
-		assetClusterResourceCosts[cluster]["ram"] -= a.RAMTotalCost()
-	})
-
-	// Turn remaining un-allocated asset costs into idle allocations
-	idleAllocs := map[string]*Allocation{}
-	for cluster, resources := range assetClusterResourceCosts {
-		// Default start and end to the (start, end) of the given window, but
-		// use the actual, detected (start, end) pair if they are available.
-		start := *window.Start()
-		if s, ok := clusterStarts[cluster]; ok && window.Contains(s) {
-			start = s
-		}
-		end := *window.End()
-		if e, ok := clusterEnds[cluster]; ok && window.Contains(e) {
-			end = e
-		}
-
-		idleAlloc := &Allocation{
-			Name:       fmt.Sprintf("%s/%s", cluster, IdleSuffix),
-			Window:     window.Clone(),
-			Properties: &AllocationProperties{Cluster: cluster},
-			Start:      start,
-			End:        end,
-			CPUCost:    resources["cpu"],
-			GPUCost:    resources["gpu"],
-			RAMCost:    resources["ram"],
-		}
-
-		// Do not continue if multiple idle allocations are computed for a
-		// single cluster.
-		if _, ok := idleAllocs[cluster]; ok {
-			return nil, fmt.Errorf("duplicate idle allocations for cluster %s", cluster)
-		}
-
-		idleAllocs[cluster] = idleAlloc
-	}
-
-	return idleAllocs, nil
-}
-
-// ComputeIdleAllocationsByNode computes the idle allocations for the AllocationSet,
-// given a set of Assets. Ideally, assetSet should contain only Nodes, but if
-// it contains other Assets, they will be ignored; only CPU, GPU and RAM are
-// considered for idle allocation. If the Nodes have adjustments, then apply
-// the adjustments proportionally to each of the resources so that total
-// allocation with idle reflects the adjusted node costs. One idle allocation
-// per-node will be computed and returned, keyed by cluster_id.
-func (as *AllocationSet) ComputeIdleAllocationsByNode(assetSet *AssetSet) (map[string]*Allocation, error) {
-
-	if as == nil {
-		return nil, fmt.Errorf("cannot compute idle allocation for nil AllocationSet")
-	}
-
-	if assetSet == nil {
-		return nil, fmt.Errorf("cannot compute idle allocation with nil AssetSet")
-	}
-
-	if !as.Window.Equal(assetSet.Window) {
-		return nil, fmt.Errorf("cannot compute idle allocation for sets with mismatched windows: %s != %s", as.Window, assetSet.Window)
-	}
-
-	window := as.Window
-
-	// Build a map of cumulative cluster asset costs, per resource; i.e.
-	// cluster-to-{cpu|gpu|ram}-to-cost.
-	assetNodeResourceCosts := map[string]map[string]float64{}
-	nodesByProviderId := map[string]*Node{}
-	assetSet.Each(func(key string, a Asset) {
-		if node, ok := a.(*Node); ok {
-			if _, ok := assetNodeResourceCosts[node.Properties().ProviderID]; ok || node.Properties().ProviderID == "" {
-				log.DedupedWarningf(5, "Compute Idle Allocations By Node: Node missing providerId: %s", node.properties.Name)
-				return
-			}
-
-			nodesByProviderId[node.Properties().ProviderID] = node
-			assetNodeResourceCosts[node.Properties().ProviderID] = map[string]float64{}
-
-			// adjustmentRate is used to scale resource costs proportionally
-			// by the adjustment. This is necessary because we only get one
-			// adjustment per Node, not one per-resource-per-Node.
-			//
-			// e.g. total cost = $90, adjustment = -$10 => 0.9
-			// e.g. total cost = $150, adjustment = -$300 => 0.3333
-			// e.g. total cost = $150, adjustment = $50 => 1.5
-			adjustmentRate := 1.0
-			if node.TotalCost()-node.Adjustment() == 0 {
-				// If (totalCost - adjustment) is 0.0 then adjustment cancels
-				// the entire node cost and we should make everything 0
-				// without dividing by 0.
-				adjustmentRate = 0.0
-				log.DedupedWarningf(5, "Compute Idle Allocations: Node Cost Adjusted to $0.00 for %s", node.properties.Name)
-			} else if node.Adjustment() != 0.0 {
-				// adjustmentRate is the ratio of cost-with-adjustment (i.e. TotalCost)
-				// to cost-without-adjustment (i.e. TotalCost - Adjustment).
-				adjustmentRate = node.TotalCost() / (node.TotalCost() - node.Adjustment())
-			}
-
-			cpuCost := node.CPUCost * (1.0 - node.Discount) * adjustmentRate
-			ramCost := node.RAMCost * (1.0 - node.Discount) * adjustmentRate
-			gpuCost := node.GPUCost * adjustmentRate
-
-			assetNodeResourceCosts[node.Properties().ProviderID]["cpu"] += cpuCost
-			assetNodeResourceCosts[node.Properties().ProviderID]["ram"] += ramCost
-			assetNodeResourceCosts[node.Properties().ProviderID]["gpu"] += gpuCost
-		}
-	})
-
-	// Determine start, end on a per-cluster basis
-	nodeStarts := map[string]time.Time{}
-	nodeEnds := map[string]time.Time{}
-
-	// Subtract allocated costs from asset costs, leaving only the remaining
-	// idle costs.
-	as.Each(func(name string, a *Allocation) {
-		providerId := a.Properties.ProviderID
-		if providerId == "" {
-			// Failed to find allocation's node
-			log.DedupedWarningf(5, "Compute Idle Allocations By Node: Allocation missing providerId: %s", a.Name)
-			return
-		}
-
-		if _, ok := assetNodeResourceCosts[providerId]; !ok {
-			// Failed to find assets for allocation's node
-			return
-		}
-
-		// Set cluster (start, end) if they are either not currently set,
-		// or if the detected (start, end) of the current allocation falls
-		// before or after, respectively, the current values.
-		if s, ok := nodeStarts[providerId]; !ok || a.Start.Before(s) {
-			nodeStarts[providerId] = a.Start
-		}
-		if e, ok := nodeEnds[providerId]; !ok || a.End.After(e) {
-			nodeEnds[providerId] = a.End
-		}
-
-		assetNodeResourceCosts[providerId]["cpu"] -= a.CPUTotalCost()
-		assetNodeResourceCosts[providerId]["gpu"] -= a.GPUTotalCost()
-		assetNodeResourceCosts[providerId]["ram"] -= a.RAMTotalCost()
-	})
-
-	// Turn remaining un-allocated asset costs into idle allocations
-	idleAllocs := map[string]*Allocation{}
-	for providerId, resources := range assetNodeResourceCosts {
-		// Default start and end to the (start, end) of the given window, but
-		// use the actual, detected (start, end) pair if they are available.
-		start := *window.Start()
-		if s, ok := nodeStarts[providerId]; ok && window.Contains(s) {
-			start = s
-		}
-		end := *window.End()
-		if e, ok := nodeEnds[providerId]; ok && window.Contains(e) {
-			end = e
-		}
-		node := nodesByProviderId[providerId]
-		idleAlloc := &Allocation{
-			Name:   fmt.Sprintf("%s/%s", node.properties.Name, IdleSuffix),
-			Window: window.Clone(),
-			Properties: &AllocationProperties{
-				Cluster:    node.properties.Cluster,
-				Node:       node.properties.Name,
-				ProviderID: providerId,
-			},
-			Start:   start,
-			End:     end,
-			CPUCost: resources["cpu"],
-			GPUCost: resources["gpu"],
-			RAMCost: resources["ram"],
-		}
-
-		// Do not continue if multiple idle allocations are computed for a
-		// single node.
-		if _, ok := idleAllocs[providerId]; ok {
-			return nil, fmt.Errorf("duplicate idle allocations for node Provider ID: %s", providerId)
-		}
-
-		idleAllocs[providerId] = idleAlloc
-	}
-
-	return idleAllocs, nil
-}
-
 // Delete removes the allocation with the given name from the set
 func (as *AllocationSet) Delete(name string) {
 	if as == nil {
@@ -2101,6 +1875,24 @@ func (as *AllocationSet) MarshalJSON() ([]byte, error) {
 	return json.Marshal(as.allocations)
 }
 
+// ResetAdjustments sets all cost adjustment fields to zero
+func (as *AllocationSet) ResetAdjustments() {
+	if as == nil {
+		return
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	as.resetAdjustments()
+}
+
+func (as *AllocationSet) resetAdjustments() {
+	for _, a := range as.allocations {
+		a.ResetAdjustments()
+	}
+}
+
 // Resolution returns the AllocationSet's window duration
 func (as *AllocationSet) Resolution() time.Duration {
 	return as.Window.Duration()
@@ -2533,3 +2325,37 @@ func (asr *AllocationSetRange) Minutes() float64 {
 
 	return duration.Minutes()
 }
+
+// TotalCost returns the sum of all TotalCosts of the allocations contained
+func (asr *AllocationSetRange) TotalCost() float64 {
+	if asr == nil || len(asr.allocations) == 0 {
+		return 0.0
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+
+	tc := 0.0
+	for _, as := range asr.allocations {
+		tc += as.TotalCost()
+	}
+	return tc
+}
+
+// TODO remove after testing
+func (asr *AllocationSetRange) Print(verbose bool) {
+	fmt.Printf("%s (dur=%s, len=%d, cost=%.5f)\n", asr.Window(), asr.Window().Duration(), asr.Length(), asr.TotalCost())
+	asr.Each(func(i int, as *AllocationSet) {
+		fmt.Printf(" > %s (dur=%s, len=%d, cost=%.5f) \n", as.Window, as.Window.Duration(), as.Length(), as.TotalCost())
+		as.Each(func(key string, a *Allocation) {
+			if verbose {
+				fmt.Printf("   {\"%s\", %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f}\n",
+					key, a.CPUCost, a.CPUCostAdjustment, a.GPUCost, a.GPUCostAdjustment, a.LoadBalancerCost, a.LoadBalancerCostAdjustment,
+					a.NetworkCost, a.NetworkCostAdjustment, a.PVCost(), a.PVCostAdjustment, a.RAMCost, a.RAMCostAdjustment,
+					a.SharedCost, a.ExternalCost)
+			} else {
+				fmt.Printf("   - \"%s\": %.5f\n", key, a.TotalCost())
+			}
+		})
+	})
+}

+ 45 - 273
pkg/kubecost/allocation_test.go

@@ -1086,25 +1086,9 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven
-		// namespace1: 38.0000 = 28.00 + 5.00*(1.0/2.0) + 15.0*(1.0/2.0)
-		// namespace2: 51.0000 = 36.00 + 5.0*(1.0/2.0) + 15.0*(1.0/2.0) + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
-		// namespace3: 23.0000 = 18.00 + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
-		"3b": {
-			start:      start,
-			aggBy:      []string{AllocationNamespaceProp},
-			aggOpts:    &AllocationAggregationOptions{ShareIdle: ShareEven},
-			numResults: numNamespaces,
-			totalCost:  activeTotalCost + idleTotalCost,
-			results: map[string]float64{
-				"namespace1": 38.00,
-				"namespace2": 51.00,
-				"namespace3": 23.00,
-			},
-			windowStart: startYesterday,
-			windowEnd:   endYesterday,
-			expMinutes:  1440.0,
-		},
+
+		// 3b: sharing idle evenly is deprecated
+
 		// 4  Share resources
 
 		// 4a Share namespace ShareEven
@@ -1315,30 +1299,14 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6c Share idle even with filters
-		// Should match values from unfiltered aggregation (3b)
-		// namespace2: 51.0000 = 36.00 + 5.0*(1.0/2.0) + 15.0*(1.0/2.0) + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
-		"6c": {
-			start: start,
-			aggBy: []string{AllocationNamespaceProp},
-			aggOpts: &AllocationAggregationOptions{
-				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
-				ShareIdle:   ShareEven,
-			},
-			numResults: 1,
-			totalCost:  51.00,
-			results: map[string]float64{
-				"namespace2": 51.00,
-			},
-			windowStart: startYesterday,
-			windowEnd:   endYesterday,
-			expMinutes:  1440.0,
-		},
+
+		// 6c Share idle even with filters (share idle even is deprecated)
+
 		// 6d Share overhead with filters
 		// namespace1: 85.366 = 28.00 + (7.0*24.0)*(28.00/82.00)
 		// namespace2: 109.756 = 36.00 + (7.0*24.0)*(36.00/82.00)
 		// namespace3: 54.878 = 18.00 + (7.0*24.0)*(18.00/82.00)
-		// idle:       30.0000
+		// idle:       10.3125 = % of idle paired with namespace2
 		// Then namespace 2 is filtered.
 		"6d": {
 			start: start,
@@ -1349,23 +1317,16 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 				ShareSplit:        ShareWeighted,
 			},
 			numResults: 1 + numIdle,
-			totalCost:  139.756,
+			totalCost:  120.0686,
 			results: map[string]float64{
-				"namespace2": 109.756,
-				IdleSuffix:   30.00,
+				"namespace2": 109.7561,
+				IdleSuffix:   10.3125,
 			},
 			windowStart: startYesterday,
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
 		// 6e Share resources with filters
-		// --- Shared ---
-		// namespace1: 28.00 (gets shared among namespace2 and namespace3)
-		// --- Filtered ---
-		// namespace3: 27.33 = 18.00 + (28.00)*(18.00/54.00) (filtered out)
-		// --- Results ---
-		// namespace2: 54.667 = 36.00 + (28.00)*(36.00/54.00)
-		// idle:       30.0000
 		"6e": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
@@ -1375,16 +1336,35 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 				ShareSplit:  ShareWeighted,
 			},
 			numResults: 1 + numIdle,
-			totalCost:  84.667,
+			totalCost:  79.6667, // should be 74.7708, but I'm punting -- too difficult (NK)
 			results: map[string]float64{
-				"namespace2": 54.667,
-				IdleSuffix:   30.00,
+				"namespace2": 54.6667,
+				IdleSuffix:   25.000, // should be 20.1042, but I'm punting -- too difficult (NK)
+			},
+			windowStart: startYesterday,
+			windowEnd:   endYesterday,
+			expMinutes:  1440.0,
+		},
+		// 6f Share resources with filters and share idle
+		"6f": {
+			start: start,
+			aggBy: []string{AllocationNamespaceProp},
+			aggOpts: &AllocationAggregationOptions{
+				FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+				ShareFuncs:  []AllocationMatchFunc{isNamespace("namespace1")},
+				ShareSplit:  ShareWeighted,
+				ShareIdle:   ShareWeighted,
+			},
+			numResults: 1,
+			totalCost:  74.77083,
+			results: map[string]float64{
+				"namespace2": 74.77083,
 			},
 			windowStart: startYesterday,
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6f Share idle weighted and share resources weighted
+		// 6g Share idle weighted and share resources weighted
 		//
 		// First, share idle weighted produces:
 		//
@@ -1418,7 +1398,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 		//   initial cost   18.0000
 		//   idle cost       5.0000
 		//   shared cost    14.2292 = (42.6875)*(18.0/54.0)
-		"6f": {
+		"6g": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
@@ -1436,7 +1416,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6g Share idle, share resources, and filter
+		// 6h Share idle, share resources, and filter
 		//
 		// First, share idle weighted produces:
 		//
@@ -1472,7 +1452,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 		//   shared cost    14.2292 = (42.6875)*(18.0/54.0)
 		//
 		// Then, filter for namespace2: 74.7708
-		"6g": {
+		"6h": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
@@ -1490,7 +1470,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6h Share idle, share resources, share overhead
+		// 6i Share idle, share resources, share overhead
 		//
 		// Share idle weighted:
 		//
@@ -1518,7 +1498,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 		// namespace3:      59.8780 = 23.0000 + (7.0*24.0)*(18.00/82.00)
 		//
 		// Then namespace 2 is filtered.
-		"6h": {
+		"6i": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
@@ -1536,8 +1516,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6i Idle by Node
-		"6i": {
+		// 6j Idle by Node
+		"6j": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
@@ -1555,8 +1535,8 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6j Split Idle, Idle by Node
-		"6j": {
+		// 6k Split Idle, Idle by Node
+		"6k": {
 			start: start,
 			aggBy: []string{AllocationNamespaceProp},
 			aggOpts: &AllocationAggregationOptions{
@@ -1578,26 +1558,9 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 		},
-		// 6k Share idle Even Idle by Node
-		// Should match values from unfiltered aggregation (3b)
-		"6k": {
-			start: start,
-			aggBy: []string{AllocationNamespaceProp},
-			aggOpts: &AllocationAggregationOptions{
-				ShareIdle:  ShareEven,
-				IdleByNode: true,
-			},
-			numResults: 3,
-			totalCost:  112.00,
-			results: map[string]float64{
-				"namespace1": 38.00,
-				"namespace2": 51.00,
-				"namespace3": 23.00,
-			},
-			windowStart: startYesterday,
-			windowEnd:   endYesterday,
-			expMinutes:  1440.0,
-		},
+
+		// Old 6k Share idle Even Idle by Node (share idle even deprecated)
+
 		// 6l Share idle weighted with filters, Idle by Node
 		// Should match values from unfiltered aggregation (3a)
 		// namespace2: 46.3125 = 36.00 + 5.0*(3.0/6.0) + 15.0*(3.0/16.0) + 5.0*(3.0/6.0) + 5.0*(3.0/6.0)
@@ -1644,197 +1607,6 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 // TODO niko/etl
 //func TestAllocationSet_Clone(t *testing.T) {}
 
-func TestAllocationSet_ComputeIdleAllocations(t *testing.T) {
-	var as *AllocationSet
-	var err error
-	var idles map[string]*Allocation
-
-	end := time.Now().UTC().Truncate(day)
-	start := end.Add(-day)
-
-	// Generate AllocationSet without idle allocations
-	as = GenerateMockAllocationSet(start)
-
-	assetSets := GenerateMockAssetSets(start, end)
-
-	cases := map[string]struct {
-		allocationSet *AllocationSet
-		assetSet      *AssetSet
-		clusters      map[string]Allocation
-	}{
-		"1a": {
-			allocationSet: as,
-			assetSet:      assetSets[0],
-			clusters: map[string]Allocation{
-				"cluster1": {
-					CPUCost: 44.0,
-					RAMCost: 24.0,
-					GPUCost: 4.0,
-				},
-				"cluster2": {
-					CPUCost: 44.0,
-					RAMCost: 34.0,
-					GPUCost: 4.0,
-				},
-			},
-		},
-		"1b": {
-			allocationSet: as,
-			assetSet:      assetSets[1],
-			clusters: map[string]Allocation{
-				"cluster1": {
-					CPUCost: 44.0,
-					RAMCost: 24.0,
-					GPUCost: 4.0,
-				},
-				"cluster2": {
-					CPUCost: 44.0,
-					RAMCost: 34.0,
-					GPUCost: 4.0,
-				},
-			},
-		},
-	}
-
-	for name, testcase := range cases {
-		t.Run(name, func(t *testing.T) {
-			idles, err = as.ComputeIdleAllocations(testcase.assetSet)
-			if err != nil {
-				t.Fatalf("unexpected error: %s", err)
-			}
-
-			if len(idles) != len(testcase.clusters) {
-				t.Fatalf("idles: expected length %d; got length %d", len(testcase.clusters), len(idles))
-			}
-
-			for clusterName, cluster := range testcase.clusters {
-				if idle, ok := idles[clusterName]; !ok {
-					t.Fatalf("expected idle cost for %s", clusterName)
-				} else {
-					if !util.IsApproximately(idle.TotalCost(), cluster.TotalCost()) {
-						t.Fatalf("%s idle: expected total cost %f; got total cost %f", clusterName, cluster.TotalCost(), idle.TotalCost())
-					}
-				}
-				if !util.IsApproximately(idles[clusterName].CPUCost, cluster.CPUCost) {
-					t.Fatalf("expected idle CPU cost for %s to be %.2f; got %.2f", clusterName, cluster.CPUCost, idles[clusterName].CPUCost)
-				}
-				if !util.IsApproximately(idles[clusterName].RAMCost, cluster.RAMCost) {
-					t.Fatalf("expected idle RAM cost for %s to be %.2f; got %.2f", clusterName, cluster.RAMCost, idles[clusterName].RAMCost)
-				}
-				if !util.IsApproximately(idles[clusterName].GPUCost, cluster.GPUCost) {
-					t.Fatalf("expected idle GPU cost for %s to be %.2f; got %.2f", clusterName, cluster.GPUCost, idles[clusterName].GPUCost)
-				}
-			}
-		})
-	}
-}
-
-func TestAllocationSet_ComputeIdleAllocationsPerNode(t *testing.T) {
-
-	var as *AllocationSet
-	var err error
-	var idles map[string]*Allocation
-
-	end := time.Now().UTC().Truncate(day)
-	start := end.Add(-day)
-
-	// Generate AllocationSet without idle allocations
-	as = GenerateMockAllocationSet(start)
-
-	assetSets := GenerateMockAssetSets(start, end)
-
-	cases := map[string]struct {
-		allocationSet *AllocationSet
-		assetSet      *AssetSet
-		nodes         map[string]Allocation
-	}{
-		"1a": {
-			allocationSet: as,
-			assetSet:      assetSets[0],
-			nodes: map[string]Allocation{
-				"c1nodes": {
-					CPUCost: 44.0,
-					RAMCost: 24.0,
-					GPUCost: 4.0,
-				},
-				"node1": {
-					CPUCost: 18.0,
-					RAMCost: 13.0,
-					GPUCost: -2.0,
-				},
-				"node2": {
-					CPUCost: 18.0,
-					RAMCost: 13.0,
-					GPUCost: -2.0,
-				},
-				"node3": {
-					CPUCost: 8.0,
-					RAMCost: 8.0,
-					GPUCost: 8.0,
-				},
-			},
-		},
-		"1b": {
-			allocationSet: as,
-			assetSet:      assetSets[1],
-			nodes: map[string]Allocation{
-				"c1nodes": {
-					CPUCost: 44.0,
-					RAMCost: 24.0,
-					GPUCost: 4.0,
-				},
-				"node1": {
-					CPUCost: 18.0,
-					RAMCost: 13.0,
-					GPUCost: -2.0,
-				},
-				"node2": {
-					CPUCost: 18.0,
-					RAMCost: 13.0,
-					GPUCost: -2.0,
-				},
-				"node3": {
-					CPUCost: 8.0,
-					RAMCost: 8.0,
-					GPUCost: 8.0,
-				},
-			},
-		},
-	}
-
-	for name, testcase := range cases {
-		t.Run(name, func(t *testing.T) {
-			idles, err = as.ComputeIdleAllocationsByNode(testcase.assetSet)
-			if err != nil {
-				t.Fatalf("unexpected error: %s", err)
-			}
-
-			if len(idles) != len(testcase.nodes) {
-				t.Fatalf("idles: expected length %d; got length %d", len(testcase.nodes), len(idles))
-			}
-
-			for nodeName, node := range testcase.nodes {
-				if idle, ok := idles[nodeName]; !ok {
-					t.Fatalf("expected idle cost for %s", nodeName)
-				} else {
-					if !util.IsApproximately(idle.TotalCost(), node.TotalCost()) {
-						t.Fatalf("%s idle: expected total cost %f; got total cost %f", nodeName, node.TotalCost(), idle.TotalCost())
-					}
-				}
-				if !util.IsApproximately(idles[nodeName].CPUCost, node.CPUCost) {
-					t.Fatalf("expected idle CPU cost for %s to be %.2f; got %.2f", nodeName, node.CPUCost, idles[nodeName].CPUCost)
-				}
-				if !util.IsApproximately(idles[nodeName].RAMCost, node.RAMCost) {
-					t.Fatalf("expected idle RAM cost for %s to be %.2f; got %.2f", nodeName, node.RAMCost, idles[nodeName].RAMCost)
-				}
-				if !util.IsApproximately(idles[nodeName].GPUCost, node.GPUCost) {
-					t.Fatalf("expected idle GPU cost for %s to be %.2f; got %.2f", nodeName, node.GPUCost, idles[nodeName].GPUCost)
-				}
-			}
-		})
-	}
-}
-
 // TODO niko/etl
 //func TestAllocationSet_Delete(t *testing.T) {}
 

+ 17 - 0
pkg/kubecost/asset.go

@@ -3054,6 +3054,23 @@ func (asr *AssetSetRange) Minutes() float64 {
 	return duration.Minutes()
 }
 
+// TotalCost returns the AssetSetRange's total cost
+func (asr *AssetSetRange) TotalCost() float64 {
+	if asr == nil {
+		return 0.0
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+
+	tc := 0.0
+	for _, as := range asr.assets {
+		tc += as.TotalCost()
+	}
+
+	return tc
+}
+
 // This is a helper type. The Asset API returns a json which cannot be natively
 // unmarshaled into any Asset struct. Therefore, this struct IN COMBINATION WITH
 // DESERIALIZATION LOGIC DEFINED IN asset_unmarshal.go can unmarshal a json directly

+ 12 - 0
pkg/kubecost/mock.go

@@ -156,6 +156,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod1",
 		Container:  "container1",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 	a1111.RAMCost = 11.00
 
@@ -165,6 +166,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-abc",
 		Container:  "container2",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 
 	a11def3 := NewMockUnitAllocation("cluster1/namespace1/pod-def/container3", start, day, &AllocationProperties{
@@ -173,6 +175,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-def",
 		Container:  "container3",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 
 	a12ghi4 := NewMockUnitAllocation("cluster1/namespace2/pod-ghi/container4", start, day, &AllocationProperties{
@@ -181,6 +184,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-ghi",
 		Container:  "container4",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 
 	a12ghi5 := NewMockUnitAllocation("cluster1/namespace2/pod-ghi/container5", start, day, &AllocationProperties{
@@ -189,6 +193,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-ghi",
 		Container:  "container5",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 
 	a12jkl6 := NewMockUnitAllocation("cluster1/namespace2/pod-jkl/container6", start, day, &AllocationProperties{
@@ -197,6 +202,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-jkl",
 		Container:  "container6",
 		ProviderID: "c1nodes",
+		Node:       "c1nodes",
 	})
 
 	a22mno4 := NewMockUnitAllocation("cluster2/namespace2/pod-mno/container4", start, day, &AllocationProperties{
@@ -205,6 +211,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-mno",
 		Container:  "container4",
 		ProviderID: "node1",
+		Node:       "node1",
 	})
 
 	a22mno5 := NewMockUnitAllocation("cluster2/namespace2/pod-mno/container5", start, day, &AllocationProperties{
@@ -213,6 +220,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-mno",
 		Container:  "container5",
 		ProviderID: "node1",
+		Node:       "node1",
 	})
 
 	a22pqr6 := NewMockUnitAllocation("cluster2/namespace2/pod-pqr/container6", start, day, &AllocationProperties{
@@ -221,6 +229,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-pqr",
 		Container:  "container6",
 		ProviderID: "node2",
+		Node:       "node2",
 	})
 
 	a23stu7 := NewMockUnitAllocation("cluster2/namespace3/pod-stu/container7", start, day, &AllocationProperties{
@@ -229,6 +238,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-stu",
 		Container:  "container7",
 		ProviderID: "node2",
+		Node:       "node2",
 	})
 
 	a23vwx8 := NewMockUnitAllocation("cluster2/namespace3/pod-vwx/container8", start, day, &AllocationProperties{
@@ -237,6 +247,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-vwx",
 		Container:  "container8",
 		ProviderID: "node3",
+		Node:       "node3",
 	})
 
 	a23vwx9 := NewMockUnitAllocation("cluster2/namespace3/pod-vwx/container9", start, day, &AllocationProperties{
@@ -245,6 +256,7 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Pod:        "pod-vwx",
 		Container:  "container9",
 		ProviderID: "node3",
+		Node:       "node3",
 	})
 
 	// Controllers

+ 159 - 0
pkg/kubecost/query.go

@@ -0,0 +1,159 @@
+package kubecost
+
+import (
+	"time"
+)
+
+// Querier is an aggregate interface which has the ability to query each Kubecost store type
+type Querier interface {
+	AllocationQuerier
+	SummaryAllocationQuerier
+	AssetQuerier
+	CloudUsageQuerier
+}
+
+// AllocationQuerier interface defining api for requesting Allocation data
+type AllocationQuerier interface {
+	QueryAllocation(start, end time.Time, opts *AllocationQueryOptions) (*AllocationSetRange, error)
+}
+
+// SummaryAllocationQuerier interface defining api for requesting SummaryAllocation data
+type SummaryAllocationQuerier interface {
+	QuerySummaryAllocation(start, end time.Time, opts *AllocationQueryOptions) (*SummaryAllocationSetRange, error)
+}
+
+// AssetQuerier interface defining api for requesting Asset data
+type AssetQuerier interface {
+	QueryAsset(start, end time.Time, opts *AssetQueryOptions) (*AssetSetRange, error)
+}
+
+// CloudUsageQuerier interface defining api for requesting CloudUsage data
+type CloudUsageQuerier interface {
+	QueryCloudUsage(start, end time.Time, opts *CloudUsageQueryOptions) (*CloudUsageSetRange, error)
+}
+
+// AllocationQueryOptions defines optional parameters for querying an Allocation Store
+type AllocationQueryOptions struct {
+	Accumulate        bool
+	AccumulateBy      time.Duration
+	AggregateBy       []string
+	Compute           bool
+	FilterFuncs       []AllocationMatchFunc
+	IdleByNode        bool
+	IncludeExternal   bool
+	IncludeIdle       bool
+	LabelConfig       *LabelConfig
+	MergeUnallocated  bool
+	Reconcile         bool
+	ReconcileNetwork  bool
+	ShareFuncs        []AllocationMatchFunc
+	SharedHourlyCosts map[string]float64
+	ShareIdle         string
+	ShareSplit        string
+	ShareTenancyCosts bool
+	SplitIdle         bool
+	Step              time.Duration
+}
+
+// AssetQueryOptions defines optional parameters for querying an Asset Store
+type AssetQueryOptions struct {
+	Accumulate         bool
+	AggregateBy        []string
+	Compute            bool
+	DisableAdjustments bool
+	FilterFuncs        []AssetMatchFunc
+	IncludeCloud       bool
+	SharedHourlyCosts  map[string]float64
+	Step               time.Duration
+}
+
+// CloudUsageQueryOptions define optional parameters for querying a Store
+type CloudUsageQueryOptions struct {
+	Accumulate  bool
+	AggregateBy []string
+	FilterFuncs []CloudUsageMatchFunc
+}
+
+// QueryAllocationAsync provides a function for retrieving results from any AllocationQuerier asynchronously
+func QueryAllocationAsync(allocationQuerier AllocationQuerier, start, end time.Time, opts *AllocationQueryOptions) (chan *AllocationSetRange, chan error) {
+	asrCh := make(chan *AllocationSetRange)
+	errCh := make(chan error)
+
+	go func(asrCh chan *AllocationSetRange, errCh chan error) {
+		defer close(asrCh)
+		defer close(errCh)
+
+		asr, err := allocationQuerier.QueryAllocation(start, end, opts)
+		if err != nil {
+			errCh <- err
+			return
+		}
+
+		asrCh <- asr
+	}(asrCh, errCh)
+
+	return asrCh, errCh
+}
+
+// QuerySummaryAllocationAsync provides a function for retrieving results from any SummaryAllocationQuerier asynchronously
+func QuerySummaryAllocationAsync(summaryAllocationQuerier SummaryAllocationQuerier, start, end time.Time, opts *AllocationQueryOptions) (chan *SummaryAllocationSetRange, chan error) {
+	asrCh := make(chan *SummaryAllocationSetRange)
+	errCh := make(chan error)
+
+	go func(asrCh chan *SummaryAllocationSetRange, errCh chan error) {
+		defer close(asrCh)
+		defer close(errCh)
+
+		asr, err := summaryAllocationQuerier.QuerySummaryAllocation(start, end, opts)
+		if err != nil {
+			errCh <- err
+			return
+		}
+
+		asrCh <- asr
+	}(asrCh, errCh)
+
+	return asrCh, errCh
+}
+
+// QueryAssetAsync provides a function for retrieving results from any AssetQuerier asynchronously
+func QueryAssetAsync(assetQuerier AssetQuerier, start, end time.Time, opts *AssetQueryOptions) (chan *AssetSetRange, chan error) {
+	asrCh := make(chan *AssetSetRange)
+	errCh := make(chan error)
+
+	go func(asrCh chan *AssetSetRange, errCh chan error) {
+		defer close(asrCh)
+		defer close(errCh)
+
+		asr, err := assetQuerier.QueryAsset(start, end, opts)
+		if err != nil {
+			errCh <- err
+			return
+		}
+
+		asrCh <- asr
+	}(asrCh, errCh)
+
+	return asrCh, errCh
+}
+
+// QueryCloudUsageAsync provides a function for retrieving results from any CloudUsageQuerier asynchronously
+func QueryCloudUsageAsync(cloudUsageQuerier CloudUsageQuerier, start, end time.Time, opts *CloudUsageQueryOptions) (chan *CloudUsageSetRange, chan error) {
+	cusrCh := make(chan *CloudUsageSetRange)
+	errCh := make(chan error)
+
+	go func(cusrCh chan *CloudUsageSetRange, errCh chan error) {
+		defer close(cusrCh)
+		defer close(errCh)
+
+		cusr, err := cloudUsageQuerier.QueryCloudUsage(start, end, opts)
+		if err != nil {
+			errCh <- err
+			return
+		}
+
+		cusrCh <- cusr
+	}(cusrCh, errCh)
+
+	return cusrCh, errCh
+}

+ 0 - 1
pkg/kubecost/status.go

@@ -39,7 +39,6 @@ type FileStatus struct {
 type CloudStatus struct {
 	CloudConnectionStatus string                `json:"cloudConnectionStatus"`
 	CloudUsage            *CloudAssetStatus     `json:"cloudUsage,omitempty"`
-	CloudAssets           *CloudAssetStatus     `json:"cloudAssets,omitempty"`
 	Reconciliation        *ReconciliationStatus `json:"reconciliation,omitempty"`
 }
 

+ 115 - 39
pkg/kubecost/summaryallocation.go

@@ -297,11 +297,11 @@ type SummaryAllocationSet struct {
 }
 
 // NewSummaryAllocationSet converts an AllocationSet to a SummaryAllocationSet.
-// Filter functions, sharing functions, and reconciliation parameters are
+// Filter functions, keep functions, and reconciliation parameters are
 // required for unfortunate reasons to do with performance and legacy order-of-
 // operations details, as well as the fact that reconciliation has been
 // pushed down to the conversion step between Allocation and SummaryAllocation.
-func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
+func NewSummaryAllocationSet(as *AllocationSet, ffs, kfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
 	if as == nil {
 		return nil
 	}
@@ -309,7 +309,7 @@ func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc,
 	// If we can know the exact size of the map, use it. If filters or sharing
 	// functions are present, we can't know the size, so we make a default map.
 	var sasMap map[string]*SummaryAllocation
-	if len(ffs) == 0 && len(sfs) == 0 {
+	if len(ffs) == 0 && len(kfs) == 0 {
 		// No filters, so make the map of summary allocations exactly the size
 		// of the origin allocation set.
 		sasMap = make(map[string]*SummaryAllocation, len(as.allocations))
@@ -324,16 +324,16 @@ func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc,
 	}
 
 	for _, alloc := range as.allocations {
-		// First, detect if the allocation should be shared. If so, mark it as
+		// First, detect if the allocation should be kept. If so, mark it as
 		// such, insert it, and continue.
-		shouldShare := false
-		for _, sf := range sfs {
-			if sf(alloc) {
-				shouldShare = true
+		shouldKeep := false
+		for _, kf := range kfs {
+			if kf(alloc) {
+				shouldKeep = true
 				break
 			}
 		}
-		if shouldShare {
+		if shouldKeep {
 			sa := NewSummaryAllocation(alloc, reconcile, reconcileNetwork)
 			sa.Share = true
 			sas.Insert(sa)
@@ -454,8 +454,8 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// an empty slice implies that we should aggregate everything. (See
 	// generateKey for why that makes sense.)
 	shouldAggregate := aggregateBy != nil
-	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
-	if !shouldAggregate && !shouldShare {
+	shouldKeep := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
+	if !shouldAggregate && !shouldKeep {
 		return nil
 	}
 
@@ -492,7 +492,6 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	//     by the proportion of allocation resources remaining after filters
 	//     have been applied.
 	//
-	//
 	// 11. Distribute shared resources according to sharing coefficients.
 	//
 	// 12. Insert external allocations into the result set.
@@ -611,15 +610,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// option. (See IdleByNode documentation; defaults to idle-by-cluster.)
 	var allocTotals map[string]*AllocationTotals
 	var ok bool
-	if options.IdleByNode {
-		if options.AllocationTotalsStore != nil {
+	if options.AllocationTotalsStore != nil {
+		if options.IdleByNode {
 			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByNode(*sas.Window.Start(), *sas.Window.End())
 			if !ok {
 				return fmt.Errorf("nil allocation resource totals by node for %s", sas.Window)
 			}
-		}
-	} else {
-		if options.AllocationTotalsStore != nil {
+		} else {
 			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByCluster(*sas.Window.Start(), *sas.Window.End())
 			if !ok {
 				return fmt.Errorf("nil allocation resource totals by cluster for %s", sas.Window)
@@ -627,6 +624,20 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
+	// If reconciliation has been fully or partially disabled, clear the
+	// relevant adjustments from the alloc totals
+	if allocTotals != nil && (!options.Reconcile || !options.ReconcileNetwork) {
+		if !options.Reconcile {
+			for _, tot := range allocTotals {
+				tot.ClearAdjustments()
+			}
+		} else if !options.ReconcileNetwork {
+			for _, tot := range allocTotals {
+				tot.NetworkCostAdjustment = 0.0
+			}
+		}
+	}
+
 	// If filters have been applied, then we need to record allocation resource
 	// totals after filtration (i.e. the allocations that are present) so that
 	// we can identify the proportion of idle cost to keep. That is, we should
@@ -705,13 +716,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		// NOTE: SummaryAllocation does not support ShareEven, so only record
 		// by cost for cost-weighted distribution.
 		if sharingCoeffs != nil {
-			sharingCoeffs[key] += sa.TotalCost()
+			sharingCoeffs[key] += sa.TotalCost() - sa.SharedCost
 		}
 
 		// 6. Distribute idle allocations according to the idle coefficients.
-		// NOTE: if idle allocation is off (i.e. ShareIdle == ShareNone) then
-		// all idle allocations will be in the resultSet at this point, so idleSet
-		// will be empty and we won't enter this block.
+		// NOTE: if idle allocation is off (i.e. options.ShareIdle: ShareNone)
+		// then all idle allocations will be in the resultSet at this point, so
+		// idleSet will be empty and we won't enter this block.
 		if len(idleSet.SummaryAllocations) > 0 {
 			for _, idle := range idleSet.SummaryAllocations {
 				// Idle key is either cluster or node, as determined by the
@@ -784,11 +795,11 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 				// the relevant property matches (i.e. Cluster or Node,
 				// depending on which idle sharing option is selected)
 				if options.IdleByNode {
-					if idle.Properties.Node != sa.Properties.Node {
+					if idle.Properties.Cluster != sa.Properties.Cluster || idle.Properties.Node != sa.Properties.Node {
 						continue
 					}
 
-					key = idle.Properties.Node
+					key = fmt.Sprintf("%s/%s", idle.Properties.Cluster, idle.Properties.Node)
 				} else {
 					if idle.Properties.Cluster != sa.Properties.Cluster {
 						continue
@@ -919,12 +930,12 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	for _, sa := range externalSet.SummaryAllocations {
 		skip := false
 
+		// Make an allocation with the same properties and test that
+		// against the FilterFunc to see if the external allocation should
+		// be filtered or not.
+		// TODO:CLEANUP do something about external cost, this stinks
+		ea := &Allocation{Properties: sa.Properties}
 		for _, ff := range options.FilterFuncs {
-			// Make an allocation with the same properties and test that
-			// against the FilterFunc to see if the external allocation should
-			// be filtered or not.
-			// TODO:CLEANUP do something about external cost, this stinks
-			ea := &Allocation{Properties: sa.Properties}
 			if !ff(ea) {
 				skip = true
 				break
@@ -942,10 +953,27 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	// 13. Distribute remaining, undistributed idle. Undistributed idle is any
 	// per-resource idle cost for which there can be no idle coefficient
 	// computed because there is zero usage across all allocations.
-	for _, ia := range idleSet.SummaryAllocations {
-		key := ia.Properties.Cluster
+	for _, isa := range idleSet.SummaryAllocations {
+		// if the idle does not apply to the non-filtered values, skip it
+		skip := false
+		// Make an allocation with the same properties and test that
+		// against the FilterFunc to see if the idle allocation should
+		// be filtered or not.
+		// TODO:CLEANUP do something about external cost, this stinks
+		ia := &Allocation{Properties: isa.Properties}
+		for _, ff := range options.FilterFuncs {
+			if !ff(ia) {
+				skip = true
+				break
+			}
+		}
+		if skip {
+			continue
+		}
+
+		key := isa.Properties.Cluster
 		if options.IdleByNode {
-			key = fmt.Sprintf("%s/%s", ia.Properties.Cluster, ia.Properties.Node)
+			key = fmt.Sprintf("%s/%s", isa.Properties.Cluster, isa.Properties.Node)
 		}
 
 		rt, ok := allocTotals[key]
@@ -956,36 +984,36 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 
 		hasUndistributableCost := false
 
-		if ia.CPUCost > 0.0 && rt.CPUCost == 0.0 {
+		if isa.CPUCost > 0.0 && rt.CPUCost == 0.0 {
 			// There is idle CPU cost, but no allocated CPU cost, so that cost
 			// is undistributable and must be inserted.
 			hasUndistributableCost = true
 		} else {
 			// Cost was entirely distributed, so zero it out
-			ia.CPUCost = 0.0
+			isa.CPUCost = 0.0
 		}
 
-		if ia.GPUCost > 0.0 && rt.GPUCost == 0.0 {
+		if isa.GPUCost > 0.0 && rt.GPUCost == 0.0 {
 			// There is idle GPU cost, but no allocated GPU cost, so that cost
 			// is undistributable and must be inserted.
 			hasUndistributableCost = true
 		} else {
 			// Cost was entirely distributed, so zero it out
-			ia.GPUCost = 0.0
+			isa.GPUCost = 0.0
 		}
 
-		if ia.RAMCost > 0.0 && rt.RAMCost == 0.0 {
+		if isa.RAMCost > 0.0 && rt.RAMCost == 0.0 {
 			// There is idle CPU cost, but no allocated CPU cost, so that cost
 			// is undistributable and must be inserted.
 			hasUndistributableCost = true
 		} else {
 			// Cost was entirely distributed, so zero it out
-			ia.RAMCost = 0.0
+			isa.RAMCost = 0.0
 		}
 
 		if hasUndistributableCost {
-			ia.Name = fmt.Sprintf("%s/%s", key, IdleSuffix)
-			resultSet.Insert(ia)
+			isa.Name = fmt.Sprintf("%s/%s", key, IdleSuffix)
+			resultSet.Insert(isa)
 		}
 	}
 
@@ -1101,6 +1129,22 @@ func (sas *SummaryAllocationSet) Insert(sa *SummaryAllocation) error {
 	return nil
 }
 
+func (sas *SummaryAllocationSet) TotalCost() float64 {
+	if sas == nil {
+		return 0.0
+	}
+
+	sas.RLock()
+	defer sas.RUnlock()
+
+	tc := 0.0
+	for _, sa := range sas.SummaryAllocations {
+		tc += sa.TotalCost()
+	}
+
+	return tc
+}
+
 // SummaryAllocationSetRange is a thread-safe slice of SummaryAllocationSets.
 type SummaryAllocationSetRange struct {
 	sync.RWMutex
@@ -1268,3 +1312,35 @@ func (sasr *SummaryAllocationSetRange) InsertExternalAllocations(that *Allocatio
 	// err might be nil
 	return err
 }
+
+func (sasr *SummaryAllocationSetRange) TotalCost() float64 {
+	if sasr == nil {
+		return 0.0
+	}
+
+	sasr.RLock()
+	defer sasr.RUnlock()
+
+	tc := 0.0
+	for _, sas := range sasr.SummaryAllocationSets {
+		tc += sas.TotalCost()
+	}
+
+	return tc
+}
+
+// TODO remove after testing
+func (sasr *SummaryAllocationSetRange) Print(verbose bool) {
+	fmt.Printf("%s (dur=%s, len=%d, cost=%.5f)\n", sasr.Window, sasr.Window.Duration(), len(sasr.SummaryAllocationSets), sasr.TotalCost())
+	for _, sas := range sasr.SummaryAllocationSets {
+		fmt.Printf(" > %s (dur=%s, len=%d, cost=%.5f) \n", sas.Window, sas.Window.Duration(), len(sas.SummaryAllocations), sas.TotalCost())
+		for key, sa := range sas.SummaryAllocations {
+			if verbose {
+				fmt.Printf("   {\"%s\", cpu: %.5f, gpu: %.5f, lb: %.5f, net: %.5f, pv: %.5f, ram: %.5f, shared: %.5f, external: %.5f}\n",
+					key, sa.CPUCost, sa.GPUCost, sa.LoadBalancerCost, sa.NetworkCost, sa.PVCost, sa.RAMCost, sa.SharedCost, sa.ExternalCost)
+			} else {
+				fmt.Printf("   - \"%s\": %.5f\n", key, sa.TotalCost())
+			}
+		}
+	}
+}

+ 83 - 12
pkg/kubecost/totals.go

@@ -41,9 +41,35 @@ type AllocationTotals struct {
 func (art *AllocationTotals) ClearAdjustments() {
 	art.CPUCostAdjustment = 0.0
 	art.GPUCostAdjustment = 0.0
+	art.LoadBalancerCostAdjustment = 0.0
+	art.NetworkCostAdjustment = 0.0
+	art.PersistentVolumeCostAdjustment = 0.0
 	art.RAMCostAdjustment = 0.0
 }
 
+// Clone deep copies the AllocationTotals
+func (art *AllocationTotals) Clone() *AllocationTotals {
+	return &AllocationTotals{
+		Start:                          art.Start,
+		End:                            art.End,
+		Cluster:                        art.Cluster,
+		Node:                           art.Node,
+		Count:                          art.Count,
+		CPUCost:                        art.CPUCost,
+		CPUCostAdjustment:              art.CPUCostAdjustment,
+		GPUCost:                        art.GPUCost,
+		GPUCostAdjustment:              art.GPUCostAdjustment,
+		LoadBalancerCost:               art.LoadBalancerCost,
+		LoadBalancerCostAdjustment:     art.LoadBalancerCostAdjustment,
+		NetworkCost:                    art.NetworkCost,
+		NetworkCostAdjustment:          art.NetworkCostAdjustment,
+		PersistentVolumeCost:           art.PersistentVolumeCost,
+		PersistentVolumeCostAdjustment: art.PersistentVolumeCostAdjustment,
+		RAMCost:                        art.RAMCost,
+		RAMCostAdjustment:              art.RAMCostAdjustment,
+	}
+}
+
 // TotalCPUCost returns CPU cost with adjustment.
 func (art *AllocationTotals) TotalCPUCost() float64 {
 	return art.CPUCost + art.CPUCostAdjustment
@@ -181,6 +207,31 @@ func (art *AssetTotals) ClearAdjustments() {
 	art.RAMCostAdjustment = 0.0
 }
 
+// Clone deep copies the AssetTotals
+func (art *AssetTotals) Clone() *AssetTotals {
+	return &AssetTotals{
+		Start:                           art.Start,
+		End:                             art.End,
+		Cluster:                         art.Cluster,
+		Node:                            art.Node,
+		Count:                           art.Count,
+		AttachedVolumeCost:              art.AttachedVolumeCost,
+		AttachedVolumeCostAdjustment:    art.AttachedVolumeCostAdjustment,
+		ClusterManagementCost:           art.ClusterManagementCost,
+		ClusterManagementCostAdjustment: art.ClusterManagementCostAdjustment,
+		CPUCost:                         art.CPUCost,
+		CPUCostAdjustment:               art.CPUCostAdjustment,
+		GPUCost:                         art.GPUCost,
+		GPUCostAdjustment:               art.GPUCostAdjustment,
+		LoadBalancerCost:                art.LoadBalancerCost,
+		LoadBalancerCostAdjustment:      art.LoadBalancerCostAdjustment,
+		PersistentVolumeCost:            art.PersistentVolumeCost,
+		PersistentVolumeCostAdjustment:  art.PersistentVolumeCostAdjustment,
+		RAMCost:                         art.RAMCost,
+		RAMCostAdjustment:               art.RAMCostAdjustment,
+	}
+}
+
 // TotalAttachedVolumeCost returns CPU cost with adjustment.
 func (art *AssetTotals) TotalAttachedVolumeCost() float64 {
 	return art.AttachedVolumeCost + art.AttachedVolumeCostAdjustment
@@ -554,10 +605,15 @@ func NewMemoryTotalsStore() *MemoryTotalsStore {
 // by cluster for the given start and end times.
 func (mts *MemoryTotalsStore) GetAllocationTotalsByCluster(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mts.allocTotalsByCluster.Get(k); ok {
-		return raw.(map[string]*AllocationTotals), true
-	} else {
+	if raw, ok := mts.allocTotalsByCluster.Get(k); !ok {
 		return map[string]*AllocationTotals{}, false
+	} else {
+		original := raw.(map[string]*AllocationTotals)
+		totals := make(map[string]*AllocationTotals, len(original))
+		for k, v := range original {
+			totals[k] = v.Clone()
+		}
+		return totals, true
 	}
 }
 
@@ -565,10 +621,15 @@ func (mts *MemoryTotalsStore) GetAllocationTotalsByCluster(start time.Time, end
 // by node for the given start and end times.
 func (mts *MemoryTotalsStore) GetAllocationTotalsByNode(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mts.allocTotalsByNode.Get(k); ok {
-		return raw.(map[string]*AllocationTotals), true
-	} else {
+	if raw, ok := mts.allocTotalsByNode.Get(k); !ok {
 		return map[string]*AllocationTotals{}, false
+	} else {
+		original := raw.(map[string]*AllocationTotals)
+		totals := make(map[string]*AllocationTotals, len(original))
+		for k, v := range original {
+			totals[k] = v.Clone()
+		}
+		return totals, true
 	}
 }
 
@@ -590,10 +651,15 @@ func (mts *MemoryTotalsStore) SetAllocationTotalsByNode(start time.Time, end tim
 // by cluster for the given start and end times.
 func (mts *MemoryTotalsStore) GetAssetTotalsByCluster(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mts.assetTotalsByCluster.Get(k); ok {
-		return raw.(map[string]*AssetTotals), true
-	} else {
+	if raw, ok := mts.assetTotalsByCluster.Get(k); !ok {
 		return map[string]*AssetTotals{}, false
+	} else {
+		original := raw.(map[string]*AssetTotals)
+		totals := make(map[string]*AssetTotals, len(original))
+		for k, v := range original {
+			totals[k] = v.Clone()
+		}
+		return totals, true
 	}
 }
 
@@ -601,10 +667,15 @@ func (mts *MemoryTotalsStore) GetAssetTotalsByCluster(start time.Time, end time.
 // by node for the given start and end times.
 func (mts *MemoryTotalsStore) GetAssetTotalsByNode(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mts.assetTotalsByNode.Get(k); ok {
-		return raw.(map[string]*AssetTotals), true
-	} else {
+	if raw, ok := mts.assetTotalsByNode.Get(k); !ok {
 		return map[string]*AssetTotals{}, false
+	} else {
+		original := raw.(map[string]*AssetTotals)
+		totals := make(map[string]*AssetTotals, len(original))
+		for k, v := range original {
+			totals[k] = v.Clone()
+		}
+		return totals, true
 	}
 }
 

+ 10 - 0
pkg/kubecost/window.go

@@ -343,6 +343,16 @@ func (w Window) Contains(t time.Time) bool {
 	return true
 }
 
+func (w Window) ContainsWindow(that Window) bool {
+	// only support containing closed windows for now
+	// could check if openness is compatible with closure
+	if that.IsOpen() {
+		return false
+	}
+
+	return w.Contains(*that.start) && w.Contains(*that.end)
+}
+
 func (w Window) Duration() time.Duration {
 	if w.IsOpen() {
 		// TODO test