Просмотр исходного кода

Merge pull request #971 from kubecost/niko/performance

Summary view
Niko Kovacevic 4 года назад
Родитель
Commit
8a8605b669

+ 135 - 208
pkg/kubecost/allocation.go

@@ -350,46 +350,82 @@ func (a *Allocation) Equal(that *Allocation) bool {
 
 // TotalCost is the total cost of the Allocation including adjustments
 func (a *Allocation) TotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.CPUTotalCost() + a.GPUTotalCost() + a.RAMTotalCost() + a.PVTotalCost() + a.NetworkTotalCost() + a.LBTotalCost() + a.SharedTotalCost() + a.ExternalCost
 }
 
 // CPUTotalCost calculates total CPU cost of Allocation including adjustment
 func (a *Allocation) CPUTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.CPUCost + a.CPUCostAdjustment
 }
 
 // GPUTotalCost calculates total GPU cost of Allocation including adjustment
 func (a *Allocation) GPUTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.GPUCost + a.GPUCostAdjustment
 }
 
 // RAMTotalCost calculates total RAM cost of Allocation including adjustment
 func (a *Allocation) RAMTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.RAMCost + a.RAMCostAdjustment
 }
 
 // PVTotalCost calculates total PV cost of Allocation including adjustment
 func (a *Allocation) PVTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.PVCost() + a.PVCostAdjustment
 }
 
 // NetworkTotalCost calculates total Network cost of Allocation including adjustment
 func (a *Allocation) NetworkTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.NetworkCost + a.NetworkCostAdjustment
 }
 
 // LBTotalCost calculates total LB cost of Allocation including adjustment
 func (a *Allocation) LBTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.LoadBalancerCost + a.LoadBalancerCostAdjustment
 }
 
 // SharedTotalCost calculates total shared cost of Allocation including adjustment
 func (a *Allocation) SharedTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.SharedCost
 }
 
 // PVCost calculate cumulative cost of all PVs that Allocation is attached to
 func (a *Allocation) PVCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	cost := 0.0
 	for _, pv := range a.PVs {
 		cost += pv.Cost
@@ -399,6 +435,10 @@ func (a *Allocation) PVCost() float64 {
 
 // PVByteHours calculate cumulative ByteHours of all PVs that Allocation is attached to
 func (a *Allocation) PVByteHours() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	byteHours := 0.0
 	for _, pv := range a.PVs {
 		byteHours += pv.ByteHours
@@ -410,6 +450,10 @@ func (a *Allocation) PVByteHours() float64 {
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (a *Allocation) CPUEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.CPUCoreRequestAverage > 0 {
 		return a.CPUCoreUsageAverage / a.CPUCoreRequestAverage
 	}
@@ -425,6 +469,10 @@ func (a *Allocation) CPUEfficiency() float64 {
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (a *Allocation) RAMEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.RAMBytesRequestAverage > 0 {
 		return a.RAMBytesUsageAverage / a.RAMBytesRequestAverage
 	}
@@ -439,6 +487,10 @@ func (a *Allocation) RAMEfficiency() float64 {
 // TotalEfficiency is the cost-weighted average of CPU and RAM efficiency. If
 // there is no cost at all, then efficiency is zero.
 func (a *Allocation) TotalEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.RAMTotalCost()+a.CPUTotalCost() > 0 {
 		ramCostEff := a.RAMEfficiency() * a.RAMTotalCost()
 		cpuCostEff := a.CPUEfficiency() * a.CPUTotalCost()
@@ -482,6 +534,10 @@ func (a *Allocation) PVBytes() float64 {
 
 // ResetAdjustments sets all cost adjustment fields to zero
 func (a *Allocation) ResetAdjustments() {
+	if a == nil {
+		return
+	}
+
 	a.CPUCostAdjustment = 0.0
 	a.GPUCostAdjustment = 0.0
 	a.RAMCostAdjustment = 0.0
@@ -550,27 +606,47 @@ func (a *Allocation) IsAggregated() bool {
 
 // IsExternal is true if the given Allocation represents external costs.
 func (a *Allocation) IsExternal() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, ExternalSuffix)
 }
 
 // IsIdle is true if the given Allocation represents idle costs.
 func (a *Allocation) IsIdle() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, IdleSuffix)
 }
 
 // IsUnallocated is true if the given Allocation represents unallocated costs.
 func (a *Allocation) IsUnallocated() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, UnallocatedSuffix)
 }
 
 // IsUnmounted is true if the given Allocation represents unmounted volume costs.
 func (a *Allocation) IsUnmounted() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, UnmountedSuffix)
 }
 
 // Minutes returns the number of minutes the Allocation represents, as defined
 // by the difference between the end and start times.
 func (a *Allocation) Minutes() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.End.Sub(a.Start).Minutes()
 }
 
@@ -594,6 +670,10 @@ func (a *Allocation) Share(that *Allocation) (*Allocation, error) {
 
 // String represents the given Allocation as a string
 func (a *Allocation) String() string {
+	if a == nil {
+		return "<nil>"
+	}
+
 	return fmt.Sprintf("%s%s=%.2f", a.Name, NewWindow(&a.Start, &a.End), a.TotalCost())
 }
 
@@ -743,15 +823,16 @@ func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSe
 // succeeds, the allocation is marked as a shared resource. ShareIdle is a
 // simple flag for sharing idle resources.
 type AllocationAggregationOptions struct {
-	FilterFuncs       []AllocationMatchFunc
-	IdleByNode        bool
-	LabelConfig       *LabelConfig
-	MergeUnallocated  bool
-	SharedHourlyCosts map[string]float64
-	ShareFuncs        []AllocationMatchFunc
-	ShareIdle         string
-	ShareSplit        string
-	SplitIdle         bool
+	AllocationTotalsStore AllocationTotalsStore
+	FilterFuncs           []AllocationMatchFunc
+	IdleByNode            bool
+	LabelConfig           *LabelConfig
+	MergeUnallocated      bool
+	ShareFuncs            []AllocationMatchFunc
+	ShareIdle             string
+	ShareSplit            string
+	SharedHourlyCosts     map[string]float64
+	SplitIdle             bool
 }
 
 // AggregateBy aggregates the Allocations in the given AllocationSet by the given
@@ -759,33 +840,47 @@ type AllocationAggregationOptions struct {
 // given AllocationProperty; e.g. Containers can be divided by Namespace, but not vice-a-versa.
 func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAggregationOptions) error {
 	// The order of operations for aggregating allocations is as follows:
+	//
 	//  1. Partition external, idle, and shared allocations into separate sets.
 	//     Also, create the aggSet into which the results will be aggregated.
+	//
 	//  2. Compute sharing coefficients for idle and shared resources
 	//     a) if idle allocation is to be shared, compute idle coefficients
 	//     b) if idle allocation is NOT shared, but filters are present, compute
 	//        idle filtration coefficients for the purpose of only returning the
 	//        portion of idle allocation that would have been shared with the
 	//        unfiltered results. (See unit tests 5.a,b,c)
-	//     c) generate shared allocation for then given shared overhead, which
+	//     c) generate shared allocation for the given shared overhead, which
 	//        must happen after (2a) and (2b)
 	//     d) if there are shared resources, compute share coefficients
+	//
 	//  3. Drop any allocation that fails any of the filters
+	//
 	//  4. Distribute idle allocations according to the idle coefficients
+	//
 	//  5. Generate aggregation key and insert allocation into the output set
+	//
 	//  6. If idle is shared and resources are shared, some idle might be shared
 	//     with a shared resource. Distribute that to the shared resources
 	//     prior to sharing them with the aggregated results.
+	//
 	//  7. Apply idle filtration coefficients from step (2b)
+	//
 	//  8. Distribute shared allocations according to the share coefficients.
+	//
 	//  9. If there are external allocations that can be aggregated into
 	//     the output (i.e. they can be used to generate a valid key for
 	//     the given properties) then aggregate; otherwise... ignore them?
+	//
 	// 10. If the merge idle option is enabled, merge any remaining idle
 	//     allocations into a single idle allocation. If there was any idle
 	//	   whose costs were not distributed because there was no usage of a
 	//     specific resource type, re-add the idle to the aggregation with
 	//     only that type.
+	//
+	// 11. Distribute any undistributed idle, in the case that idle
+	//     coefficients end up being zero and some idle is not shared.
+
 	if as.IsEmpty() {
 		return nil
 	}
@@ -963,7 +1058,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	}
 
 	// (2c) Convert SharedHourlyCosts to Allocations in the shareSet. This must
-	// come after idle coefficients are computes so that allocations generated
+	// come after idle coefficients are computed so that allocations generated
 	// by shared overhead do not skew the idle coefficient computation.
 	for name, cost := range options.SharedHourlyCosts {
 		if cost > 0.0 {
@@ -1178,7 +1273,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		for _, alloc := range aggSet.allocations {
 			for _, sharedAlloc := range shareSet.allocations {
 				if _, ok := shareCoefficients[alloc.Name]; !ok {
-					if !alloc.IsIdle() {
+					if !alloc.IsIdle() && !alloc.IsUnmounted() {
 						log.Warningf("AllocationSet.AggregateBy: error getting share coefficienct for '%s'", alloc.Name)
 					}
 					continue
@@ -1218,7 +1313,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// In the edge case that some idle has not been distributed because
+	// (11) In the edge case that some idle has not been distributed because
 	// there is no usage of that resource type, add idle back to
 	// aggregations with only that cost applied.
 
@@ -1238,42 +1333,36 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// __idle__ $0      $12     $0
 	// kubecost $12     $0      $7
 
-	if idleSet.Length() > 0 && !options.SplitIdle {
-		if undistributedIdleMap["cpu"] || undistributedIdleMap["gpu"] || undistributedIdleMap["ram"] {
-
-			for _, idleAlloc := range idleSet.allocations {
-
-				skip := false
-
-				// if the idle does not apply to the non-filtered values, skip it
-				for _, ff := range options.FilterFuncs {
-					if !ff(idleAlloc) {
-						skip = true
-						break
-					}
+	hasUndistributedIdle := undistributedIdleMap["cpu"] || undistributedIdleMap["gpu"] || undistributedIdleMap["ram"]
+	if idleSet.Length() > 0 && hasUndistributedIdle {
+		for _, idleAlloc := range idleSet.allocations {
+			// if the idle does not apply to the non-filtered values, skip it
+			skip := false
+			for _, ff := range options.FilterFuncs {
+				if !ff(idleAlloc) {
+					skip = true
+					break
 				}
+			}
+			if skip {
+				continue
+			}
 
-				if skip {
-					continue
+			// if the idle doesn't have a cost to be shared, also skip it
+			if idleAlloc.CPUCost != 0 && idleAlloc.GPUCost != 0 && idleAlloc.RAMCost != 0 {
+				// artificially set the already shared costs to zero
+				if !undistributedIdleMap["cpu"] {
+					idleAlloc.CPUCost = 0
 				}
-
-				// if the idle doesn't have a cost to be shared, also skip it
-				if idleAlloc.CPUCost != 0 && idleAlloc.GPUCost != 0 && idleAlloc.RAMCost != 0 {
-
-					// artificially set the already shared costs to zero
-					if !undistributedIdleMap["cpu"] {
-						idleAlloc.CPUCost = 0
-					}
-					if !undistributedIdleMap["gpu"] {
-						idleAlloc.GPUCost = 0
-					}
-					if !undistributedIdleMap["ram"] {
-						idleAlloc.RAMCost = 0
-					}
-
-					idleAlloc.Name = IdleSuffix
-					aggSet.Insert(idleAlloc)
+				if !undistributedIdleMap["gpu"] {
+					idleAlloc.GPUCost = 0
 				}
+				if !undistributedIdleMap["ram"] {
+					idleAlloc.RAMCost = 0
+				}
+
+				idleAlloc.Name = IdleSuffix
+				aggSet.Insert(idleAlloc)
 			}
 		}
 	}
@@ -1504,169 +1593,7 @@ func (a *Allocation) generateKey(aggregateBy []string, labelConfig *LabelConfig)
 		return ""
 	}
 
-	if labelConfig == nil {
-		labelConfig = NewLabelConfig()
-	}
-
-	// Names will ultimately be joined into a single name, which uniquely
-	// identifies allocations.
-	names := []string{}
-
-	for _, agg := range aggregateBy {
-		switch true {
-		case agg == AllocationClusterProp:
-			names = append(names, a.Properties.Cluster)
-		case agg == AllocationNodeProp:
-			names = append(names, a.Properties.Node)
-		case agg == AllocationNamespaceProp:
-			names = append(names, a.Properties.Namespace)
-		case agg == AllocationControllerKindProp:
-			controllerKind := a.Properties.ControllerKind
-			if controllerKind == "" {
-				// Indicate that allocation has no controller
-				controllerKind = UnallocatedSuffix
-			}
-			names = append(names, controllerKind)
-		case agg == AllocationDaemonSetProp || agg == AllocationStatefulSetProp || agg == AllocationDeploymentProp || agg == AllocationJobProp:
-			controller := a.Properties.Controller
-			if agg != a.Properties.ControllerKind || controller == "" {
-				// The allocation does not have the specified controller kind
-				controller = UnallocatedSuffix
-			}
-			names = append(names, controller)
-		case agg == AllocationControllerProp:
-			controller := a.Properties.Controller
-			if controller == "" {
-				// Indicate that allocation has no controller
-				controller = UnallocatedSuffix
-			} else if a.Properties.ControllerKind != "" {
-				controller = fmt.Sprintf("%s:%s", a.Properties.ControllerKind, controller)
-			}
-			names = append(names, controller)
-		case agg == AllocationPodProp:
-			names = append(names, a.Properties.Pod)
-		case agg == AllocationContainerProp:
-			names = append(names, a.Properties.Container)
-		case agg == AllocationServiceProp:
-			services := a.Properties.Services
-			if len(services) == 0 {
-				// Indicate that allocation has no services
-				names = append(names, UnallocatedSuffix)
-			} else {
-				// This just uses the first service
-				for _, service := range services {
-					names = append(names, service)
-					break
-				}
-			}
-		case strings.HasPrefix(agg, "label:"):
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelName := labelConfig.Sanitize(strings.TrimPrefix(agg, "label:"))
-				if labelValue, ok := labels[labelName]; ok {
-					names = append(names, fmt.Sprintf("%s=%s", labelName, labelValue))
-				} else {
-					names = append(names, UnallocatedSuffix)
-				}
-			}
-		case strings.HasPrefix(agg, "annotation:"):
-			annotations := a.Properties.Annotations
-			if annotations == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				annotationName := labelConfig.Sanitize(strings.TrimPrefix(agg, "annotation:"))
-				if annotationValue, ok := annotations[annotationName]; ok {
-					names = append(names, fmt.Sprintf("%s=%s", annotationName, annotationValue))
-				} else {
-					names = append(names, UnallocatedSuffix)
-				}
-			}
-		case agg == AllocationDepartmentProp:
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelNames := strings.Split(labelConfig.DepartmentLabel, ",")
-				for _, labelName := range labelNames {
-					labelName = labelConfig.Sanitize(labelName)
-					if labelValue, ok := labels[labelName]; ok {
-						names = append(names, labelValue)
-					} else {
-						names = append(names, UnallocatedSuffix)
-					}
-				}
-			}
-		case agg == AllocationEnvironmentProp:
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelNames := strings.Split(labelConfig.EnvironmentLabel, ",")
-				for _, labelName := range labelNames {
-					labelName = labelConfig.Sanitize(labelName)
-					if labelValue, ok := labels[labelName]; ok {
-						names = append(names, labelValue)
-					} else {
-						names = append(names, UnallocatedSuffix)
-					}
-				}
-			}
-		case agg == AllocationOwnerProp:
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelNames := strings.Split(labelConfig.OwnerLabel, ",")
-				for _, labelName := range labelNames {
-					labelName = labelConfig.Sanitize(labelName)
-					if labelValue, ok := labels[labelName]; ok {
-						names = append(names, labelValue)
-					} else {
-						names = append(names, UnallocatedSuffix)
-					}
-				}
-			}
-		case agg == AllocationProductProp:
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelNames := strings.Split(labelConfig.ProductLabel, ",")
-				for _, labelName := range labelNames {
-					labelName = labelConfig.Sanitize(labelName)
-					if labelValue, ok := labels[labelName]; ok {
-						names = append(names, labelValue)
-					} else {
-						names = append(names, UnallocatedSuffix)
-					}
-				}
-			}
-		case agg == AllocationTeamProp:
-			labels := a.Properties.Labels
-			if labels == nil {
-				names = append(names, UnallocatedSuffix)
-			} else {
-				labelNames := strings.Split(labelConfig.TeamLabel, ",")
-				for _, labelName := range labelNames {
-					labelName = labelConfig.Sanitize(labelName)
-					if labelValue, ok := labels[labelName]; ok {
-						names = append(names, labelValue)
-					} else {
-						names = append(names, UnallocatedSuffix)
-					}
-				}
-			}
-		default:
-			// This case should never be reached, as input up until this point
-			// should be checked and rejected if invalid. But if we do get a
-			// value we don't recognize, log a warning.
-			log.Warningf("AggregateBy: illegal aggregation parameter: %s", agg)
-		}
-	}
-
-	return strings.Join(names, "/")
+	return a.Properties.GenerateKey(aggregateBy, labelConfig)
 }
 
 // Clone returns a new AllocationSet with a deep copy of the given

+ 179 - 7
pkg/kubecost/allocationprops.go

@@ -5,6 +5,7 @@ import (
 	"sort"
 	"strings"
 
+	"github.com/kubecost/cost-model/pkg/log"
 	"github.com/kubecost/cost-model/pkg/prom"
 )
 
@@ -128,9 +129,7 @@ func (p *AllocationProperties) Clone() *AllocationProperties {
 	clone.ProviderID = p.ProviderID
 
 	var services []string
-	for _, s := range p.Services {
-		services = append(services, s)
-	}
+	services = append(services, p.Services...)
 	clone.Services = services
 
 	labels := make(map[string]string, len(p.Labels))
@@ -229,6 +228,179 @@ func (p *AllocationProperties) Equal(that *AllocationProperties) bool {
 	return true
 }
 
+// GenerateKey generates a string that represents the key by which the
+// AllocationProperties should be aggregated, given the properties defined by
+// the aggregateBy parameter and the given label configuration.
+func (p *AllocationProperties) GenerateKey(aggregateBy []string, labelConfig *LabelConfig) string {
+	if p == nil {
+		return ""
+	}
+
+	if labelConfig == nil {
+		labelConfig = NewLabelConfig()
+	}
+
+	// Names will ultimately be joined into a single name, which uniquely
+	// identifies allocations.
+	names := []string{}
+
+	for _, agg := range aggregateBy {
+		switch true {
+		case agg == AllocationClusterProp:
+			names = append(names, p.Cluster)
+		case agg == AllocationNodeProp:
+			names = append(names, p.Node)
+		case agg == AllocationNamespaceProp:
+			names = append(names, p.Namespace)
+		case agg == AllocationControllerKindProp:
+			controllerKind := p.ControllerKind
+			if controllerKind == "" {
+				// Indicate that allocation has no controller
+				controllerKind = UnallocatedSuffix
+			}
+			names = append(names, controllerKind)
+		case agg == AllocationDaemonSetProp || agg == AllocationStatefulSetProp || agg == AllocationDeploymentProp || agg == AllocationJobProp:
+			controller := p.Controller
+			if agg != p.ControllerKind || controller == "" {
+				// The allocation does not have the specified controller kind
+				controller = UnallocatedSuffix
+			}
+			names = append(names, controller)
+		case agg == AllocationControllerProp:
+			controller := p.Controller
+			if controller == "" {
+				// Indicate that allocation has no controller
+				controller = UnallocatedSuffix
+			} else if p.ControllerKind != "" {
+				controller = fmt.Sprintf("%s:%s", p.ControllerKind, controller)
+			}
+			names = append(names, controller)
+		case agg == AllocationPodProp:
+			names = append(names, p.Pod)
+		case agg == AllocationContainerProp:
+			names = append(names, p.Container)
+		case agg == AllocationServiceProp:
+			services := p.Services
+			if len(services) == 0 {
+				// Indicate that allocation has no services
+				names = append(names, UnallocatedSuffix)
+			} else {
+				// This just uses the first service
+				for _, service := range services {
+					names = append(names, service)
+					break
+				}
+			}
+		case strings.HasPrefix(agg, "label:"):
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelName := labelConfig.Sanitize(strings.TrimPrefix(agg, "label:"))
+				if labelValue, ok := labels[labelName]; ok {
+					names = append(names, fmt.Sprintf("%s=%s", labelName, labelValue))
+				} else {
+					names = append(names, UnallocatedSuffix)
+				}
+			}
+		case strings.HasPrefix(agg, "annotation:"):
+			annotations := p.Annotations
+			if annotations == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				annotationName := labelConfig.Sanitize(strings.TrimPrefix(agg, "annotation:"))
+				if annotationValue, ok := annotations[annotationName]; ok {
+					names = append(names, fmt.Sprintf("%s=%s", annotationName, annotationValue))
+				} else {
+					names = append(names, UnallocatedSuffix)
+				}
+			}
+		case agg == AllocationDepartmentProp:
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelNames := strings.Split(labelConfig.DepartmentLabel, ",")
+				for _, labelName := range labelNames {
+					labelName = labelConfig.Sanitize(labelName)
+					if labelValue, ok := labels[labelName]; ok {
+						names = append(names, labelValue)
+					} else {
+						names = append(names, UnallocatedSuffix)
+					}
+				}
+			}
+		case agg == AllocationEnvironmentProp:
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelNames := strings.Split(labelConfig.EnvironmentLabel, ",")
+				for _, labelName := range labelNames {
+					labelName = labelConfig.Sanitize(labelName)
+					if labelValue, ok := labels[labelName]; ok {
+						names = append(names, labelValue)
+					} else {
+						names = append(names, UnallocatedSuffix)
+					}
+				}
+			}
+		case agg == AllocationOwnerProp:
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelNames := strings.Split(labelConfig.OwnerLabel, ",")
+				for _, labelName := range labelNames {
+					labelName = labelConfig.Sanitize(labelName)
+					if labelValue, ok := labels[labelName]; ok {
+						names = append(names, labelValue)
+					} else {
+						names = append(names, UnallocatedSuffix)
+					}
+				}
+			}
+		case agg == AllocationProductProp:
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelNames := strings.Split(labelConfig.ProductLabel, ",")
+				for _, labelName := range labelNames {
+					labelName = labelConfig.Sanitize(labelName)
+					if labelValue, ok := labels[labelName]; ok {
+						names = append(names, labelValue)
+					} else {
+						names = append(names, UnallocatedSuffix)
+					}
+				}
+			}
+		case agg == AllocationTeamProp:
+			labels := p.Labels
+			if labels == nil {
+				names = append(names, UnallocatedSuffix)
+			} else {
+				labelNames := strings.Split(labelConfig.TeamLabel, ",")
+				for _, labelName := range labelNames {
+					labelName = labelConfig.Sanitize(labelName)
+					if labelValue, ok := labels[labelName]; ok {
+						names = append(names, labelValue)
+					} else {
+						names = append(names, UnallocatedSuffix)
+					}
+				}
+			}
+		default:
+			// This case should never be reached, as input up until this point
+			// should be checked and rejected if invalid. But if we do get a
+			// value we don't recognize, log a warning.
+			log.Warningf("generateKey: illegal aggregation parameter: %s", agg)
+		}
+	}
+
+	return strings.Join(names, "/")
+}
+
 // Intersection returns an *AllocationProperties which contains all matching fields between the calling and parameter AllocationProperties
 // nillable slices and maps are left as nil
 func (p *AllocationProperties) Intersection(that *AllocationProperties) *AllocationProperties {
@@ -310,13 +482,13 @@ func (p *AllocationProperties) String() string {
 	for k, prop := range p.Labels {
 		labelStrs = append(labelStrs, fmt.Sprintf("%s:%s", k, prop))
 	}
-	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(strs, ",")))
+	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(labelStrs, ",")))
 
-	var AnnotationStrs []string
+	var annotationStrs []string
 	for k, prop := range p.Annotations {
-		AnnotationStrs = append(AnnotationStrs, fmt.Sprintf("%s:%s", k, prop))
+		annotationStrs = append(annotationStrs, fmt.Sprintf("%s:%s", k, prop))
 	}
-	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(strs, ",")))
+	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(annotationStrs, ",")))
 
 	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
 }

+ 3 - 0
pkg/kubecost/assetprops.go

@@ -24,6 +24,9 @@ const (
 	// AssetNameProp describes the name of the Asset
 	AssetNameProp AssetProperty = "name"
 
+	// AssetNodeProp describes the node of the Asset
+	AssetNodeProp AssetProperty = "node"
+
 	// AssetProjectProp describes the project of the Asset
 	AssetProjectProp AssetProperty = "project"
 

+ 1254 - 0
pkg/kubecost/summaryallocation.go

@@ -0,0 +1,1254 @@
+package kubecost
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
+// SummaryAllocation summarizes an Allocation, keeping only fields necessary
+// for providing a high-level view of identifying the Allocation over a period
+// of time (Start, End) over which it ran, and inspecting the associated per-
+// resource costs (subtotaled with adjustments), total cost, and efficiency.
+//
+// SummaryAllocation does not have a concept of Window (i.e. the time period
+// within which it is defined, as opposed to the Start and End times). That
+// context must be provided by a SummaryAllocationSet.
+type SummaryAllocation struct {
+	Name                   string                `json:"name"`
+	// Properties is excluded from JSON and is dropped (set to nil) the first
+	// time Add is called, so an aggregated SummaryAllocation has none.
+	Properties             *AllocationProperties `json:"-"`
+	Start                  time.Time             `json:"start"`
+	End                    time.Time             `json:"end"`
+	// Request and usage averages are rates over the allocation's lifespan;
+	// the cost fields are cumulative and, when reconciliation is enabled,
+	// already include their adjustments (see NewSummaryAllocation).
+	CPUCoreRequestAverage  float64               `json:"cpuCoreRequestAverage"`
+	CPUCoreUsageAverage    float64               `json:"cpuCoreUsageAverage"`
+	CPUCost                float64               `json:"cpuCost"`
+	GPUCost                float64               `json:"gpuCost"`
+	NetworkCost            float64               `json:"networkCost"`
+	LoadBalancerCost       float64               `json:"loadBalancerCost"`
+	PVCost                 float64               `json:"pvCost"`
+	RAMBytesRequestAverage float64               `json:"ramByteRequestAverage"`
+	RAMBytesUsageAverage   float64               `json:"ramByteUsageAverage"`
+	RAMCost                float64               `json:"ramCost"`
+	SharedCost             float64               `json:"sharedCost"`
+	ExternalCost           float64               `json:"externalCost"`
+	// Share marks the allocation for distribution among other allocations
+	// during aggregation; it is internal and never serialized.
+	Share                  bool                  `json:"-"`
+}
+
+// NewSummaryAllocation converts an Allocation to a SummaryAllocation by
+// dropping unnecessary fields and consolidating others (e.g. adjustments).
+// Reconciliation happens here because that process is synonymous with the
+// consolidation of adjustment fields. If reconcile is false, no adjustments
+// are applied; if only reconcileNetwork is false, the network adjustment
+// alone is omitted.
+func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *SummaryAllocation {
+	if alloc == nil {
+		return nil
+	}
+
+	// Decide up front which adjustments to fold into the summarized costs.
+	// (Adding an adjustment and then subtracting it back out can leave
+	// floating-point residue, so avoid that pattern.)
+	cpuAdj := alloc.CPUCostAdjustment
+	gpuAdj := alloc.GPUCostAdjustment
+	netAdj := alloc.NetworkCostAdjustment
+	lbAdj := alloc.LoadBalancerCostAdjustment
+	pvAdj := alloc.PVCostAdjustment
+	ramAdj := alloc.RAMCostAdjustment
+	if !reconcile {
+		cpuAdj, gpuAdj, netAdj, lbAdj, pvAdj, ramAdj = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
+	} else if !reconcileNetwork {
+		netAdj = 0.0
+	}
+
+	return &SummaryAllocation{
+		Name:                   alloc.Name,
+		Properties:             alloc.Properties.Clone(),
+		Start:                  alloc.Start,
+		End:                    alloc.End,
+		CPUCoreRequestAverage:  alloc.CPUCoreRequestAverage,
+		CPUCoreUsageAverage:    alloc.CPUCoreUsageAverage,
+		CPUCost:                alloc.CPUCost + cpuAdj,
+		GPUCost:                alloc.GPUCost + gpuAdj,
+		NetworkCost:            alloc.NetworkCost + netAdj,
+		LoadBalancerCost:       alloc.LoadBalancerCost + lbAdj,
+		PVCost:                 alloc.PVCost() + pvAdj,
+		RAMBytesRequestAverage: alloc.RAMBytesRequestAverage,
+		RAMBytesUsageAverage:   alloc.RAMBytesUsageAverage,
+		RAMCost:                alloc.RAMCost + ramAdj,
+		SharedCost:             alloc.SharedCost,
+		ExternalCost:           alloc.ExternalCost,
+	}
+}
+
+// Add sums two SummaryAllocations, adding the given SummaryAllocation to the
+// receiving one, thus mutating the receiver. For performance reasons, it
+// simply drops Properties, so a SummaryAllocation can only be Added once.
+func (sa *SummaryAllocation) Add(that *SummaryAllocation) error {
+	if sa == nil || that == nil {
+		return errors.New("cannot Add a nil SummaryAllocation")
+	}
+
+	// Once Added, a SummaryAllocation has no Properties. This saves us from
+	// having to compute the intersection of two sets of Properties, which is
+	// expensive.
+	sa.Properties = nil
+
+	// Hoist each operand's duration into a local; Minutes recomputes
+	// End.Sub(Start) on every call.
+	saMins := sa.Minutes()
+	thatMins := that.Minutes()
+
+	// Sum non-cumulative fields by turning them into cumulative values
+	// (rate * minutes), adding them, and then converting them back into
+	// averages after minutes have been combined (just below).
+	cpuReqCoreMins := sa.CPUCoreRequestAverage*saMins + that.CPUCoreRequestAverage*thatMins
+	cpuUseCoreMins := sa.CPUCoreUsageAverage*saMins + that.CPUCoreUsageAverage*thatMins
+	ramReqByteMins := sa.RAMBytesRequestAverage*saMins + that.RAMBytesRequestAverage*thatMins
+	ramUseByteMins := sa.RAMBytesUsageAverage*saMins + that.RAMBytesUsageAverage*thatMins
+
+	// Expand Start and End to be the "max" of among the given Allocations
+	if that.Start.Before(sa.Start) {
+		sa.Start = that.Start
+	}
+	if that.End.After(sa.End) {
+		sa.End = that.End
+	}
+
+	// Convert cumulative request and usage back into rates over the
+	// combined duration.
+	combinedMins := sa.Minutes()
+	if combinedMins > 0 {
+		sa.CPUCoreRequestAverage = cpuReqCoreMins / combinedMins
+		sa.CPUCoreUsageAverage = cpuUseCoreMins / combinedMins
+		sa.RAMBytesRequestAverage = ramReqByteMins / combinedMins
+		sa.RAMBytesUsageAverage = ramUseByteMins / combinedMins
+	} else {
+		sa.CPUCoreRequestAverage = 0.0
+		sa.CPUCoreUsageAverage = 0.0
+		sa.RAMBytesRequestAverage = 0.0
+		sa.RAMBytesUsageAverage = 0.0
+	}
+
+	// Sum all cumulative cost fields
+	sa.CPUCost += that.CPUCost
+	sa.ExternalCost += that.ExternalCost
+	sa.GPUCost += that.GPUCost
+	sa.LoadBalancerCost += that.LoadBalancerCost
+	sa.NetworkCost += that.NetworkCost
+	sa.PVCost += that.PVCost
+	sa.RAMCost += that.RAMCost
+	sa.SharedCost += that.SharedCost
+
+	return nil
+}
+
+// Clone copies the SummaryAllocation and returns the copy. A nil receiver
+// yields nil, consistent with the nil-safety of the other methods.
+func (sa *SummaryAllocation) Clone() *SummaryAllocation {
+	if sa == nil {
+		return nil
+	}
+
+	return &SummaryAllocation{
+		Name:                   sa.Name,
+		Properties:             sa.Properties.Clone(),
+		Start:                  sa.Start,
+		End:                    sa.End,
+		CPUCoreRequestAverage:  sa.CPUCoreRequestAverage,
+		CPUCoreUsageAverage:    sa.CPUCoreUsageAverage,
+		CPUCost:                sa.CPUCost,
+		GPUCost:                sa.GPUCost,
+		NetworkCost:            sa.NetworkCost,
+		LoadBalancerCost:       sa.LoadBalancerCost,
+		PVCost:                 sa.PVCost,
+		RAMBytesRequestAverage: sa.RAMBytesRequestAverage,
+		RAMBytesUsageAverage:   sa.RAMBytesUsageAverage,
+		RAMCost:                sa.RAMCost,
+		SharedCost:             sa.SharedCost,
+		ExternalCost:           sa.ExternalCost,
+		// Also copy the internal Share flag, which the original omitted, so
+		// a clone of a to-be-shared allocation remains marked for sharing.
+		Share:                  sa.Share,
+	}
+}
+
+// CPUEfficiency is the ratio of CPU usage to CPU request. If there is no
+// request and no usage or cost, then efficiency is zero. If there is no
+// request, but there is usage or cost, then efficiency is 100%.
+func (sa *SummaryAllocation) CPUEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
+	switch {
+	case sa.CPUCoreRequestAverage > 0:
+		return sa.CPUCoreUsageAverage / sa.CPUCoreRequestAverage
+	case sa.CPUCoreUsageAverage == 0.0 || sa.CPUCost == 0.0:
+		return 0.0
+	default:
+		return 1.0
+	}
+}
+
+// generateKey returns the aggregation key for this SummaryAllocation, as
+// derived from its Properties and the given aggregation parameters. A nil
+// allocation yields an empty key.
+func (sa *SummaryAllocation) generateKey(aggregateBy []string, labelConfig *LabelConfig) string {
+	if sa == nil {
+		return ""
+	}
+
+	return sa.Properties.GenerateKey(aggregateBy, labelConfig)
+}
+
+// IsExternal is true if the given SummaryAllocation represents external
+// costs, as indicated by its name.
+func (sa *SummaryAllocation) IsExternal() bool {
+	return sa != nil && strings.Contains(sa.Name, ExternalSuffix)
+}
+
+// IsIdle is true if the given SummaryAllocation represents idle costs, as
+// indicated by its name.
+func (sa *SummaryAllocation) IsIdle() bool {
+	return sa != nil && strings.Contains(sa.Name, IdleSuffix)
+}
+
+// IsUnallocated is true if the given SummaryAllocation represents
+// unallocated costs, as indicated by its name.
+func (sa *SummaryAllocation) IsUnallocated() bool {
+	return sa != nil && strings.Contains(sa.Name, UnallocatedSuffix)
+}
+
+// IsUnmounted is true if the given SummaryAllocation represents unmounted
+// volume costs, as indicated by its name.
+func (sa *SummaryAllocation) IsUnmounted() bool {
+	return sa != nil && strings.Contains(sa.Name, UnmountedSuffix)
+}
+
+// Minutes returns the number of minutes the SummaryAllocation represents,
+// i.e. the span between its End and Start times.
+func (sa *SummaryAllocation) Minutes() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
+	lifespan := sa.End.Sub(sa.Start)
+	return lifespan.Minutes()
+}
+
+// RAMEfficiency is the ratio of RAM usage to RAM request. If there is no
+// request and no usage or cost, then efficiency is zero. If there is no
+// request, but there is usage or cost, then efficiency is 100%.
+func (sa *SummaryAllocation) RAMEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
+	switch {
+	case sa.RAMBytesRequestAverage > 0:
+		return sa.RAMBytesUsageAverage / sa.RAMBytesRequestAverage
+	case sa.RAMBytesUsageAverage == 0.0 || sa.RAMCost == 0.0:
+		return 0.0
+	default:
+		return 1.0
+	}
+}
+
+// TotalCost is the total cost of the SummaryAllocation: the sum of every
+// per-resource cost plus shared and external costs.
+func (sa *SummaryAllocation) TotalCost() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
+	total := sa.CPUCost
+	total += sa.GPUCost
+	total += sa.RAMCost
+	total += sa.PVCost
+	total += sa.NetworkCost
+	total += sa.LoadBalancerCost
+	total += sa.SharedCost
+	total += sa.ExternalCost
+
+	return total
+}
+
+// TotalEfficiency is the cost-weighted average of CPU and RAM efficiency.
+// If there is no CPU or RAM cost at all, then efficiency is zero.
+func (sa *SummaryAllocation) TotalEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
+	if sa.RAMCost+sa.CPUCost > 0 {
+		// Weight each resource's efficiency by that resource's cost.
+		weighted := sa.RAMEfficiency()*sa.RAMCost + sa.CPUEfficiency()*sa.CPUCost
+		return weighted / (sa.CPUCost + sa.RAMCost)
+	}
+
+	return 0.0
+}
+
+// SummaryAllocationSet stores a set of SummaryAllocations, each with a unique
+// name, that share a window. An AllocationSet is mutable, so treat it like a
+// threadsafe map.
+type SummaryAllocationSet struct {
+	sync.RWMutex
+	// externalKeys indexes the names of allocations in the set representing
+	// external costs; idleKeys does the same for idle costs. NOTE(review):
+	// these appear to be maintained by Insert (not shown here) — confirm.
+	externalKeys       map[string]bool
+	idleKeys           map[string]bool
+	SummaryAllocations map[string]*SummaryAllocation `json:"allocations"`
+	Window             Window                        `json:"window"`
+}
+
+// NewSummaryAllocationSet converts an AllocationSet to a SummaryAllocationSet.
+// Filter functions, sharing functions, and reconciliation parameters are
+// required for unfortunate reasons to do with performance and legacy order-of-
+// operations details, as well as the fact that reconciliation has been
+// pushed down to the conversion step between Allocation and SummaryAllocation.
+func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
+	if as == nil {
+		return nil
+	}
+
+	// If we can know the exact size of the map, use it. If filters or sharing
+	// functions are present, we can't know the size, so we make a default map.
+	var sasMap map[string]*SummaryAllocation
+	if len(ffs) == 0 && len(sfs) == 0 {
+		// No filters, so make the map of summary allocations exactly the size
+		// of the origin allocation set.
+		sasMap = make(map[string]*SummaryAllocation, len(as.allocations))
+	} else {
+		// There are filters, so start with a standard map
+		sasMap = make(map[string]*SummaryAllocation)
+	}
+
+	sas := &SummaryAllocationSet{
+		SummaryAllocations: sasMap,
+		Window:             as.Window.Clone(),
+	}
+
+	for _, alloc := range as.allocations {
+		// First, detect if the allocation should be shared. If so, mark it as
+		// such, insert it, and continue.
+		shouldShare := false
+		for _, sf := range sfs {
+			if sf(alloc) {
+				shouldShare = true
+				break
+			}
+		}
+		if shouldShare {
+			sa := NewSummaryAllocation(alloc, reconcile, reconcileNetwork)
+			sa.Share = true
+			err := sas.Insert(sa)
+			if err != nil {
+				log.Errorf("SummaryAllocation: error inserting shared summary of %s: %s", alloc.Name, err)
+			}
+			continue
+		}
+
+		// If the allocation does not pass any of the given filter functions,
+		// do not insert it into the set.
+		shouldFilter := false
+		for _, ff := range ffs {
+			if !ff(alloc) {
+				shouldFilter = true
+				break
+			}
+		}
+		if shouldFilter {
+			continue
+		}
+
+		err := sas.Insert(NewSummaryAllocation(alloc, reconcile, reconcileNetwork))
+		if err != nil {
+			// Include the error itself so the failure is diagnosable.
+			log.Errorf("SummaryAllocation: error inserting summary of %s: %s", alloc.Name, err)
+		}
+	}
+
+	// Carry over external and idle key indexes, initializing the maps first
+	// so that copying never writes to a nil map (which would panic).
+	if len(as.externalKeys) > 0 && sas.externalKeys == nil {
+		sas.externalKeys = make(map[string]bool, len(as.externalKeys))
+	}
+	for key := range as.externalKeys {
+		sas.externalKeys[key] = true
+	}
+
+	if len(as.idleKeys) > 0 && sas.idleKeys == nil {
+		sas.idleKeys = make(map[string]bool, len(as.idleKeys))
+	}
+	for key := range as.idleKeys {
+		sas.idleKeys[key] = true
+	}
+
+	return sas
+}
+
+// Add sums two SummaryAllocationSets, which Adds all SummaryAllocations in the
+// given SummaryAllocationSet to their counterparts in the receiving set. Add
+// also expands the Window to include both constituent Windows, in the case
+// that Add is being used for accumulating (as opposed to aggregating). For
+// performance reasons, the function may return either a new set, or an
+// unmodified original, so it should not be assumed that the original sets are
+// safely usable after calling Add.
+func (sas *SummaryAllocationSet) Add(that *SummaryAllocationSet) (*SummaryAllocationSet, error) {
+	// If either set is nil or empty, return the other set unmodified.
+	if sas == nil || len(sas.SummaryAllocations) == 0 {
+		return that, nil
+	}
+
+	if that == nil || len(that.SummaryAllocations) == 0 {
+		return sas, nil
+	}
+
+	if sas.Window.IsOpen() {
+		return nil, errors.New("cannot add a SummaryAllocationSet with an open window")
+	}
+
+	// Set start, end to min(start), max(end)
+	start := *sas.Window.Start()
+	end := *sas.Window.End()
+	if that.Window.Start().Before(start) {
+		start = *that.Window.Start()
+	}
+	if that.Window.End().After(end) {
+		end = *that.Window.End()
+	}
+
+	// The accumulator inherits the combined window and starts sized to the
+	// receiver's allocation count.
+	acc := &SummaryAllocationSet{
+		SummaryAllocations: make(map[string]*SummaryAllocation, len(sas.SummaryAllocations)),
+		Window:             NewClosedWindow(start, end),
+	}
+
+	// Hold read locks on both sets while inserting their allocations into
+	// the accumulator.
+	sas.RLock()
+	defer sas.RUnlock()
+
+	that.RLock()
+	defer that.RUnlock()
+
+	for _, alloc := range sas.SummaryAllocations {
+		err := acc.Insert(alloc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, alloc := range that.SummaryAllocations {
+		err := acc.Insert(alloc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return acc, nil
+}
+
+// AggregateBy aggregates the Allocations in the given AllocationSet by the given
+// AllocationProperty. This will only be legal if the AllocationSet is divisible by the
+// given AllocationProperty; e.g. Containers can be divided by Namespace, but not vice-a-versa.
+func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *AllocationAggregationOptions) error {
+	if sas == nil || len(sas.SummaryAllocations) == 0 {
+		return nil
+	}
+
+	if sas.Window.IsOpen() {
+		return errors.New("cannot aggregate a SummaryAllocationSet with an open window")
+	}
+
+	if options == nil {
+		options = &AllocationAggregationOptions{}
+	}
+
+	if options.LabelConfig == nil {
+		options.LabelConfig = NewLabelConfig()
+	}
+
+	// Check if we have any work to do; if not, then early return. If
+	// aggregateBy is nil, we don't aggregate anything. On the other hand,
+	// an empty slice implies that we should aggregate everything. (See
+	// generateKey for why that makes sense.)
+	shouldAggregate := aggregateBy != nil
+	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
+	if !shouldAggregate && !shouldShare {
+		return nil
+	}
+
+	// The order of operations for aggregating a SummaryAllocationSet is as
+	// follows:
+	//
+	//  1. Partition external, idle, and shared allocations into separate sets.
+	//     Also, create the resultSet into which the results will be aggregated.
+	//
+	//  2. Record resource totals for shared costs and unmounted volumes so
+	//     that we can account for them in computing idle coefficients.
+	//
+	//  3. Retrieve pre-computed allocation resource totals, which will be used
+	//     to compute idle sharing coefficients.
+	//
+	//  4. Compute sharing coefficients per-aggregation, if sharing resources.
+	//
+	//  5. Distribute idle allocations according to the idle coefficients.
+	//
+	//  6. Record allocation resource totals (after filtration) if filters have
+	//     been applied. (Used for filtering proportional amount of idle.)
+	//
+	//  7. Generate aggregation key and insert allocation into the output set
+	//
+	//  8. If idle is shared and resources are shared, it's probable that some
+	//     amount of idle cost will be shared with a shared resource.
+	//     Distribute that idle cost, if it exists, among the respective shared
+	//     allocations before sharing them with the aggregated allocations.
+	//
+	//  9. Apply idle filtration, which "filters" the idle cost, or scales it
+	//     by the proportion of allocation resources remaining after filters
+	//     have been applied.
+	//
+	// 10. Convert shared hourly cost into a cumulative allocation to share,
+	//     and insert it into the share set.
+	//
+	// 11. Distribute shared resources according to sharing coefficients.
+	//
+	// 12. Insert external allocations into the result set.
+	//
+	// 13. Insert any undistributed idle, in the case that idle
+	//     coefficients end up being zero and some idle is not shared.
+	//
+	// 14. Combine all idle allocations into a single idle allocation, unless
+	//     the option to keep idle split by cluster or node is enabled.
+
+	// 1. Partition external, idle, and shared allocations into separate sets.
+	// Also, create the resultSet into which the results will be aggregated.
+
+	// resultSet will collect the aggregated allocations
+	resultSet := &SummaryAllocationSet{
+		Window: sas.Window.Clone(),
+	}
+
+	// externalSet will collect external allocations
+	externalSet := &SummaryAllocationSet{
+		Window: sas.Window.Clone(),
+	}
+
+	// idleSet will be shared among resultSet after initial aggregation
+	// is complete
+	idleSet := &SummaryAllocationSet{
+		Window: sas.Window.Clone(),
+	}
+
+	// shareSet will be shared among resultSet after initial aggregation
+	// is complete
+	shareSet := &SummaryAllocationSet{
+		Window: sas.Window.Clone(),
+	}
+
+	sas.Lock()
+	defer sas.Unlock()
+
+	// 2. Record resource totals for shared costs, aggregating by cluster or by
+	// node (depending on if idle is partitioned by cluster or node) so that we
+	// can account for them in computing idle coefficients. Do the same for
+	// unmounted volume costs, which only require a total cost.
+	sharedResourceTotals := map[string]*AllocationTotals{}
+	totalUnmountedCost := 0.0
+
+	// 1 & 2. Identify set membership and aggregate aforementioned totals.
+	for _, sa := range sas.SummaryAllocations {
+		if sa.Share {
+			var key string
+			if options.IdleByNode {
+				key = fmt.Sprintf("%s/%s", sa.Properties.Cluster, sa.Properties.Node)
+			} else {
+				key = sa.Properties.Cluster
+			}
+
+			if _, ok := sharedResourceTotals[key]; !ok {
+				sharedResourceTotals[key] = &AllocationTotals{}
+			}
+			sharedResourceTotals[key].CPUCost += sa.CPUCost
+			sharedResourceTotals[key].GPUCost += sa.GPUCost
+			sharedResourceTotals[key].LoadBalancerCost += sa.LoadBalancerCost
+			sharedResourceTotals[key].NetworkCost += sa.NetworkCost
+			sharedResourceTotals[key].PersistentVolumeCost += sa.PVCost
+			sharedResourceTotals[key].RAMCost += sa.RAMCost
+
+			shareSet.Insert(sa)
+			delete(sas.SummaryAllocations, sa.Name)
+
+			continue
+		}
+
+		// External allocations get aggregated post-hoc (see step 6) and do
+		// not necessarily contain complete sets of properties, so they are
+		// moved to a separate AllocationSet.
+		if sa.IsExternal() {
+			delete(sas.externalKeys, sa.Name)
+			delete(sas.SummaryAllocations, sa.Name)
+			externalSet.Insert(sa)
+			continue
+		}
+
+		// Idle allocations should be separated into idleSet if they are to be
+		// shared later on. If they are not to be shared, then add them to the
+		// resultSet like any other allocation.
+		if sa.IsIdle() {
+			delete(sas.idleKeys, sa.Name)
+			delete(sas.SummaryAllocations, sa.Name)
+
+			if options.ShareIdle == ShareEven || options.ShareIdle == ShareWeighted {
+				idleSet.Insert(sa)
+			} else {
+				resultSet.Insert(sa)
+			}
+
+			continue
+		}
+
+		// Track total unmounted cost because it must be taken out of total
+		// allocated costs for sharing coefficients.
+		if sa.IsUnmounted() {
+			totalUnmountedCost += sa.TotalCost()
+		}
+	}
+
+	// It's possible that no more un-shared, non-idle, non-external allocations
+	// remain at this point. This always results in an emptySet, so return early.
+	if len(sas.SummaryAllocations) == 0 {
+		sas.SummaryAllocations = map[string]*SummaryAllocation{}
+		return nil
+	}
+
+	// 3. Retrieve pre-computed allocation resource totals, which will be used
+	// to compute idle coefficients, based on the ratio of an allocation's per-
+	// resource cost to the per-resource totals of that allocation's cluster or
+	// node. Whether to perform this operation based on cluster or node is an
+	// option. (See IdleByNode documentation; defaults to idle-by-cluster.)
+	var allocTotals map[string]*AllocationTotals
+	var ok bool
+	if options.IdleByNode {
+		if options.AllocationTotalsStore != nil {
+			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByNode(*sas.Window.Start(), *sas.Window.End())
+			if !ok {
+				return fmt.Errorf("nil allocation resource totals by node for %s", sas.Window)
+			}
+		}
+	} else {
+		if options.AllocationTotalsStore != nil {
+			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByCluster(*sas.Window.Start(), *sas.Window.End())
+			if !ok {
+				return fmt.Errorf("nil allocation resource totals by cluster for %s", sas.Window)
+			}
+		}
+	}
+
+	// If filters have been applied, then we need to record allocation resource
+	// totals after filtration (i.e. the allocations that are present) so that
+	// we can identify the proportion of idle cost to keep. That is, we should
+	// only return the idle cost that would be shared with the remaining
+	// allocations, even if we're keeping idle separate. The totals should be
+	// recorded by idle-key (cluster or node, depending on the IdleByNode
+	// option). Instantiating this map is a signal to record the totals.
+	var allocTotalsAfterFilters map[string]*AllocationTotals
+	if len(resultSet.idleKeys) > 0 && len(options.FilterFuncs) > 0 {
+		allocTotalsAfterFilters = make(map[string]*AllocationTotals, len(resultSet.idleKeys))
+	}
+
+	// If we're recording allocTotalsAfterFilters and there are shared costs,
+	// then record those resource totals here so that idle for those shared
+	// resources gets included.
+	if allocTotalsAfterFilters != nil {
+		for key, rt := range sharedResourceTotals {
+			if _, ok := allocTotalsAfterFilters[key]; !ok {
+				allocTotalsAfterFilters[key] = &AllocationTotals{}
+			}
+
+			// Record only those fields required for computing idle
+			allocTotalsAfterFilters[key].CPUCost += rt.CPUCost
+			allocTotalsAfterFilters[key].GPUCost += rt.GPUCost
+			allocTotalsAfterFilters[key].RAMCost += rt.RAMCost
+		}
+	}
+
+	// Sharing coefficients are recorded by post-aggregation-key (e.g. if
+	// aggregating by namespace, then the key will be the namespace) and only
+	// need to be recorded if there are shared resources. Instantiating this
+	// map is the signal to record sharing coefficients.
+	var sharingCoeffs map[string]float64
+	if len(shareSet.SummaryAllocations) > 0 {
+		sharingCoeffs = map[string]float64{}
+	}
+
+	// Loop over all remaining SummaryAllocations (after filters, sharing, &c.)
+	// doing the following, in this order:
+	//  4. Compute sharing coefficients, if there are shared resources
+	//  5. Distribute idle cost, if sharing idle
+	//  6. Record allocTotalsAfterFiltration, if filters have been applied
+	//  7. Aggregate by key
+	for _, sa := range sas.SummaryAllocations {
+		// Generate key to use for aggregation-by-key and allocation name
+		key := sa.generateKey(aggregateBy, options.LabelConfig)
+
+		// 4. Incrementally add to sharing coefficients before adding idle
+		// cost, which would skew the coefficients. These coefficients will be
+		// later divided by a total, turning them into a coefficient between
+		// 0.0 and 1.0.
+		// NOTE: SummaryAllocation does not support ShareEven, so only record
+		// by cost for cost-weighted distribution.
+		if sharingCoeffs != nil {
+			sharingCoeffs[key] += sa.TotalCost()
+		}
+
+		// 5. Distribute idle allocations according to the idle coefficients.
+		// NOTE: if idle allocation is off (i.e. ShareIdle == ShareNone) then
+		// all idle allocations will be in the resultSet at this point, so idleSet
+		// will be empty and we won't enter this block.
+		if len(idleSet.SummaryAllocations) > 0 {
+			for _, idle := range idleSet.SummaryAllocations {
+				// Idle key is either cluster or node, as determined by the
+				// IdleByNode option.
+				var key string
+
+				// Only share idle allocation with current allocation (sa) if
+				// the relevant properties match (i.e. cluster and/or node)
+				if idle.Properties.Cluster != sa.Properties.Cluster {
+					continue
+				}
+				key = idle.Properties.Cluster
+
+				if options.IdleByNode {
+					if idle.Properties.Node != sa.Properties.Node {
+						continue
+					}
+					key = fmt.Sprintf("%s/%s", idle.Properties.Cluster, idle.Properties.Node)
+				}
+
+				cpuCoeff, gpuCoeff, ramCoeff := ComputeIdleCoefficients(options.ShareIdle, key, sa.CPUCost, sa.GPUCost, sa.RAMCost, allocTotals)
+
+				sa.CPUCost += idle.CPUCost * cpuCoeff
+				sa.GPUCost += idle.GPUCost * gpuCoeff
+				sa.RAMCost += idle.RAMCost * ramCoeff
+			}
+		}
+
+		// The key becomes the allocation's name, which is used as the key by
+		// which the allocation is inserted into the set.
+		sa.Name = key
+
+		// If merging unallocated allocations, rename all unallocated
+		// allocations as simply __unallocated__
+		if options.MergeUnallocated && sa.IsUnallocated() {
+			sa.Name = UnallocatedSuffix
+		}
+
+		// 6. Record filtered resource totals for idle allocation filtration,
+		// only if necessary.
+		if allocTotalsAfterFilters != nil {
+			key := sa.Properties.Cluster
+			if options.IdleByNode {
+				key = fmt.Sprintf("%s/%s", sa.Properties.Cluster, sa.Properties.Node)
+			}
+
+			if _, ok := allocTotalsAfterFilters[key]; ok {
+				allocTotalsAfterFilters[key].CPUCost += sa.CPUCost
+				allocTotalsAfterFilters[key].GPUCost += sa.GPUCost
+				allocTotalsAfterFilters[key].RAMCost += sa.RAMCost
+			} else {
+				allocTotalsAfterFilters[key] = &AllocationTotals{
+					CPUCost: sa.CPUCost,
+					GPUCost: sa.GPUCost,
+					RAMCost: sa.RAMCost,
+				}
+			}
+		}
+
+		// 7. Inserting the allocation with the generated key for a name
+		// performs the actual aggregation step.
+		resultSet.Insert(sa)
+	}
+
+	// 8. If idle is shared and resources are shared, it's probable that some
+	// amount of idle cost will be shared with a shared resource. Distribute
+	// that idle cost, if it exists, among the respective shared allocations
+	// before sharing them with the aggregated allocations.
+	if len(idleSet.SummaryAllocations) > 0 && len(shareSet.SummaryAllocations) > 0 {
+		for _, sa := range shareSet.SummaryAllocations {
+			for _, idle := range idleSet.SummaryAllocations {
+				var key string
+
+				// Only share idle allocation with current allocation (sa) if
+				// the relevant property matches (i.e. Cluster or Node,
+				// depending on which idle sharing option is selected)
+				if options.IdleByNode {
+					if idle.Properties.Node != sa.Properties.Node {
+						continue
+					}
+
+					key = idle.Properties.Node
+				} else {
+					if idle.Properties.Cluster != sa.Properties.Cluster {
+						continue
+					}
+
+					key = idle.Properties.Cluster
+				}
+
+				cpuCoeff, gpuCoeff, ramCoeff := ComputeIdleCoefficients(options.ShareIdle, key, sa.CPUCost, sa.GPUCost, sa.RAMCost, allocTotals)
+
+				sa.CPUCost += idle.CPUCost * cpuCoeff
+				sa.GPUCost += idle.GPUCost * gpuCoeff
+				sa.RAMCost += idle.RAMCost * ramCoeff
+			}
+		}
+	}
+
+	// 9. Apply idle filtration, which "filters" the idle cost, i.e. scales
+	// idle allocation costs per-resource by the proportion of allocation
+	// resources remaining after filtering. In effect, this returns only the
+	// idle costs that would have been shared with the remaining allocations,
+	// even if idle is kept separated.
+	if allocTotalsAfterFilters != nil {
+		for idleKey := range resultSet.idleKeys {
+			ia := resultSet.SummaryAllocations[idleKey]
+
+			var key string
+			if options.IdleByNode {
+				key = ia.Properties.Node
+			} else {
+				key = ia.Properties.Cluster
+			}
+
+			// Percentage of idle that should remain after filters are applied,
+			// which equals the proportion of filtered-to-actual cost.
+			cpuFilterCoeff := 0.0
+			if allocTotals[key].CPUCost > 0.0 {
+				cpuFilterCoeff = allocTotalsAfterFilters[key].CPUCost / allocTotals[key].CPUCost
+			}
+
+			gpuFilterCoeff := 0.0
+			if allocTotals[key].RAMCost > 0.0 {
+				gpuFilterCoeff = allocTotalsAfterFilters[key].RAMCost / allocTotals[key].RAMCost
+			}
+
+			ramFilterCoeff := 0.0
+
+			if allocTotals[key].RAMCost > 0.0 {
+				ramFilterCoeff = allocTotalsAfterFilters[key].RAMCost / allocTotals[key].RAMCost
+			}
+
+			ia.CPUCost *= cpuFilterCoeff
+			ia.GPUCost *= gpuFilterCoeff
+			ia.RAMCost *= ramFilterCoeff
+		}
+	}
+
+	// 10. Convert shared hourly cost into a cumulative allocation to share,
+	// and insert it into the share set.
+	for name, cost := range options.SharedHourlyCosts {
+		if cost > 0.0 {
+			hours := sas.Window.Hours()
+
+			// If set ends in the future, adjust hours accordingly
+			diff := time.Since(*sas.Window.End())
+			if diff < 0.0 {
+				hours += diff.Hours()
+			}
+
+			totalSharedCost := cost * hours
+
+			shareSet.Insert(&SummaryAllocation{
+				Name:       fmt.Sprintf("%s/%s", name, SharedSuffix),
+				Start:      *sas.Window.Start(),
+				End:        *sas.Window.End(),
+				SharedCost: totalSharedCost,
+			})
+		}
+	}
+
+	// 11. Distribute shared resources according to sharing coefficients.
+	// NOTE: ShareEven is not supported
+	if len(shareSet.SummaryAllocations) > 0 {
+		sharingCoeffDenominator := 0.0
+		for _, rt := range allocTotals {
+			sharingCoeffDenominator += rt.TotalCost()
+		}
+
+		// Do not include the shared costs, themselves, when determining
+		// sharing coefficients.
+		for _, rt := range sharedResourceTotals {
+			sharingCoeffDenominator -= rt.TotalCost()
+		}
+
+		// Do not include the unmounted costs when determining sharing
+		// coefficients because they do not receive shared costs.
+		sharingCoeffDenominator -= totalUnmountedCost
+
+		if sharingCoeffDenominator <= 0.0 {
+			log.Warningf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
+		} else {
+			// Compute sharing coeffs by dividing the thus-far accumulated
+			// numerators by the now-finalized denominator.
+			for key := range sharingCoeffs {
+				sharingCoeffs[key] /= sharingCoeffDenominator
+			}
+
+			for key, sa := range resultSet.SummaryAllocations {
+				// Idle and unmounted allocations, by definition, do not
+				// receive shared cost
+				if sa.IsIdle() || sa.IsUnmounted() {
+					continue
+				}
+
+				sharingCoeff := sharingCoeffs[key]
+
+				// Distribute each shared cost with the current allocation on the
+				// basis of the proportion of the allocation's cost (ShareWeighted)
+				// or count (ShareEven) to the total aggregated cost or count. This
+				// condition should hold in spite of filters because the sharing
+				// coefficient denominator is held constant by pre-computed
+				// resource totals and the post-aggregation total cost of the
+				// remaining allocations will, by definition, not be affected.
+				for _, shared := range shareSet.SummaryAllocations {
+					sa.SharedCost += shared.TotalCost() * sharingCoeff
+				}
+			}
+		}
+	}
+
+	// 12. Insert external allocations into the result set.
+	for _, sa := range externalSet.SummaryAllocations {
+		skip := false
+
+		for _, ff := range options.FilterFuncs {
+			// Make an allocation with the same properties and test that
+			// against the FilterFunc to see if the external allocation should
+			// be filtered or not.
+			// TODO:CLEANUP do something about external cost, this stinks
+			ea := &Allocation{Properties: sa.Properties}
+			if !ff(ea) {
+				skip = true
+				break
+			}
+		}
+
+		if !skip {
+			key := sa.generateKey(aggregateBy, options.LabelConfig)
+
+			sa.Name = key
+			resultSet.Insert(sa)
+		}
+	}
+
+	// 13. Distribute remaining, undistributed idle. Undistributed idle is any
+	// per-resource idle cost for which there can be no idle coefficient
+	// computed because there is zero usage across all allocations.
+	for _, ia := range idleSet.SummaryAllocations {
+		key := ia.Properties.Cluster
+		if options.IdleByNode {
+			key = fmt.Sprintf("%s/%s", ia.Properties.Cluster, ia.Properties.Node)
+		}
+
+		rt, ok := allocTotals[key]
+		if !ok {
+			log.Warningf("SummaryAllocation: AggregateBy: cannot handle undistributed idle for '%s'", key)
+			continue
+		}
+
+		hasUndistributableCost := false
+
+		if ia.CPUCost > 0.0 && rt.CPUCost == 0.0 {
+			// There is idle CPU cost, but no allocated CPU cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.CPUCost = 0.0
+		}
+
+		if ia.GPUCost > 0.0 && rt.GPUCost == 0.0 {
+			// There is idle GPU cost, but no allocated GPU cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.GPUCost = 0.0
+		}
+
+		if ia.RAMCost > 0.0 && rt.RAMCost == 0.0 {
+			// There is idle CPU cost, but no allocated CPU cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.RAMCost = 0.0
+		}
+
+		if hasUndistributableCost {
+			ia.Name = fmt.Sprintf("%s/%s", key, IdleSuffix)
+			resultSet.Insert(ia)
+		}
+	}
+
+	// 14. Combine all idle allocations into a single idle allocation, unless
+	// the option to keep idle split by cluster or node is enabled.
+	if !options.SplitIdle {
+		for _, ia := range resultSet.idleAllocations() {
+			resultSet.Delete(ia.Name)
+			ia.Name = IdleSuffix
+			resultSet.Insert(ia)
+		}
+	}
+
+	// Replace the existing set's data with the new, aggregated summary data
+	sas.SummaryAllocations = resultSet.SummaryAllocations
+
+	return nil
+}
+
+// Delete drops the SummaryAllocation stored under the given name, along with
+// any bookkeeping (external/idle key) entries recorded for that name.
+// Deleting a name that is not present is a no-op. Safe to call on nil.
+func (sas *SummaryAllocationSet) Delete(name string) {
+	if sas == nil {
+		return
+	}
+
+	sas.Lock()
+	defer sas.Unlock()
+
+	delete(sas.SummaryAllocations, name)
+	delete(sas.externalKeys, name)
+	delete(sas.idleKeys, name)
+}
+
+// Each invokes the given function once for each (name, SummaryAllocation)
+// pair in the set. Calling Each on a nil set is a no-op.
+// NOTE(review): iteration happens without holding the set's lock —
+// presumably so that f may call locking methods without deadlocking;
+// confirm that callers do not mutate the set concurrently.
+func (sas *SummaryAllocationSet) Each(f func(string, *SummaryAllocation)) {
+	if sas == nil {
+		return
+	}
+
+	for k, a := range sas.SummaryAllocations {
+		f(k, a)
+	}
+}
+
+// idleAllocations returns a map of the idle allocations in the
+// SummaryAllocationSet, keyed as they are stored in the set. Always returns
+// a non-nil map; it is empty for a nil or empty set.
+func (sas *SummaryAllocationSet) idleAllocations() map[string]*SummaryAllocation {
+	idles := map[string]*SummaryAllocation{}
+
+	if sas == nil || len(sas.SummaryAllocations) == 0 {
+		return idles
+	}
+
+	sas.RLock()
+	defer sas.RUnlock()
+
+	// idleKeys indexes which entries of SummaryAllocations are idle.
+	for key := range sas.idleKeys {
+		if sa, ok := sas.SummaryAllocations[key]; ok {
+			idles[key] = sa
+		}
+	}
+
+	return idles
+}
+
+// Insert adds the given SummaryAllocation to the set, merging it (via Add)
+// into any existing entry stored under the same Name; otherwise it is stored
+// directly. The set's internal maps are lazily initialized. External and
+// idle allocations are additionally recorded in the externalKeys/idleKeys
+// indexes. Nil error response indicates success.
+// NOTE(review): no window validation is performed here, despite what an
+// earlier version of this comment implied.
+func (sas *SummaryAllocationSet) Insert(sa *SummaryAllocation) error {
+	if sas == nil {
+		return fmt.Errorf("cannot insert into nil SummaryAllocationSet")
+	}
+
+	if sa == nil {
+		return fmt.Errorf("cannot insert a nil SummaryAllocation")
+	}
+
+	sas.Lock()
+	defer sas.Unlock()
+
+	// Lazily initialize internal maps so a zero-value set is usable.
+	if sas.SummaryAllocations == nil {
+		sas.SummaryAllocations = map[string]*SummaryAllocation{}
+	}
+
+	if sas.externalKeys == nil {
+		sas.externalKeys = map[string]bool{}
+	}
+
+	if sas.idleKeys == nil {
+		sas.idleKeys = map[string]bool{}
+	}
+
+	// Add the given SummaryAllocation to the existing entry, if there is
+	// one; otherwise just set directly into allocations
+	if _, ok := sas.SummaryAllocations[sa.Name]; ok {
+		err := sas.SummaryAllocations[sa.Name].Add(sa)
+		if err != nil {
+			return fmt.Errorf("SummaryAllocationSet.Insert: error trying to Add: %s", err)
+		}
+	} else {
+		sas.SummaryAllocations[sa.Name] = sa
+	}
+
+	// If the given SummaryAllocation is an external one, record that
+	if sa.IsExternal() {
+		sas.externalKeys[sa.Name] = true
+	}
+
+	// If the given SummaryAllocation is an idle one, record that
+	if sa.IsIdle() {
+		sas.idleKeys[sa.Name] = true
+	}
+
+	return nil
+}
+
+// SummaryAllocationSetRange is a thread-safe slice of SummaryAllocationSets.
+type SummaryAllocationSetRange struct {
+	sync.RWMutex
+	Step                  time.Duration           `json:"step"`
+	SummaryAllocationSets []*SummaryAllocationSet `json:"sets"`
+	Window                Window                  `json:"window"`
+}
+
+// NewSummaryAllocationSetRange instantiates a new range composed of the given
+// SummaryAllocationSets in the order provided. The expectations about the
+// SummaryAllocationSets are as follows:
+// - window durations are all equal
+// - sets are consecutive (i.e. chronologically sorted)
+// - there are no gaps between sets
+// - sets do not have overlapping windows
+// Sets violating the equal-duration expectation are still included in the
+// range; a warning is logged for each such set.
+func NewSummaryAllocationSetRange(sass ...*SummaryAllocationSet) *SummaryAllocationSetRange {
+	var step time.Duration
+	window := NewWindow(nil, nil)
+
+	for _, sas := range sass {
+		// Expand the range's window to envelope every set's window.
+		if window.Start() == nil || (sas.Window.Start() != nil && sas.Window.Start().Before(*window.Start())) {
+			window.start = sas.Window.Start()
+		}
+		if window.End() == nil || (sas.Window.End() != nil && sas.Window.End().After(*window.End())) {
+			window.end = sas.Window.End()
+		}
+		// The first set's window duration defines the range's step size.
+		if step == 0 {
+			step = sas.Window.Duration()
+		} else if step != sas.Window.Duration() {
+			log.Warningf("instantiating range with step %s using set of step %s is illegal", step, sas.Window.Duration())
+		}
+	}
+
+	return &SummaryAllocationSetRange{
+		Step:                  step,
+		SummaryAllocationSets: sass,
+		Window:                window,
+	}
+}
+
+// Accumulate sums each SummaryAllocationSet in the given range, returning a
+// single cumulative SummaryAllocationSet for the entire range. If the range
+// is empty, the result is nil with a nil error.
+func (sasr *SummaryAllocationSetRange) Accumulate() (*SummaryAllocationSet, error) {
+	var result *SummaryAllocationSet
+	var err error
+
+	sasr.RLock()
+	defer sasr.RUnlock()
+
+	for _, sas := range sasr.SummaryAllocationSets {
+		result, err = result.Add(sas)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return result, nil
+}
+
+// AggregateBy aggregates each SummaryAllocationSet in the range by the given
+// properties and options. On the first error, every set is dropped from the
+// range so that partially-aggregated (corrupt) data cannot be used.
+func (sasr *SummaryAllocationSetRange) AggregateBy(aggregateBy []string, options *AllocationAggregationOptions) error {
+	sasr.Lock()
+	defer sasr.Unlock()
+
+	for _, sas := range sasr.SummaryAllocationSets {
+		err := sas.AggregateBy(aggregateBy, options)
+		if err != nil {
+			// Wipe out data so that corrupt data cannot be mistakenly used
+			sasr.SummaryAllocationSets = []*SummaryAllocationSet{}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Append appends the given SummaryAllocationSet to the end of the range. It
+// does not validate whether or not that violates window continuity. Returns
+// an error (without appending) if the set's window duration does not match
+// the range's non-zero step.
+// NOTE(review): sasr.Step is read before the lock is acquired — presumably
+// callers serialize calls to Append; confirm.
+func (sasr *SummaryAllocationSetRange) Append(sas *SummaryAllocationSet) error {
+	if sasr.Step != 0 && sas.Window.Duration() != sasr.Step {
+		return fmt.Errorf("cannot append set with duration %s to range of step %s", sas.Window.Duration(), sasr.Step)
+	}
+
+	sasr.Lock()
+	defer sasr.Unlock()
+
+	// Append to list of sets
+	sasr.SummaryAllocationSets = append(sasr.SummaryAllocationSets, sas)
+
+	// Set step, if not set
+	if sasr.Step == 0 {
+		sasr.Step = sas.Window.Duration()
+	}
+
+	// Adjust window to envelope the appended set's window
+	if sasr.Window.Start() == nil || (sas.Window.Start() != nil && sas.Window.Start().Before(*sasr.Window.Start())) {
+		sasr.Window.start = sas.Window.Start()
+	}
+	if sasr.Window.End() == nil || (sas.Window.End() != nil && sas.Window.End().After(*sasr.Window.End())) {
+		sasr.Window.end = sas.Window.End()
+	}
+
+	return nil
+}
+
+// Each invokes the given function once for each SummaryAllocationSet in the
+// range, along with its index. Calling Each on a nil range is a no-op.
+// NOTE(review): iterates without holding the range's lock — presumably so f
+// may call locking methods; confirm callers do not mutate concurrently.
+func (sasr *SummaryAllocationSetRange) Each(f func(int, *SummaryAllocationSet)) {
+	if sasr == nil {
+		return
+	}
+
+	for i, as := range sasr.SummaryAllocationSets {
+		f(i, as)
+	}
+}
+
+// InsertExternalAllocations takes all allocations in the given
+// AllocationSetRange (they should all be considered "external") and inserts
+// them into the receiving SummaryAllocationSetRange, matching sets by their
+// window strings. Returns an error if any window in the given range has no
+// matching set in the receiver. Insertion errors overwrite err, so only the
+// most recent one is returned.
+// TODO:CLEANUP replace this with a better idea (or get rid of external
+// allocations, as such, altogether)
+func (sasr *SummaryAllocationSetRange) InsertExternalAllocations(that *AllocationSetRange) error {
+	if sasr == nil {
+		// NOTE(review): the message says "AllocationSetRange" though the
+		// receiver is a SummaryAllocationSetRange; text preserved as-is.
+		return fmt.Errorf("cannot insert range into nil AllocationSetRange")
+	}
+
+	// keys maps window to index in range
+	keys := map[string]int{}
+	for i, as := range sasr.SummaryAllocationSets {
+		if as == nil {
+			continue
+		}
+		keys[as.Window.String()] = i
+	}
+
+	// Nothing to merge, so simply return
+	if len(keys) == 0 {
+		return nil
+	}
+
+	var err error
+	that.Each(func(j int, thatAS *AllocationSet) {
+		// Stop doing work after the first error, since Each cannot break.
+		if thatAS == nil || err != nil {
+			return
+		}
+
+		// Find matching AllocationSet in asr
+		i, ok := keys[thatAS.Window.String()]
+		if !ok {
+			err = fmt.Errorf("cannot merge AllocationSet into window that does not exist: %s", thatAS.Window.String())
+			return
+		}
+		sas := sasr.SummaryAllocationSets[i]
+
+		// Insert each Allocation from the given set as an external
+		// SummaryAllocation.
+		thatAS.Each(func(k string, alloc *Allocation) {
+			externalSA := NewSummaryAllocation(alloc, true, true)
+			// This error will be returned below
+			// TODO:CLEANUP should Each have early-error-return functionality?
+			err = sas.Insert(externalSA)
+		})
+	})
+
+	// err might be nil
+	return err
+}

+ 212 - 0
pkg/kubecost/summaryallocation_test.go

@@ -0,0 +1,212 @@
+package kubecost
+
+import (
+	"testing"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/util"
+)
+
+// TestSummaryAllocation_Add exercises SummaryAllocation.Add: nil-handling
+// errors, summed costs, and time-weighted request/usage averages.
+func TestSummaryAllocation_Add(t *testing.T) {
+	window, _ := ParseWindowUTC("yesterday")
+
+	var sa1, sa2, osa1, osa2, nilsa *SummaryAllocation
+	var err error
+
+	// sa1 spans the entire window
+	sa1Start := *window.Start()
+
+	sa1End := *window.End()
+
+	sa1 = &SummaryAllocation{
+		Name: "cluster1/namespace1/pod1/container1",
+		Properties: &AllocationProperties{
+			Cluster:   "cluster1",
+			Namespace: "namespace1",
+			Pod:       "pod1",
+			Container: "container1",
+		},
+		Start:                  sa1Start,
+		End:                    sa1End,
+		CPUCoreRequestAverage:  0.5,
+		CPUCoreUsageAverage:    0.1,
+		CPUCost:                0.2,
+		GPUCost:                1.0,
+		NetworkCost:            0.1,
+		LoadBalancerCost:       0.6,
+		PVCost:                 0.005,
+		RAMBytesRequestAverage: 50.0 * 1024.0 * 1024.0,
+		RAMBytesUsageAverage:   10.0 * 1024.0 * 1024.0,
+		RAMCost:                0.05,
+		SharedCost:             1.0,
+		ExternalCost:           1.0,
+	}
+	osa1 = sa1.Clone()
+
+	// sa2 is just as expensive, with twice as much usage and request, and half
+	// the time compared to sa1
+
+	sa2Start := *window.Start()
+	sa2Start = sa2Start.Add(6 * time.Hour)
+
+	sa2End := *window.End()
+	sa2End = sa2End.Add(-6 * time.Hour)
+
+	sa2 = &SummaryAllocation{
+		Name: "cluster1/namespace1/pod2/container2",
+		Properties: &AllocationProperties{
+			Cluster:   "cluster1",
+			Namespace: "namespace1",
+			Pod:       "pod2",
+			Container: "container2",
+		},
+		Start:                  sa2Start,
+		End:                    sa2End,
+		CPUCoreRequestAverage:  sa1.CPUCoreRequestAverage * 2.0,
+		CPUCoreUsageAverage:    sa1.CPUCoreUsageAverage * 2.0,
+		CPUCost:                sa1.CPUCost,
+		GPUCost:                sa1.GPUCost,
+		NetworkCost:            sa1.NetworkCost,
+		LoadBalancerCost:       sa1.LoadBalancerCost,
+		PVCost:                 sa1.PVCost,
+		RAMBytesRequestAverage: sa1.RAMBytesRequestAverage * 2.0,
+		RAMBytesUsageAverage:   sa1.RAMBytesUsageAverage * 2.0,
+		RAMCost:                sa1.RAMCost,
+		SharedCost:             sa1.SharedCost,
+		ExternalCost:           sa1.ExternalCost,
+	}
+	osa2 = sa2.Clone()
+
+	// add nil to nil, expect an error
+	t.Run("nil.Add(nil)", func(t *testing.T) {
+		err = nilsa.Add(nilsa)
+		if err == nil {
+			t.Fatalf("expected error: cannot add nil SummaryAllocations")
+		}
+	})
+
+	// reset
+	sa1 = osa1.Clone()
+	sa2 = osa2.Clone()
+
+	// add sa1 to nil, expect an error
+	t.Run("nil.Add(sa1)", func(t *testing.T) {
+		err = nilsa.Add(sa1)
+		if err == nil {
+			t.Fatalf("expected error: cannot add nil SummaryAllocations")
+		}
+	})
+
+	// reset
+	sa1 = osa1.Clone()
+	sa2 = osa2.Clone()
+
+	// add nil to sa1, expect an error
+	t.Run("sa1.Add(nil)", func(t *testing.T) {
+		err = sa1.Add(nilsa)
+		if err == nil {
+			t.Fatalf("expected error: cannot add nil SummaryAllocations")
+		}
+	})
+
+	// reset
+	sa1 = osa1.Clone()
+	sa2 = osa2.Clone()
+
+	// add sa1 to sa2 and expect summed costs and time-weighted averages
+	// (sa2 covers only half of the combined window, so its averages are
+	// weighted by 0.5)
+	t.Run("sa2.Add(sa1)", func(t *testing.T) {
+		err = sa2.Add(sa1)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		if sa2.Properties != nil {
+			t.Fatalf("expected properties to be nil; actual: %s", sa2.Properties)
+		}
+		if !util.IsApproximately(sa2.CPUCoreRequestAverage, (0.5*osa2.CPUCoreRequestAverage)+osa1.CPUCoreRequestAverage) {
+			t.Fatalf("incorrect CPUCoreRequestAverage: expected %.5f; actual %.5f", (0.5*osa2.CPUCoreRequestAverage)+osa1.CPUCoreRequestAverage, sa2.CPUCoreRequestAverage)
+		}
+		if !util.IsApproximately(sa2.CPUCoreUsageAverage, (0.5*osa2.CPUCoreUsageAverage)+osa1.CPUCoreUsageAverage) {
+			t.Fatalf("incorrect CPUCoreUsageAverage: expected %.5f; actual %.5f", (0.5*osa2.CPUCoreUsageAverage)+osa1.CPUCoreUsageAverage, sa2.CPUCoreUsageAverage)
+		}
+		if !util.IsApproximately(sa2.RAMBytesRequestAverage, (0.5*osa2.RAMBytesRequestAverage)+osa1.RAMBytesRequestAverage) {
+			t.Fatalf("incorrect RAMBytesRequestAverage: expected %.5f; actual %.5f", (0.5*osa2.RAMBytesRequestAverage)+osa1.RAMBytesRequestAverage, sa2.RAMBytesRequestAverage)
+		}
+		if !util.IsApproximately(sa2.RAMBytesUsageAverage, (0.5*osa2.RAMBytesUsageAverage)+osa1.RAMBytesUsageAverage) {
+			t.Fatalf("incorrect RAMBytesUsageAverage: expected %.5f; actual %.5f", (0.5*osa2.RAMBytesUsageAverage)+osa1.RAMBytesUsageAverage, sa2.RAMBytesUsageAverage)
+		}
+		if !util.IsApproximately(sa2.CPUCost, osa2.CPUCost+osa1.CPUCost) {
+			t.Fatalf("incorrect CPUCost: expected %.5f; actual %.5f", osa2.CPUCost+osa1.CPUCost, sa2.CPUCost)
+		}
+		if !util.IsApproximately(sa2.GPUCost, osa2.GPUCost+osa1.GPUCost) {
+			t.Fatalf("incorrect GPUCost: expected %.5f; actual %.5f", osa2.GPUCost+osa1.GPUCost, sa2.GPUCost)
+		}
+		if !util.IsApproximately(sa2.NetworkCost, osa2.NetworkCost+osa1.NetworkCost) {
+			t.Fatalf("incorrect NetworkCost: expected %.5f; actual %.5f", osa2.NetworkCost+osa1.NetworkCost, sa2.NetworkCost)
+		}
+		if !util.IsApproximately(sa2.LoadBalancerCost, osa2.LoadBalancerCost+osa1.LoadBalancerCost) {
+			t.Fatalf("incorrect LoadBalancerCost: expected %.5f; actual %.5f", osa2.LoadBalancerCost+osa1.LoadBalancerCost, sa2.LoadBalancerCost)
+		}
+		if !util.IsApproximately(sa2.PVCost, osa2.PVCost+osa1.PVCost) {
+			t.Fatalf("incorrect PVCost: expected %.5f; actual %.5f", osa2.PVCost+osa1.PVCost, sa2.PVCost)
+		}
+		if !util.IsApproximately(sa2.RAMCost, osa2.RAMCost+osa1.RAMCost) {
+			t.Fatalf("incorrect RAMCost: expected %.5f; actual %.5f", osa2.RAMCost+osa1.RAMCost, sa2.RAMCost)
+		}
+		if !util.IsApproximately(sa2.SharedCost, osa2.SharedCost+osa1.SharedCost) {
+			t.Fatalf("incorrect SharedCost: expected %.5f; actual %.5f", osa2.SharedCost+osa1.SharedCost, sa2.SharedCost)
+		}
+		if !util.IsApproximately(sa2.ExternalCost, osa2.ExternalCost+osa1.ExternalCost) {
+			t.Fatalf("incorrect ExternalCost: expected %.5f; actual %.5f", osa2.ExternalCost+osa1.ExternalCost, sa2.ExternalCost)
+		}
+	})
+
+	// reset
+	sa1 = osa1.Clone()
+	sa2 = osa2.Clone()
+
+	// add sa2 to sa1 and expect summed costs and time-weighted averages
+	// (sa2 covers only half of the combined window, so its averages are
+	// weighted by 0.5)
+	t.Run("sa1.Add(sa2)", func(t *testing.T) {
+		err = sa1.Add(sa2)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		if sa1.Properties != nil {
+			t.Fatalf("expected properties to be nil; actual: %s", sa1.Properties)
+		}
+		if !util.IsApproximately(sa1.CPUCoreRequestAverage, (0.5*osa2.CPUCoreRequestAverage)+osa1.CPUCoreRequestAverage) {
+			t.Fatalf("incorrect CPUCoreRequestAverage: expected %.5f; actual %.5f", (0.5*osa2.CPUCoreRequestAverage)+osa1.CPUCoreRequestAverage, sa1.CPUCoreRequestAverage)
+		}
+		if !util.IsApproximately(sa1.CPUCoreUsageAverage, (0.5*osa2.CPUCoreUsageAverage)+osa1.CPUCoreUsageAverage) {
+			t.Fatalf("incorrect CPUCoreUsageAverage: expected %.5f; actual %.5f", (0.5*osa2.CPUCoreUsageAverage)+osa1.CPUCoreUsageAverage, sa1.CPUCoreUsageAverage)
+		}
+		if !util.IsApproximately(sa1.RAMBytesRequestAverage, (0.5*osa2.RAMBytesRequestAverage)+osa1.RAMBytesRequestAverage) {
+			t.Fatalf("incorrect RAMBytesRequestAverage: expected %.5f; actual %.5f", (0.5*osa2.RAMBytesRequestAverage)+osa1.RAMBytesRequestAverage, sa1.RAMBytesRequestAverage)
+		}
+		if !util.IsApproximately(sa1.RAMBytesUsageAverage, (0.5*osa2.RAMBytesUsageAverage)+osa1.RAMBytesUsageAverage) {
+			t.Fatalf("incorrect RAMBytesUsageAverage: expected %.5f; actual %.5f", (0.5*osa2.RAMBytesUsageAverage)+osa1.RAMBytesUsageAverage, sa1.RAMBytesUsageAverage)
+		}
+		if !util.IsApproximately(sa1.CPUCost, osa2.CPUCost+osa1.CPUCost) {
+			t.Fatalf("incorrect CPUCost: expected %.5f; actual %.5f", osa2.CPUCost+osa1.CPUCost, sa1.CPUCost)
+		}
+		if !util.IsApproximately(sa1.GPUCost, osa2.GPUCost+osa1.GPUCost) {
+			t.Fatalf("incorrect GPUCost: expected %.5f; actual %.5f", osa2.GPUCost+osa1.GPUCost, sa1.GPUCost)
+		}
+		if !util.IsApproximately(sa1.NetworkCost, osa2.NetworkCost+osa1.NetworkCost) {
+			t.Fatalf("incorrect NetworkCost: expected %.5f; actual %.5f", osa2.NetworkCost+osa1.NetworkCost, sa1.NetworkCost)
+		}
+		if !util.IsApproximately(sa1.LoadBalancerCost, osa2.LoadBalancerCost+osa1.LoadBalancerCost) {
+			t.Fatalf("incorrect LoadBalancerCost: expected %.5f; actual %.5f", osa2.LoadBalancerCost+osa1.LoadBalancerCost, sa1.LoadBalancerCost)
+		}
+		if !util.IsApproximately(sa1.PVCost, osa2.PVCost+osa1.PVCost) {
+			t.Fatalf("incorrect PVCost: expected %.5f; actual %.5f", osa2.PVCost+osa1.PVCost, sa1.PVCost)
+		}
+		if !util.IsApproximately(sa1.RAMCost, osa2.RAMCost+osa1.RAMCost) {
+			t.Fatalf("incorrect RAMCost: expected %.5f; actual %.5f", osa2.RAMCost+osa1.RAMCost, sa1.RAMCost)
+		}
+		if !util.IsApproximately(sa1.SharedCost, osa2.SharedCost+osa1.SharedCost) {
+			t.Fatalf("incorrect SharedCost: expected %.5f; actual %.5f", osa2.SharedCost+osa1.SharedCost, sa1.SharedCost)
+		}
+		if !util.IsApproximately(sa1.ExternalCost, osa2.ExternalCost+osa1.ExternalCost) {
+			t.Fatalf("incorrect ExternalCost: expected %.5f; actual %.5f", osa2.ExternalCost+osa1.ExternalCost, sa1.ExternalCost)
+		}
+	})
+}

+ 524 - 0
pkg/kubecost/totals.go

@@ -0,0 +1,524 @@
+package kubecost
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+	"github.com/patrickmn/go-cache"
+)
+
+// AllocationTotals represents aggregate costs of all Allocations for
+// a given cluster or tuple of (cluster, node) between a given start and end
+// time, where the costs are aggregated per-resource. AllocationTotals
+// is designed to be used as a pre-computed intermediate data structure when
+// contextual knowledge is required to carry out a task, but computing totals
+// on-the-fly would be expensive; e.g. idle allocation; sharing coefficients
+// for idle or shared resources, etc.
+// NOTE: ComputeAllocationTotals populates LoadBalancerCost, NetworkCost, and
+// PersistentVolumeCost with adjustments already folded in; the corresponding
+// *Adjustment fields here are presumably set by other writers — confirm
+// before relying on them.
+type AllocationTotals struct {
+	Start                          time.Time `json:"start"`
+	End                            time.Time `json:"end"`
+	Cluster                        string    `json:"cluster"`
+	Node                           string    `json:"node"`
+	Count                          int       `json:"count"`
+	CPUCost                        float64   `json:"cpuCost"`
+	CPUCostAdjustment              float64   `json:"cpuCostAdjustment"`
+	GPUCost                        float64   `json:"gpuCost"`
+	GPUCostAdjustment              float64   `json:"gpuCostAdjustment"`
+	LoadBalancerCost               float64   `json:"loadBalancerCost"`
+	LoadBalancerCostAdjustment     float64   `json:"loadBalancerCostAdjustment"`
+	NetworkCost                    float64   `json:"networkCost"`
+	NetworkCostAdjustment          float64   `json:"networkCostAdjustment"`
+	PersistentVolumeCost           float64   `json:"persistentVolumeCost"`
+	PersistentVolumeCostAdjustment float64   `json:"persistentVolumeCostAdjustment"`
+	RAMCost                        float64   `json:"ramCost"`
+	RAMCostAdjustment              float64   `json:"ramCostAdjustment"`
+}
+
+// ClearAdjustments sets all adjustment fields to 0.0, including the load
+// balancer, network, and persistent volume adjustments. (Previously only
+// CPU, GPU, and RAM adjustments were cleared, contradicting this doc.)
+func (art *AllocationTotals) ClearAdjustments() {
+	art.CPUCostAdjustment = 0.0
+	art.GPUCostAdjustment = 0.0
+	art.LoadBalancerCostAdjustment = 0.0
+	art.NetworkCostAdjustment = 0.0
+	art.PersistentVolumeCostAdjustment = 0.0
+	art.RAMCostAdjustment = 0.0
+}
+
+// TotalCPUCost returns CPU cost with adjustment.
+func (art *AllocationTotals) TotalCPUCost() float64 {
+	return art.CPUCost + art.CPUCostAdjustment
+}
+
+// TotalGPUCost returns GPU cost with adjustment.
+func (art *AllocationTotals) TotalGPUCost() float64 {
+	return art.GPUCost + art.GPUCostAdjustment
+}
+
+// TotalRAMCost returns RAM cost with adjustment.
+func (art *AllocationTotals) TotalRAMCost() float64 {
+	return art.RAMCost + art.RAMCostAdjustment
+}
+
+// TotalCost returns the sum of all costs.
+// NOTE: LoadBalancerCost, NetworkCost, and PersistentVolumeCost are used
+// as-is (no adjustment term added) because ComputeAllocationTotals records
+// them with their adjustments already folded in.
+func (art *AllocationTotals) TotalCost() float64 {
+	return art.TotalCPUCost() + art.TotalGPUCost() + art.LoadBalancerCost +
+		art.NetworkCost + art.PersistentVolumeCost + art.TotalRAMCost()
+}
+
+// ComputeAllocationTotals totals the resource costs of the given AllocationSet
+// using the given property, i.e. cluster or node, where "node" really means to
+// use the fully-qualified (cluster, node) tuple. Idle and unmounted
+// allocations are skipped. For each key, Start/End expand to envelope every
+// contributing allocation's window, and Node is blanked if allocations from
+// more than one node contribute. LoadBalancerCost, NetworkCost, and
+// PersistentVolumeCost are recorded with adjustments folded in, whereas CPU,
+// GPU, and RAM keep cost and adjustment separate.
+func ComputeAllocationTotals(as *AllocationSet, prop string) map[string]*AllocationTotals {
+	arts := map[string]*AllocationTotals{}
+
+	as.Each(func(name string, alloc *Allocation) {
+		// Do not count idle or unmounted allocations
+		if alloc.IsIdle() || alloc.IsUnmounted() {
+			return
+		}
+
+		// Default to computing totals by Cluster, but allow override to use Node.
+		key := alloc.Properties.Cluster
+		if prop == AllocationNodeProp {
+			key = fmt.Sprintf("%s/%s", alloc.Properties.Cluster, alloc.Properties.Node)
+		}
+
+		if _, ok := arts[key]; !ok {
+			arts[key] = &AllocationTotals{
+				Start:   alloc.Start,
+				End:     alloc.End,
+				Cluster: alloc.Properties.Cluster,
+				Node:    alloc.Properties.Node,
+			}
+		}
+
+		// Expand the totals' window to envelope the allocation's window.
+		if arts[key].Start.After(alloc.Start) {
+			arts[key].Start = alloc.Start
+		}
+		if arts[key].End.Before(alloc.End) {
+			arts[key].End = alloc.End
+		}
+
+		// If allocations from different nodes fall under this key, the
+		// totals cannot be attributed to a single node.
+		if arts[key].Node != alloc.Properties.Node {
+			arts[key].Node = ""
+		}
+
+		arts[key].Count++
+		arts[key].CPUCost += alloc.CPUCost
+		arts[key].CPUCostAdjustment += alloc.CPUCostAdjustment
+		arts[key].GPUCost += alloc.GPUCost
+		arts[key].GPUCostAdjustment += alloc.GPUCostAdjustment
+		arts[key].LoadBalancerCost += alloc.LBTotalCost()
+		arts[key].NetworkCost += alloc.NetworkTotalCost()
+		arts[key].PersistentVolumeCost += alloc.PVCost()
+		arts[key].RAMCost += alloc.RAMCost
+		arts[key].RAMCostAdjustment += alloc.RAMCostAdjustment
+	})
+
+	return arts
+}
+
+// AssetTotals represents aggregate costs of all Assets for a given
+// cluster or tuple of (cluster, node) between a given start and end time,
+// where the costs are aggregated per-resource. AssetTotals is designed
+// to be used as a pre-computed intermediate data structure when contextual
+// knowledge is required to carry out a task, but computing totals on-the-fly
+// would be expensive; e.g. idle allocation, shared tenancy costs
+type AssetTotals struct {
+	Start                 time.Time `json:"start"`
+	End                   time.Time `json:"end"`
+	Cluster               string    `json:"cluster"`
+	Node                  string    `json:"node"`
+	Count                 int       `json:"count"`
+	AttachedVolumeCost    float64   `json:"attachedVolumeCost"`
+	ClusterManagementCost float64   `json:"clusterManagementCost"`
+	CPUCost               float64   `json:"cpuCost"`
+	CPUCostAdjustment     float64   `json:"cpuCostAdjustment"`
+	GPUCost               float64   `json:"gpuCost"`
+	GPUCostAdjustment     float64   `json:"gpuCostAdjustment"`
+	PersistentVolumeCost  float64   `json:"persistentVolumeCost"`
+	RAMCost               float64   `json:"ramCost"`
+	RAMCostAdjustment     float64   `json:"ramCostAdjustment"`
+}
+
+// ClearAdjustments sets all adjustment fields to 0.0. (AssetTotals carries
+// only CPU, GPU, and RAM adjustments, so this clears every adjustment field.)
+func (art *AssetTotals) ClearAdjustments() {
+	art.CPUCostAdjustment = 0.0
+	art.GPUCostAdjustment = 0.0
+	art.RAMCostAdjustment = 0.0
+}
+
+// TotalCPUCost returns CPU cost with adjustment.
+func (art *AssetTotals) TotalCPUCost() float64 {
+	return art.CPUCost + art.CPUCostAdjustment
+}
+
+// TotalGPUCost returns GPU cost with adjustment.
+func (art *AssetTotals) TotalGPUCost() float64 {
+	return art.GPUCost + art.GPUCostAdjustment
+}
+
+// TotalRAMCost returns RAM cost with adjustment.
+func (art *AssetTotals) TotalRAMCost() float64 {
+	return art.RAMCost + art.RAMCostAdjustment
+}
+
+// TotalCost returns the sum of all costs: attached volumes, cluster
+// management, persistent volumes, and adjusted CPU/GPU/RAM.
+func (art *AssetTotals) TotalCost() float64 {
+	return art.AttachedVolumeCost + art.ClusterManagementCost + art.TotalCPUCost() +
+		art.TotalGPUCost() + art.PersistentVolumeCost + art.TotalRAMCost()
+}
+
+// ComputeAssetTotals totals the resource costs of the given AssetSet,
+// using the given property, i.e. cluster or node, where "node" really means to
+// use the fully-qualified (cluster, node) tuple.
+// NOTE: we're not capturing LoadBalancers here yet, but only because we don't
+// yet need them. They could be added.
+func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotals {
+	arts := map[string]*AssetTotals{}
+
+	// Attached disks are tracked by matching their name with the name of the
+	// node, as is standard for attached disks. Both nodeNames and disks are
+	// keyed by the fully-qualified "cluster/name" tuple so that the match
+	// works regardless of the aggregation property.
+	nodeNames := map[string]bool{}
+	disks := map[string]*Disk{}
+
+	as.Each(func(name string, asset Asset) {
+		if node, ok := asset.(*Node); ok {
+			// Default to computing totals by Cluster, but allow override to use Node.
+			key := node.Properties().Cluster
+			if prop == AssetNodeProp {
+				key = fmt.Sprintf("%s/%s", node.Properties().Cluster, node.Properties().Name)
+			}
+
+			// Record the node under its fully-qualified "cluster/name" for
+			// attached-volume matching below. (Previously the aggregation
+			// key was recorded here, which could never match a disk key when
+			// aggregating by cluster, so attached volumes were silently
+			// dropped in that mode.)
+			nodeNames[fmt.Sprintf("%s/%s", node.Properties().Cluster, node.Properties().Name)] = true
+
+			// adjustmentRate is used to scale resource costs proportionally
+			// by the adjustment. This is necessary because we only get one
+			// adjustment per Node, not one per-resource-per-Node.
+			//
+			// e.g. total cost = $90, adjustment = -$10 => 0.9
+			// e.g. total cost = $150, adjustment = -$300 => 0.3333
+			// e.g. total cost = $150, adjustment = $50 => 1.5
+			adjustmentRate := 1.0
+			if node.TotalCost()-node.Adjustment() == 0 {
+				// If (totalCost - adjustment) is 0.0 then adjustment cancels
+				// the entire node cost and we should make everything 0
+				// without dividing by 0.
+				adjustmentRate = 0.0
+				log.DedupedWarningf(5, "ComputeTotals: node cost adjusted to $0.00 for %s", node.Properties().Name)
+			} else if node.Adjustment() != 0.0 {
+				// adjustmentRate is the ratio of cost-with-adjustment (i.e. TotalCost)
+				// to cost-without-adjustment (i.e. TotalCost - Adjustment).
+				adjustmentRate = node.TotalCost() / (node.TotalCost() - node.Adjustment())
+			}
+
+			// Scale each resource's discounted cost by the adjustment rate,
+			// recording the residual as that resource's adjustment.
+			totalCPUCost := node.CPUCost * (1.0 - node.Discount)
+			cpuCost := totalCPUCost * adjustmentRate
+			cpuCostAdjustment := totalCPUCost - cpuCost
+
+			totalGPUCost := node.GPUCost * (1.0 - node.Discount)
+			gpuCost := totalGPUCost * adjustmentRate
+			gpuCostAdjustment := totalGPUCost - gpuCost
+
+			totalRAMCost := node.RAMCost * (1.0 - node.Discount)
+			ramCost := totalRAMCost * adjustmentRate
+			ramCostAdjustment := totalRAMCost - ramCost
+
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
+					Start:   node.Start(),
+					End:     node.End(),
+					Cluster: node.Properties().Cluster,
+					Node:    node.Properties().Name,
+				}
+			}
+
+			// Expand the totals' window to envelope the node's window.
+			if arts[key].Start.After(node.Start()) {
+				arts[key].Start = node.Start()
+			}
+			if arts[key].End.Before(node.End()) {
+				arts[key].End = node.End()
+			}
+
+			// Blank out Node if multiple nodes contribute to this key.
+			if arts[key].Node != node.Properties().Name {
+				arts[key].Node = ""
+			}
+
+			arts[key].Count++
+			arts[key].CPUCost += cpuCost
+			arts[key].CPUCostAdjustment += cpuCostAdjustment
+			arts[key].RAMCost += ramCost
+			arts[key].RAMCostAdjustment += ramCostAdjustment
+			arts[key].GPUCost += gpuCost
+			arts[key].GPUCostAdjustment += gpuCostAdjustment
+		} else if disk, ok := asset.(*Disk); ok {
+			key := fmt.Sprintf("%s/%s", disk.Properties().Cluster, disk.Properties().Name)
+			disks[key] = disk
+		} else if cm, ok := asset.(*ClusterManagement); ok && prop == AssetClusterProp {
+			// Only record cluster management when prop is Cluster because we
+			// can't break down ClusterManagement by node.
+			key := cm.Properties().Cluster
+
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
+					Start:   cm.Start(),
+					End:     cm.End(),
+					Cluster: cm.Properties().Cluster,
+				}
+			}
+
+			arts[key].Count++
+			arts[key].ClusterManagementCost += cm.TotalCost()
+		}
+	})
+
+	// Identify attached volumes as disks with names matching a node's name
+	for name := range nodeNames {
+		if disk, ok := disks[name]; ok {
+			// By default, the key will be the name, which is the tuple of
+			// cluster/node. But if we're aggregating by cluster only, then
+			// reset the key to just the cluster.
+			key := name
+			if prop == AssetClusterProp {
+				key = disk.Properties().Cluster
+			}
+
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
+					Start:   disk.Start(),
+					End:     disk.End(),
+					Cluster: disk.Properties().Cluster,
+				}
+
+				if prop == AssetNodeProp {
+					arts[key].Node = disk.Properties().Name
+				}
+			}
+
+			arts[key].Count++
+			arts[key].AttachedVolumeCost += disk.TotalCost()
+		}
+	}
+
+	return arts
+}
+
+// ComputeIdleCoefficients returns the idle coefficients for CPU, GPU, and RAM
+// (in that order) for the given resource costs and totals. ShareNone yields
+// all zeros, ShareEven yields 1/count for each resource, and any other split
+// is treated as ShareWeighted, yielding each resource cost's proportion of
+// that resource's total. Missing keys and zero totals yield zeros.
+func ComputeIdleCoefficients(shareSplit, key string, cpuCost, gpuCost, ramCost float64, allocationTotals map[string]*AllocationTotals) (float64, float64, float64) {
+	if shareSplit == ShareNone {
+		return 0.0, 0.0, 0.0
+	}
+
+	// Default any unrecognized split type to weighted sharing.
+	if shareSplit != ShareEven {
+		shareSplit = ShareWeighted
+	}
+
+	var cpuCoeff, gpuCoeff, ramCoeff float64
+
+	if _, ok := allocationTotals[key]; !ok {
+		return 0.0, 0.0, 0.0
+	}
+
+	if shareSplit == ShareEven {
+		coeff := 1.0 / float64(allocationTotals[key].Count)
+		return coeff, coeff, coeff
+	}
+
+	if allocationTotals[key].CPUCost > 0 {
+		cpuCoeff = cpuCost / allocationTotals[key].CPUCost
+	}
+
+	if allocationTotals[key].GPUCost > 0 {
+		// BUG FIX: this was previously computed from cpuCost, producing an
+		// incorrect GPU coefficient whenever CPU and GPU costs differed.
+		gpuCoeff = gpuCost / allocationTotals[key].GPUCost
+	}
+
+	if allocationTotals[key].RAMCost > 0 {
+		ramCoeff = ramCost / allocationTotals[key].RAMCost
+	}
+
+	return cpuCoeff, gpuCoeff, ramCoeff
+}
+
+// TotalsStore acts as both an AllocationTotalsStore and an
+// AssetTotalsStore.
+//
+// MemoryTotalsStore is the in-memory implementation of this interface.
+type TotalsStore interface {
+	AllocationTotalsStore
+	AssetTotalsStore
+}
+
+// AllocationTotalsStore allows for storing (i.e. setting and
+// getting) AllocationTotals by cluster and by node.
+type AllocationTotalsStore interface {
+	// Getters return the totals map for the given start and end times,
+	// along with a bool reporting whether an entry was found.
+	GetAllocationTotalsByCluster(start, end time.Time) (map[string]*AllocationTotals, bool)
+	GetAllocationTotalsByNode(start, end time.Time) (map[string]*AllocationTotals, bool)
+	// Setters store the given totals map under the given start and end times.
+	SetAllocationTotalsByCluster(start, end time.Time, rts map[string]*AllocationTotals)
+	SetAllocationTotalsByNode(start, end time.Time, rts map[string]*AllocationTotals)
+}
+
+// UpdateAllocationTotalsStore updates an AllocationTotalsStore
+// by totaling the given AllocationSet and saving the totals.
+func UpdateAllocationTotalsStore(arts AllocationTotalsStore, as *AllocationSet) error {
+	if arts == nil {
+		return errors.New("cannot update nil AllocationTotalsStore")
+	}
+
+	if as == nil {
+		return errors.New("cannot update AllocationTotalsStore from nil AllocationSet")
+	}
+
+	if as.Window.IsOpen() {
+		return errors.New("cannot update AllocationTotalsStore from AllocationSet with open window")
+	}
+
+	start := *as.Window.Start()
+	end := *as.Window.End()
+
+	artsByCluster := ComputeAllocationTotals(as, AllocationClusterProp)
+	arts.SetAllocationTotalsByCluster(start, end, artsByCluster)
+
+	artsByNode := ComputeAllocationTotals(as, AllocationNodeProp)
+	arts.SetAllocationTotalsByNode(start, end, artsByNode)
+
+	log.Infof("ETL: Allocation: updated resource totals for %s", as.Window)
+
+	return nil
+}
+
+// AssetTotalsStore allows for storing (i.e. setting and getting)
+// AssetTotals by cluster and by node.
+type AssetTotalsStore interface {
+	// Getters return the totals map for the given start and end times,
+	// along with a bool reporting whether an entry was found.
+	GetAssetTotalsByCluster(start, end time.Time) (map[string]*AssetTotals, bool)
+	GetAssetTotalsByNode(start, end time.Time) (map[string]*AssetTotals, bool)
+	// Setters store the given totals map under the given start and end times.
+	SetAssetTotalsByCluster(start, end time.Time, rts map[string]*AssetTotals)
+	SetAssetTotalsByNode(start, end time.Time, rts map[string]*AssetTotals)
+}
+
+// UpdateAssetTotalsStore updates an AssetTotalsStore
+// by totaling the given AssetSet and saving the totals.
+func UpdateAssetTotalsStore(arts AssetTotalsStore, as *AssetSet) error {
+	if arts == nil {
+		return errors.New("cannot update nil AssetTotalsStore")
+	}
+
+	if as == nil {
+		return errors.New("cannot update AssetTotalsStore from nil AssetSet")
+	}
+
+	if as.Window.IsOpen() {
+		return errors.New("cannot update AssetTotalsStore from AssetSet with open window")
+	}
+
+	start := *as.Window.Start()
+	end := *as.Window.End()
+
+	artsByCluster := ComputeAssetTotals(as, AssetClusterProp)
+	arts.SetAssetTotalsByCluster(start, end, artsByCluster)
+
+	artsByNode := ComputeAssetTotals(as, AssetNodeProp)
+	arts.SetAssetTotalsByNode(start, end, artsByNode)
+
+	log.Infof("ETL: Asset: updated resource totals for %s", as.Window)
+
+	return nil
+}
+
+// MemoryTotalsStore is an in-memory cache TotalsStore
+type MemoryTotalsStore struct {
+	// Each cache maps storeKey(start, end) to a totals map; entries are
+	// stored with cache.NoExpiration, so they never expire.
+	allocTotalsByCluster *cache.Cache
+	allocTotalsByNode    *cache.Cache
+	assetTotalsByCluster *cache.Cache
+	assetTotalsByNode    *cache.Cache
+}
+
+// NewMemoryTotalsStore instantiates a new MemoryTotalsStore,
+// which is composed of four in-memory caches.
+func NewMemoryTotalsStore() *MemoryTotalsStore {
+	return &MemoryTotalsStore{
+		allocTotalsByCluster: cache.New(cache.NoExpiration, cache.NoExpiration),
+		allocTotalsByNode:    cache.New(cache.NoExpiration, cache.NoExpiration),
+		assetTotalsByCluster: cache.New(cache.NoExpiration, cache.NoExpiration),
+		assetTotalsByNode:    cache.New(cache.NoExpiration, cache.NoExpiration),
+	}
+}
+
+// GetAllocationTotalsByCluster retrieves the AllocationTotals
+// by cluster for the given start and end times.
+func (mts *MemoryTotalsStore) GetAllocationTotalsByCluster(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.allocTotalsByCluster.Get(k); ok {
+		return raw.(map[string]*AllocationTotals), true
+	} else {
+		return map[string]*AllocationTotals{}, false
+	}
+}
+
+// GetAllocationTotalsByNode retrieves the AllocationTotals
+// by node for the given start and end times.
+func (mts *MemoryTotalsStore) GetAllocationTotalsByNode(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.allocTotalsByNode.Get(k); ok {
+		return raw.(map[string]*AllocationTotals), true
+	} else {
+		return map[string]*AllocationTotals{}, false
+	}
+}
+
+// SetAllocationTotalsByCluster set the per-cluster AllocationTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAllocationTotalsByCluster(start time.Time, end time.Time, arts map[string]*AllocationTotals) {
+	k := storeKey(start, end)
+	mts.allocTotalsByCluster.Set(k, arts, cache.NoExpiration)
+}
+
+// SetAllocationTotalsByNode set the per-node AllocationTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAllocationTotalsByNode(start time.Time, end time.Time, arts map[string]*AllocationTotals) {
+	k := storeKey(start, end)
+	mts.allocTotalsByNode.Set(k, arts, cache.NoExpiration)
+}
+
+// GetAssetTotalsByCluster retrieves the AssetTotals
+// by cluster for the given start and end times.
+func (mts *MemoryTotalsStore) GetAssetTotalsByCluster(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.assetTotalsByCluster.Get(k); ok {
+		return raw.(map[string]*AssetTotals), true
+	} else {
+		return map[string]*AssetTotals{}, false
+	}
+}
+
+// GetAssetTotalsByNode retrieves the AssetTotals
+// by node for the given start and end times.
+func (mts *MemoryTotalsStore) GetAssetTotalsByNode(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.assetTotalsByNode.Get(k); ok {
+		return raw.(map[string]*AssetTotals), true
+	} else {
+		return map[string]*AssetTotals{}, false
+	}
+}
+
+// SetAssetTotalsByCluster set the per-cluster AssetTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAssetTotalsByCluster(start time.Time, end time.Time, arts map[string]*AssetTotals) {
+	k := storeKey(start, end)
+	mts.assetTotalsByCluster.Set(k, arts, cache.NoExpiration)
+}
+
+// SetAssetTotalsByNode set the per-node AssetTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAssetTotalsByNode(start time.Time, end time.Time, arts map[string]*AssetTotals) {
+	k := storeKey(start, end)
+	mts.assetTotalsByNode.Set(k, arts, cache.NoExpiration)
+}
+
+// storeKey creates a storage key based on start and end times
+func storeKey(start, end time.Time) string {
+	startStr := strconv.FormatInt(start.Unix(), 10)
+	endStr := strconv.FormatInt(end.Unix(), 10)
+	return fmt.Sprintf("%s-%s", startStr, endStr)
+}

+ 10 - 2
pkg/kubecost/window.go

@@ -459,8 +459,16 @@ func (w Window) IsOpen() bool {
 // TODO:CLEANUP make this unmarshalable (make Start and End public)
 func (w Window) MarshalJSON() ([]byte, error) {
 	buffer := bytes.NewBufferString("{")
-	buffer.WriteString(fmt.Sprintf("\"start\":\"%s\",", w.start.Format(time.RFC3339)))
-	buffer.WriteString(fmt.Sprintf("\"end\":\"%s\"", w.end.Format(time.RFC3339)))
+	if w.start != nil {
+		buffer.WriteString(fmt.Sprintf("\"start\":\"%s\",", w.start.Format(time.RFC3339)))
+	} else {
+		buffer.WriteString(fmt.Sprintf("\"start\":\"%s\",", "null"))
+	}
+	if w.end != nil {
+		buffer.WriteString(fmt.Sprintf("\"end\":\"%s\"", w.end.Format(time.RFC3339)))
+	} else {
+		buffer.WriteString(fmt.Sprintf("\"end\":\"%s\"", "null"))
+	}
 	buffer.WriteString("}")
 	return buffer.Bytes(), nil
 }