Преглед изворни кода

Address feedback for Summary Allocation work

Niko Kovacevic пре 4 година
родитељ
комит
a4692b5ddb
4 измењених фајлова са 714 додато и 494 уклоњено
  1. 116 44
      pkg/kubecost/allocation.go
  2. 8 7
      pkg/kubecost/allocationprops.go
  3. 344 275
      pkg/kubecost/summaryallocation.go
  4. 246 168
      pkg/kubecost/totals.go

+ 116 - 44
pkg/kubecost/allocation.go

@@ -350,46 +350,82 @@ func (a *Allocation) Equal(that *Allocation) bool {
 
 // TotalCost is the total cost of the Allocation including adjustments
 func (a *Allocation) TotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.CPUTotalCost() + a.GPUTotalCost() + a.RAMTotalCost() + a.PVTotalCost() + a.NetworkTotalCost() + a.LBTotalCost() + a.SharedTotalCost() + a.ExternalCost
 }
 
 // CPUTotalCost calculates total CPU cost of Allocation including adjustment
 func (a *Allocation) CPUTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.CPUCost + a.CPUCostAdjustment
 }
 
 // GPUTotalCost calculates total GPU cost of Allocation including adjustment
 func (a *Allocation) GPUTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.GPUCost + a.GPUCostAdjustment
 }
 
 // RAMTotalCost calculates total RAM cost of Allocation including adjustment
 func (a *Allocation) RAMTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.RAMCost + a.RAMCostAdjustment
 }
 
 // PVTotalCost calculates total PV cost of Allocation including adjustment
 func (a *Allocation) PVTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.PVCost() + a.PVCostAdjustment
 }
 
 // NetworkTotalCost calculates total Network cost of Allocation including adjustment
 func (a *Allocation) NetworkTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.NetworkCost + a.NetworkCostAdjustment
 }
 
 // LBTotalCost calculates total LB cost of Allocation including adjustment
 func (a *Allocation) LBTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.LoadBalancerCost + a.LoadBalancerCostAdjustment
 }
 
 // SharedTotalCost calculates total shared cost of Allocation including adjustment
 func (a *Allocation) SharedTotalCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.SharedCost
 }
 
 // PVCost calculate cumulative cost of all PVs that Allocation is attached to
 func (a *Allocation) PVCost() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	cost := 0.0
 	for _, pv := range a.PVs {
 		cost += pv.Cost
@@ -399,6 +435,10 @@ func (a *Allocation) PVCost() float64 {
 
 // PVByteHours calculate cumulative ByteHours of all PVs that Allocation is attached to
 func (a *Allocation) PVByteHours() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	byteHours := 0.0
 	for _, pv := range a.PVs {
 		byteHours += pv.ByteHours
@@ -410,6 +450,10 @@ func (a *Allocation) PVByteHours() float64 {
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (a *Allocation) CPUEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.CPUCoreRequestAverage > 0 {
 		return a.CPUCoreUsageAverage / a.CPUCoreRequestAverage
 	}
@@ -425,6 +469,10 @@ func (a *Allocation) CPUEfficiency() float64 {
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (a *Allocation) RAMEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.RAMBytesRequestAverage > 0 {
 		return a.RAMBytesUsageAverage / a.RAMBytesRequestAverage
 	}
@@ -439,6 +487,10 @@ func (a *Allocation) RAMEfficiency() float64 {
 // TotalEfficiency is the cost-weighted average of CPU and RAM efficiency. If
 // there is no cost at all, then efficiency is zero.
 func (a *Allocation) TotalEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	if a.RAMTotalCost()+a.CPUTotalCost() > 0 {
 		ramCostEff := a.RAMEfficiency() * a.RAMTotalCost()
 		cpuCostEff := a.CPUEfficiency() * a.CPUTotalCost()
@@ -482,6 +534,10 @@ func (a *Allocation) PVBytes() float64 {
 
 // ResetAdjustments sets all cost adjustment fields to zero
 func (a *Allocation) ResetAdjustments() {
+	if a == nil {
+		return
+	}
+
 	a.CPUCostAdjustment = 0.0
 	a.GPUCostAdjustment = 0.0
 	a.RAMCostAdjustment = 0.0
@@ -550,27 +606,47 @@ func (a *Allocation) IsAggregated() bool {
 
 // IsExternal is true if the given Allocation represents external costs.
 func (a *Allocation) IsExternal() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, ExternalSuffix)
 }
 
 // IsIdle is true if the given Allocation represents idle costs.
 func (a *Allocation) IsIdle() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, IdleSuffix)
 }
 
 // IsUnallocated is true if the given Allocation represents unallocated costs.
 func (a *Allocation) IsUnallocated() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, UnallocatedSuffix)
 }
 
 // IsUnmounted is true if the given Allocation represents unmounted volume costs.
 func (a *Allocation) IsUnmounted() bool {
+	if a == nil {
+		return false
+	}
+
 	return strings.Contains(a.Name, UnmountedSuffix)
 }
 
 // Minutes returns the number of minutes the Allocation represents, as defined
 // by the difference between the end and start times.
 func (a *Allocation) Minutes() float64 {
+	if a == nil {
+		return 0.0
+	}
+
 	return a.End.Sub(a.Start).Minutes()
 }
 
@@ -594,6 +670,10 @@ func (a *Allocation) Share(that *Allocation) (*Allocation, error) {
 
 // String represents the given Allocation as a string
 func (a *Allocation) String() string {
+	if a == nil {
+		return "<nil>"
+	}
+
 	return fmt.Sprintf("%s%s=%.2f", a.Name, NewWindow(&a.Start, &a.End), a.TotalCost())
 }
 
@@ -743,16 +823,16 @@ func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSe
 // succeeds, the allocation is marked as a shared resource. ShareIdle is a
 // simple flag for sharing idle resources.
 type AllocationAggregationOptions struct {
-	AllocationResourceTotalsStore ResourceTotalsStore
-	FilterFuncs                   []AllocationMatchFunc
-	IdleByNode                    bool
-	LabelConfig                   *LabelConfig
-	MergeUnallocated              bool
-	ShareFuncs                    []AllocationMatchFunc
-	ShareIdle                     string
-	ShareSplit                    string
-	SharedHourlyCosts             map[string]float64
-	SplitIdle                     bool
+	AllocationTotalsStore AllocationTotalsStore
+	FilterFuncs           []AllocationMatchFunc
+	IdleByNode            bool
+	LabelConfig           *LabelConfig
+	MergeUnallocated      bool
+	ShareFuncs            []AllocationMatchFunc
+	ShareIdle             string
+	ShareSplit            string
+	SharedHourlyCosts     map[string]float64
+	SplitIdle             bool
 }
 
 // AggregateBy aggregates the Allocations in the given AllocationSet by the given
@@ -894,8 +974,6 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// TODO shouldn't we just return all shared cost in this case below?
-
 	// It's possible that no more un-shared, non-idle, non-external allocations
 	// remain at this point. This always results in an emptySet, so return early.
 	if len(as.allocations) == 0 {
@@ -1235,8 +1313,6 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// TODO move the following to a self-contained function?
-
 	// (11) In the edge case that some idle has not been distributed because
 	// there is no usage of that resource type, add idle back to
 	// aggregations with only that cost applied.
@@ -1257,40 +1333,36 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	// __idle__ $0      $12     $0
 	// kubecost $12     $0      $7
 
-	if idleSet.Length() > 0 && !options.SplitIdle {
-		if undistributedIdleMap["cpu"] || undistributedIdleMap["gpu"] || undistributedIdleMap["ram"] {
-			for _, idleAlloc := range idleSet.allocations {
-				skip := false
-
-				// if the idle does not apply to the non-filtered values, skip it
-				for _, ff := range options.FilterFuncs {
-					if !ff(idleAlloc) {
-						skip = true
-						break
-					}
+	hasUndistributedIdle := undistributedIdleMap["cpu"] || undistributedIdleMap["gpu"] || undistributedIdleMap["ram"]
+	if idleSet.Length() > 0 && hasUndistributedIdle {
+		for _, idleAlloc := range idleSet.allocations {
+			// if the idle does not apply to the non-filtered values, skip it
+			skip := false
+			for _, ff := range options.FilterFuncs {
+				if !ff(idleAlloc) {
+					skip = true
+					break
 				}
+			}
+			if skip {
+				continue
+			}
 
-				if skip {
-					continue
+			// if the idle doesn't have a cost to be shared, also skip it
+			if idleAlloc.CPUCost != 0 && idleAlloc.GPUCost != 0 && idleAlloc.RAMCost != 0 {
+				// artificially set the already shared costs to zero
+				if !undistributedIdleMap["cpu"] {
+					idleAlloc.CPUCost = 0
 				}
-
-				// if the idle doesn't have a cost to be shared, also skip it
-				if idleAlloc.CPUCost != 0 && idleAlloc.GPUCost != 0 && idleAlloc.RAMCost != 0 {
-
-					// artificially set the already shared costs to zero
-					if !undistributedIdleMap["cpu"] {
-						idleAlloc.CPUCost = 0
-					}
-					if !undistributedIdleMap["gpu"] {
-						idleAlloc.GPUCost = 0
-					}
-					if !undistributedIdleMap["ram"] {
-						idleAlloc.RAMCost = 0
-					}
-
-					idleAlloc.Name = IdleSuffix
-					aggSet.Insert(idleAlloc)
+				if !undistributedIdleMap["gpu"] {
+					idleAlloc.GPUCost = 0
+				}
+				if !undistributedIdleMap["ram"] {
+					idleAlloc.RAMCost = 0
 				}
+
+				idleAlloc.Name = IdleSuffix
+				aggSet.Insert(idleAlloc)
 			}
 		}
 	}

+ 8 - 7
pkg/kubecost/allocationprops.go

@@ -129,9 +129,7 @@ func (p *AllocationProperties) Clone() *AllocationProperties {
 	clone.ProviderID = p.ProviderID
 
 	var services []string
-	for _, s := range p.Services {
-		services = append(services, s)
-	}
+	services = append(services, p.Services...)
 	clone.Services = services
 
 	labels := make(map[string]string, len(p.Labels))
@@ -230,6 +228,9 @@ func (p *AllocationProperties) Equal(that *AllocationProperties) bool {
 	return true
 }
 
+// GenerateKey generates a string that represents the key by which the
+// AllocationProperties should be aggregated, given the properties defined by
+// the aggregateBy parameter and the given label configuration.
 func (p *AllocationProperties) GenerateKey(aggregateBy []string, labelConfig *LabelConfig) string {
 	if p == nil {
 		return ""
@@ -481,13 +482,13 @@ func (p *AllocationProperties) String() string {
 	for k, prop := range p.Labels {
 		labelStrs = append(labelStrs, fmt.Sprintf("%s:%s", k, prop))
 	}
-	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(strs, ",")))
+	strs = append(strs, fmt.Sprintf("Labels:{%s}", strings.Join(labelStrs, ",")))
 
-	var AnnotationStrs []string
+	var annotationStrs []string
 	for k, prop := range p.Annotations {
-		AnnotationStrs = append(AnnotationStrs, fmt.Sprintf("%s:%s", k, prop))
+		annotationStrs = append(annotationStrs, fmt.Sprintf("%s:%s", k, prop))
 	}
-	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(strs, ",")))
+	strs = append(strs, fmt.Sprintf("Annotations:{%s}", strings.Join(annotationStrs, ",")))
 
 	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
 }

+ 344 - 275
pkg/kubecost/summaryallocation.go

@@ -10,14 +10,14 @@ import (
 	"github.com/kubecost/cost-model/pkg/log"
 )
 
-// SummaryAllocation summarizes Allocation, keeping only the fields necessary
-// for providing a high-level view of identifying the Allocation (Name) over a
-// defined period of time (Start, End) and inspecting per-resource costs
-// (subtotal with adjustment), total cost, and efficiency.
-//
-// TODO remove:
-// Diff: 25 primitive 4 structs => 15 primitive, 1 struct
+// SummaryAllocation summarizes an Allocation, keeping only fields necessary
+// for providing a high-level view of identifying the Allocation over a period
+// of time (Start, End) over which it ran, and inspecting the associated per-
+// resource costs (subtotaled with adjustments), total cost, and efficiency.
 //
+// SummaryAllocation does not have a concept of Window (i.e. the time period
+// within which it is defined, as opposed to the Start and End times). That
+// context must be provided by a SummaryAllocationSet.
 type SummaryAllocation struct {
 	Name                   string                `json:"name"`
 	Properties             *AllocationProperties `json:"-"`
@@ -38,16 +38,18 @@ type SummaryAllocation struct {
 	Share                  bool                  `json:"-"`
 }
 
+// NewSummaryAllocation converts an Allocation to a SummaryAllocation by
+// dropping unnecessary fields and consolidating others (e.g. adjustments).
+// Reconciliation happens here because that process is synonymous with the
+// consolidation of adjustment fields.
 func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *SummaryAllocation {
 	if alloc == nil {
 		return nil
 	}
 
-	// TODO evaluate performance penalties for "cloning" here
-
 	sa := &SummaryAllocation{
 		Name:                   alloc.Name,
-		Properties:             alloc.Properties.Clone(), // TODO blerg
+		Properties:             alloc.Properties.Clone(),
 		Start:                  alloc.Start,
 		End:                    alloc.End,
 		CPUCoreRequestAverage:  alloc.CPUCoreRequestAverage,
@@ -80,14 +82,21 @@ func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *
 	return sa
 }
 
-func (sa *SummaryAllocation) Add(that *SummaryAllocation) {
-	if sa == nil {
-		log.Warningf("SummaryAllocation.Add: trying to add a nil receiver")
-		return
+// Add sums two SummaryAllocations, adding the given SummaryAllocation to the
+// receiving one, thus mutating the receiver. For performance reasons, it
+// simply drops Properties, so a SummaryAllocation can only be Added once.
+func (sa *SummaryAllocation) Add(that *SummaryAllocation) error {
+	if sa == nil || that == nil {
+		return errors.New("cannot Add a nil SummaryAllocation")
+	}
+
+	if sa.Properties == nil {
+		return errors.New("cannot Add a SummaryAllocation without Properties")
 	}
 
-	// TODO do we need to maintain this?
-	// Once Added, a SummaryAllocation has no Properties
+	// Once Added, a SummaryAllocation has no Properties, preventing it from
+	// being Added a second time. This saves us from having to compute the
+	// intersection of two sets of Properties, which is expensive.
 	sa.Properties = nil
 
 	// Sum non-cumulative fields by turning them into cumulative, adding them,
@@ -135,12 +144,18 @@ func (sa *SummaryAllocation) Add(that *SummaryAllocation) {
 	sa.PVCost += that.PVCost
 	sa.RAMCost += that.RAMCost
 	sa.SharedCost += that.SharedCost
+
+	return nil
 }
 
 // CPUEfficiency is the ratio of usage to request. If there is no request and
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (sa *SummaryAllocation) CPUEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
 	if sa.CPUCoreRequestAverage > 0 {
 		return sa.CPUCoreUsageAverage / sa.CPUCoreRequestAverage
 	}
@@ -160,29 +175,51 @@ func (sa *SummaryAllocation) generateKey(aggregateBy []string, labelConfig *Labe
 	return sa.Properties.GenerateKey(aggregateBy, labelConfig)
 }
 
-// IsExternal is true if the given Allocation represents external costs.
+// IsExternal is true if the given SummaryAllocation represents external costs.
 func (sa *SummaryAllocation) IsExternal() bool {
+	if sa == nil {
+		return false
+	}
+
 	return strings.Contains(sa.Name, ExternalSuffix)
 }
 
-// IsIdle is true if the given Allocation represents idle costs.
+// IsIdle is true if the given SummaryAllocation represents idle costs.
 func (sa *SummaryAllocation) IsIdle() bool {
+	if sa == nil {
+		return false
+	}
+
 	return strings.Contains(sa.Name, IdleSuffix)
 }
 
-// IsUnallocated is true if the given Allocation represents unallocated costs.
+// IsUnallocated is true if the given SummaryAllocation represents unallocated
+// costs.
 func (sa *SummaryAllocation) IsUnallocated() bool {
+	if sa == nil {
+		return false
+	}
+
 	return strings.Contains(sa.Name, UnallocatedSuffix)
 }
 
-// IsUnmounted is true if the given Allocation represents unmounted volume costs.
+// IsUnmounted is true if the given SummaryAllocation represents unmounted
+// volume costs.
 func (sa *SummaryAllocation) IsUnmounted() bool {
+	if sa == nil {
+		return false
+	}
+
 	return strings.Contains(sa.Name, UnmountedSuffix)
 }
 
 // Minutes returns the number of minutes the SummaryAllocation represents, as
 // defined by the difference between the end and start times.
 func (sa *SummaryAllocation) Minutes() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
 	return sa.End.Sub(sa.Start).Minutes()
 }
 
@@ -190,6 +227,10 @@ func (sa *SummaryAllocation) Minutes() float64 {
 // no usage or cost, then efficiency is zero. If there is no request, but there
 // is usage or cost, then efficiency is 100%.
 func (sa *SummaryAllocation) RAMEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
 	if sa.RAMBytesRequestAverage > 0 {
 		return sa.RAMBytesUsageAverage / sa.RAMBytesRequestAverage
 	}
@@ -203,12 +244,20 @@ func (sa *SummaryAllocation) RAMEfficiency() float64 {
 
 // TotalCost is the total cost of the SummaryAllocation
 func (sa *SummaryAllocation) TotalCost() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
 	return sa.CPUCost + sa.GPUCost + sa.RAMCost + sa.PVCost + sa.NetworkCost + sa.LoadBalancerCost + sa.SharedCost + sa.ExternalCost
 }
 
 // TotalEfficiency is the cost-weighted average of CPU and RAM efficiency. If
 // there is no cost at all, then efficiency is zero.
 func (sa *SummaryAllocation) TotalEfficiency() float64 {
+	if sa == nil {
+		return 0.0
+	}
+
 	if sa.RAMCost+sa.CPUCost > 0 {
 		ramCostEff := sa.RAMEfficiency() * sa.RAMCost
 		cpuCostEff := sa.CPUEfficiency() * sa.CPUCost
@@ -229,14 +278,20 @@ type SummaryAllocationSet struct {
 	Window             Window                        `json:"window"`
 }
 
+// NewSummaryAllocationSet converts an AllocationSet to a SummaryAllocationSet.
+// Filter functions, sharing functions, and reconciliation parameters are
+// required for unfortunate reasons to do with performance and legacy order-of-
+// operations details, as well as the fact that reconciliation has been
+// pushed down to the conversion step between Allocation and SummaryAllocation.
 func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc, reconcile, reconcileNetwork bool) *SummaryAllocationSet {
 	if as == nil {
 		return nil
 	}
 
-	// TODO comment in function
+	// If we can know the exact size of the map, use it. If filters or sharing
+	// functions are present, we can't know the size, so we make a default map.
 	var sasMap map[string]*SummaryAllocation
-	if len(ffs) == 0 {
+	if len(ffs) == 0 && len(sfs) == 0 {
 		// No filters, so make the map of summary allocations exactly the size
 		// of the origin allocation set.
 		sasMap = make(map[string]*SummaryAllocation, len(as.allocations))
@@ -298,6 +353,13 @@ func NewSummaryAllocationSet(as *AllocationSet, ffs, sfs []AllocationMatchFunc,
 	return sas
 }
 
+// Add sums two SummaryAllocationSets, which Adds all SummaryAllocations in the
+// given SummaryAllocationSet to their counterparts in the receiving set. Add
+// also expands the Window to include both constituent Windows, in the case
+// that Add is being used for accumulating (as opposed to aggregating). For
+// performance reasons, the function may return either a new set, or an
+// unmodified original, so it should not be assumed that the original sets are
+// safely usable after calling Add.
 func (sas *SummaryAllocationSet) Add(that *SummaryAllocationSet) (*SummaryAllocationSet, error) {
 	if sas == nil || len(sas.SummaryAllocations) == 0 {
 		return that, nil
@@ -307,6 +369,10 @@ func (sas *SummaryAllocationSet) Add(that *SummaryAllocationSet) (*SummaryAlloca
 		return sas, nil
 	}
 
+	if sas.Window.IsOpen() {
+		return nil, errors.New("cannot add a SummaryAllocationSet with an open window")
+	}
+
 	// Set start, end to min(start), max(end)
 	start := *sas.Window.Start()
 	end := *sas.Window.End()
@@ -349,46 +415,6 @@ func (sas *SummaryAllocationSet) Add(that *SummaryAllocationSet) (*SummaryAlloca
 // AllocationProperty. This will only be legal if the AllocationSet is divisible by the
 // given AllocationProperty; e.g. Containers can be divided by Namespace, but not vice-a-versa.
 func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *AllocationAggregationOptions) error {
-	// The order of operations for aggregating allocations is as follows:
-	//
-	//  #. Partition external, idle, and shared allocations into separate sets.
-	//     Also, create the aggSet into which the results will be aggregated.
-	//
-	//  #. Retrieve pre-computed allocation resource totals, which are to be
-	//     used to compute idle coefficients, based on the ratio of any given
-	//     allocation's per-resource cost to the allocated per-resource totals.
-	//
-	//  #. Distribute idle allocations according to the idle coefficients
-	//
-	//  #. Generate aggregation key and insert allocation into the output set
-	//
-	//  #. If idle is shared and other allocations are shared, it's probable
-	//     that some amount of idle cost will be shared with a shared resource.
-	//     Distribute that idle allocation, if it exists, to the respective
-	//     shared allocations before sharing them with the aggregated
-	//     allocations.
-	//
-	//  #. Apply idle filtration
-	//
-	//  #. Distribute shared allocations
-	//
-	//  #. Distribute shared tenancy costs
-	//
-	//  TODO
-	//  #. If there are external allocations that can be aggregated into
-	//     the output (i.e. they can be used to generate a valid key for
-	//     the given properties) then aggregate; otherwise... ignore them?
-	//
-	//  #. If the merge idle option is enabled, merge any remaining idle
-	//     allocations into a single idle allocation. If there was any idle
-	//	   whose costs were not distributed because there was no usage of a
-	//     specific resource type, re-add the idle to the aggregation with
-	//     only that type.
-	//
-	//  TODO
-	//  #. Distribute any undistributed idle, in the case that idle
-	//     coefficients end up being zero and some idle is not shared.
-
 	if sas == nil || len(sas.SummaryAllocations) == 0 {
 		return nil
 	}
@@ -415,8 +441,54 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		return nil
 	}
 
-	// aggSet will collect the aggregated allocations
-	aggSet := &SummaryAllocationSet{
+	// The order of operations for aggregating a SummaryAllocationSet is as
+	// follows:
+	//
+	//  1. Partition external, idle, and shared allocations into separate sets.
+	//     Also, create the resultSet into which the results will be aggregated.
+	//
+	//  2. Record resource totals for shared costs and unmounted volumes so
+	//     that we can account for them in computing idle coefficients.
+	//
+	//  3. Retrieve pre-computed allocation resource totals, which will be used
+	//     to compute idle sharing coefficients.
+	//
+	//  4. Compute sharing coefficients per-aggregation, if sharing resources.
+	//
+	//  5. Distribute idle allocations according to the idle coefficients.
+	//
+	//  6. Record allocation resource totals (after filtration) if filters have
+	//     been applied. (Used for filtering proportional amount of idle.)
+	//
+	//  7. Generate aggregation key and insert allocation into the output set
+	//
+	//  8. If idle is shared and resources are shared, it's probable that some
+	//     amount of idle cost will be shared with a shared resource.
+	//     Distribute that idle cost, if it exists, among the respective shared
+	//     allocations before sharing them with the aggregated allocations.
+	//
+	//  9. Apply idle filtration, which "filters" the idle cost, or scales it
+	//     by the proportion of allocation resources remaining after filters
+	//     have been applied.
+	//
+	// 10. Convert shared hourly cost into a cumulative allocation to share,
+	//     and insert it into the share set.
+	//
+	// 11. Distribute shared resources according to sharing coefficients.
+	//
+	// 12. Insert external allocations into the result set.
+	//
+	// 13. Insert any undistributed idle, in the case that idle
+	//     coefficients end up being zero and some idle is not shared.
+	//
+	// 14. Combine all idle allocations into a single idle allocation, unless
+	//     the option to keep idle split by cluster or node is enabled.
+
+	// 1. Partition external, idle, and shared allocations into separate sets.
+	// Also, create the resultSet into which the results will be aggregated.
+
+	// resultSet will collect the aggregated allocations
+	resultSet := &SummaryAllocationSet{
 		Window: sas.Window.Clone(),
 	}
 
@@ -425,13 +497,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		Window: sas.Window.Clone(),
 	}
 
-	// idleSet will be shared among aggSet after initial aggregation
+	// idleSet will be shared among resultSet after initial aggregation
 	// is complete
 	idleSet := &SummaryAllocationSet{
 		Window: sas.Window.Clone(),
 	}
 
-	// shareSet will be shared among aggSet after initial aggregation
+	// shareSet will be shared among resultSet after initial aggregation
 	// is complete
 	shareSet := &SummaryAllocationSet{
 		Window: sas.Window.Clone(),
@@ -440,23 +512,25 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 	sas.Lock()
 	defer sas.Unlock()
 
-	// TODO comment
-	sharedResourceTotals := map[string]*ResourceTotals{}
+	// 2. Record resource totals for shared costs, aggregating by cluster or by
+	// node (depending on if idle is partitioned by cluster or node) so that we
+	// can account for them in computing idle coefficients. Do the same for
+	// unmounted volume costs, which only require a total cost.
+	sharedResourceTotals := map[string]*AllocationTotals{}
 	totalUnmountedCost := 0.0
 
-	// TODO comment
+	// 1 & 2. Identify set membership and aggregate aforementioned totals.
 	for _, sa := range sas.SummaryAllocations {
-		// Identify and separate shared allocations into their own set.
 		if sa.Share {
 			var key string
 			if options.IdleByNode {
-				key = sa.Properties.Node
+				key = fmt.Sprintf("%s/%s", sa.Properties.Cluster, sa.Properties.Node)
 			} else {
 				key = sa.Properties.Cluster
 			}
 
 			if _, ok := sharedResourceTotals[key]; !ok {
-				sharedResourceTotals[key] = &ResourceTotals{}
+				sharedResourceTotals[key] = &AllocationTotals{}
 			}
 			sharedResourceTotals[key].CPUCost += sa.CPUCost
 			sharedResourceTotals[key].GPUCost += sa.GPUCost
@@ -465,9 +539,6 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			sharedResourceTotals[key].PersistentVolumeCost += sa.PVCost
 			sharedResourceTotals[key].RAMCost += sa.RAMCost
 
-			// TODO with shared tenancy costs, do we need to account for
-			// shared cost here too?
-
 			shareSet.Insert(sa)
 			delete(sas.SummaryAllocations, sa.Name)
 
@@ -486,7 +557,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 
 		// Idle allocations should be separated into idleSet if they are to be
 		// shared later on. If they are not to be shared, then add them to the
-		// aggSet like any other allocation.
+		// resultSet like any other allocation.
 		if sa.IsIdle() {
 			delete(sas.idleKeys, sa.Name)
 			delete(sas.SummaryAllocations, sa.Name)
@@ -494,7 +565,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			if options.ShareIdle == ShareEven || options.ShareIdle == ShareWeighted {
 				idleSet.Insert(sa)
 			} else {
-				aggSet.Insert(sa)
+				resultSet.Insert(sa)
 			}
 
 			continue
@@ -507,99 +578,113 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
-	// TODO do we need to handle case where len(SummaryAllocations) == 0?
+	// TODO summary: do we need to handle case where len(SummaryAllocations) == 0?
 
-	// (#) Retrieve pre-computed allocation resource totals, which are to be
-	// used to compute idle coefficients, based on the ratio of any given
-	// allocation's per-resource cost to the allocated per-resource totals.
-	var allocTotals map[string]*ResourceTotals
+	// 3. Retrieve pre-computed allocation resource totals, which will be used
+	// to compute idle coefficients, based on the ratio of an allocation's per-
+	// resource cost to the per-resource totals of that allocation's cluster or
+	// node. Whether to perform this operation based on cluster or node is an
+	// option. (See IdleByNode documentation; defaults to idle-by-cluster.)
+	var allocTotals map[string]*AllocationTotals
+	var ok bool
 	if options.IdleByNode {
-		if options.AllocationResourceTotalsStore != nil {
-			allocTotals = options.AllocationResourceTotalsStore.GetResourceTotalsByNode(*sas.Window.Start(), *sas.Window.End())
-			if allocTotals == nil {
-				// TODO
-				log.Warningf("SummaryAllocation: nil allocTotals by node for %s", sas.Window)
+		if options.AllocationTotalsStore != nil {
+			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByNode(*sas.Window.Start(), *sas.Window.End())
+			if !ok {
+				return fmt.Errorf("nil allocation resource totals by node for %s", sas.Window)
 			}
-		} else {
-			// TODO ?
 		}
 	} else {
-		if options.AllocationResourceTotalsStore != nil {
-			allocTotals = options.AllocationResourceTotalsStore.GetResourceTotalsByCluster(*sas.Window.Start(), *sas.Window.End())
-			if allocTotals == nil {
-				// TODO
-				log.Warningf("SummaryAllocation: nil allocTotals by cluster for %s", sas.Window)
+		if options.AllocationTotalsStore != nil {
+			allocTotals, ok = options.AllocationTotalsStore.GetAllocationTotalsByCluster(*sas.Window.Start(), *sas.Window.End())
+			if !ok {
+				return fmt.Errorf("nil allocation resource totals by cluster for %s", sas.Window)
 			}
-		} else {
-			// TODO ?
 		}
 	}
 
-	// Instantiate filtered totals map if filters have been applied and there
-	// are un-shared idle allocations in the result set. These will be
-	// populated in step (#) and used to "filter" out the correct proportion
-	// of idle costs, given that non-idle allocations have been filtered.
-	var filteredTotals map[string]*ResourceTotals
-	if len(aggSet.idleKeys) > 0 && len(options.FilterFuncs) > 0 {
-		filteredTotals = make(map[string]*ResourceTotals, len(aggSet.idleKeys))
+	// TODO summary: make sure that we're robust to missing (nil) allocTotals.
+	// To test, pass nil options.AllocationTotalsStore.
+
+	// If filters have been applied, then we need to record allocation resource
+	// totals after filtration (i.e. the allocations that are present) so that
+	// we can identify the proportion of idle cost to keep. That is, we should
+	// only return the idle cost that would be shared with the remaining
+	// allocations, even if we're keeping idle separate. The totals should be
+	// recorded by idle-key (cluster or node, depending on the IdleByNode
+	// option). Instantiating this map is a signal to record the totals.
+	var allocTotalsAfterFilters map[string]*AllocationTotals
+	if len(resultSet.idleKeys) > 0 && len(options.FilterFuncs) > 0 {
+		allocTotalsAfterFilters = make(map[string]*AllocationTotals, len(resultSet.idleKeys))
+	}
 
-		// If costs are shared, record those resource totals here so that idle
-		// for the shared resources gets included.
+	// If we're recording allocTotalsAfterFilters and there are shared costs,
+	// then record those resource totals here so that idle for those shared
+	// resources gets included.
+	if allocTotalsAfterFilters != nil {
 		for key, rt := range sharedResourceTotals {
-			if _, ok := filteredTotals[key]; !ok {
-				filteredTotals[key] = &ResourceTotals{}
+			if _, ok := allocTotalsAfterFilters[key]; !ok {
+				allocTotalsAfterFilters[key] = &AllocationTotals{}
 			}
 
-			// TODO do we need PV, Network, LoadBalancer here?
-
-			filteredTotals[key].CPUCost += rt.CPUCost
-			filteredTotals[key].GPUCost += rt.GPUCost
-			filteredTotals[key].RAMCost += rt.RAMCost
+			// Record only those fields required for computing idle
+			allocTotalsAfterFilters[key].CPUCost += rt.CPUCost
+			allocTotalsAfterFilters[key].GPUCost += rt.GPUCost
+			allocTotalsAfterFilters[key].RAMCost += rt.RAMCost
 		}
 	}
 
-	sharingCoeffs := map[string]float64{}
+	// Sharing coefficients are recorded by post-aggregation-key (e.g. if
+	// aggregating by namespace, then the key will be the namespace) and only
+	// need to be recorded if there are shared resources. Instantiating this
+	// map is the signal to record sharing coefficients.
+	var sharingCoeffs map[string]float64
+	if len(shareSet.SummaryAllocations) > 0 {
+		sharingCoeffs = map[string]float64{}
+	}
 
-	// (#) Distribute idle cost, if sharing
-	// (#) Distribute tenancy costs, if sharing
-	// (#) Aggregate
-	// TODO better comment
+	// Loop over all remaining SummaryAllocations (after filters, sharing, &c.)
+	// doing the following, in this order:
+	//  4. Compute sharing coefficients, if there are shared resources
+	//  5. Distribute idle cost, if sharing idle
+	//  6. Record allocTotalsAfterFilters, if filters have been applied
+	//  7. Aggregate by key
 	for _, sa := range sas.SummaryAllocations {
 		// Generate key to use for aggregation-by-key and allocation name
 		key := sa.generateKey(aggregateBy, options.LabelConfig)
 
-		// TODO comment
-		// Do this before adding idle cost
-		if options.ShareSplit == ShareEven {
-			sharingCoeffs[key] += 1.0
-		} else {
+		// 4. Incrementally add to sharing coefficients before adding idle
+		// cost, which would skew the coefficients. These coefficients will be
+		// later divided by a total, turning them into a coefficient between
+		// 0.0 and 1.0.
+		// NOTE: SummaryAllocation does not support ShareEven, so only record
+		// by cost for cost-weighted distribution.
+		if sharingCoeffs != nil {
 			sharingCoeffs[key] += sa.TotalCost()
 		}
 
-		// Distribute idle allocations according to the idle coefficients
+		// 5. Distribute idle allocations according to the idle coefficients.
 		// NOTE: if idle allocation is off (i.e. ShareIdle == ShareNone) then
-		// all idle allocations will be in the aggSet at this point, so idleSet
+		// all idle allocations will be in the resultSet at this point, so idleSet
 		// will be empty and we won't enter this block.
 		if len(idleSet.SummaryAllocations) > 0 {
-			// Distribute idle allocations by coefficient per-idleId, per-allocation
 			for _, idle := range idleSet.SummaryAllocations {
+				// Idle key is either cluster or node, as determined by the
+				// IdleByNode option.
 				var key string
 
 				// Only share idle allocation with current allocation (sa) if
-				// the relevant property matches (i.e. Cluster or Node,
-				// depending on which idle sharing option is selected)
+				// the relevant properties match (i.e. cluster and/or node)
+				if idle.Properties.Cluster != sa.Properties.Cluster {
+					continue
+				}
+				key = idle.Properties.Cluster
+
 				if options.IdleByNode {
 					if idle.Properties.Node != sa.Properties.Node {
 						continue
 					}
-
-					key = idle.Properties.Node
-				} else {
-					if idle.Properties.Cluster != sa.Properties.Cluster {
-						continue
-					}
-
-					key = idle.Properties.Cluster
+					key = fmt.Sprintf("%s/%s", idle.Properties.Cluster, idle.Properties.Node)
 				}
 
 				cpuCoeff, gpuCoeff, ramCoeff := ComputeIdleCoefficients(options.ShareIdle, key, sa.CPUCost, sa.GPUCost, sa.RAMCost, allocTotals)
@@ -620,24 +705,20 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			sa.Name = UnallocatedSuffix
 		}
 
-		// TODO do we need to be going off of "cluster/node" for "node" in all these places?
-
-		// Record filtered resource totals for idle allocation filtration, if
-		// necessary
-		if filteredTotals != nil {
-			var key string
+		// 6. Record filtered resource totals for idle allocation filtration,
+		// only if necessary.
+		if allocTotalsAfterFilters != nil {
+			key := sa.Properties.Cluster
 			if options.IdleByNode {
-				key = sa.Properties.Node
-			} else {
-				key = sa.Properties.Cluster
+				key = fmt.Sprintf("%s/%s", sa.Properties.Cluster, sa.Properties.Node)
 			}
 
-			if _, ok := filteredTotals[key]; ok {
-				filteredTotals[key].CPUCost += sa.CPUCost
-				filteredTotals[key].GPUCost += sa.GPUCost
-				filteredTotals[key].RAMCost += sa.RAMCost
+			if _, ok := allocTotalsAfterFilters[key]; ok {
+				allocTotalsAfterFilters[key].CPUCost += sa.CPUCost
+				allocTotalsAfterFilters[key].GPUCost += sa.GPUCost
+				allocTotalsAfterFilters[key].RAMCost += sa.RAMCost
 			} else {
-				filteredTotals[key] = &ResourceTotals{
+				allocTotalsAfterFilters[key] = &AllocationTotals{
 					CPUCost: sa.CPUCost,
 					GPUCost: sa.GPUCost,
 					RAMCost: sa.RAMCost,
@@ -645,15 +726,15 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			}
 		}
 
-		// Inserting the allocation with the generated key for a name will
-		// perform the actual aggregation step.
-		aggSet.Insert(sa)
+		// 7. Inserting the allocation with the generated key for a name
+		// performs the actual aggregation step.
+		resultSet.Insert(sa)
 	}
 
-	// (#) If idle is shared and other allocations are shared, it's probable
-	// that some amount of idle cost will be shared with a shared resource.
-	// Distribute that idle allocation, if it exists, to the respective shared
-	// allocations before sharing them with the aggregated allocations.
+	// 8. If idle is shared and resources are shared, it's probable that some
+	// amount of idle cost will be shared with a shared resource. Distribute
+	// that idle cost, if it exists, among the respective shared allocations
+	// before sharing them with the aggregated allocations.
 	if len(idleSet.SummaryAllocations) > 0 && len(shareSet.SummaryAllocations) > 0 {
 		for _, sa := range shareSet.SummaryAllocations {
 			for _, idle := range idleSet.SummaryAllocations {
@@ -685,10 +766,14 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
-	// (#) Apply idle filtration
-	if filteredTotals != nil {
-		for idleKey := range aggSet.idleKeys {
-			ia := aggSet.SummaryAllocations[idleKey]
+	// 9. Apply idle filtration, which "filters" the idle cost, i.e. scales
+	// idle allocation costs per-resource by the proportion of allocation
+	// resources remaining after filtering. In effect, this returns only the
+	// idle costs that would have been shared with the remaining allocations,
+	// even if idle is kept separated.
+	if allocTotalsAfterFilters != nil {
+		for idleKey := range resultSet.idleKeys {
+			ia := resultSet.SummaryAllocations[idleKey]
 
 			var key string
 			if options.IdleByNode {
@@ -701,18 +786,18 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			// which equals the proportion of filtered-to-actual cost.
 			cpuFilterCoeff := 0.0
 			if allocTotals[key].CPUCost > 0.0 {
-				cpuFilterCoeff = filteredTotals[key].CPUCost / allocTotals[key].CPUCost
+				cpuFilterCoeff = allocTotalsAfterFilters[key].CPUCost / allocTotals[key].CPUCost
 			}
 
 			gpuFilterCoeff := 0.0
 			if allocTotals[key].RAMCost > 0.0 {
-				gpuFilterCoeff = filteredTotals[key].RAMCost / allocTotals[key].RAMCost
+				gpuFilterCoeff = allocTotalsAfterFilters[key].RAMCost / allocTotals[key].RAMCost
 			}
 
 			ramFilterCoeff := 0.0
 
 			if allocTotals[key].RAMCost > 0.0 {
-				ramFilterCoeff = filteredTotals[key].RAMCost / allocTotals[key].RAMCost
+				ramFilterCoeff = allocTotalsAfterFilters[key].RAMCost / allocTotals[key].RAMCost
 			}
 
 			ia.CPUCost *= cpuFilterCoeff
@@ -721,7 +806,8 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
-	// (#) Convert shared hourly cost into a cumulative allocation to share
+	// 10. Convert shared hourly cost into a cumulative allocation to share,
+	// and insert it into the share set.
 	for name, cost := range options.SharedHourlyCosts {
 		if cost > 0.0 {
 			hours := sas.Window.Hours()
@@ -743,50 +829,34 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
-	// (#) Distribute shared resources.
+	// 11. Distribute shared resources according to sharing coefficients.
+	// NOTE: ShareEven is not supported
 	if len(shareSet.SummaryAllocations) > 0 {
-		// TODO deprecate ShareEven or accept new definition?
-		// TODO consider sharing by Cluster or Node?
-
 		sharingCoeffDenominator := 0.0
 		for _, rt := range allocTotals {
-			if options.ShareSplit == ShareEven {
-				sharingCoeffDenominator += float64(rt.Count)
-			} else {
-				sharingCoeffDenominator += rt.TotalCost()
-			}
+			sharingCoeffDenominator += rt.TotalCost()
 		}
 
 		// Do not include the shared costs, themselves, when determining
 		// sharing coefficients.
 		for _, rt := range sharedResourceTotals {
-			if options.ShareSplit == ShareEven {
-				sharingCoeffDenominator -= float64(rt.Count)
-			} else {
-				sharingCoeffDenominator -= rt.TotalCost()
-			}
+			sharingCoeffDenominator -= rt.TotalCost()
 		}
 
 		// Do not include the unmounted costs when determining sharing
 		// coefficients becuase they do not receive shared costs.
-		if options.ShareSplit == ShareEven {
-			if totalUnmountedCost > 0.0 {
-				sharingCoeffDenominator -= 1.0
-			}
-		} else {
-			sharingCoeffDenominator -= totalUnmountedCost
-		}
-
-		// Compute sharing coeffs by dividing the thus-far accumulated
-		// numerators by the now-finalized denominator.
-		for key := range sharingCoeffs {
-			sharingCoeffs[key] /= sharingCoeffDenominator
-		}
+		sharingCoeffDenominator -= totalUnmountedCost
 
-		if sharingCoeffDenominator == 0.0 {
-			log.Warningf("SummaryAllocation: sharing coefficient denominator is 0.0")
+		if sharingCoeffDenominator <= 0.0 {
+			log.Warningf("SummaryAllocation: sharing coefficient denominator is %f", sharingCoeffDenominator)
 		} else {
-			for key, sa := range aggSet.SummaryAllocations {
+			// Compute sharing coeffs by dividing the thus-far accumulated
+			// numerators by the now-finalized denominator.
+			for key := range sharingCoeffs {
+				sharingCoeffs[key] /= sharingCoeffDenominator
+			}
+
+			for key, sa := range resultSet.SummaryAllocations {
 				// Idle and unmounted allocations, by definition, do not
 				// receive shared cost
 				if sa.IsIdle() || sa.IsUnmounted() {
@@ -809,12 +879,13 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 	}
 
-	// TODO
-	// (#) External allocations
+	// 12. Insert external allocations into the result set.
 	for _, sa := range externalSet.SummaryAllocations {
 		skip := false
 
-		// TODO deal with filters...
+		// TODO summary: deal with filters... maybe make an Allocation with the
+		// same Properties and test the filter func?
+
 		// for _, ff := range options.FilterFuncs {
 		// 	if !ff(sa) {
 		// 		skip = true
@@ -826,87 +897,73 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 			key := sa.generateKey(aggregateBy, options.LabelConfig)
 
 			sa.Name = key
-			aggSet.Insert(sa)
+			resultSet.Insert(sa)
 		}
 	}
 
-	// (#) Combine all idle allocations into a single "__idle__" allocation
-	if !options.SplitIdle {
-		for _, idleAlloc := range aggSet.IdleAllocations() {
-			aggSet.Delete(idleAlloc.Name)
-			idleAlloc.Name = IdleSuffix
-			aggSet.Insert(idleAlloc)
+	// 13. Distribute remaining, undistributed idle. Undistributed idle is any
+	// per-resource idle cost for which there can be no idle coefficient
+	// computed because there is zero usage across all allocations.
+	for _, ia := range idleSet.SummaryAllocations {
+		key := ia.Properties.Cluster
+		if options.IdleByNode {
+			key = fmt.Sprintf("%s/%s", ia.Properties.Cluster, ia.Properties.Node)
 		}
-	}
-
-	// TODO
-	// (#) Distribute remaining, undistributed idle
-
-	// Replace the existing set's data with the new, aggregated summary data
-	sas.SummaryAllocations = aggSet.SummaryAllocations
-
-	return nil
-}
 
-func (sas *SummaryAllocationSet) InsertIdleSummaryAllocations(rts map[string]*ResourceTotals, prop AssetProperty) error {
-	if sas == nil {
-		return errors.New("cannot compute idle allocation for nil SummaryAllocationSet")
-	}
+		rt, ok := allocTotals[key]
+		if !ok {
+			log.Warningf("SummaryAllocation: AggregateBy: cannot handle undistributed idle for '%s'", key)
+			continue
+		}
 
-	if len(rts) == 0 {
-		return nil
-	}
+		hasUndistributableCost := false
 
-	// TODO argh avoid copy? Not the worst thing at this size... O(clusters) or O(nodes)
-	idleTotals := make(map[string]*ResourceTotals, len(rts))
-	for key, rt := range rts {
-		idleTotals[key] = &ResourceTotals{
-			Start:   rt.Start,
-			End:     rt.End,
-			CPUCost: rt.CPUCost,
-			GPUCost: rt.GPUCost,
-			RAMCost: rt.RAMCost,
+		if ia.CPUCost > 0.0 && rt.CPUCost == 0.0 {
+			// There is idle CPU cost, but no allocated CPU cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.CPUCost = 0.0
 		}
-	}
 
-	// Subtract allocated costs from resource totals, leaving only the remaining
-	// idle totals for each key (cluster or node).
-	sas.Each(func(name string, sa *SummaryAllocation) {
-		key := sa.Properties.Cluster
-		if prop == AssetNodeProp {
-			key = sa.Properties.Node
+		if ia.GPUCost > 0.0 && rt.GPUCost == 0.0 {
+			// There is idle GPU cost, but no allocated GPU cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.GPUCost = 0.0
 		}
 
-		if _, ok := idleTotals[key]; !ok {
-			// Failed to find totals for the allocation's cluster or node.
-			// (Should never happen.)
-			log.Warningf("InsertIdleSummaryAllocations: failed to find %s: %s", prop, key)
-			return
+		if ia.RAMCost > 0.0 && rt.RAMCost == 0.0 {
+			// There is idle RAM cost, but no allocated RAM cost, so that cost
+			// is undistributable and must be inserted.
+			hasUndistributableCost = true
+		} else {
+			// Cost was entirely distributed, so zero it out
+			ia.RAMCost = 0.0
 		}
 
-		idleTotals[key].CPUCost -= sa.CPUCost
-		idleTotals[key].GPUCost -= sa.GPUCost
-		idleTotals[key].RAMCost -= sa.RAMCost
-	})
-
-	// Turn remaining idle totals into idle allocations and insert them.
-	for key, rt := range idleTotals {
-		idleAlloc := &SummaryAllocation{
-			Name: fmt.Sprintf("%s/%s", key, IdleSuffix),
-			Properties: &AllocationProperties{
-				Cluster: rt.Cluster,
-				Node:    rt.Node,
-			},
-			Start:   rt.Start,
-			End:     rt.End,
-			CPUCost: rt.CPUCost,
-			GPUCost: rt.GPUCost,
-			RAMCost: rt.RAMCost,
+		if hasUndistributableCost {
+			ia.Name = fmt.Sprintf("%s/%s", key, IdleSuffix)
+			resultSet.Insert(ia)
 		}
+	}
 
-		sas.Insert(idleAlloc)
+	// 14. Combine all idle allocations into a single idle allocation, unless
+	// the option to keep idle split by cluster or node is enabled.
+	if !options.SplitIdle {
+		for _, ia := range resultSet.idleAllocations() {
+			resultSet.Delete(ia.Name)
+			ia.Name = IdleSuffix
+			resultSet.Insert(ia)
+		}
 	}
 
+	// Replace the existing set's data with the new, aggregated summary data
+	sas.SummaryAllocations = resultSet.SummaryAllocations
+
 	return nil
 }
 
@@ -936,7 +993,7 @@ func (sas *SummaryAllocationSet) Each(f func(string, *SummaryAllocation)) {
 }
 
 // IdleAllocations returns a map of the idle allocations in the AllocationSet.
-func (sas *SummaryAllocationSet) IdleAllocations() map[string]*SummaryAllocation {
+func (sas *SummaryAllocationSet) idleAllocations() map[string]*SummaryAllocation {
 	idles := map[string]*SummaryAllocation{}
 
 	if sas == nil || len(sas.SummaryAllocations) == 0 {
@@ -948,7 +1005,7 @@ func (sas *SummaryAllocationSet) IdleAllocations() map[string]*SummaryAllocation
 
 	for key := range sas.idleKeys {
 		if sa, ok := sas.SummaryAllocations[key]; ok {
-			idles[key] = sa // TODO Clone()?
+			idles[key] = sa
 		}
 	}
 
@@ -963,6 +1020,10 @@ func (sas *SummaryAllocationSet) Insert(sa *SummaryAllocation) error {
 		return fmt.Errorf("cannot insert into nil SummaryAllocationSet")
 	}
 
+	if sa == nil {
+		return fmt.Errorf("cannot insert a nil SummaryAllocation")
+	}
+
 	sas.Lock()
 	defer sas.Unlock()
 
@@ -981,7 +1042,10 @@ func (sas *SummaryAllocationSet) Insert(sa *SummaryAllocation) error {
 	// Add the given Allocation to the existing entry, if there is one;
 	// otherwise just set directly into allocations
 	if _, ok := sas.SummaryAllocations[sa.Name]; ok {
-		sas.SummaryAllocations[sa.Name].Add(sa)
+		err := sas.SummaryAllocations[sa.Name].Add(sa)
+		if err != nil {
+			return fmt.Errorf("SummaryAllocationSet.Insert: error trying to Add: %s", err)
+		}
 	} else {
 		sas.SummaryAllocations[sa.Name] = sa
 	}
@@ -1008,7 +1072,12 @@ type SummaryAllocationSetRange struct {
 }
 
 // NewSummaryAllocationSetRange instantiates a new range composed of the given
-// SummaryAllocationSets in the order provided.
+// SummaryAllocationSets in the order provided. The expectations about the
+// SummaryAllocationSets are as follows:
+// - window durations are all equal
+// - sets are consecutive (i.e. chronologically sorted)
+// - there are no gaps between sets
+// - sets do not have overlapping windows
 func NewSummaryAllocationSetRange(sass ...*SummaryAllocationSet) *SummaryAllocationSetRange {
 	var step time.Duration
 	window := NewWindow(nil, nil)
@@ -1062,6 +1131,8 @@ func (sasr *SummaryAllocationSetRange) AggregateBy(aggregateBy []string, options
 	for _, sas := range sasr.SummaryAllocationSets {
 		err := sas.AggregateBy(aggregateBy, options)
 		if err != nil {
+			// Wipe out data so that corrupt data cannot be mistakenly used
+			sasr.SummaryAllocationSets = []*SummaryAllocationSet{}
 			return err
 		}
 	}
@@ -1109,13 +1180,17 @@ func (sasr *SummaryAllocationSetRange) Each(f func(int, *SummaryAllocationSet))
 	}
 }
 
-// TODO this stinks. Can we do better with external cost so that we can remove?
+// InsertExternalAllocations takes all allocations in the given
+// AllocationSetRange (they should all be considered "external") and inserts
+// them into the receiving SummaryAllocationSetRange.
+// TODO:CLEANUP replace this with a better idea (or get rid of external
+// allocations, as such, altogether)
 func (sasr *SummaryAllocationSetRange) InsertExternalAllocations(that *AllocationSetRange) error {
 	if sasr == nil {
 		return fmt.Errorf("cannot insert range into nil AllocationSetRange")
 	}
 
-	// keys maps window to index in asr
+	// keys maps window to index in range
 	keys := map[string]int{}
 	for i, as := range sasr.SummaryAllocationSets {
 		if as == nil {
@@ -1146,18 +1221,12 @@ func (sasr *SummaryAllocationSetRange) InsertExternalAllocations(that *Allocatio
 		// Insert each Allocation from the given set
 		thatAS.Each(func(k string, alloc *Allocation) {
 			externalSA := NewSummaryAllocation(alloc, true, true)
+			// This error will be returned below
+			// TODO:CLEANUP should Each have early-error-return functionality?
 			err = sas.Insert(externalSA)
-			if err != nil {
-				// TODO ?
-				log.Errorf("SummaryAllocation: error inserting %s: %s", k, err)
-			}
 		})
 	})
 
 	// err might be nil
 	return err
 }
-
-// TODO Custom MarshalJSON and UnmarshalJSON for these?
-// - Step is uintelligible (microseconds??)
-// - TotalCost would be nice

+ 246 - 168
pkg/kubecost/totals.go

@@ -10,33 +10,37 @@ import (
 	"github.com/patrickmn/go-cache"
 )
 
-// TODO can we use ResourceTotals for all things or do we need specific structs
-// like AllocationResourceTotals, AssetResourceTotals, etc.
-
-type ResourceTotals struct {
-	Start                 time.Time `json:"end"`
-	End                   time.Time `json:"start"`
-	Cluster               string    `json:"cluster,omitempty"`
-	Node                  string    `json:"node,omitempty"`
-	Count                 int       `json:"count"`
-	AttachedVolumeCost    float64   `json:"attachedVolumeCost"`
-	ClusterManagementCost float64   `json:"clusterManagementCost"`
-	CPUCost               float64   `json:"cpuCost"`
-	GPUCost               float64   `json:"gpuCost"`
-	LoadBalancerCost      float64   `json:"loadBalancerCost"`
-	NetworkCost           float64   `json:"networkCost"`
-	PersistentVolumeCost  float64   `json:"persistentVolumeCost"`
-	RAMCost               float64   `json:"ramCost"`
+// AllocationTotals represents aggregate costs of all Allocations for
+// a given cluster or tuple of (cluster, node) between a given start and end
+// time, where the costs are aggregated per-resource. AllocationTotals
+// is designed to be used as a pre-computed intermediate data structure when
+// contextual knowledge is required to carry out a task, but computing totals
+// on-the-fly would be expensive; e.g. idle allocation; sharing coefficients
+// for idle or shared resources, etc.
+type AllocationTotals struct {
+	Start                time.Time `json:"end"`
+	End                  time.Time `json:"start"`
+	Cluster              string    `json:"cluster"`
+	Node                 string    `json:"node"`
+	Count                int       `json:"count"`
+	CPUCost              float64   `json:"cpuCost"`
+	GPUCost              float64   `json:"gpuCost"`
+	LoadBalancerCost     float64   `json:"loadBalancerCost"`
+	NetworkCost          float64   `json:"networkCost"`
+	PersistentVolumeCost float64   `json:"persistentVolumeCost"`
+	RAMCost              float64   `json:"ramCost"`
 }
 
-// TODO Do we need TotalAllocationCost? Maybe just different types?
-
-func (rt *ResourceTotals) TotalCost() float64 {
-	return rt.CPUCost + rt.GPUCost + rt.LoadBalancerCost + rt.AttachedVolumeCost + rt.ClusterManagementCost + rt.NetworkCost + rt.PersistentVolumeCost + rt.RAMCost
+// TotalCost returns the sum of all costs
+func (art *AllocationTotals) TotalCost() float64 {
+	return art.CPUCost + art.GPUCost + art.LoadBalancerCost + art.NetworkCost + art.PersistentVolumeCost + art.RAMCost
 }
 
-func ComputeResourceTotalsFromAllocations(as *AllocationSet, prop string) map[string]*ResourceTotals {
-	rts := map[string]*ResourceTotals{}
+// ComputeAllocationTotals totals the resource costs of the given AllocationSet
+// using the given property, i.e. cluster or node, where "node" really means to
+// use the fully-qualified (cluster, node) tuple.
+func ComputeAllocationTotals(as *AllocationSet, prop string) map[string]*AllocationTotals {
+	arts := map[string]*AllocationTotals{}
 
 	as.Each(func(name string, alloc *Allocation) {
 		// Do not count idle or unmounted allocations
@@ -47,59 +51,75 @@ func ComputeResourceTotalsFromAllocations(as *AllocationSet, prop string) map[st
 		// Default to computing totals by Cluster, but allow override to use Node.
 		key := alloc.Properties.Cluster
 		if prop == AllocationNodeProp {
-			key = alloc.Properties.Node
+			key = fmt.Sprintf("%s/%s", alloc.Properties.Cluster, alloc.Properties.Node)
 		}
 
-		if rt, ok := rts[key]; ok {
-			if rt.Start.After(alloc.Start) {
-				rt.Start = alloc.Start
-			}
-			if rt.End.Before(alloc.End) {
-				rt.End = alloc.End
+		if _, ok := arts[key]; !ok {
+			arts[key] = &AllocationTotals{
+				Start:   alloc.Start,
+				End:     alloc.End,
+				Cluster: alloc.Properties.Cluster,
+				Node:    alloc.Properties.Node,
 			}
+		}
 
-			if rt.Node != alloc.Properties.Node {
-				rt.Node = ""
-			}
+		if arts[key].Start.After(alloc.Start) {
+			arts[key].Start = alloc.Start
+		}
+		if arts[key].End.Before(alloc.End) {
+			arts[key].End = alloc.End
+		}
 
-			rt.Count++
-			rt.CPUCost += alloc.CPUTotalCost()
-			rt.GPUCost += alloc.GPUTotalCost()
-			rt.LoadBalancerCost += alloc.LBTotalCost()
-			rt.NetworkCost += alloc.NetworkTotalCost()
-			rt.PersistentVolumeCost += alloc.PVCost()
-			rt.RAMCost += alloc.RAMTotalCost()
-		} else {
-			rts[key] = &ResourceTotals{
-				Start:                alloc.Start,
-				End:                  alloc.End,
-				Cluster:              alloc.Properties.Cluster,
-				Node:                 alloc.Properties.Node,
-				Count:                1,
-				CPUCost:              alloc.CPUTotalCost(),
-				GPUCost:              alloc.GPUTotalCost(),
-				LoadBalancerCost:     alloc.LBTotalCost(),
-				NetworkCost:          alloc.NetworkTotalCost(),
-				PersistentVolumeCost: alloc.PVCost(),
-				RAMCost:              alloc.RAMTotalCost(),
-			}
+		if arts[key].Node != alloc.Properties.Node {
+			arts[key].Node = ""
 		}
+
+		arts[key].Count++
+		arts[key].CPUCost += alloc.CPUTotalCost()
+		arts[key].GPUCost += alloc.GPUTotalCost()
+		arts[key].LoadBalancerCost += alloc.LBTotalCost()
+		arts[key].NetworkCost += alloc.NetworkTotalCost()
+		arts[key].PersistentVolumeCost += alloc.PVCost()
+		arts[key].RAMCost += alloc.RAMTotalCost()
 	})
 
-	// TODO clean up
-	total := 0.0
-	for _, rt := range rts {
-		total += rt.TotalCost()
-	}
-	log.Infof("ResourceTotals: recorded %.4f over %d %ss for %s", total, len(rts), prop, as.Window)
+	return arts
+}
 
-	return rts
+// AssetTotals represents aggregate costs of all Assets for a given
+// cluster or tuple of (cluster, node) between a given start and end time,
+// where the costs are aggregated per-resource. AssetTotals is designed
+// to be used as a pre-computed intermediate data structure when contextual
+// knowledge is required to carry out a task, but computing totals on-the-fly
+// would be expensive; e.g. idle allocation, shared tenancy costs
+type AssetTotals struct {
+	Start                 time.Time `json:"end"`
+	End                   time.Time `json:"start"`
+	Cluster               string    `json:"cluster"`
+	Node                  string    `json:"node"`
+	Count                 int       `json:"count"`
+	AttachedVolumeCost    float64   `json:"attachedVolumeCost"`
+	ClusterManagementCost float64   `json:"clusterManagementCost"`
+	CPUCost               float64   `json:"cpuCost"`
+	GPUCost               float64   `json:"gpuCost"`
+	PersistentVolumeCost  float64   `json:"persistentVolumeCost"`
+	RAMCost               float64   `json:"ramCost"`
 }
 
-func ComputeResourceTotalsFromAssets(as *AssetSet, prop AssetProperty) map[string]*ResourceTotals {
-	rts := map[string]*ResourceTotals{}
+// TotalCost returns the sum of all costs
+func (art *AssetTotals) TotalCost() float64 {
+	return art.AttachedVolumeCost + art.ClusterManagementCost + art.CPUCost + art.GPUCost + art.PersistentVolumeCost + art.RAMCost
+}
+
+// ComputeAssetTotals totals the resource costs of the given AssetSet,
+// using the given property, i.e. cluster or node, where "node" really means to
+// use the fully-qualified (cluster, node) tuple.
+// TODO summary: should we be capturing load balancers here?
+func ComputeAssetTotals(as *AssetSet, prop AssetProperty) map[string]*AssetTotals {
+	arts := map[string]*AssetTotals{}
 
-	// TODO comment
+	// Attached disks are tracked by matching their name with the name of the
+	// node, as is standard for attached disks.
 	nodeNames := map[string]bool{}
 	disks := map[string]*Disk{}
 
@@ -108,13 +128,11 @@ func ComputeResourceTotalsFromAssets(as *AssetSet, prop AssetProperty) map[strin
 			// Default to computing totals by Cluster, but allow override to use Node.
 			key := node.Properties().Cluster
 			if prop == AssetNodeProp {
-				key = node.Properties().Name
-			}
+				key = fmt.Sprintf("%s/%s", node.Properties().Cluster, node.Properties().Name)
 
-			// Add node name to list of node names, but only if aggregating
-			// by node. (These are to be used later for attached volumes.)
-			if prop == AssetNodeProp {
-				nodeNames[node.Properties().Name] = true
+				// Add node name to list of node names, but only if aggregating
+				// by node. (These are to be used later for attached volumes.)
+				nodeNames[key] = true
 			}
 
 			// adjustmentRate is used to scale resource costs proportionally
@@ -130,7 +148,7 @@ func ComputeResourceTotalsFromAssets(as *AssetSet, prop AssetProperty) map[strin
 				// the entire node cost and we should make everything 0
 				// without dividing by 0.
 				adjustmentRate = 0.0
-				log.DedupedWarningf(5, "ComputeResourceTotals: node cost adjusted to $0.00 for %s", node.Properties().Name)
+				log.DedupedWarningf(5, "ComputeTotals: node cost adjusted to $0.00 for %s", node.Properties().Name)
 			} else if node.Adjustment() != 0.0 {
 				// adjustmentRate is the ratio of cost-with-adjustment (i.e. TotalCost)
 				// to cost-without-adjustment (i.e. TotalCost - Adjustment).
@@ -141,97 +159,76 @@ func ComputeResourceTotalsFromAssets(as *AssetSet, prop AssetProperty) map[strin
 			gpuCost := node.GPUCost * (1.0 - node.Discount) * adjustmentRate
 			ramCost := node.RAMCost * (1.0 - node.Discount) * adjustmentRate
 
-			if rt, ok := rts[key]; ok {
-				if rt.Start.After(node.Start()) {
-					rt.Start = node.Start()
-				}
-				if rt.End.Before(node.End()) {
-					rt.End = node.End()
-				}
-
-				if rt.Node != node.Properties().Name {
-					rt.Node = ""
-				}
-
-				rt.Count++
-				rt.CPUCost += cpuCost
-				rt.RAMCost += ramCost
-				rt.GPUCost += gpuCost
-			} else {
-				rts[key] = &ResourceTotals{
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
 					Start:   node.Start(),
 					End:     node.End(),
 					Cluster: node.Properties().Cluster,
 					Node:    node.Properties().Name,
-					Count:   1,
-					CPUCost: cpuCost,
-					RAMCost: ramCost,
-					GPUCost: gpuCost,
 				}
 			}
+
+			if arts[key].Start.After(node.Start()) {
+				arts[key].Start = node.Start()
+			}
+			if arts[key].End.Before(node.End()) {
+				arts[key].End = node.End()
+			}
+
+			if arts[key].Node != node.Properties().Name {
+				arts[key].Node = ""
+			}
+
+			arts[key].Count++
+			arts[key].CPUCost += cpuCost
+			arts[key].RAMCost += ramCost
+			arts[key].GPUCost += gpuCost
 		} else if disk, ok := asset.(*Disk); ok && prop == AssetNodeProp {
 			// Only record attached disks when prop is Node
-			disks[disk.Properties().Name] = disk
+			// TODO summary: why?
+			key := fmt.Sprintf("%s/%s", disk.Properties().Cluster, disk.Properties().Name)
+			disks[key] = disk
 		} else if cm, ok := asset.(*ClusterManagement); ok && prop == AssetClusterProp {
-			// TODO ?
 			// Only record cluster management when prop is Cluster because we
 			// can't break down ClusterManagement by node.
 			key := cm.Properties().Cluster
 
-			if _, ok := rts[key]; !ok {
-				rts[key] = &ResourceTotals{
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
 					Start:   cm.Start(),
 					End:     cm.End(),
 					Cluster: cm.Properties().Cluster,
 				}
 			}
 
-			rts[key].Count++
-			rts[key].ClusterManagementCost += cm.TotalCost()
+			arts[key].Count++
+			arts[key].ClusterManagementCost += cm.TotalCost()
 		}
 	})
 
 	// Identify attached volumes as disks with names matching a node's name
-	for name := range nodeNames {
-		if disk, ok := disks[name]; ok {
-			// Default to computing totals by Cluster, but allow override to use Node.
-			key := disk.Properties().Cluster
-			if prop == AssetNodeProp {
-				key = disk.Properties().Name
-			}
-
-			if key == "" {
-				// TODO ?
-				log.Warningf("ResourceTotals: disk missing key: %s", disk.Properties().Name)
-			}
-
-			if _, ok := rts[key]; !ok {
-				rts[key] = &ResourceTotals{
+	for key := range nodeNames {
+		if disk, ok := disks[key]; ok {
+			if _, ok := arts[key]; !ok {
+				arts[key] = &AssetTotals{
 					Start:   disk.Start(),
 					End:     disk.End(),
 					Cluster: disk.Properties().Cluster,
-					Node:    name,
+					Node:    disk.Properties().Name,
 				}
 			}
 
-			rts[key].Count++
-			rts[key].AttachedVolumeCost += disk.TotalCost()
+			arts[key].Count++
+			arts[key].AttachedVolumeCost += disk.TotalCost()
 		}
 	}
 
-	// TODO clean up
-	total := 0.0
-	for _, rt := range rts {
-		total += rt.TotalCost()
-	}
-	log.Infof("ResourceTotals: recorded %.4f over %d %ss for %s", total, len(rts), prop, as.Window)
-
-	return rts
+	return arts
 }
 
 // ComputeIdleCoefficients returns the idle coefficients for CPU, GPU, and RAM
 // (in that order) for the given resource costs and totals.
-func ComputeIdleCoefficients(shareSplit, key string, cpuCost, gpuCost, ramCost float64, allocationTotals map[string]*ResourceTotals) (float64, float64, float64) {
+func ComputeIdleCoefficients(shareSplit, key string, cpuCost, gpuCost, ramCost float64, allocationTotals map[string]*AllocationTotals) (float64, float64, float64) {
 	if shareSplit == ShareNone {
 		return 0.0, 0.0, 0.0
 	}
@@ -266,97 +263,178 @@ func ComputeIdleCoefficients(shareSplit, key string, cpuCost, gpuCost, ramCost f
 	return cpuCoeff, gpuCoeff, ramCoeff
 }
 
-type ResourceTotalsStore interface {
-	GetResourceTotalsByCluster(start, end time.Time) map[string]*ResourceTotals
-	GetResourceTotalsByNode(start, end time.Time) map[string]*ResourceTotals
-	SetResourceTotalsByCluster(start, end time.Time, rts map[string]*ResourceTotals)
-	SetResourceTotalsByNode(start, end time.Time, rts map[string]*ResourceTotals)
+// TotalsStore acts as both an AllocationTotalsStore and an
+// AssetTotalsStore.
+type TotalsStore interface {
+	AllocationTotalsStore
+	AssetTotalsStore
 }
 
-func UpdateResourceTotalsStoreFromAllocationSet(rts ResourceTotalsStore, as *AllocationSet) error {
-	if rts == nil {
-		return errors.New("cannot update resource totals from AllocationSet for nil ResourceTotalsStore")
+// AllocationTotalsStore allows for storing (i.e. setting and
+// getting) AllocationTotals by cluster and by node.
+type AllocationTotalsStore interface {
+	GetAllocationTotalsByCluster(start, end time.Time) (map[string]*AllocationTotals, bool)
+	GetAllocationTotalsByNode(start, end time.Time) (map[string]*AllocationTotals, bool)
+	SetAllocationTotalsByCluster(start, end time.Time, rts map[string]*AllocationTotals)
+	SetAllocationTotalsByNode(start, end time.Time, rts map[string]*AllocationTotals)
+}
+
+// UpdateAllocationTotalsStore updates an AllocationTotalsStore
+// by totaling the given AllocationSet and saving the totals.
+func UpdateAllocationTotalsStore(arts AllocationTotalsStore, as *AllocationSet) error {
+	if arts == nil {
+		return errors.New("cannot update nil AllocationTotalsStore")
 	}
 
 	if as == nil {
-		return errors.New("cannot update resource totals from AllocationSet for nil AllocationSet")
+		return errors.New("cannot update AllocationTotalsStore from nil AllocationSet")
+	}
+
+	if as.Window.IsOpen() {
+		return errors.New("cannot update AllocationTotalsStore from AllocationSet with open window")
 	}
 
 	start := *as.Window.Start()
 	end := *as.Window.End()
 
-	rtsByCluster := ComputeResourceTotalsFromAllocations(as, AllocationClusterProp)
-	rts.SetResourceTotalsByCluster(start, end, rtsByCluster)
+	artsByCluster := ComputeAllocationTotals(as, AllocationClusterProp)
+	arts.SetAllocationTotalsByCluster(start, end, artsByCluster)
 
-	rtsByNode := ComputeResourceTotalsFromAllocations(as, AllocationNodeProp)
-	rts.SetResourceTotalsByNode(start, end, rtsByNode)
+	artsByNode := ComputeAllocationTotals(as, AllocationNodeProp)
+	arts.SetAllocationTotalsByNode(start, end, artsByNode)
 
-	log.Infof("ETL: Allocation: updated resource totals: %s", as.Window)
+	log.Infof("ETL: Allocation: updated resource totals for %s", as.Window)
 
 	return nil
 }
 
-func UpdateResourceTotalsStoreFromAssetSet(rts ResourceTotalsStore, as *AssetSet) error {
-	if rts == nil {
-		return errors.New("cannot update resource totals from AssetSet for nil ResourceTotalsStore")
+// AssetTotalsStore allows for storing (i.e. setting and getting)
+// AssetTotals by cluster and by node.
+type AssetTotalsStore interface {
+	GetAssetTotalsByCluster(start, end time.Time) (map[string]*AssetTotals, bool)
+	GetAssetTotalsByNode(start, end time.Time) (map[string]*AssetTotals, bool)
+	SetAssetTotalsByCluster(start, end time.Time, rts map[string]*AssetTotals)
+	SetAssetTotalsByNode(start, end time.Time, rts map[string]*AssetTotals)
+}
+
+// UpdateAssetTotalsStore updates an AssetTotalsStore
+// by totaling the given AssetSet and saving the totals.
+func UpdateAssetTotalsStore(arts AssetTotalsStore, as *AssetSet) error {
+	if arts == nil {
+		return errors.New("cannot update nil AssetTotalsStore")
 	}
 
 	if as == nil {
-		return errors.New("cannot update resource totals from AssetSet for nil AssetSet")
+		return errors.New("cannot update AssetTotalsStore from nil AssetSet")
+	}
+
+	if as.Window.IsOpen() {
+		return errors.New("cannot update AssetTotalsStore from AssetSet with open window")
 	}
 
 	start := *as.Window.Start()
 	end := *as.Window.End()
 
-	rtsByCluster := ComputeResourceTotalsFromAssets(as, AssetClusterProp)
-	rts.SetResourceTotalsByCluster(start, end, rtsByCluster)
+	artsByCluster := ComputeAssetTotals(as, AssetClusterProp)
+	arts.SetAssetTotalsByCluster(start, end, artsByCluster)
 
-	rtsByNode := ComputeResourceTotalsFromAssets(as, AssetNodeProp)
-	rts.SetResourceTotalsByNode(start, end, rtsByNode)
+	artsByNode := ComputeAssetTotals(as, AssetNodeProp)
+	arts.SetAssetTotalsByNode(start, end, artsByNode)
 
-	log.Infof("ETL: Asset: updated resource totals: %s", as.Window)
+	log.Infof("ETL: Asset: updated resource totals for %s", as.Window)
 
 	return nil
 }
 
-type MemoryResourceTotalsStore struct {
-	byCluster *cache.Cache
-	byNode    *cache.Cache
+// MemoryTotalsStore is an in-memory cache TotalsStore
+type MemoryTotalsStore struct {
+	allocTotalsByCluster *cache.Cache
+	allocTotalsByNode    *cache.Cache
+	assetTotalsByCluster *cache.Cache
+	assetTotalsByNode    *cache.Cache
+}
+
+// NewMemoryTotalsStore instantiates a new MemoryTotalsStore,
+// which is composed of four in-memory caches.
+func NewMemoryTotalsStore() *MemoryTotalsStore {
+	return &MemoryTotalsStore{
+		allocTotalsByCluster: cache.New(cache.NoExpiration, cache.NoExpiration),
+		allocTotalsByNode:    cache.New(cache.NoExpiration, cache.NoExpiration),
+		assetTotalsByCluster: cache.New(cache.NoExpiration, cache.NoExpiration),
+		assetTotalsByNode:    cache.New(cache.NoExpiration, cache.NoExpiration),
+	}
 }
 
-func NewResourceTotalsStore() *MemoryResourceTotalsStore {
-	return &MemoryResourceTotalsStore{
-		byCluster: cache.New(cache.NoExpiration, cache.NoExpiration),
-		byNode:    cache.New(cache.NoExpiration, cache.NoExpiration),
+// GetAllocationTotalsByCluster retrieves the AllocationTotals
+// by cluster for the given start and end times.
+func (mts *MemoryTotalsStore) GetAllocationTotalsByCluster(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.allocTotalsByCluster.Get(k); ok {
+		return raw.(map[string]*AllocationTotals), true
+	} else {
+		return map[string]*AllocationTotals{}, false
 	}
 }
 
-func (mrts *MemoryResourceTotalsStore) GetResourceTotalsByCluster(start time.Time, end time.Time) map[string]*ResourceTotals {
+// GetAllocationTotalsByNode retrieves the AllocationTotals
+// by node for the given start and end times.
+func (mts *MemoryTotalsStore) GetAllocationTotalsByNode(start time.Time, end time.Time) (map[string]*AllocationTotals, bool) {
+	k := storeKey(start, end)
+	if raw, ok := mts.allocTotalsByNode.Get(k); ok {
+		return raw.(map[string]*AllocationTotals), true
+	} else {
+		return map[string]*AllocationTotals{}, false
+	}
+}
+
+// SetAllocationTotalsByCluster sets the per-cluster AllocationTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAllocationTotalsByCluster(start time.Time, end time.Time, arts map[string]*AllocationTotals) {
+	k := storeKey(start, end)
+	mts.allocTotalsByCluster.Set(k, arts, cache.NoExpiration)
+}
+
+// SetAllocationTotalsByNode sets the per-node AllocationTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAllocationTotalsByNode(start time.Time, end time.Time, arts map[string]*AllocationTotals) {
+	k := storeKey(start, end)
+	mts.allocTotalsByNode.Set(k, arts, cache.NoExpiration)
+}
+
+// GetAssetTotalsByCluster retrieves the AssetTotals
+// by cluster for the given start and end times.
+func (mts *MemoryTotalsStore) GetAssetTotalsByCluster(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mrts.byCluster.Get(k); ok {
-		return raw.(map[string]*ResourceTotals)
+	if raw, ok := mts.assetTotalsByCluster.Get(k); ok {
+		return raw.(map[string]*AssetTotals), true
 	} else {
-		return nil
+		return map[string]*AssetTotals{}, false
 	}
 }
 
-func (mrts *MemoryResourceTotalsStore) GetResourceTotalsByNode(start time.Time, end time.Time) map[string]*ResourceTotals {
+// GetAssetTotalsByNode retrieves the AssetTotals
+// by node for the given start and end times.
+func (mts *MemoryTotalsStore) GetAssetTotalsByNode(start time.Time, end time.Time) (map[string]*AssetTotals, bool) {
 	k := storeKey(start, end)
-	if raw, ok := mrts.byNode.Get(k); ok {
-		return raw.(map[string]*ResourceTotals)
+	if raw, ok := mts.assetTotalsByNode.Get(k); ok {
+		return raw.(map[string]*AssetTotals), true
 	} else {
-		return nil
+		return map[string]*AssetTotals{}, false
 	}
 }
 
-func (mrts *MemoryResourceTotalsStore) SetResourceTotalsByCluster(start time.Time, end time.Time, rts map[string]*ResourceTotals) {
+// SetAssetTotalsByCluster sets the per-cluster AssetTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAssetTotalsByCluster(start time.Time, end time.Time, arts map[string]*AssetTotals) {
 	k := storeKey(start, end)
-	mrts.byCluster.Set(k, rts, cache.NoExpiration)
+	mts.assetTotalsByCluster.Set(k, arts, cache.NoExpiration)
 }
 
-func (mrts *MemoryResourceTotalsStore) SetResourceTotalsByNode(start time.Time, end time.Time, rts map[string]*ResourceTotals) {
+// SetAssetTotalsByNode sets the per-node AssetTotals
+// to the given values for the given start and end times.
+func (mts *MemoryTotalsStore) SetAssetTotalsByNode(start time.Time, end time.Time, arts map[string]*AssetTotals) {
 	k := storeKey(start, end)
-	mrts.byNode.Set(k, rts, cache.NoExpiration)
+	mts.assetTotalsByNode.Set(k, arts, cache.NoExpiration)
 }
 
 // storeKey creates a storage key based on start and end times