Explorar o código

Move pkg/kubecost and pkg/utils

Niko Kovacevic hai 5 anos
pai
achega
92cb10a106

+ 46 - 0
pkg/costmodel/aggregation.go

@@ -0,0 +1,46 @@
+package costmodel
+
+import (
+	"github.com/kubecost/cost-model/pkg/kubecost"
+	"github.com/kubecost/cost-model/pkg/util"
+)
+
// Aggregation is a set of costs and resource allocations grouped under a
// single aggregation key (e.g. a namespace, label value, or cluster).
// Fields tagged `json:"-"` are intermediate working data (raw vectors and
// running totals) used while building the aggregation and are not serialized.
type Aggregation struct {
	Aggregator                 string               `json:"aggregation"` // the property aggregated by (e.g. "namespace")
	Subfields                  []string             `json:"subfields,omitempty"`
	Environment                string               `json:"environment"`
	Cluster                    string               `json:"cluster,omitempty"`
	Properties                 *kubecost.Properties `json:"-"`
	CPUAllocationHourlyAverage float64              `json:"cpuAllocationAverage"`
	CPUAllocationVectors       []*util.Vector       `json:"-"`
	CPUAllocationTotal         float64              `json:"-"`
	CPUCost                    float64              `json:"cpuCost"`
	CPUCostVector              []*util.Vector       `json:"cpuCostVector,omitempty"`
	CPUEfficiency              float64              `json:"cpuEfficiency"`
	CPURequestedVectors        []*util.Vector       `json:"-"`
	CPUUsedVectors             []*util.Vector       `json:"-"`
	Efficiency                 float64              `json:"efficiency"`
	GPUAllocationHourlyAverage float64              `json:"gpuAllocationAverage"`
	GPUAllocationVectors       []*util.Vector       `json:"-"`
	GPUCost                    float64              `json:"gpuCost"`
	GPUCostVector              []*util.Vector       `json:"gpuCostVector,omitempty"`
	GPUAllocationTotal         float64              `json:"-"`
	RAMAllocationHourlyAverage float64              `json:"ramAllocationAverage"`
	RAMAllocationVectors       []*util.Vector       `json:"-"`
	RAMAllocationTotal         float64              `json:"-"`
	RAMCost                    float64              `json:"ramCost"`
	RAMCostVector              []*util.Vector       `json:"ramCostVector,omitempty"`
	RAMEfficiency              float64              `json:"ramEfficiency"`
	RAMRequestedVectors        []*util.Vector       `json:"-"`
	RAMUsedVectors             []*util.Vector       `json:"-"`
	PVAllocationHourlyAverage  float64              `json:"pvAllocationAverage"`
	PVAllocationVectors        []*util.Vector       `json:"-"`
	PVAllocationTotal          float64              `json:"-"`
	PVCost                     float64              `json:"pvCost"`
	PVCostVector               []*util.Vector       `json:"pvCostVector,omitempty"`
	NetworkCost                float64              `json:"networkCost"`
	NetworkCostVector          []*util.Vector       `json:"networkCostVector,omitempty"`
	SharedCost                 float64              `json:"sharedCost"`
	TotalCost                  float64              `json:"totalCost"`
	TotalCostVector            []*util.Vector       `json:"totalCostVector,omitempty"`
}

+ 1444 - 0
pkg/kubecost/allocation.go

@@ -0,0 +1,1444 @@
+package kubecost
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
// Special name markers and sharing strategies used throughout allocation
// aggregation. The suffixes are embedded in Allocation names; the Share*
// values select how shareable costs are distributed.
const (
	// IdleSuffix indicates an idle allocation property
	IdleSuffix = "__idle__"

	// SharedSuffix indicates an shared allocation property
	SharedSuffix = "__shared__"

	// UnallocatedSuffix indicates an unallocated allocation property
	UnallocatedSuffix = "__unallocated__"

	// ShareWeighted indicates that a shared resource should be shared as a
	// proportion of the cost of the remaining allocations.
	ShareWeighted = "__weighted__"

	// ShareEven indicates that a shared resource should be shared evenly across
	// all remaining allocations.
	ShareEven = "__even__"

	// ShareNone indicates that a shareable resource should not be shared
	ShareNone = "__none__"
)
+
// Allocation is a unit of resource allocation and cost for a given window
// of time and for a given kubernetes construct with its associated set of
// properties. Resource usage is expressed as resource-hours (core-hours,
// byte-hours); costs are summarized per resource plus shared and total.
type Allocation struct {
	Name       string     `json:"name"`
	Properties Properties `json:"properties,omitempty"` // nil once aggregated (see IsAggregated)
	Start      time.Time  `json:"start"`
	End        time.Time  `json:"end"`
	Minutes    float64    `json:"minutes"`
	// ActiveStart is tracked separately from Start and is not serialized;
	// add() keeps the earliest ActiveStart when combining allocations.
	ActiveStart     time.Time `json:"-"`
	CPUCoreHours    float64   `json:"cpuCoreHours"`
	CPUCost         float64   `json:"cpuCost"`
	CPUEfficiency   float64   `json:"cpuEfficiency"`
	GPUHours        float64   `json:"gpuHours"`
	GPUCost         float64   `json:"gpuCost"`
	NetworkCost     float64   `json:"networkCost"`
	PVByteHours     float64   `json:"pvByteHours"`
	PVCost          float64   `json:"pvCost"`
	RAMByteHours    float64   `json:"ramByteHours"`
	RAMCost         float64   `json:"ramCost"`
	RAMEfficiency   float64   `json:"ramEfficiency"`
	SharedCost      float64   `json:"sharedCost"`
	TotalCost       float64   `json:"totalCost"`
	TotalEfficiency float64   `json:"totalEfficiency"`
	// Profiler        *log.Profiler `json:"-"`
}
+
// AllocationMatchFunc is a function that can be used to match Allocations by
// returning true for any given Allocation if a condition is met. Used for
// both filtering (FilterFuncs) and sharing (ShareFuncs) in AggregateBy.
type AllocationMatchFunc func(*Allocation) bool
+
+// Add returns the result of summing the two given Allocations, which sums the
+// summary fields (e.g. costs, resources) and recomputes efficiency. Neither of
+// the two original Allocations are mutated in the process.
+func (a *Allocation) Add(that *Allocation) (*Allocation, error) {
+	if a == nil {
+		return that.Clone(), nil
+	}
+
+	if !a.Start.Equal(that.Start) || !a.End.Equal(that.End) {
+		return nil, fmt.Errorf("error adding Allocations: mismatched windows")
+	}
+
+	agg := a.Clone()
+	// agg.Profiler = a.Profiler
+	agg.add(that, false, false)
+
+	return agg, nil
+}
+
+// Clone returns a deep copy of the given Allocation
+func (a *Allocation) Clone() *Allocation {
+	if a == nil {
+		return nil
+	}
+
+	return &Allocation{
+		Name:            a.Name,
+		Properties:      a.Properties.Clone(),
+		Start:           a.Start,
+		End:             a.End,
+		Minutes:         a.Minutes,
+		ActiveStart:     a.ActiveStart,
+		CPUCoreHours:    a.CPUCoreHours,
+		CPUCost:         a.CPUCost,
+		CPUEfficiency:   a.CPUEfficiency,
+		GPUHours:        a.GPUHours,
+		GPUCost:         a.GPUCost,
+		NetworkCost:     a.NetworkCost,
+		PVByteHours:     a.PVByteHours,
+		PVCost:          a.PVCost,
+		RAMByteHours:    a.RAMByteHours,
+		RAMCost:         a.RAMCost,
+		RAMEfficiency:   a.RAMEfficiency,
+		SharedCost:      a.SharedCost,
+		TotalCost:       a.TotalCost,
+		TotalEfficiency: a.TotalEfficiency,
+	}
+}
+
+func (a *Allocation) Equal(that *Allocation) bool {
+	if a == nil || that == nil {
+		return false
+	}
+
+	if a.Name != that.Name {
+		return false
+	}
+	if !a.Start.Equal(that.Start) {
+		return false
+	}
+	if !a.End.Equal(that.End) {
+		return false
+	}
+	if a.Minutes != that.Minutes {
+		return false
+	}
+	if !a.ActiveStart.Equal(that.ActiveStart) {
+		return false
+	}
+	if a.CPUCoreHours != that.CPUCoreHours {
+		return false
+	}
+	if a.CPUCost != that.CPUCost {
+		return false
+	}
+	if a.CPUEfficiency != that.CPUEfficiency {
+		return false
+	}
+	if a.GPUHours != that.GPUHours {
+		return false
+	}
+	if a.GPUCost != that.GPUCost {
+		return false
+	}
+	if a.NetworkCost != that.NetworkCost {
+		return false
+	}
+	if a.PVByteHours != that.PVByteHours {
+		return false
+	}
+	if a.PVCost != that.PVCost {
+		return false
+	}
+	if a.RAMByteHours != that.RAMByteHours {
+		return false
+	}
+	if a.RAMCost != that.RAMCost {
+		return false
+	}
+	if a.RAMEfficiency != that.RAMEfficiency {
+		return false
+	}
+	if a.SharedCost != that.SharedCost {
+		return false
+	}
+	if a.TotalCost != that.TotalCost {
+		return false
+	}
+	if a.TotalEfficiency != that.TotalEfficiency {
+		return false
+	}
+	if !a.Properties.Equal(&that.Properties) {
+		return false
+	}
+
+	return true
+}
+
// Resolution returns the duration of time covered by the Allocation,
// computed as End minus Start.
func (a *Allocation) Resolution() time.Duration {
	return a.End.Sub(a.Start)
}
+
// IsAggregated is true if the given Allocation has been aggregated, which we
// define by a lack of Properties. (add() sets Properties to nil when
// combining allocations, so this nil check identifies aggregated results;
// a nil Allocation is also considered aggregated.)
func (a *Allocation) IsAggregated() bool {
	return a == nil || a.Properties == nil
}
+
// IsIdle is true if the given Allocation represents idle costs.
// Contains (rather than an exact or suffix match) is used because idle
// allocation names may embed IdleSuffix within a larger name — TODO confirm
// the exact naming scheme used at insertion.
func (a *Allocation) IsIdle() bool {
	return strings.Contains(a.Name, IdleSuffix)
}
+
// IsUnallocated is true if the given Allocation represents unallocated costs,
// detected by the presence of UnallocatedSuffix anywhere in the name.
func (a *Allocation) IsUnallocated() bool {
	return strings.Contains(a.Name, UnallocatedSuffix)
}
+
// MatchesFilter returns true if the Allocation passes the given AllocationFilter.
// This is a thin convenience wrapper that applies f to the receiver.
func (a *Allocation) MatchesFilter(f AllocationMatchFunc) bool {
	return f(a)
}
+
+// MatchesAll takes a variadic list of Properties, returning true iff the
+// Allocation matches each set of Properties.
+func (a *Allocation) MatchesAll(ps ...Properties) bool {
+	// nil Allocation don't match any Properties
+	if a == nil {
+		return false
+	}
+
+	for _, p := range ps {
+		if !a.Properties.Matches(p) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// MatchesOne takes a variadic list of Properties, returning true iff the
+// Allocation matches at least one of the set of Properties.
+func (a *Allocation) MatchesOne(ps ...Properties) bool {
+	// nil Allocation don't match any Properties
+	if a == nil {
+		return false
+	}
+
+	for _, p := range ps {
+		if a.Properties.Matches(p) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Share works like Add, but converts the entire cost of the given Allocation
+// to SharedCost, rather than adding to the individual resource costs.
+func (a *Allocation) Share(that *Allocation) (*Allocation, error) {
+	if a == nil {
+		return that.Clone(), nil
+	}
+
+	if !a.Start.Equal(that.Start) {
+		return nil, fmt.Errorf("mismatched start time: expected %s, received %s", a.Start, that.Start)
+	}
+	if !a.End.Equal(that.End) {
+		return nil, fmt.Errorf("mismatched start time: expected %s, received %s", a.End, that.End)
+	}
+
+	agg := a.Clone()
+	agg.add(that, true, false)
+
+	return agg, nil
+}
+
// String represents the given Allocation as a string composed of its name,
// its window (Start to End), and its total cost rounded to two decimals.
func (a *Allocation) String() string {
	return fmt.Sprintf("%s%s=%.2f", a.Name, NewWindow(&a.Start, &a.End), a.TotalCost)
}
+
+func (a *Allocation) add(that *Allocation, isShared, isAccumulating bool) {
+	if a == nil {
+		a = that
+
+		// reset properties
+		thatCluster, _ := that.Properties.GetCluster()
+		thatNode, _ := that.Properties.GetNode()
+		a.Properties = Properties{ClusterProp: thatCluster, NodeProp: thatNode}
+
+		return
+	}
+
+	aCluster, _ := a.Properties.GetCluster()
+	thatCluster, _ := that.Properties.GetCluster()
+	aNode, _ := a.Properties.GetNode()
+	thatNode, _ := that.Properties.GetNode()
+
+	// reset properties
+	a.Properties = nil
+
+	// ensure that we carry cluster ID and/or node over if they're the same
+	// required for idle/shared cost allocation
+	if aCluster == thatCluster {
+		a.Properties = Properties{ClusterProp: aCluster}
+	}
+	if aNode == thatNode {
+		if a.Properties == nil {
+			a.Properties = Properties{NodeProp: aNode}
+		} else {
+			a.Properties.SetNode(aNode)
+		}
+	}
+
+	if that.ActiveStart.Before(a.ActiveStart) {
+		a.ActiveStart = that.ActiveStart
+	}
+
+	if isAccumulating {
+		if a.Start.After(that.Start) {
+			a.Start = that.Start
+		}
+
+		if a.End.Before(that.End) {
+			a.End = that.End
+		}
+
+		a.Minutes += that.Minutes
+	} else if that.Minutes > a.Minutes {
+		a.Minutes = that.Minutes
+	}
+
+	// isShared determines whether the given allocation should be spread evenly
+	// across resources (e.g. sharing idle allocation) or lumped into a shared
+	// cost category (e.g. sharing namespace, labels).
+	if isShared {
+		a.SharedCost += that.TotalCost
+	} else {
+		a.CPUCoreHours += that.CPUCoreHours
+		a.GPUHours += that.GPUHours
+		a.RAMByteHours += that.RAMByteHours
+		a.PVByteHours += that.PVByteHours
+
+		aggCPUCost := a.CPUCost + that.CPUCost
+		if aggCPUCost > 0 {
+			a.CPUEfficiency = (a.CPUEfficiency*a.CPUCost + that.CPUEfficiency*that.CPUCost) / aggCPUCost
+		} else {
+			a.CPUEfficiency = 0.0
+		}
+
+		aggRAMCost := a.RAMCost + that.RAMCost
+		if aggRAMCost > 0 {
+			a.RAMEfficiency = (a.RAMEfficiency*a.RAMCost + that.RAMEfficiency*that.RAMCost) / aggRAMCost
+		} else {
+			a.RAMEfficiency = 0.0
+		}
+
+		aggTotalCost := a.TotalCost + that.TotalCost
+		if aggTotalCost > 0 {
+			a.TotalEfficiency = (a.TotalEfficiency*a.TotalCost + that.TotalEfficiency*that.TotalCost) / aggTotalCost
+		} else {
+			aggTotalCost = 0.0
+		}
+
+		a.SharedCost += that.SharedCost
+		a.CPUCost += that.CPUCost
+		a.GPUCost += that.GPUCost
+		a.NetworkCost += that.NetworkCost
+		a.RAMCost += that.RAMCost
+		a.PVCost += that.PVCost
+	}
+
+	a.TotalCost += that.TotalCost
+}
+
// AllocationSet stores a set of Allocations, each with a unique name, that share
// a window. An AllocationSet is mutable, so treat it like a threadsafe map.
type AllocationSet struct {
	sync.RWMutex
	// Profiler    *log.Profiler
	allocations map[string]*Allocation // keyed by Allocation.Name
	idleKeys    map[string]bool        // names of idle allocations currently in the set
	Window      Window
}
+
+// NewAllocationSet instantiates a new AllocationSet and, optionally, inserts
+// the given list of Allocations
+func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSet {
+	as := &AllocationSet{
+		allocations: map[string]*Allocation{},
+		Window:      NewWindow(&start, &end),
+	}
+
+	for _, a := range allocs {
+		as.Insert(a)
+	}
+
+	return as
+}
+
// AllocationAggregationOptions provide advanced functionality to AggregateBy, including
// filtering results and sharing allocations. FilterFuncs are a list of match
// functions such that, if any function fails, the allocation is ignored.
// ShareFuncs are a list of match functions such that, if any function
// succeeds, the allocation is marked as a shared resource. ShareIdle is a
// simple flag for sharing idle resources.
type AllocationAggregationOptions struct {
	FilterFuncs       []AllocationMatchFunc // allocation is dropped if ANY filter returns false
	SplitIdle         bool                  // if false, remaining idle allocations are merged under IdleSuffix
	MergeUnallocated  bool                  // if true, unallocated allocations are renamed to UnallocatedSuffix
	ShareFuncs        []AllocationMatchFunc // allocation is shared if ANY func returns true
	ShareIdle         string                // ShareEven, ShareWeighted, or ShareNone
	ShareSplit        string                // ShareEven or ShareWeighted, for non-idle shared costs
	SharedHourlyCosts map[string]float64    // named external hourly costs to spread across the set
}
+
// AggregateBy aggregates the Allocations in the given AllocationSet by the given
// Property. This will only be legal if the AllocationSet is divisible by the
// given Property; e.g. Containers can be divided by Namespace, but not vice-a-versa.
//
// Mutates the receiver: as.allocations is replaced by the aggregated set.
// Returns a non-nil error (leaving the receiver partially mutated) when any
// allocation lacks a required property such as its cluster ID.
func (as *AllocationSet) AggregateBy(properties Properties, options *AllocationAggregationOptions) error {
	// The order of operations for aggregating allocations is as follows:
	// 1. move shared and/or idle allocations to separate sets if options
	//    indicate that they should be shared
	// 2. idle coefficients
	// 2.a) if idle allocation is to be shared, compute idle coefficients
	//      (do not compute shared coefficients here, see step 5)
	// 2.b) if idle allocation is NOT shared, but filters are present, compute
	//      idle filtration coefficients for the purpose of only returning the
	//      portion of idle allocation that would have been shared with the
	//      unfiltered results set. (See unit tests 5.a,b,c)
	// 3. ignore allocation if it fails any of the FilterFuncs
	// 4. generate aggregation key and insert allocation into the output set
	// 5. if there are shared allocations, compute sharing coefficients on
	//    the aggregated set, then share allocation accordingly
	// 6. if the merge idle option is enabled, merge any remaining idle
	//    allocations into a single idle allocation

	// TODO niko/etl revisit (ShareIdle: ShareEven) case, which is probably wrong
	// (and, frankly, ill-defined; i.e. evenly across clusters? within clusters?)

	if options == nil {
		options = &AllocationAggregationOptions{}
	}

	if as.IsEmpty() {
		return nil
	}

	// aggSet will collect the aggregated allocations
	aggSet := &AllocationSet{
		// Profiler: as.Profiler,
		Window: as.Window.Clone(),
	}

	// idleSet will be shared among aggSet after initial aggregation
	// is complete
	idleSet := &AllocationSet{
		// Profiler: as.Profiler,
		Window: as.Window.Clone(),
	}

	// shareSet will be shared among aggSet after initial aggregation
	// is complete
	shareSet := &AllocationSet{
		// Profiler: as.Profiler
		Window: as.Window.Clone(),
	}

	// Convert each external hourly cost into a synthetic shared allocation
	// spanning the set's window.
	for name, cost := range options.SharedHourlyCosts {
		if cost > 0.0 {
			hours := as.Resolution().Hours()

			// If set ends in the future, adjust hours accordingly
			// (diff is negative in that case; time.Since is the idiomatic
			// spelling of time.Now().Sub).
			diff := time.Now().Sub(as.End())
			if diff < 0.0 {
				hours += diff.Hours()
			}

			totalSharedCost := cost * hours

			shareSet.Insert(&Allocation{
				Name:       fmt.Sprintf("%s/%s", name, SharedSuffix),
				Start:      as.Start(),
				End:        as.End(),
				SharedCost: totalSharedCost,
				TotalCost:  totalSharedCost,
			})
		}
	}

	as.Lock()
	defer as.Unlock()

	// Loop and find all of the idle and shared allocations initially. Add
	// them to their respective sets, removing them from the set of
	// allocations to aggregate.
	for _, alloc := range as.allocations {
		cluster, err := alloc.Properties.GetCluster()
		if err != nil {
			log.Warningf("AllocationSet.AggregateBy: missing cluster for allocation: %s", alloc.Name)
			return err
		}

		// Idle allocation doesn't get aggregated, so it can be passed through,
		// whether or not it is shared. If it is shared, it is put in idleSet
		// because shareSet may be split by different rules (even/weighted).
		if alloc.IsIdle() {
			// Can't recursively call Delete() due to lock acquisition
			delete(as.idleKeys, alloc.Name)
			delete(as.allocations, alloc.Name)

			if options.ShareIdle == ShareEven || options.ShareIdle == ShareWeighted {
				idleSet.Insert(alloc)
			} else {
				aggSet.Insert(alloc)
			}
		}

		// NOTE(review): there is no continue after the idle branch above, so
		// an idle allocation that also matches a ShareFunc would be renamed
		// and moved into shareSet as well — confirm this is intended.

		// If any of the share funcs succeed, share the allocation. Do this
		// prior to filtering so that shared namespaces, etc do not get
		// filtered out before we have a chance to share them.
		for _, sf := range options.ShareFuncs {
			if sf(alloc) {
				// Can't recursively call Delete() due to lock acquisition
				delete(as.idleKeys, alloc.Name)
				delete(as.allocations, alloc.Name)

				alloc.Name = fmt.Sprintf("%s/%s", cluster, SharedSuffix)
				shareSet.Insert(alloc)
				break
			}
		}
	}

	// Everything was idle and/or shared: nothing left to aggregate.
	if len(as.allocations) == 0 {
		log.Warningf("ETL: AggregateBy: no allocations to aggregate")
		emptySet := &AllocationSet{
			Window: as.Window.Clone(),
		}
		as.allocations = emptySet.allocations
		return nil
	}

	// In order to correctly apply idle and shared resource coefficients appropriately,
	// we need to determine the coefficients for the full set of data. The ensures that
	// the ratios are maintained through filtering.
	// idleCoefficients are organized by [cluster][allocation][resource]=coeff
	var idleCoefficients map[string]map[string]map[string]float64
	// shareCoefficients are organized by [allocation][resource]=coeff (no cluster)
	var shareCoefficients map[string]float64
	var err error

	if idleSet.Length() > 0 && options.ShareIdle != ShareNone {
		idleCoefficients, err = computeIdleCoeffs(properties, options, as)
		if err != nil {
			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
			return err
		}
	}

	// If we're not sharing idle and we're filtering, we need to track the
	// amount of each idle allocation to "delete" in order to maintain parity
	// with the idle-allocated results. That is, we want to return only the
	// idle cost that would have been shared with the unfiltered portion of
	// the results, not the full idle cost.
	var idleFiltrationCoefficients map[string]map[string]map[string]float64
	if len(options.FilterFuncs) > 0 && options.ShareIdle == ShareNone {
		idleFiltrationCoefficients, err = computeIdleCoeffs(properties, options, as)
		if err != nil {
			log.Warningf("AllocationSet.AggregateBy: compute idle coeff: %s", err)
			return err
		}
	}

	// Main aggregation pass: filter, distribute idle, rekey, and insert.
	for _, alloc := range as.allocations {
		cluster, err := alloc.Properties.GetCluster()
		if err != nil {
			log.Warningf("AllocationSet.AggregateBy: missing cluster for allocation: %s", alloc.Name)
			return err
		}

		skip := false

		// If any of the filter funcs fail, immediately skip the allocation.
		for _, ff := range options.FilterFuncs {
			if !ff(alloc) {
				skip = true
				break
			}
		}
		if skip {
			// If we are tracking idle filtration coefficients, delete the
			// entry corresponding to the filtered allocation. (Deleting the
			// entry will result in that proportional amount being removed
			// from the idle allocation at the end of the process.)
			if idleFiltrationCoefficients != nil {
				if ifcc, ok := idleFiltrationCoefficients[cluster]; ok {
					delete(ifcc, alloc.Name)
				}
			}

			continue
		}

		// Split idle allocations and distribute among aggregated allocations
		// NOTE: if idle allocation is off (i.e. ShareIdle == ShareNone) then all
		// idle allocations will be in the aggSet at this point.
		if idleSet.Length() > 0 {
			// Distribute idle allocations by coefficient per-cluster, per-allocation
			for _, idleAlloc := range idleSet.allocations {
				// Only share idle if the cluster matches; i.e. the allocation
				// is from the same cluster as the idle costs
				idleCluster, err := idleAlloc.Properties.GetCluster()
				if err != nil {
					return err
				}
				if idleCluster != cluster {
					continue
				}

				// Make sure idle coefficients exist
				if _, ok := idleCoefficients[cluster]; !ok {
					log.Errorf("ETL: share (idle) allocation: error getting allocation coefficient [no cluster: '%s' in coefficients] for '%s'", cluster, alloc.Name)
					continue
				}
				if _, ok := idleCoefficients[cluster][alloc.Name]; !ok {
					log.Errorf("ETL: share (idle) allocation: error getting allocation coefficienct for '%s'", alloc.Name)
					continue
				}

				// Apportion idle resource hours and costs by this
				// allocation's per-resource coefficient.
				alloc.CPUCoreHours += idleAlloc.CPUCoreHours * idleCoefficients[cluster][alloc.Name]["cpu"]
				alloc.GPUHours += idleAlloc.GPUHours * idleCoefficients[cluster][alloc.Name]["gpu"]
				alloc.RAMByteHours += idleAlloc.RAMByteHours * idleCoefficients[cluster][alloc.Name]["ram"]

				idleCPUCost := idleAlloc.CPUCost * idleCoefficients[cluster][alloc.Name]["cpu"]
				idleGPUCost := idleAlloc.GPUCost * idleCoefficients[cluster][alloc.Name]["gpu"]
				idleRAMCost := idleAlloc.RAMCost * idleCoefficients[cluster][alloc.Name]["ram"]
				alloc.CPUCost += idleCPUCost
				alloc.GPUCost += idleGPUCost
				alloc.RAMCost += idleRAMCost
				alloc.TotalCost += idleCPUCost + idleGPUCost + idleRAMCost
			}
		}

		key, err := alloc.generateKey(properties)
		if err != nil {
			return err
		}

		alloc.Name = key
		if options.MergeUnallocated && alloc.IsUnallocated() {
			alloc.Name = UnallocatedSuffix
		}

		aggSet.Insert(alloc)
	}

	// Collapse per-allocation filtration coefficients into per-cluster,
	// per-resource sums of the coefficients that survived filtering.
	var clusterIdleFiltrationCoeffs map[string]map[string]float64
	if idleFiltrationCoefficients != nil {
		clusterIdleFiltrationCoeffs = map[string]map[string]float64{}

		for cluster, m := range idleFiltrationCoefficients {
			if _, ok := clusterIdleFiltrationCoeffs[cluster]; !ok {
				clusterIdleFiltrationCoeffs[cluster] = map[string]float64{
					"cpu": 0.0,
					"gpu": 0.0,
					"ram": 0.0,
				}
			}

			for _, n := range m {
				for resource, val := range n {
					clusterIdleFiltrationCoeffs[cluster][resource] += val
				}
			}
		}
	}

	// If we have filters, and so have computed coefficients for scaling idle
	// allocation costs by cluster, then use those coefficients to scale down
	// each idle coefficient in the aggSet.
	if len(aggSet.idleKeys) > 0 && clusterIdleFiltrationCoeffs != nil {
		for idleKey := range aggSet.idleKeys {
			idleAlloc := aggSet.Get(idleKey)

			cluster, err := idleAlloc.Properties.GetCluster()
			if err != nil {
				// NOTE(review): only warns — cluster is the zero string here,
				// so the scaling below is skipped unless "" is a key.
				log.Warningf("AggregateBy: idle allocation without cluster: %s", idleAlloc)
			}

			if resourceCoeffs, ok := clusterIdleFiltrationCoeffs[cluster]; ok {
				idleAlloc.CPUCost *= resourceCoeffs["cpu"]
				idleAlloc.CPUCoreHours *= resourceCoeffs["cpu"]
				idleAlloc.RAMCost *= resourceCoeffs["ram"]
				idleAlloc.RAMByteHours *= resourceCoeffs["ram"]
				// NOTE(review): GPU cost is not scaled here and TotalCost is
				// rebuilt from CPU+RAM only — confirm GPU idle is intentionally
				// excluded.
				idleAlloc.TotalCost = idleAlloc.CPUCost + idleAlloc.RAMCost
			}

		}
	}

	// Split shared allocations and distribute among aggregated allocations
	if shareSet.Length() > 0 {
		shareCoefficients, err = computeShareCoeffs(properties, options, aggSet)
		if err != nil {
			log.Warningf("AllocationSet.AggregateBy: compute shared coeff: missing cluster ID: %s", err)
			return err
		}

		for _, alloc := range aggSet.allocations {
			if alloc.IsIdle() {
				// Skip idle allocations (they do not receive shared allocation)
				continue
			}

			// Distribute shared allocations by coefficient per-allocation
			// NOTE: share coefficients do not partition by cluster, like
			// idle coefficients do.
			for _, sharedAlloc := range shareSet.allocations {
				if _, ok := shareCoefficients[alloc.Name]; !ok {
					log.Errorf("ETL: share allocation: error getting allocation coefficienct for '%s'", alloc.Name)
					continue
				}

				alloc.SharedCost += sharedAlloc.TotalCost * shareCoefficients[alloc.Name]
				alloc.TotalCost += sharedAlloc.TotalCost * shareCoefficients[alloc.Name]
			}
		}
	}

	// Combine all idle allocations into a single "__idle__" allocation
	if !options.SplitIdle {
		for _, idleAlloc := range aggSet.IdleAllocations() {
			aggSet.Delete(idleAlloc.Name)
			idleAlloc.Name = IdleSuffix
			aggSet.Insert(idleAlloc)
		}
	}

	// Replace the receiver's contents with the aggregated results.
	as.allocations = aggSet.allocations

	return nil
}
+
+// TODO niko/etl deprecate the use of a map of resources here, we only use totals
+func computeShareCoeffs(properties Properties, options *AllocationAggregationOptions, as *AllocationSet) (map[string]float64, error) {
+	// Compute coeffs by totalling per-allocation, then dividing by the total.
+	coeffs := map[string]float64{}
+
+	// Compute totals for all allocations
+	total := 0.0
+
+	// ShareEven counts each aggregation with even weight, whereas ShareWeighted
+	// counts each aggregation proportionally to its respective costs
+	shareType := options.ShareSplit
+
+	// Record allocation values first, then normalize by totals to get percentages
+	for name, alloc := range as.allocations {
+		if alloc.IsIdle() {
+			// Skip idle allocations in coefficient calculation
+			continue
+		}
+
+		if shareType == ShareEven {
+			// Not additive - set to 1.0 for even distribution
+			coeffs[name] = 1.0
+			// Total is always additive
+			total += 1.0
+		} else {
+			// Both are additive for weighted distribution
+			coeffs[name] += alloc.TotalCost
+			total += alloc.TotalCost
+		}
+	}
+
+	// Normalize coefficients by totals
+	for a := range coeffs {
+		if coeffs[a] > 0 && total > 0 {
+			coeffs[a] /= total
+		} else {
+			log.Warningf("ETL: invalid values for shared coefficients: %d, %d", coeffs[a], total)
+			coeffs[a] = 0.0
+		}
+	}
+
+	return coeffs, nil
+}
+
// computeIdleCoeffs computes, per cluster and per allocation name, the
// fraction of each resource's (cpu, gpu, ram) cluster-wide cost attributable
// to that allocation, keyed [cluster][allocationName][resource]. Idle
// allocations and allocations matching any ShareFunc are excluded. With
// ShareEven each allocation gets weight 1.0 before normalization; otherwise
// weights are the allocation's per-resource costs. Entries whose value or
// cluster total is zero are left unnormalized (i.e. remain 0 or 1.0).
// The properties argument is currently unused.
func computeIdleCoeffs(properties Properties, options *AllocationAggregationOptions, as *AllocationSet) (map[string]map[string]map[string]float64, error) {
	types := []string{"cpu", "gpu", "ram"}

	// Compute idle coefficients, then save them in AllocationAggregationOptions
	coeffs := map[string]map[string]map[string]float64{}

	// Compute totals per resource for CPU, GPU, RAM, and PV
	totals := map[string]map[string]float64{}

	// ShareEven counts each allocation with even weight, whereas ShareWeighted
	// counts each allocation proportionally to its respective costs
	shareType := options.ShareIdle

	// Record allocation values first, then normalize by totals to get percentages
	for _, alloc := range as.allocations {
		if alloc.IsIdle() {
			// Skip idle allocations in coefficient calculation
			continue
		}

		// If any of the share funcs succeed, share the allocation. Do this
		// prior to filtering so that shared namespaces, etc do not get
		// filtered out before we have a chance to share them.
		skip := false
		for _, sf := range options.ShareFuncs {
			if sf(alloc) {
				skip = true
				break
			}
		}
		if skip {
			continue
		}

		// We need to key the allocations by cluster id
		clusterID, err := alloc.Properties.GetCluster()
		if err != nil {
			return nil, err
		}

		// get the name key for the allocation
		name := alloc.Name

		// Create cluster based tables if they don't exist
		if _, ok := coeffs[clusterID]; !ok {
			coeffs[clusterID] = map[string]map[string]float64{}
		}
		if _, ok := totals[clusterID]; !ok {
			totals[clusterID] = map[string]float64{}
		}

		if _, ok := coeffs[clusterID][name]; !ok {
			coeffs[clusterID][name] = map[string]float64{}
		}

		if shareType == ShareEven {
			for _, r := range types {
				// Not additive - hard set to 1.0
				coeffs[clusterID][name][r] = 1.0

				// totals are additive
				totals[clusterID][r] += 1.0
			}
		} else {
			// Weighted: accumulate per-resource costs for this allocation
			// and for the cluster total.
			coeffs[clusterID][name]["cpu"] += alloc.CPUCost
			coeffs[clusterID][name]["gpu"] += alloc.GPUCost
			coeffs[clusterID][name]["ram"] += alloc.RAMCost

			totals[clusterID]["cpu"] += alloc.CPUCost
			totals[clusterID]["gpu"] += alloc.GPUCost
			totals[clusterID]["ram"] += alloc.RAMCost
		}
	}

	// Normalize coefficients by totals
	for c := range coeffs {
		for a := range coeffs[c] {
			for _, r := range types {
				if coeffs[c][a][r] > 0 && totals[c][r] > 0 {
					coeffs[c][a][r] /= totals[c][r]
				}
			}
		}
	}

	return coeffs, nil
}
+
// generateKey builds the aggregation key for alloc given the set of
// Properties being aggregated by. Each requested property contributes one
// name segment (cluster, node, namespace, controller kind, controller, pod,
// container, first service, then sorted label pairs), joined by "/".
// Missing controller/service/label values contribute UnallocatedSuffix
// rather than an error; missing cluster/node/namespace/pod/container are
// hard errors.
func (alloc *Allocation) generateKey(properties Properties) (string, error) {
	// Names will ultimately be joined into a single name, which uniquely
	// identifies allocations.
	names := []string{}

	if properties.HasCluster() {
		cluster, err := alloc.Properties.GetCluster()
		if err != nil {
			return "", err
		}
		names = append(names, cluster)
	}

	if properties.HasNode() {
		node, err := alloc.Properties.GetNode()
		if err != nil {
			return "", err
		}
		names = append(names, node)
	}

	if properties.HasNamespace() {
		namespace, err := alloc.Properties.GetNamespace()
		if err != nil {
			return "", err
		}
		names = append(names, namespace)
	}

	if properties.HasControllerKind() {
		controllerKind, err := alloc.Properties.GetControllerKind()
		if err != nil {
			// Indicate that allocation has no controller
			controllerKind = UnallocatedSuffix
		}

		// If a specific controller kind was requested and this allocation's
		// kind differs, treat it as unallocated for this aggregation.
		if prop, _ := properties.GetControllerKind(); prop != "" && prop != controllerKind {
			// The allocation does not have the specified controller kind
			controllerKind = UnallocatedSuffix
		}
		names = append(names, controllerKind)
	}

	if properties.HasController() {
		// When aggregating by controller without an explicit kind, prepend
		// the allocation's own kind (if it has one) for disambiguation.
		if !properties.HasControllerKind() {
			controllerKind, err := alloc.Properties.GetControllerKind()
			if err == nil {
				names = append(names, controllerKind)
			}
		}

		controller, err := alloc.Properties.GetController()
		if err != nil {
			// Indicate that allocation has no controller
			controller = UnallocatedSuffix
		}

		names = append(names, controller)
	}

	if properties.HasPod() {
		pod, err := alloc.Properties.GetPod()
		if err != nil {
			return "", err
		}

		names = append(names, pod)
	}

	if properties.HasContainer() {
		container, err := alloc.Properties.GetContainer()
		if err != nil {
			return "", err
		}

		names = append(names, container)
	}

	if properties.HasService() {
		services, err := alloc.Properties.GetServices()
		if err != nil {
			// Indicate that allocation has no services
			names = append(names, UnallocatedSuffix)
		} else {
			// TODO niko/etl support multi-service aggregation
			if len(services) > 0 {
				// Only the first service is used for now.
				for _, service := range services {
					names = append(names, service)
					break
				}
			} else {
				// Indicate that allocation has no services
				names = append(names, UnallocatedSuffix)
			}
		}
	}

	if properties.HasLabel() {
		labels, err := alloc.Properties.GetLabels() // labels that the individual allocation possesses
		if err != nil {
			// Indicate that allocation has no labels
			names = append(names, UnallocatedSuffix)
		} else {
			labelNames := []string{}

			aggLabels, err := properties.GetLabels() // potential labels to aggregate on supplied by the API caller
			if err != nil {
				// We've already checked HasLabel, so this should never occur
				return "", err
			}
			// calvin - support multi-label aggregation
			for labelName := range aggLabels {
				if val, ok := labels[labelName]; ok {
					labelNames = append(labelNames, fmt.Sprintf("%s=%s", labelName, val))
				} else if indexOf(UnallocatedSuffix, labelNames) == -1 { // if UnallocatedSuffix not already in names
					labelNames = append(labelNames, UnallocatedSuffix)
				}
			}
			// resolve arbitrary ordering. e.g., app=app0/env=env0 is the same agg as env=env0/app=app0
			if len(labelNames) > 1 {
				sort.Strings(labelNames)
			}
			unallocatedSuffixIndex := indexOf(UnallocatedSuffix, labelNames)
			// suffix should be at index 0 if it exists b/c of underscores
			if unallocatedSuffixIndex != -1 {
				// Move the suffix from wherever sorting left it to the end.
				labelNames = append(labelNames[:unallocatedSuffixIndex], labelNames[unallocatedSuffixIndex+1:]...)
				labelNames = append(labelNames, UnallocatedSuffix) // append to end
			}

			names = append(names, labelNames...)
		}
	}

	return strings.Join(names, "/"), nil
}
+
// indexOf returns the index of the first element of arr that matches v
// under case-insensitive (strings.EqualFold) comparison, or -1 if absent.
func indexOf(v string, arr []string) int {
	for i := range arr {
		// Caseless equivalence, matching the original helper's semantics.
		if strings.EqualFold(arr[i], v) {
			return i
		}
	}
	return -1
}
+
+// Clone returns a new AllocationSet with a deep copy of the given
+// AllocationSet's allocations.
+func (as *AllocationSet) Clone() *AllocationSet {
+	if as == nil {
+		return nil
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	allocs := map[string]*Allocation{}
+	for k, v := range as.allocations {
+		allocs[k] = v.Clone()
+	}
+
+	return &AllocationSet{
+		allocations: allocs,
+		Window:      as.Window.Clone(),
+	}
+}
+
+// Delete removes the allocation with the given name from the set
+func (as *AllocationSet) Delete(name string) {
+	if as == nil {
+		return
+	}
+
+	as.Lock()
+	defer as.Unlock()
+	delete(as.idleKeys, name)
+	delete(as.allocations, name)
+}
+
+// Each invokes the given function for each Allocation in the set
+func (as *AllocationSet) Each(f func(string, *Allocation)) {
+	if as == nil {
+		return
+	}
+
+	for k, a := range as.allocations {
+		f(k, a)
+	}
+}
+
+// End returns the End time of the AllocationSet window
+func (as *AllocationSet) End() time.Time {
+	if as == nil {
+		log.Warningf("Allocation ETL: calling End on nil AllocationSet")
+		return time.Unix(0, 0)
+	}
+	if as.Window.End() == nil {
+		log.Warningf("Allocation ETL: AllocationSet with illegal window: End is nil; len(as.allocations)=%d", len(as.allocations))
+		return time.Unix(0, 0)
+	}
+	return *as.Window.End()
+}
+
+// Get returns the Allocation at the given key in the AllocationSet
+func (as *AllocationSet) Get(key string) *Allocation {
+	as.RLock()
+	defer as.RUnlock()
+
+	if alloc, ok := as.allocations[key]; ok {
+		return alloc
+	}
+
+	return nil
+}
+
+// IdleAllocations returns a map of the idle allocations in the AllocationSet.
+// Returns clones of the actual Allocations, so mutability is not a problem.
+func (as *AllocationSet) IdleAllocations() map[string]*Allocation {
+	idles := map[string]*Allocation{}
+
+	if as.IsEmpty() {
+		return idles
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	for key := range as.idleKeys {
+		if alloc, ok := as.allocations[key]; ok {
+			idles[key] = alloc.Clone()
+		}
+	}
+
+	return idles
+}
+
// Insert aggregates the current entry in the AllocationSet by the given Allocation,
// but only if the Allocation is valid, i.e. matches the AllocationSet's window. If
// there is no existing entry, one is created. Nil error response indicates success.
func (as *AllocationSet) Insert(that *Allocation) error {
	// Delegate to the unexported insert without accumulation semantics.
	return as.insert(that, false)
}
+
+func (as *AllocationSet) insert(that *Allocation, accumulate bool) error {
+	if as.IsEmpty() {
+		as.Lock()
+		as.allocations = map[string]*Allocation{}
+		as.idleKeys = map[string]bool{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// Add the given Allocation to the existing entry, if there is one;
+	// otherwise just set directly into allocations
+	if _, ok := as.allocations[that.Name]; !ok {
+		as.allocations[that.Name] = that
+	} else {
+		as.allocations[that.Name].add(that, false, accumulate)
+	}
+
+	// If the given Allocation is an idle one, record that
+	if that.IsIdle() {
+		as.idleKeys[that.Name] = true
+	}
+
+	return nil
+}
+
+// IsEmpty returns true if the AllocationSet is nil, or if it contains
+// zero allocations.
+func (as *AllocationSet) IsEmpty() bool {
+	if as == nil || len(as.allocations) == 0 {
+		return true
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+	return as.allocations == nil || len(as.allocations) == 0
+}
+
// Length returns the number of Allocations in the set
func (as *AllocationSet) Length() int {
	// A nil set has zero allocations.
	if as == nil {
		return 0
	}

	as.RLock()
	defer as.RUnlock()
	return len(as.allocations)
}
+
// Map clones and returns a map of the AllocationSet's Allocations
func (as *AllocationSet) Map() map[string]*Allocation {
	// Return an empty (non-nil) map for an empty or nil set.
	if as.IsEmpty() {
		return map[string]*Allocation{}
	}

	// Clone first so callers cannot mutate the set's own allocations.
	return as.Clone().allocations
}
+
// MarshalJSON JSON-encodes the AllocationSet
// Only the allocations map is encoded; the Window is not serialized here.
func (as *AllocationSet) MarshalJSON() ([]byte, error) {
	as.RLock()
	defer as.RUnlock()
	return json.Marshal(as.allocations)
}
+
// Resolution returns the AllocationSet's window duration
func (as *AllocationSet) Resolution() time.Duration {
	return as.Window.Duration()
}
+
+func (as *AllocationSet) Set(alloc *Allocation) error {
+	if as.IsEmpty() {
+		as.Lock()
+		as.allocations = map[string]*Allocation{}
+		as.idleKeys = map[string]bool{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	as.allocations[alloc.Name] = alloc
+
+	// If the given Allocation is an idle one, record that
+	if alloc.IsIdle() {
+		as.idleKeys[alloc.Name] = true
+	}
+
+	return nil
+}
+
+// Start returns the Start time of the AllocationSet window
+func (as *AllocationSet) Start() time.Time {
+	if as == nil {
+		log.Warningf("Allocation ETL: calling Start on nil AllocationSet")
+		return time.Unix(0, 0)
+	}
+	if as.Window.Start() == nil {
+		log.Warningf("Allocation ETL: AllocationSet with illegal window: Start is nil; len(as.allocations)=%d", len(as.allocations))
+		return time.Unix(0, 0)
+	}
+	return *as.Window.Start()
+}
+
// String represents the given Allocation as a string
func (as *AllocationSet) String() string {
	// Render a placeholder for nil sets so logging never panics.
	if as == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AllocationSet{length: %d; window: %s; totalCost: %.2f}",
		as.Length(), as.Window, as.TotalCost())
}
+
+// TotalCost returns the sum of all TotalCosts of the allocations contained
+func (as *AllocationSet) TotalCost() float64 {
+	if as.IsEmpty() {
+		return 0.0
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+
+	tc := 0.0
+	for _, a := range as.allocations {
+		tc += a.TotalCost
+	}
+	return tc
+}
+
// UTCOffset returns the duration between the set's start-time zone and UTC.
func (as *AllocationSet) UTCOffset() time.Duration {
	// Zone offset is reported in seconds east of UTC.
	_, zone := as.Start().Zone()
	return time.Duration(zone) * time.Second
}
+
+func (as *AllocationSet) accumulate(that *AllocationSet) (*AllocationSet, error) {
+	if as.IsEmpty() {
+		return that, nil
+	}
+
+	if that.IsEmpty() {
+		return as, nil
+	}
+
+	if that.Start().Before(as.End()) {
+		timefmt := "2006-01-02T15:04:05"
+		err := fmt.Sprintf("that [%s, %s); that [%s, %s)\n", as.Start().Format(timefmt), as.End().Format(timefmt), that.Start().Format(timefmt), that.End().Format(timefmt))
+		return nil, fmt.Errorf("error accumulating AllocationSets: overlapping windows: %s", err)
+	}
+
+	// Set start, end to min(start), max(end)
+	start := as.Start()
+	end := as.End()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	if that.End().After(end) {
+		end = that.End()
+	}
+
+	acc := NewAllocationSet(start, end)
+
+	as.RLock()
+	defer as.RUnlock()
+
+	that.RLock()
+	defer that.RUnlock()
+
+	for _, alloc := range as.allocations {
+		// Change Start and End to match the new window. However, do not
+		// change Minutes because that will be accounted for during the
+		// insert step, if in fact there are two allocations to add.
+		alloc.Start = start
+		alloc.End = end
+
+		err := acc.insert(alloc, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, alloc := range that.allocations {
+		// Change Start and End to match the new window. However, do not
+		// change Minutes because that will be accounted for during the
+		// insert step, if in fact there are two allocations to add.
+		alloc.Start = start
+		alloc.End = end
+
+		err := acc.insert(alloc, true)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return acc, nil
+}
+
// AllocationSetRange is an ordered, mutex-guarded list of AllocationSets,
// each intended to cover a consecutive window of time.
type AllocationSetRange struct {
	sync.RWMutex
	allocations []*AllocationSet
}

// NewAllocationSetRange instantiates a range from the given sets, which are
// assumed to be in chronological order.
func NewAllocationSetRange(allocs ...*AllocationSet) *AllocationSetRange {
	return &AllocationSetRange{
		allocations: allocs,
	}
}
+
// Accumulate sums each AllocationSet in the given range, returning a single cumulative
// AllocationSet for the entire range.
func (asr *AllocationSetRange) Accumulate() (*AllocationSet, error) {
	var allocSet *AllocationSet
	var err error

	asr.RLock()
	defer asr.RUnlock()

	// Fold each set into the running total; accumulate handles a nil/empty
	// receiver, so the zero-value start is fine.
	for _, as := range asr.allocations {
		allocSet, err = allocSet.accumulate(as)
		if err != nil {
			return nil, err
		}
	}

	return allocSet, nil
}
+
+// TODO niko/etl accumulate into lower-resolution chunks of the given resolution
+// func (asr *AllocationSetRange) AccumulateBy(resolution time.Duration) *AllocationSetRange
+
+func (asr *AllocationSetRange) AggregateBy(properties Properties, options *AllocationAggregationOptions) error {
+	aggRange := &AllocationSetRange{allocations: []*AllocationSet{}}
+
+	asr.Lock()
+	defer asr.Unlock()
+
+	for _, as := range asr.allocations {
+		err := as.AggregateBy(properties, options)
+		if err != nil {
+			return err
+		}
+		aggRange.allocations = append(aggRange.allocations, as)
+	}
+
+	asr.allocations = aggRange.allocations
+
+	return nil
+}
+
// Append adds the given AllocationSet to the end of the range.
func (asr *AllocationSetRange) Append(that *AllocationSet) {
	asr.Lock()
	defer asr.Unlock()
	asr.allocations = append(asr.allocations, that)
}
+
+// Each invokes the given function for each AllocationSet in the range
+func (asr *AllocationSetRange) Each(f func(int, *AllocationSet)) {
+	if asr == nil {
+		return
+	}
+
+	for i, as := range asr.allocations {
+		f(i, as)
+	}
+}
+
+func (asr *AllocationSetRange) Get(i int) (*AllocationSet, error) {
+	if i < 0 || i >= len(asr.allocations) {
+		return nil, fmt.Errorf("AllocationSetRange: index out of range: %d", i)
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	return asr.allocations[i], nil
+}
+
// Length returns the number of AllocationSets in the range; zero for a nil
// or uninitialized range.
func (asr *AllocationSetRange) Length() int {
	if asr == nil || asr.allocations == nil {
		return 0
	}

	asr.RLock()
	defer asr.RUnlock()
	return len(asr.allocations)
}
+
+func (asr *AllocationSetRange) MarshalJSON() ([]byte, error) {
+	asr.RLock()
+	asr.RUnlock()
+	return json.Marshal(asr.allocations)
+}
+
+func (asr *AllocationSetRange) Slice() []*AllocationSet {
+	if asr == nil || asr.allocations == nil {
+		return nil
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	copy := []*AllocationSet{}
+	for _, as := range asr.allocations {
+		copy = append(copy, as.Clone())
+	}
+	return copy
+}
+
// String represents the given AllocationSetRange as a string
func (asr *AllocationSetRange) String() string {
	// Render a placeholder for nil ranges so logging never panics.
	if asr == nil {
		return "<nil>"
	}
	return fmt.Sprintf("AllocationSetRange{length: %d}", asr.Length())
}
+
+func (asr *AllocationSetRange) UTCOffset() time.Duration {
+	if asr.Length() == 0 {
+		return 0
+	}
+
+	as, err := asr.Get(0)
+	if err != nil {
+		return 0
+	}
+	return as.UTCOffset()
+}
+
+// Window returns the full window that the AllocationSetRange spans, from the
+// start of the first AllocationSet to the end of the last one.
+func (asr *AllocationSetRange) Window() Window {
+	if asr == nil || asr.Length() == 0 {
+		return NewWindow(nil, nil)
+	}
+
+	start := asr.allocations[0].Start()
+	end := asr.allocations[asr.Length()-1].End()
+
+	return NewWindow(&start, &end)
+}

+ 1153 - 0
pkg/kubecost/allocation_test.go

@@ -0,0 +1,1153 @@
+package kubecost
+
+import (
+	"fmt"
+	"math"
+	"testing"
+	"time"
+)
+
+const day = 24 * time.Hour
+
// NewUnitAllocation builds an Allocation fixture whose resource costs are all
// 1.0 (total 5.0) over the window [start, start+resolution). An empty name
// defaults to "cluster1/namespace1/pod1/container1", and nil props default to
// a standard cluster1/node1/namespace1/deployment1/pod1/container1 identity.
// Idle allocations get doubled CPU/RAM and zeroed PV/network costs.
func NewUnitAllocation(name string, start time.Time, resolution time.Duration, props *Properties) *Allocation {
	if name == "" {
		name = "cluster1/namespace1/pod1/container1"
	}

	properties := &Properties{}
	if props == nil {
		properties.SetCluster("cluster1")
		properties.SetNode("node1")
		properties.SetNamespace("namespace1")
		properties.SetControllerKind("deployment")
		properties.SetController("deployment1")
		properties.SetPod("pod1")
		properties.SetContainer("container1")
	} else {
		properties = props
	}

	end := start.Add(resolution)

	alloc := &Allocation{
		Name:            name,
		Properties:      *properties,
		Start:           start,
		End:             end,
		Minutes:         1440,
		CPUCoreHours:    1,
		CPUCost:         1,
		CPUEfficiency:   1,
		GPUHours:        1,
		GPUCost:         1,
		NetworkCost:     1,
		PVByteHours:     1,
		PVCost:          1,
		RAMByteHours:    1,
		RAMCost:         1,
		RAMEfficiency:   1,
		TotalCost:       5,
		TotalEfficiency: 1,
	}

	// If idle allocation, remove non-idle costs, but maintain total cost
	if alloc.IsIdle() {
		alloc.PVByteHours = 0.0
		alloc.PVCost = 0.0
		alloc.NetworkCost = 0.0

		alloc.CPUCoreHours += 1.0
		alloc.CPUCost += 1.0
		alloc.RAMByteHours += 1.0
		alloc.RAMCost += 1.0
	}

	return alloc
}
+
// TestAllocation_Add asserts the nil-handling contract of Allocation.Add:
// nil+nil is nil, and nil+zero yields a zero-cost (but non-nil) Allocation.
func TestAllocation_Add(t *testing.T) {
	var nilAlloc *Allocation
	zeroAlloc := &Allocation{}

	// nil + nil == nil
	nilNilSum, err := nilAlloc.Add(nilAlloc)
	if err != nil {
		t.Fatalf("Allocation.Add unexpected error: %s", err)
	}
	if nilNilSum != nil {
		t.Fatalf("Allocation.Add failed; exp: nil; act: %s", nilNilSum)
	}

	// nil + zero == zero
	nilZeroSum, err := nilAlloc.Add(zeroAlloc)
	if err != nil {
		t.Fatalf("Allocation.Add unexpected error: %s", err)
	}
	if nilZeroSum == nil || nilZeroSum.TotalCost != 0.0 {
		t.Fatalf("Allocation.Add failed; exp: 0.0; act: %s", nilZeroSum)
	}

	// TODO niko/etl more
}
+
+// TODO niko/etl
+// func TestAllocation_Clone(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocation_IsIdle(t *testing.T) {}
+
// TestAllocation_MatchesAll asserts that MatchesAll requires every supplied
// Properties to match: nil allocations never match, an empty condition list
// matches, and a single unmet Properties fails the whole conjunction.
func TestAllocation_MatchesAll(t *testing.T) {
	var alloc *Allocation

	// nil Allocations never match
	if alloc.MatchesAll() {
		t.Fatalf("Allocation.MatchesAll: expected no match on nil allocation")
	}

	today := time.Now().UTC().Truncate(day)
	alloc = NewUnitAllocation("", today, day, nil)

	// Matches when no Properties are given
	if !alloc.MatchesAll() {
		t.Fatalf("Allocation.MatchesAll: expected match on no conditions")
	}

	// Matches when all Properties match
	if !alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}, Properties{
		NodeProp: "node1",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected match when all Properties are met")
	}

	// Doesn't match when one Property doesn't match
	if alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected no match when one Properties is not met")
	}

	// Doesn't match when no Properties are met
	if alloc.MatchesAll(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster2",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesAll: expected no match when no Properties are met")
	}
}
+
// TestAllocation_MatchesOne asserts that MatchesOne requires at least one
// supplied Properties to match: nil allocations never match, an empty
// condition list does NOT match (unlike MatchesAll), and one satisfied
// Properties is sufficient for the disjunction.
func TestAllocation_MatchesOne(t *testing.T) {
	var alloc *Allocation

	// nil Allocations never match
	if alloc.MatchesOne() {
		t.Fatalf("Allocation.MatchesOne: expected no match on nil allocation")
	}

	today := time.Now().UTC().Truncate(day)
	alloc = NewUnitAllocation("", today, day, nil)

	// Doesn't match when no Properties are given
	if alloc.MatchesOne() {
		t.Fatalf("Allocation.MatchesOne: expected no match on no conditions")
	}

	// Matches when all Properties match
	if !alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected match when all Properties are met")
	}

	// Matches when one Property doesn't match
	if !alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster1",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected match when one Properties is met")
	}

	// Doesn't match when no Properties are met
	if alloc.MatchesOne(Properties{
		NamespaceProp: "namespace1",
		ServiceProp:   []string{"missing"},
	}, Properties{
		ClusterProp:        "cluster2",
		ControllerKindProp: "deployment",
	}) {
		t.Fatalf("Allocation.MatchesOne: expected no match when no Properties are met")
	}
}
+
// TestAllocation_String is a placeholder for coverage of Allocation.String.
func TestAllocation_String(t *testing.T) {
	// TODO niko/etl
}

// TestNewAllocationSet is a placeholder for coverage of NewAllocationSet.
func TestNewAllocationSet(t *testing.T) {
	// TODO niko/etl
}
+
// generateAllocationSet builds the canonical one-day test fixture used by the
// AggregateBy tests: two clusters, each with an idle allocation, spanning
// three namespaces, nine pods, and nine containers, with controllers, labels,
// and services attached below. Total cost is 100.0 (70.0 active + 30.0 idle);
// see the cost table in TestAllocationSet_AggregateBy for the breakdown.
func generateAllocationSet(start time.Time) *AllocationSet {
	// Idle allocations
	a1i := NewUnitAllocation(fmt.Sprintf("cluster1/%s", IdleSuffix), start, day, &Properties{
		ClusterProp: "cluster1",
		NodeProp: "node1",
	})
	a1i.CPUCost = 5.0
	a1i.RAMCost = 15.0
	a1i.GPUCost = 0.0
	a1i.TotalCost = 20.0

	a2i := NewUnitAllocation(fmt.Sprintf("cluster2/%s", IdleSuffix), start, day, &Properties{
		ClusterProp: "cluster2",
	})
	a2i.CPUCost = 5.0
	a2i.RAMCost = 5.0
	a2i.GPUCost = 0.0
	a2i.TotalCost = 10.0

	// Active allocations
	a1111 := NewUnitAllocation("cluster1/namespace1/pod1/container1", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace1",
		PodProp:       "pod1",
		ContainerProp: "container1",
	})
	a1111.RAMCost = 11.00
	a1111.TotalCost = 15.00

	a11abc2 := NewUnitAllocation("cluster1/namespace1/pod-abc/container2", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace1",
		PodProp:       "pod-abc",
		ContainerProp: "container2",
	})

	a11def3 := NewUnitAllocation("cluster1/namespace1/pod-def/container3", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace1",
		PodProp:       "pod-def",
		ContainerProp: "container3",
	})

	a12ghi4 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container4", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace2",
		PodProp:       "pod-ghi",
		ContainerProp: "container4",
	})

	a12ghi5 := NewUnitAllocation("cluster1/namespace2/pod-ghi/container5", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace2",
		PodProp:       "pod-ghi",
		ContainerProp: "container5",
	})

	a12jkl6 := NewUnitAllocation("cluster1/namespace2/pod-jkl/container6", start, day, &Properties{
		ClusterProp:   "cluster1",
		NamespaceProp: "namespace2",
		PodProp:       "pod-jkl",
		ContainerProp: "container6",
	})

	a22mno4 := NewUnitAllocation("cluster2/namespace2/pod-mno/container4", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace2",
		PodProp:       "pod-mno",
		ContainerProp: "container4",
	})

	a22mno5 := NewUnitAllocation("cluster2/namespace2/pod-mno/container5", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace2",
		PodProp:       "pod-mno",
		ContainerProp: "container5",
	})

	a22pqr6 := NewUnitAllocation("cluster2/namespace2/pod-pqr/container6", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace2",
		PodProp:       "pod-pqr",
		ContainerProp: "container6",
	})

	a23stu7 := NewUnitAllocation("cluster2/namespace3/pod-stu/container7", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace3",
		PodProp:       "pod-stu",
		ContainerProp: "container7",
	})

	a23vwx8 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container8", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace3",
		PodProp:       "pod-vwx",
		ContainerProp: "container8",
	})

	a23vwx9 := NewUnitAllocation("cluster2/namespace3/pod-vwx/container9", start, day, &Properties{
		ClusterProp:   "cluster2",
		NamespaceProp: "namespace3",
		PodProp:       "pod-vwx",
		ContainerProp: "container9",
	})

	// Controllers

	a11abc2.Properties.SetControllerKind("deployment")
	a11abc2.Properties.SetController("deployment1")
	a11def3.Properties.SetControllerKind("deployment")
	a11def3.Properties.SetController("deployment1")

	a12ghi4.Properties.SetControllerKind("deployment")
	a12ghi4.Properties.SetController("deployment2")
	a12ghi5.Properties.SetControllerKind("deployment")
	a12ghi5.Properties.SetController("deployment2")
	a22mno4.Properties.SetControllerKind("deployment")
	a22mno4.Properties.SetController("deployment2")
	a22mno5.Properties.SetControllerKind("deployment")
	a22mno5.Properties.SetController("deployment2")

	a23stu7.Properties.SetControllerKind("deployment")
	a23stu7.Properties.SetController("deployment3")

	a12jkl6.Properties.SetControllerKind("daemonset")
	a12jkl6.Properties.SetController("daemonset1")
	a22pqr6.Properties.SetControllerKind("daemonset")
	a22pqr6.Properties.SetController("daemonset1")

	a23vwx8.Properties.SetControllerKind("statefulset")
	a23vwx8.Properties.SetController("statefulset1")
	a23vwx9.Properties.SetControllerKind("statefulset")
	a23vwx9.Properties.SetController("statefulset1")

	// Labels

	a1111.Properties.SetLabels(map[string]string{"app": "app1", "env": "env1"})
	a12ghi4.Properties.SetLabels(map[string]string{"app": "app2", "env": "env2"})
	a12ghi5.Properties.SetLabels(map[string]string{"app": "app2", "env": "env2"})
	a22mno4.Properties.SetLabels(map[string]string{"app": "app2"})
	a22mno5.Properties.SetLabels(map[string]string{"app": "app2"})

	// Services

	a12jkl6.Properties.SetServices([]string{"service1"})
	a22pqr6.Properties.SetServices([]string{"service1"})

	return NewAllocationSet(start, start.Add(day),
		// idle
		a1i, a2i,
		// cluster 1, namespace1
		a1111, a11abc2, a11def3,
		// cluster 1, namespace 2
		a12ghi4, a12ghi5, a12jkl6,
		// cluster 2, namespace 2
		a22mno4, a22mno5, a22pqr6,
		// cluster 2, namespace 3
		a23stu7, a23vwx8, a23vwx9,
	)
}
+
// assertAllocationSetTotals fails the test if err is non-nil, or if the set's
// length or total cost (compared to cents precision) differ from expectations.
func assertAllocationSetTotals(t *testing.T, as *AllocationSet, msg string, err error, length int, totalCost float64) {
	if err != nil {
		t.Fatalf("AllocationSet.AggregateBy[%s]: unexpected error: %s", msg, err)
	}
	if as.Length() != length {
		t.Fatalf("AllocationSet.AggregateBy[%s]: expected set of length %d, actual %d", msg, length, as.Length())
	}
	// Round to cents to avoid float equality issues.
	if math.Round(as.TotalCost()*100) != math.Round(totalCost*100) {
		t.Fatalf("AllocationSet.AggregateBy[%s]: expected total cost %.2f, actual %.2f", msg, totalCost, as.TotalCost())
	}
}
+
// assertAllocationTotals fails the test if any allocation in the set has a
// total cost (to cents precision) differing from exps, or if the set contains
// an allocation name not present in exps.
func assertAllocationTotals(t *testing.T, as *AllocationSet, msg string, exps map[string]float64) {
	as.Each(func(k string, a *Allocation) {
		if exp, ok := exps[a.Name]; ok {
			// Round to cents to avoid float equality issues.
			if math.Round(a.TotalCost*100) != math.Round(exp*100) {
				t.Fatalf("AllocationSet.AggregateBy[%s]: expected total cost %.2f, actual %.2f", msg, exp, a.TotalCost)
			}
		} else {
			t.Fatalf("AllocationSet.AggregateBy[%s]: unexpected allocation: %s", msg, a.Name)
		}
	})
}
+
// assertAllocationWindow fails the test if any allocation in the set has a
// Start, End, or Minutes value differing from the expected window.
func assertAllocationWindow(t *testing.T, as *AllocationSet, msg string, expStart, expEnd time.Time, expMinutes float64) {
	as.Each(func(k string, a *Allocation) {
		if !a.Start.Equal(expStart) {
			t.Fatalf("AllocationSet.AggregateBy[%s]: expected start %s, actual %s", msg, expStart, a.Start)
		}
		if !a.End.Equal(expEnd) {
			t.Fatalf("AllocationSet.AggregateBy[%s]: expected end %s, actual %s", msg, expEnd, a.End)
		}
		if a.Minutes != expMinutes {
			t.Fatalf("AllocationSet.AggregateBy[%s]: expected minutes %f, actual %f", msg, expMinutes, a.Minutes)
		}
	})
}
+
// printAllocationSet dumps each allocation in the set to stdout under the
// given header; debugging aid only, not used for assertions.
func printAllocationSet(msg string, as *AllocationSet) {
	fmt.Printf("--- %s ---\n", msg)
	as.Each(func(k string, a *Allocation) {
		fmt.Printf(" > %s\n", a)
	})
}
+
+func TestAllocationSet_AggregateBy(t *testing.T) {
+	// Test AggregateBy against the following workload topology, which is
+	// generated by generateAllocationSet:
+
+	// | Hierarchy                              | Cost |  CPU |  RAM |  GPU |   PV |  Net |
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   cluster1:
+	//     idle:                                  20.00   5.00  15.00   0.00   0.00   0.00
+	//     namespace1:
+	//       pod1:
+	//         container1: [app=app1, env=env1]   15.00   1.00  11.00   1.00   1.00   1.00
+	//       pod-abc: (deployment1)
+	//         container2:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-def: (deployment1)
+	//         container3:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace2:
+	//       pod-ghi: (deployment2)
+	//         container4: [app=app2, env=env2]    5.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app=app2, env=env2]    5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-jkl: (daemonset1)
+	//         container6: {service1}              5.00   1.00   1.00   1.00   1.00   1.00
+	// +-----------------------------------------+------+------+------+------+------+------+
+	//   cluster1 subtotal                        60.00  11.00  31.00   6.00   6.00   6.00
+	// +-----------------------------------------+------+------+------+------+------+------+
+	//   cluster2:
+	//     idle:                                  10.00   5.00   5.00   0.00   0.00   0.00
+	//     namespace2:
+	//       pod-mno: (deployment2)
+	//         container4: [app=app2]              5.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app=app2]              5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-pqr: (daemonset1)
+	//         container6: {service1}              5.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace3:
+	//       pod-stu: (deployment3)
+	//         container7:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-vwx: (statefulset1)
+	//         container8:                         5.00   1.00   1.00   1.00   1.00   1.00
+	//         container9:                         5.00   1.00   1.00   1.00   1.00   1.00
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   cluster2 subtotal                        40.00  11.00  11.00   6.00   6.00   6.00
+	// +----------------------------------------+------+------+------+------+------+------+
+	//   total                                   100.00  22.00  42.00  12.00  12.00  12.00
+	// +----------------------------------------+------+------+------+------+------+------+
+
+	// Scenarios to test:
+
+	// 1  Single-aggregation
+	// 1a AggregationProperties=(Cluster)
+	// 1b AggregationProperties=(Namespace)
+	// 1c AggregationProperties=(Pod)
+	// 1d AggregationProperties=(Container)
+	// 1e AggregationProperties=(ControllerKind)
+	// 1f AggregationProperties=(Controller)
+	// 1g AggregationProperties=(Service)
+	// 1h AggregationProperties=(Label:app)
+
+	// 2  Multi-aggregation
+	// 2a AggregationProperties=(Cluster, Namespace)
+	// 2b AggregationProperties=(Namespace, Label:app)
+	// 2c AggregationProperties=(Cluster, Namespace, Pod, Container)
+	// 2d AggregationProperties=(Label:app, Label:environment)
+
+	// 3  Share idle
+	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko/etl)
+
+	// 4  Share resources
+	// 4a Share namespace ShareEven
+	// 4b Share cluster ShareWeighted
+	// 4c Share label ShareEven
+	// 4d Share overhead ShareWeighted
+
+	// 5  Filters
+	// 5a Filter by cluster with separate idle
+	// 5b Filter by cluster with shared idle
+	// TODO niko/idle more filter tests
+
+	// 6  Combinations and options
+	// 6a SplitIdle
+	// 6b Share idle with filters
+	// 6c Share resources with filters
+	// 6d Share idle and share resources
+
+	// 7  Edge cases and errors
+	// 7a Empty AggregationProperties
+	// 7b Filter all
+	// 7c Share all
+	// 7d Share and filter the same allocations
+
+	// Definitions and set-up:
+
+	var as *AllocationSet
+	var err error
+
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+
+	numClusters := 2
+	numNamespaces := 3
+	numPods := 9
+	numContainers := 9
+	numControllerKinds := 3
+	numControllers := 5
+	numServices := 1
+	numLabelApps := 2
+
+	// By default, idle is reported as a single, merged allocation
+	numIdle := 1
+	// There will only ever be one __unallocated__
+	numUnallocated := 1
+	// There are two clusters, so each gets an idle entry when they are split
+	numSplitIdle := 2
+
+	activeTotalCost := 70.0
+	idleTotalCost := 30.0
+	sharedOverheadHourlyCost := 7.0
+
+	isNamespace3 := func(a *Allocation) bool {
+		ns, err := a.Properties.GetNamespace()
+		return err == nil && ns == "namespace3"
+	}
+
+	isApp1 := func(a *Allocation) bool {
+		ls, _ := a.Properties.GetLabels()
+		if app, ok := ls["app"]; ok && app == "app1" {
+			return true
+		}
+		return false
+	}
+
+	end := time.Now().UTC().Truncate(day)
+	start := end.Add(-day)
+
+	// Tests:
+
+	// 1  Single-aggregation
+
+	// 1a AggregationProperties=(Cluster)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, nil)
+	assertAllocationSetTotals(t, as, "1a", err, numClusters+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1a", map[string]float64{
+		"cluster1": 40.00,
+		"cluster2": 30.00,
+		IdleSuffix: 30.00,
+	})
+	assertAllocationWindow(t, as, "1a", startYesterday, endYesterday, 1440.0)
+
+	// 1b AggregationProperties=(Namespace)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1b", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1b", map[string]float64{
+		"namespace1": 25.00,
+		"namespace2": 30.00,
+		"namespace3": 15.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "1b", startYesterday, endYesterday, 1440.0)
+
+	// 1c AggregationProperties=(Pod)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{PodProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1c", err, numPods+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1c", map[string]float64{
+		"pod-jkl":  5.00,
+		"pod-stu":  5.00,
+		"pod-abc":  5.00,
+		"pod-pqr":  5.00,
+		"pod-def":  5.00,
+		"pod-vwx":  10.00,
+		"pod1":     15.00,
+		"pod-mno":  10.00,
+		"pod-ghi":  10.00,
+		IdleSuffix: 30.00,
+	})
+	assertAllocationWindow(t, as, "1c", startYesterday, endYesterday, 1440.0)
+
+	// 1d AggregationProperties=(Container)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ContainerProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1d", err, numContainers+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1d", map[string]float64{
+		"container2": 5.00,
+		"container9": 5.00,
+		"container6": 10.00,
+		"container3": 5.00,
+		"container4": 10.00,
+		"container7": 5.00,
+		"container8": 5.00,
+		"container5": 10.00,
+		"container1": 15.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "1d", startYesterday, endYesterday, 1440.0)
+
+	// 1e AggregationProperties=(ControllerKind)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerKindProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1e", err, numControllerKinds+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1e", map[string]float64{
+		"daemonset":       10.00,
+		"deployment":      35.00,
+		"statefulset":     10.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 15.00,
+	})
+	assertAllocationWindow(t, as, "1e", startYesterday, endYesterday, 1440.0)
+
+	// 1f AggregationProperties=(Controller)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1f", err, numControllers+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1f", map[string]float64{
+		"deployment/deployment2":   20.00,
+		"daemonset/daemonset1":     10.00,
+		"deployment/deployment3":   5.00,
+		"statefulset/statefulset1": 10.00,
+		"deployment/deployment1":   10.00,
+		IdleSuffix:                 30.00,
+		UnallocatedSuffix:          15.00,
+	})
+	assertAllocationWindow(t, as, "1f", startYesterday, endYesterday, 1440.0)
+
+	// 1g AggregationProperties=(Service)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ServiceProp: true}, nil)
+	assertAllocationSetTotals(t, as, "1g", err, numServices+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1g", map[string]float64{
+		"service1":        10.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 60.00,
+	})
+	assertAllocationWindow(t, as, "1g", startYesterday, endYesterday, 1440.0)
+
+	// 1h AggregationProperties=(Label:app)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{LabelProp: map[string]string{"app": ""}}, nil)
+	assertAllocationSetTotals(t, as, "1h", err, numLabelApps+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1h", map[string]float64{
+		"app=app1":        15.00,
+		"app=app2":        20.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 35.00,
+	})
+	assertAllocationWindow(t, as, "1h", startYesterday, endYesterday, 1440.0)
+
+	// 1i AggregationProperties=(ControllerKind:deployment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ControllerKindProp: "deployment"}, nil)
+	assertAllocationSetTotals(t, as, "1i", err, 1+numIdle+numUnallocated, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "1i", map[string]float64{
+		"deployment":      35.00,
+		IdleSuffix:        30.00,
+		UnallocatedSuffix: 35.00,
+	})
+	assertAllocationWindow(t, as, "1i", startYesterday, endYesterday, 1440.0)
+
+	// 2  Multi-aggregation
+
+	// 2a AggregationProperties=(Cluster, Namespace)
+	// 2b AggregationProperties=(Namespace, Label:app)
+	// 2c AggregationProperties=(Cluster, Namespace, Pod, Container)
+
+	// 2d AggregationProperties=(Label:app, Label:environment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{LabelProp: map[string]string{"app": "", "env": ""}}, nil)
+	// sets should be {idle, unallocated, app1/env1, app2/env2, app2/unallocated}
+	assertAllocationSetTotals(t, as, "2d", err, numIdle+numUnallocated+3, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "2d", map[string]float64{
+		"app=app1/env=env1":             15.00,
+		"app=app2/env=env2":             10.00,
+		"app=app2/" + UnallocatedSuffix: 10.00,
+		IdleSuffix:                      30.00,
+		UnallocatedSuffix:               35.00,
+	})
+
+	// 2e AggregationProperties=(Cluster, Label:app, Label:environment)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: "", LabelProp: map[string]string{"app": "", "env": ""}}, nil)
+	assertAllocationSetTotals(t, as, "2e", err, 6, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "2e", map[string]float64{
+		"cluster1/app=app2/env=env2":             10.00,
+		"__idle__":                               30.00,
+		"cluster1/app=app1/env=env1":             15.00,
+		"cluster1/" + UnallocatedSuffix:          15.00,
+		"cluster2/app=app2/" + UnallocatedSuffix: 10.00,
+		"cluster2/" + UnallocatedSuffix:          20.00,
+	})
+
+	// // TODO niko/etl
+
+	// // 3  Share idle
+
+	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
+	// namespace1: 39.6875 = 25.00 + 5.00*(3.00/6.00) + 15.0*(13.0/16.0)
+	// namespace2: 40.3125 = 30.00 + 5.0*(3.0/6.0) + 15.0*(3.0/16.0) + 5.0*(3.0/6.0) + 5.0*(3.0/6.0)
+	// namespace3: 20.0000 = 15.00 + 5.0*(3.0/6.0) + 5.0*(3.0/6.0)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareWeighted})
+	assertAllocationSetTotals(t, as, "3a", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "3a", map[string]float64{
+		"namespace1": 39.69,
+		"namespace2": 40.31,
+		"namespace3": 20.00,
+	})
+	assertAllocationWindow(t, as, "3a", startYesterday, endYesterday, 1440.0)
+
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven
+	// namespace1: 35.0000 = 25.00 + 5.00*(1.0/2.0) + 15.0*(1.0/2.0)
+	// namespace2: 45.0000 = 30.00 + 5.0*(1.0/2.0) + 15.0*(1.0/2.0) + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
+	// namespace3: 20.0000 = 15.00 + 5.0*(1.0/2.0) + 5.0*(1.0/2.0)
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareEven})
+	assertAllocationSetTotals(t, as, "3a", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "3a", map[string]float64{
+		"namespace1": 35.00,
+		"namespace2": 45.00,
+		"namespace3": 20.00,
+	})
+	assertAllocationWindow(t, as, "3b", startYesterday, endYesterday, 1440.0)
+
+	// 4  Share resources
+
+	// 4a Share namespace ShareEven
+	// namespace1: 32.5000 = 25.00 + 15.00*(1.0/2.0)
+	// namespace2: 37.5000 = 30.00 + 15.00*(1.0/2.0)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isNamespace3},
+		ShareSplit: ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "4a", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4a", map[string]float64{
+		"namespace1": 32.50,
+		"namespace2": 37.50,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4a", startYesterday, endYesterday, 1440.0)
+
+	// 4b Share namespace ShareWeighted
+	// namespace1: 32.5000 =
+	// namespace2: 37.5000 =
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isNamespace3},
+		ShareSplit: ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "4b", err, numNamespaces, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4b", map[string]float64{
+		"namespace1": 31.82,
+		"namespace2": 38.18,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4b", startYesterday, endYesterday, 1440.0)
+
+	// 4c Share label ShareEven
+	// namespace1: 15.0000 = 25.00 - 15.00 + 15.00*(1.0/3.0)
+	// namespace2: 35.0000 = 30.00 + 15.00*(1.0/3.0)
+	// namespace3: 20.0000 = 15.00 + 15.00*(1.0/3.0)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		ShareFuncs: []AllocationMatchFunc{isApp1},
+		ShareSplit: ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "4c", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "4c", map[string]float64{
+		"namespace1": 15.00,
+		"namespace2": 35.00,
+		"namespace3": 20.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4c", startYesterday, endYesterday, 1440.0)
+
+	// 4d Share overhead ShareWeighted
+	// namespace1: 37.5000 = 25.00 + (7.0*24.0)*(25.00/70.00)
+	// namespace2: 45.0000 = 30.00 + (7.0*24.0)*(30.00/70.00)
+	// namespace3: 22.5000 = 15.00 + (7.0*24.0)*(15.00/70.00)
+	// idle:       30.0000
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{
+		SharedHourlyCosts: map[string]float64{"total": sharedOverheadHourlyCost},
+		ShareSplit:        ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "4d", err, numNamespaces+numIdle, activeTotalCost+idleTotalCost+(sharedOverheadHourlyCost*24.0))
+	assertAllocationTotals(t, as, "4d", map[string]float64{
+		"namespace1": 85.00,
+		"namespace2": 102.00,
+		"namespace3": 51.00,
+		IdleSuffix:   30.00,
+	})
+	assertAllocationWindow(t, as, "4d", startYesterday, endYesterday, 1440.0)
+
+	// 5  Filters
+
+	isCluster := func(matchCluster string) func(*Allocation) bool {
+		return func(a *Allocation) bool {
+			cluster, err := a.Properties.GetCluster()
+			return err == nil && cluster == matchCluster
+		}
+	}
+
+	isNamespace := func(matchNamespace string) func(*Allocation) bool {
+		return func(a *Allocation) bool {
+			namespace, err := a.Properties.GetNamespace()
+			return err == nil && namespace == matchNamespace
+		}
+	}
+
+	// 5a Filter by cluster with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5a", err, 2, 60.0)
+	assertAllocationTotals(t, as, "5a", map[string]float64{
+		"cluster1": 40.00,
+		IdleSuffix: 20.00,
+	})
+	assertAllocationWindow(t, as, "5a", startYesterday, endYesterday, 1440.0)
+
+	// 5b Filter by cluster with shared idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "5b", err, 1, 60.0)
+	assertAllocationTotals(t, as, "5b", map[string]float64{
+		"cluster1": 60.00,
+	})
+	assertAllocationWindow(t, as, "5b", startYesterday, endYesterday, 1440.0)
+
+	// 5c Filter by cluster, agg by namespace, with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isCluster("cluster1")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5c", err, 3, 60.0)
+	assertAllocationTotals(t, as, "5c", map[string]float64{
+		"namespace1": 25.00,
+		"namespace2": 15.00,
+		IdleSuffix:   20.00,
+	})
+	assertAllocationWindow(t, as, "5c", startYesterday, endYesterday, 1440.0)
+
+	// 5d Filter by namespace, agg by cluster, with separate idle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{ClusterProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareNone,
+	})
+	assertAllocationSetTotals(t, as, "5d", err, 3, 40.31)
+	assertAllocationTotals(t, as, "5d", map[string]float64{
+		"cluster1": 15.00,
+		"cluster2": 15.00,
+		IdleSuffix: 10.31,
+	})
+	assertAllocationWindow(t, as, "5d", startYesterday, endYesterday, 1440.0)
+
+	// 6  Combinations and options
+
+	// 6a SplitIdle
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{SplitIdle: true})
+	assertAllocationSetTotals(t, as, "6a", err, numNamespaces+numSplitIdle, activeTotalCost+idleTotalCost)
+	assertAllocationTotals(t, as, "6a", map[string]float64{
+		"namespace1":                           25.00,
+		"namespace2":                           30.00,
+		"namespace3":                           15.00,
+		fmt.Sprintf("cluster1/%s", IdleSuffix): 20.00,
+		fmt.Sprintf("cluster2/%s", IdleSuffix): 10.00,
+	})
+	assertAllocationWindow(t, as, "6a", startYesterday, endYesterday, 1440.0)
+
+	// 6b Share idle weighted with filters
+
+	// Should match values from unfiltered aggregation
+	// as = generateAllocationSet(start)
+	// err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareWeighted})
+	// printAllocationSet("6b unfiltered", as)
+
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareWeighted,
+	})
+	assertAllocationSetTotals(t, as, "6b", err, 1, 40.31)
+	assertAllocationTotals(t, as, "6b", map[string]float64{
+		"namespace2": 40.31,
+	})
+	assertAllocationWindow(t, as, "6b", startYesterday, endYesterday, 1440.0)
+
+	// 6c Share idle even with filters
+
+	// Should match values from unfiltered aggregation
+	// as = generateAllocationSet(start)
+	// err = as.AggregateBy(Properties{NamespaceProp: true}, &AllocationAggregationOptions{ShareIdle: ShareEven})
+	// printAllocationSet("6c unfiltered", as)
+
+	as = generateAllocationSet(start)
+	err = as.AggregateBy(Properties{NamespaceProp: ""}, &AllocationAggregationOptions{
+		FilterFuncs: []AllocationMatchFunc{isNamespace("namespace2")},
+		ShareIdle:   ShareEven,
+	})
+	assertAllocationSetTotals(t, as, "6b", err, 1, 45.00)
+	assertAllocationTotals(t, as, "6b", map[string]float64{
+		"namespace2": 45.00,
+	})
+	assertAllocationWindow(t, as, "6b", startYesterday, endYesterday, 1440.0)
+
+	// 6d Share resources with filters
+	// 6e Share idle and share resources
+
+	// 7  Edge cases and errors
+
+	// 7a Empty AggregationProperties
+	// 7b Filter all
+	// 7c Share all
+	// 7d Share and filter the same allocations
+}
+
+// TODO niko/etl
+//func TestAllocationSet_Clone(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Delete(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_End(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_IdleAllocations(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Insert(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_IsEmpty(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Length(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Map(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_MarshalJSON(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Resolution(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Seconds(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Set(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_Start(t *testing.T) {}
+
+// TODO niko/etl
+//func TestAllocationSet_TotalCost(t *testing.T) {}
+
+// TODO niko/etl
+//func TestNewAllocationSetRange(t *testing.T) {}
+
+func TestAllocationSetRange_Accumulate(t *testing.T) {
+	ago2d := time.Now().UTC().Truncate(day).Add(-2 * day)
+	yesterday := time.Now().UTC().Truncate(day).Add(-day)
+	today := time.Now().UTC().Truncate(day)
+	tomorrow := time.Now().UTC().Truncate(day).Add(day)
+
+	// Accumulating any combination of nil and/or empty set should result in empty set
+	result, err := NewAllocationSetRange(nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(nil, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(NewAllocationSet(yesterday, today)).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	result, err = NewAllocationSetRange(nil, NewAllocationSet(ago2d, yesterday), nil, NewAllocationSet(today, tomorrow), nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating nil AllocationSetRange: %s", err)
+	}
+	if !result.IsEmpty() {
+		t.Fatalf("accumulating nil AllocationSetRange: expected empty; actual %s", result)
+	}
+
+	todayAS := NewAllocationSet(today, tomorrow)
+	todayAS.Set(NewUnitAllocation("", today, day, nil))
+
+	yesterdayAS := NewAllocationSet(yesterday, today)
+	yesterdayAS.Set(NewUnitAllocation("", yesterday, day, nil))
+
+	// Accumulate non-nil with nil should result in copy of non-nil, regardless of order
+	result, err = NewAllocationSetRange(nil, todayAS).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	result, err = NewAllocationSetRange(todayAS, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	result, err = NewAllocationSetRange(nil, todayAS, nil).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 5.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 5.0; actual %f", result.TotalCost())
+	}
+
+	// Accumulate two non-nil should result in sum of both with appropriate start, end
+	result, err = NewAllocationSetRange(yesterdayAS, todayAS).Accumulate()
+	if err != nil {
+		t.Fatalf("unexpected error accumulating AllocationSetRange of length 1: %s", err)
+	}
+	if result == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected AllocationSet; actual %s", result)
+	}
+	if result.TotalCost() != 10.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected total cost 10.0; actual %f", result.TotalCost())
+	}
+	allocMap := result.Map()
+	if len(allocMap) != 1 {
+		t.Fatalf("accumulating AllocationSetRange: expected length 1; actual length %d", len(allocMap))
+	}
+	alloc := allocMap["cluster1/namespace1/pod1/container1"]
+	if alloc == nil {
+		t.Fatalf("accumulating AllocationSetRange: expected allocation 'cluster1/namespace1/pod1/container1'")
+	}
+	if alloc.CPUCoreHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", result.TotalCost())
+	}
+	if alloc.CPUCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.CPUCost)
+	}
+	if alloc.CPUEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.CPUEfficiency)
+	}
+	if alloc.GPUHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.GPUHours)
+	}
+	if alloc.GPUCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.GPUCost)
+	}
+	if alloc.NetworkCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.NetworkCost)
+	}
+	if alloc.PVByteHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.PVByteHours)
+	}
+	if alloc.PVCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.PVCost)
+	}
+	if alloc.RAMByteHours != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.RAMByteHours)
+	}
+	if alloc.RAMCost != 2.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 2.0; actual %f", alloc.RAMCost)
+	}
+	if alloc.RAMEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.RAMEfficiency)
+	}
+	if alloc.TotalCost != 10.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 10.0; actual %f", alloc.TotalCost)
+	}
+	if alloc.TotalEfficiency != 1.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected 1.0; actual %f", alloc.TotalEfficiency)
+	}
+	if !alloc.Start.Equal(yesterday) {
+		t.Fatalf("accumulating AllocationSetRange: expected to start %s; actual %s", yesterday, alloc.Start)
+	}
+	if !alloc.End.Equal(tomorrow) {
+		t.Fatalf("accumulating AllocationSetRange: expected to end %s; actual %s", tomorrow, alloc.End)
+	}
+	if alloc.Minutes != 2880.0 {
+		t.Fatalf("accumulating AllocationSetRange: expected %f minutes; actual %f", 2880.0, alloc.Minutes)
+	}
+}
+
+// TODO niko/etl
+// func TestAllocationSetRange_AccumulateBy(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_AggregateBy(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Append(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Length(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_MarshalJSON(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Slice(t *testing.T) {}
+
+// TODO niko/etl
+// func TestAllocationSetRange_Window(t *testing.T) {}

+ 2810 - 0
pkg/kubecost/asset.go

@@ -0,0 +1,2810 @@
+package kubecost
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/kubecost/cost-model/pkg/log"
+)
+
// timeFmt is the reference layout used to render Asset start/end timestamps
// (see MarshalJSON): RFC 3339-like, with a numeric zone offset.
const timeFmt = "2006-01-02T15:04:05-0700"
+
// Asset defines an entity within a cluster that has a defined cost over a
// given period of time.
type Asset interface {
	// Type identifies the kind of Asset, which must always exist and should
	// be defined by the underlying type implementing the interface.
	Type() AssetType

	// Properties are a map of predefined traits, which may or may not exist,
	// but must conform to the AssetProperty schema
	Properties() *AssetProperties
	SetProperties(*AssetProperties)

	// Labels are a map of undefined string-to-string values
	Labels() AssetLabels
	SetLabels(AssetLabels)

	// Monetary values. Adjustment is a post-hoc correction applied on top of
	// the base cost; TotalCost is expected to include it (see Any.TotalCost).
	Adjustment() float64
	SetAdjustment(float64)
	TotalCost() float64

	// Temporal values. Start and End bound the Asset's activity within its
	// Window; Minutes is the duration of that activity.
	Start() time.Time
	End() time.Time
	Minutes() float64
	Window() Window
	ExpandWindow(Window)
	SetStartEnd(time.Time, time.Time)

	// Operations and comparisons: Add combines two Assets into one, Clone
	// deep-copies, and Equal reports an exact match.
	Add(Asset) Asset
	Clone() Asset
	Equal(Asset) bool

	// Representations: binary and JSON serialization, plus a string form.
	encoding.BinaryMarshaler
	encoding.BinaryUnmarshaler
	json.Marshaler
	fmt.Stringer
}
+
+// key is used to determine uniqueness of an Asset, for instance during Insert
+// to determine if two Assets should be combined. Passing nil props indicates
+// that all available props should be used. Passing empty props indicates that
+// no props should be used (e.g. to aggregate all assets). Passing one or more
+// props will key by only those props.
+func key(a Asset, props []AssetProperty) string {
+	keys := []string{}
+
+	if props == nil {
+		props = []AssetProperty{
+			AssetProviderProp,
+			AssetAccountProp,
+			AssetProjectProp,
+			AssetCategoryProp,
+			AssetClusterProp,
+			AssetTypeProp,
+			AssetServiceProp,
+			AssetProviderIDProp,
+			AssetNameProp,
+		}
+	}
+
+	for _, prop := range props {
+		switch true {
+		case prop == AssetProviderProp && a.Properties().Provider != "":
+			keys = append(keys, a.Properties().Provider)
+		case prop == AssetAccountProp && a.Properties().Account != "":
+			keys = append(keys, a.Properties().Account)
+		case prop == AssetProjectProp && a.Properties().Project != "":
+			keys = append(keys, a.Properties().Project)
+		case prop == AssetClusterProp && a.Properties().Cluster != "":
+			keys = append(keys, a.Properties().Cluster)
+		case prop == AssetCategoryProp && a.Properties().Category != "":
+			keys = append(keys, a.Properties().Category)
+		case prop == AssetTypeProp && a.Type().String() != "":
+			keys = append(keys, a.Type().String())
+		case prop == AssetServiceProp && a.Properties().Service != "":
+			keys = append(keys, a.Properties().Service)
+		case prop == AssetProviderIDProp && a.Properties().ProviderID != "":
+			keys = append(keys, a.Properties().ProviderID)
+		case prop == AssetNameProp && a.Properties().Name != "":
+			keys = append(keys, a.Properties().Name)
+		}
+	}
+
+	return strings.Join(keys, "/")
+}
+
+func toString(a Asset) string {
+	return fmt.Sprintf("%s{%s}%s=%.2f", a.Type().String(), a.Properties(), a.Window(), a.TotalCost())
+}
+
// AssetLabels is a schema-free mapping of key/value pairs that can be
// attributed to an Asset as flexible metadata.
type AssetLabels map[string]string

// Clone returns a deep copy of the label map, sharing no storage with the
// receiver.
func (al AssetLabels) Clone() AssetLabels {
	cloned := make(AssetLabels, len(al))
	for key, value := range al {
		cloned[key] = value
	}
	return cloned
}

// Equal returns true only if both maps contain exactly the same key/value
// pairs.
func (al AssetLabels) Equal(that AssetLabels) bool {
	if len(al) != len(that) {
		return false
	}
	for key, value := range al {
		other, found := that[key]
		if !found || other != value {
			return false
		}
	}
	return true
}

// Merge returns the intersection of the two label maps: only pairs present
// with identical values in both the receiver and the argument are retained.
func (al AssetLabels) Merge(that AssetLabels) AssetLabels {
	intersection := AssetLabels{}
	for key, value := range al {
		if other, found := that[key]; found && other == value {
			intersection[key] = value
		}
	}
	return intersection
}
+
// AssetMatchFunc is a function that can be used to match Assets by
// returning true for any given Asset if a condition is met.
// NOTE(review): presumably used as a filter/share predicate during Asset
// queries and aggregation — confirm against callers.
type AssetMatchFunc func(Asset) bool
+
// AssetType identifies a type of Asset
type AssetType int

const (
	// AnyAssetType describes the Any AssetType
	AnyAssetType AssetType = iota

	// CloudAssetType describes the Cloud AssetType
	CloudAssetType

	// ClusterManagementAssetType describes the ClusterManagement AssetType
	ClusterManagementAssetType

	// DiskAssetType describes the Disk AssetType
	DiskAssetType

	// LoadBalancerAssetType describes the LoadBalancer AssetType
	LoadBalancerAssetType

	// NetworkAssetType describes the Network AssetType
	NetworkAssetType

	// NodeAssetType describes the Node AssetType
	NodeAssetType

	// SharedAssetType describes the Shared AssetType
	SharedAssetType
)

// ParseAssetType attempts to parse the given string into an AssetType.
// Matching is case-insensitive and ignores surrounding whitespace; an
// unrecognized string yields AnyAssetType and a non-nil error.
func ParseAssetType(text string) (AssetType, error) {
	normalized := strings.TrimSpace(strings.ToLower(text))
	assetTypesByName := map[string]AssetType{
		"cloud":             CloudAssetType,
		"clustermanagement": ClusterManagementAssetType,
		"disk":              DiskAssetType,
		"loadbalancer":      LoadBalancerAssetType,
		"network":           NetworkAssetType,
		"node":              NodeAssetType,
		"shared":            SharedAssetType,
	}
	if at, ok := assetTypesByName[normalized]; ok {
		return at, nil
	}
	return AnyAssetType, fmt.Errorf("invalid asset type: %s", text)
}

// String converts the given AssetType to a string. Note that AnyAssetType
// renders as "Asset".
func (at AssetType) String() string {
	names := [...]string{
		"Asset",
		"Cloud",
		"ClusterManagement",
		"Disk",
		"LoadBalancer",
		"Network",
		"Node",
		"Shared",
	}
	return names[at]
}
+
// Any is the most general Asset, which is usually created as a result of
// adding two Assets of different types.
type Any struct {
	labels     AssetLabels      // schema-free key/value metadata
	properties *AssetProperties // predefined traits (provider, cluster, etc.)
	start      time.Time        // start of activity, within window
	end        time.Time        // end of activity, within window
	window     Window           // period of time over which the Asset is defined
	adjustment float64          // post-hoc cost correction, included in TotalCost
	Cost       float64          // base cost, excluding adjustment
}

// NewAsset creates a new Any-type Asset for the given period of time
func NewAsset(start, end time.Time, window Window) *Any {
	// The window is cloned so the new Asset does not alias the caller's
	// Window; labels and properties start empty rather than nil.
	return &Any{
		labels:     AssetLabels{},
		properties: &AssetProperties{},
		start:      start,
		end:        end,
		window:     window.Clone(),
	}
}
+
// Type returns the Asset's type
func (a *Any) Type() AssetType {
	return AnyAssetType
}

// Properties returns the Asset's properties
func (a *Any) Properties() *AssetProperties {
	return a.properties
}

// SetProperties sets the Asset's properties
func (a *Any) SetProperties(props *AssetProperties) {
	a.properties = props
}

// Labels returns the Asset's labels
func (a *Any) Labels() AssetLabels {
	return a.labels
}

// SetLabels sets the Asset's labels
func (a *Any) SetLabels(labels AssetLabels) {
	a.labels = labels
}

// Adjustment returns the Asset's cost adjustment
func (a *Any) Adjustment() float64 {
	return a.adjustment
}

// SetAdjustment sets the Asset's cost adjustment
func (a *Any) SetAdjustment(adj float64) {
	a.adjustment = adj
}

// TotalCost returns the Asset's TotalCost: the base Cost plus the adjustment.
func (a *Any) TotalCost() float64 {
	return a.Cost + a.adjustment
}
+
// Start returns the Asset's start time within the window
func (a *Any) Start() time.Time {
	return a.start
}

// End returns the Asset's end time within the window
func (a *Any) End() time.Time {
	return a.end
}

// Minutes returns the number of minutes the Asset was active within the window
func (a *Any) Minutes() float64 {
	return a.End().Sub(a.Start()).Minutes()
}

// Window returns the Asset's window
func (a *Any) Window() Window {
	return a.window
}

// ExpandWindow expands the Asset's window by the given window
func (a *Any) ExpandWindow(window Window) {
	a.window = a.window.Expand(window)
}

// SetStartEnd sets the Asset's Start and End fields. Each value is applied
// independently and only if it falls within the Asset's current Window;
// out-of-window values are dropped with a logged warning rather than an
// error.
func (a *Any) SetStartEnd(start, end time.Time) {
	if a.Window().Contains(start) {
		a.start = start
	} else {
		log.Warningf("Any.SetStartEnd: start %s not in %s", start, a.Window())
	}

	if a.Window().Contains(end) {
		a.end = end
	} else {
		log.Warningf("Any.SetStartEnd: end %s not in %s", end, a.Window())
	}
}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (a *Any) Add(that Asset) Asset {
+	this := a.Clone().(*Any)
+
+	props := a.Properties().Merge(that.Properties())
+	labels := a.Labels().Merge(that.Labels())
+
+	start := a.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := a.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := a.Window().Expand(that.Window())
+
+	this.start = start
+	this.end = end
+	this.window = window
+	this.SetProperties(props)
+	this.SetLabels(labels)
+	this.adjustment += that.Adjustment()
+	this.Cost += (that.TotalCost() - that.Adjustment())
+
+	return this
+}
+
+// Clone returns a cloned instance of the Asset
+func (a *Any) Clone() Asset {
+	return &Any{
+		labels:     a.labels.Clone(),
+		properties: a.properties.Clone(),
+		start:      a.start,
+		end:        a.end,
+		window:     a.window.Clone(),
+		adjustment: a.adjustment,
+		Cost:       a.Cost,
+	}
+}
+
+// Equal returns true if the given Asset is an exact match of the receiver
+func (a *Any) Equal(that Asset) bool {
+	t, ok := that.(*Any)
+	if !ok {
+		return false
+	}
+
+	if !a.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !a.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !a.start.Equal(t.start) {
+		return false
+	}
+	if !a.end.Equal(t.end) {
+		return false
+	}
+	if !a.window.Equal(t.window) {
+		return false
+	}
+
+	if a.Cost != t.Cost {
+		return false
+	}
+
+	return true
+}
+
+// MarshalJSON implements json.Marshaler
+func (a *Any) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncode(buffer, "properties", a.Properties(), ",")
+	jsonEncode(buffer, "labels", a.Labels(), ",")
+	jsonEncodeString(buffer, "window", a.Window().String(), ",")
+	jsonEncodeString(buffer, "start", a.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", a.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", a.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "adjustment", a.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", a.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer via the package-level toString helper.
func (a *Any) String() string {
	return toString(a)
}
+
// Cloud describes a generic cloud asset (one not covered by a more specific
// asset type such as Node or Disk).
type Cloud struct {
	labels     AssetLabels      // provider/user labels attached to the asset
	properties *AssetProperties // identifying properties (category, provider ID, ...)
	start      time.Time        // precise start within the window
	end        time.Time        // precise end within the window
	window     Window           // the window the asset is scoped to
	adjustment float64          // reconciliation delta added to Cost by TotalCost
	Cost       float64          // base cost, before adjustment
}
+
+// NewCloud returns a new Cloud Asset
+func NewCloud(category, providerID string, start, end time.Time, window Window) *Cloud {
+	properties := &AssetProperties{
+		Category:   category,
+		ProviderID: providerID,
+	}
+
+	return &Cloud{
+		labels:     AssetLabels{},
+		properties: properties,
+		start:      start,
+		end:        end,
+		window:     window.Clone(),
+	}
+}
+
// Type returns the AssetType of the Asset; always CloudAssetType.
func (ca *Cloud) Type() AssetType {
	return CloudAssetType
}
+
// Properties returns the AssetProperties. The returned pointer is the
// internal value, not a copy.
func (ca *Cloud) Properties() *AssetProperties {
	return ca.properties
}
+
// SetProperties replaces the Asset's properties with the given value.
func (ca *Cloud) SetProperties(props *AssetProperties) {
	ca.properties = props
}
+
// Labels returns the AssetLabels. The returned map is the internal value,
// not a copy.
func (ca *Cloud) Labels() AssetLabels {
	return ca.labels
}
+
// SetLabels replaces the Asset's labels with the given value.
func (ca *Cloud) SetLabels(labels AssetLabels) {
	ca.labels = labels
}
+
// Adjustment returns the Asset's adjustment, the delta that TotalCost adds
// on top of Cost.
func (ca *Cloud) Adjustment() float64 {
	return ca.adjustment
}
+
// SetAdjustment sets the Asset's adjustment value.
func (ca *Cloud) SetAdjustment(adj float64) {
	ca.adjustment = adj
}
+
// TotalCost returns the Asset's total cost: base Cost plus adjustment.
func (ca *Cloud) TotalCost() float64 {
	return ca.Cost + ca.adjustment
}
+
// Start returns the Asset's precise start time within the window.
func (ca *Cloud) Start() time.Time {
	return ca.start
}
+
// End returns the Asset's precise end time within the window.
func (ca *Cloud) End() time.Time {
	return ca.end
}
+
+// Minutes returns the number of Minutes the Asset ran
+func (ca *Cloud) Minutes() float64 {
+	return ca.End().Sub(ca.Start()).Minutes()
+}
+
// Window returns the window within which the Asset ran.
func (ca *Cloud) Window() Window {
	return ca.window
}
+
// ExpandWindow expands the Asset's window to also cover the given window.
func (ca *Cloud) ExpandWindow(window Window) {
	ca.window = ca.window.Expand(window)
}
+
+// SetStartEnd sets the Asset's Start and End fields
+func (ca *Cloud) SetStartEnd(start, end time.Time) {
+	if ca.Window().Contains(start) {
+		ca.start = start
+	} else {
+		log.Warningf("Cloud.SetStartEnd: start %s not in %s", start, ca.Window())
+	}
+
+	if ca.Window().Contains(end) {
+		ca.end = end
+	} else {
+		log.Warningf("Cloud.SetStartEnd: end %s not in %s", end, ca.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (ca *Cloud) Add(a Asset) Asset {
+	// Cloud + Cloud = Cloud
+	if that, ok := a.(*Cloud); ok {
+		this := ca.Clone().(*Cloud)
+		this.add(that)
+		return this
+	}
+
+	props := ca.Properties().Merge(a.Properties())
+	labels := ca.Labels().Merge(a.Labels())
+
+	start := ca.Start()
+	if a.Start().Before(start) {
+		start = a.Start()
+	}
+	end := ca.End()
+	if a.End().After(end) {
+		end = a.End()
+	}
+	window := ca.Window().Expand(a.Window())
+
+	// Cloud + !Cloud = Any
+	any := NewAsset(start, end, window)
+	any.SetProperties(props)
+	any.SetLabels(labels)
+	any.adjustment = ca.Adjustment() + a.Adjustment()
+	any.Cost = (ca.TotalCost() - ca.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+func (ca *Cloud) add(that *Cloud) {
+	if ca == nil {
+		ca = that
+		return
+	}
+
+	props := ca.Properties().Merge(that.Properties())
+	labels := ca.Labels().Merge(that.Labels())
+
+	start := ca.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := ca.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := ca.Window().Expand(that.Window())
+
+	ca.start = start
+	ca.end = end
+	ca.window = window
+	ca.SetProperties(props)
+	ca.SetLabels(labels)
+	ca.adjustment += that.adjustment
+	ca.Cost += that.Cost
+}
+
+// Clone returns a cloned instance of the Asset
+func (ca *Cloud) Clone() Asset {
+	return &Cloud{
+		labels:     ca.labels.Clone(),
+		properties: ca.properties.Clone(),
+		start:      ca.start,
+		end:        ca.end,
+		window:     ca.window.Clone(),
+		adjustment: ca.adjustment,
+		Cost:       ca.Cost,
+	}
+}
+
+// Equal returns true if the given Asset precisely equals the Asset
+func (ca *Cloud) Equal(a Asset) bool {
+	that, ok := a.(*Cloud)
+	if !ok {
+		return false
+	}
+
+	if !ca.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !ca.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !ca.start.Equal(that.start) {
+		return false
+	}
+	if !ca.end.Equal(that.end) {
+		return false
+	}
+	if !ca.window.Equal(that.window) {
+		return false
+	}
+
+	if ca.adjustment != that.adjustment {
+		return false
+	}
+
+	if ca.Cost != that.Cost {
+		return false
+	}
+
+	return true
+}
+
+// MarshalJSON implements json.Marshaler
+func (ca *Cloud) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", ca.Type().String(), ",")
+	jsonEncode(buffer, "properties", ca.Properties(), ",")
+	jsonEncode(buffer, "labels", ca.Labels(), ",")
+	jsonEncodeString(buffer, "window", ca.Window().String(), ",")
+	jsonEncodeString(buffer, "start", ca.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", ca.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", ca.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "adjustment", ca.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", ca.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer via the package-level toString helper.
func (ca *Cloud) String() string {
	return toString(ca)
}
+
// ClusterManagement describes a provider's cluster management fee. It has no
// adjustment and no precise start/end: it always spans its full window.
type ClusterManagement struct {
	labels     AssetLabels      // provider/user labels attached to the asset
	properties *AssetProperties // identifying properties (provider, cluster, ...)
	window     Window           // the window the fee is scoped to
	Cost       float64          // the management fee for the window
}
+
+// NewClusterManagement creates and returns a new ClusterManagement instance
+func NewClusterManagement(provider, cluster string, window Window) *ClusterManagement {
+	properties := &AssetProperties{
+		Category: ManagementCategory,
+		Provider: ParseProvider(provider),
+		Cluster:  cluster,
+		Service:  KubernetesService,
+	}
+
+	return &ClusterManagement{
+		labels:     AssetLabels{},
+		properties: properties,
+		window:     window.Clone(),
+	}
+}
+
// Type returns the Asset's type; always ClusterManagementAssetType.
func (cm *ClusterManagement) Type() AssetType {
	return ClusterManagementAssetType
}
+
// Properties returns the Asset's properties. The returned pointer is the
// internal value, not a copy.
func (cm *ClusterManagement) Properties() *AssetProperties {
	return cm.properties
}
+
// SetProperties replaces the Asset's properties with the given value.
func (cm *ClusterManagement) SetProperties(props *AssetProperties) {
	cm.properties = props
}
+
// Labels returns the Asset's labels. The returned map is the internal value,
// not a copy.
func (cm *ClusterManagement) Labels() AssetLabels {
	return cm.labels
}
+
+// SetLabels sets the Asset's properties
+func (cm *ClusterManagement) SetLabels(props AssetLabels) {
+	cm.labels = props
+}
+
// Adjustment does not apply to ClusterManagement; always returns 0.
func (cm *ClusterManagement) Adjustment() float64 {
	return 0.0
}
+
+// SetAdjustment does not apply to ClusterManagement
+func (cm *ClusterManagement) SetAdjustment(float64) {
+	return
+}
+
// TotalCost returns the Asset's total cost; equal to Cost since there is
// never an adjustment.
func (cm *ClusterManagement) TotalCost() float64 {
	return cm.Cost
}
+
// Start returns the start of the Asset's window.
// NOTE(review): dereferences the window's start pointer; assumes the window
// is always closed here — confirm callers never pass an open window.
func (cm *ClusterManagement) Start() time.Time {
	return *cm.window.Start()
}
+
// End returns the end of the Asset's window.
// NOTE(review): dereferences the window's end pointer; assumes the window
// is always closed here — confirm callers never pass an open window.
func (cm *ClusterManagement) End() time.Time {
	return *cm.window.End()
}
+
// Minutes returns the duration of the Asset's window in minutes, since a
// ClusterManagement fee always spans its full window.
func (cm *ClusterManagement) Minutes() float64 {
	return cm.Window().Minutes()
}
+
// Window returns the window within which the Asset applies.
func (cm *ClusterManagement) Window() Window {
	return cm.window
}
+
// ExpandWindow expands the Asset's window to also cover the given window.
func (cm *ClusterManagement) ExpandWindow(window Window) {
	cm.window = cm.window.Expand(window)
}
+
+// SetStartEnd sets the Asset's Start and End fields (not applicable here)
+func (cm *ClusterManagement) SetStartEnd(start, end time.Time) {
+	return
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (cm *ClusterManagement) Add(a Asset) Asset {
+	// ClusterManagement + ClusterManagement = ClusterManagement
+	if that, ok := a.(*ClusterManagement); ok {
+		this := cm.Clone().(*ClusterManagement)
+		this.add(that)
+		return this
+	}
+
+	props := cm.Properties().Merge(a.Properties())
+	labels := cm.Labels().Merge(a.Labels())
+
+	start := cm.Start()
+	if a.Start().Before(start) {
+		start = a.Start()
+	}
+	end := cm.End()
+	if a.End().After(end) {
+		end = a.End()
+	}
+	window := cm.Window().Expand(a.Window())
+
+	// ClusterManagement + !ClusterManagement = Any
+	any := NewAsset(start, end, window)
+	any.SetProperties(props)
+	any.SetLabels(labels)
+	any.adjustment = cm.Adjustment() + a.Adjustment()
+	any.Cost = (cm.TotalCost() - cm.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+func (cm *ClusterManagement) add(that *ClusterManagement) {
+	if cm == nil {
+		cm = that
+		return
+	}
+
+	props := cm.Properties().Merge(that.Properties())
+	labels := cm.Labels().Merge(that.Labels())
+	window := cm.Window().Expand(that.Window())
+
+	cm.window = window
+	cm.SetProperties(props)
+	cm.SetLabels(labels)
+	cm.Cost += that.Cost
+}
+
+// Clone returns a cloned instance of the Asset
+func (cm *ClusterManagement) Clone() Asset {
+	return &ClusterManagement{
+		labels:     cm.labels.Clone(),
+		properties: cm.properties.Clone(),
+		window:     cm.window.Clone(),
+		Cost:       cm.Cost,
+	}
+}
+
+// Equal returns true if the given Asset exactly matches the Asset
+func (cm *ClusterManagement) Equal(a Asset) bool {
+	that, ok := a.(*ClusterManagement)
+	if !ok {
+		return false
+	}
+
+	if !cm.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !cm.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !cm.window.Equal(that.window) {
+		return false
+	}
+
+	if cm.Cost != that.Cost {
+		return false
+	}
+
+	return true
+}
+
+// MarshalJSON implements json.Marshler
+func (cm *ClusterManagement) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", cm.Type().String(), ",")
+	jsonEncode(buffer, "properties", cm.Properties(), ",")
+	jsonEncode(buffer, "labels", cm.Labels(), ",")
+	jsonEncodeString(buffer, "window", cm.Window().String(), ",")
+	jsonEncodeString(buffer, "start", cm.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", cm.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", cm.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", cm.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer via the package-level toString helper.
func (cm *ClusterManagement) String() string {
	return toString(cm)
}
+
// Disk represents an in-cluster disk Asset
type Disk struct {
	labels     AssetLabels      // provider/user labels attached to the asset
	properties *AssetProperties // identifying properties (name, cluster, provider ID, ...)
	start      time.Time        // precise start within the window
	end        time.Time        // precise end within the window
	window     Window           // the window the asset is scoped to
	adjustment float64          // reconciliation delta added to Cost by TotalCost
	Cost       float64          // base cost, before adjustment
	ByteHours  float64          // cumulative capacity over time (bytes * hours)
	Local      float64          // presumably the fraction of the disk that is local storage — TODO confirm
	Breakdown  *Breakdown       // usage breakdown (idle/other/system/user)
}
+
+// NewDisk creates and returns a new Disk Asset
+func NewDisk(name, cluster, providerID string, start, end time.Time, window Window) *Disk {
+	properties := &AssetProperties{
+		Category:   StorageCategory,
+		Name:       name,
+		Cluster:    cluster,
+		ProviderID: providerID,
+		Service:    KubernetesService,
+	}
+
+	return &Disk{
+		labels:     AssetLabels{},
+		properties: properties,
+		start:      start,
+		end:        end,
+		window:     window,
+		Breakdown:  &Breakdown{},
+	}
+}
+
// Type returns the AssetType of the Asset; always DiskAssetType.
func (d *Disk) Type() AssetType {
	return DiskAssetType
}
+
// Properties returns the Asset's properties. The returned pointer is the
// internal value, not a copy.
func (d *Disk) Properties() *AssetProperties {
	return d.properties
}
+
// SetProperties replaces the Asset's properties with the given value.
func (d *Disk) SetProperties(props *AssetProperties) {
	d.properties = props
}
+
// Labels returns the Asset's labels. The returned map is the internal value,
// not a copy.
func (d *Disk) Labels() AssetLabels {
	return d.labels
}
+
// SetLabels replaces the Asset's labels with the given value.
func (d *Disk) SetLabels(labels AssetLabels) {
	d.labels = labels
}
+
// Adjustment returns the Asset's cost adjustment, the delta that TotalCost
// adds on top of Cost.
func (d *Disk) Adjustment() float64 {
	return d.adjustment
}
+
// SetAdjustment sets the Asset's cost adjustment.
func (d *Disk) SetAdjustment(adj float64) {
	d.adjustment = adj
}
+
// TotalCost returns the Asset's total cost: base Cost plus adjustment.
func (d *Disk) TotalCost() float64 {
	return d.Cost + d.adjustment
}
+
// Start returns the precise start time of the Asset within the window.
func (d *Disk) Start() time.Time {
	return d.start
}
+
// End returns the precise end time of the Asset within the window.
func (d *Disk) End() time.Time {
	return d.end
}
+
+// Minutes returns the number of minutes the Asset ran
+func (d *Disk) Minutes() float64 {
+	diskMins := d.end.Sub(d.start).Minutes()
+	windowMins := d.window.Minutes()
+
+	if diskMins > windowMins {
+		log.Warningf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
+		diskMins = windowMins
+	}
+
+	if diskMins < 0 {
+		diskMins = 0
+	}
+
+	return diskMins
+}
+
// Window returns the window within which the Asset ran.
func (d *Disk) Window() Window {
	return d.window
}
+
// ExpandWindow expands the Asset's window to also cover the given window.
func (d *Disk) ExpandWindow(window Window) {
	d.window = d.window.Expand(window)
}
+
+// SetStartEnd sets the Asset's Start and End fields
+func (d *Disk) SetStartEnd(start, end time.Time) {
+	if d.Window().Contains(start) {
+		d.start = start
+	} else {
+		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, d.Window())
+	}
+
+	if d.Window().Contains(end) {
+		d.end = end
+	} else {
+		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, d.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (d *Disk) Add(a Asset) Asset {
+	// Disk + Disk = Disk
+	if that, ok := a.(*Disk); ok {
+		this := d.Clone().(*Disk)
+		this.add(that)
+		return this
+	}
+
+	props := d.Properties().Merge(a.Properties())
+	labels := d.Labels().Merge(a.Labels())
+
+	start := d.Start()
+	if a.Start().Before(start) {
+		start = a.Start()
+	}
+	end := d.End()
+	if a.End().After(end) {
+		end = a.End()
+	}
+	window := d.Window().Expand(a.Window())
+
+	// Disk + !Disk = Any
+	any := NewAsset(start, end, window)
+	any.SetProperties(props)
+	any.SetLabels(labels)
+	any.adjustment = d.Adjustment() + a.Adjustment()
+	any.Cost = (d.TotalCost() - d.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+func (d *Disk) add(that *Disk) {
+	if d == nil {
+		d = that
+		return
+	}
+
+	props := d.Properties().Merge(that.Properties())
+	labels := d.Labels().Merge(that.Labels())
+	d.SetProperties(props)
+	d.SetLabels(labels)
+
+	start := d.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := d.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := d.Window().Expand(that.Window())
+	d.start = start
+	d.end = end
+	d.window = window
+
+	totalCost := d.Cost + that.Cost
+	if totalCost > 0.0 {
+		d.Breakdown.Idle = (d.Breakdown.Idle*d.Cost + that.Breakdown.Idle*that.Cost) / totalCost
+		d.Breakdown.Other = (d.Breakdown.Other*d.Cost + that.Breakdown.Other*that.Cost) / totalCost
+		d.Breakdown.System = (d.Breakdown.System*d.Cost + that.Breakdown.System*that.Cost) / totalCost
+		d.Breakdown.User = (d.Breakdown.User*d.Cost + that.Breakdown.User*that.Cost) / totalCost
+
+		d.Local = (d.TotalCost()*d.Local + that.TotalCost()*that.Local) / (d.TotalCost() + that.TotalCost())
+	} else {
+		d.Local = (d.Local + that.Local) / 2.0
+	}
+
+	d.adjustment += that.adjustment
+	d.Cost += that.Cost
+
+	d.ByteHours += that.ByteHours
+}
+
+// Clone returns a cloned instance of the Asset
+func (d *Disk) Clone() Asset {
+	return &Disk{
+		properties: d.properties.Clone(),
+		labels:     d.labels.Clone(),
+		start:      d.start,
+		end:        d.end,
+		window:     d.window.Clone(),
+		adjustment: d.adjustment,
+		Cost:       d.Cost,
+		ByteHours:  d.ByteHours,
+		Local:      d.Local,
+		Breakdown:  d.Breakdown.Clone(),
+	}
+}
+
+// Equal returns true if the two Assets match exactly
+func (d *Disk) Equal(a Asset) bool {
+	that, ok := a.(*Disk)
+	if !ok {
+		return false
+	}
+
+	if !d.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !d.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !d.Start().Equal(that.Start()) {
+		return false
+	}
+	if !d.End().Equal(that.End()) {
+		return false
+	}
+	if !d.window.Equal(that.window) {
+		return false
+	}
+
+	if d.adjustment != that.adjustment {
+		return false
+	}
+	if d.Cost != that.Cost {
+		return false
+	}
+
+	if d.ByteHours != that.ByteHours {
+		return false
+	}
+	if d.Local != that.Local {
+		return false
+	}
+	if !d.Breakdown.Equal(that.Breakdown) {
+		return false
+	}
+
+	return true
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (d *Disk) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", d.Type().String(), ",")
+	jsonEncode(buffer, "properties", d.Properties(), ",")
+	jsonEncode(buffer, "labels", d.Labels(), ",")
+	jsonEncodeString(buffer, "window", d.Window().String(), ",")
+	jsonEncodeString(buffer, "start", d.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", d.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", d.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "byteHours", d.ByteHours, ",")
+	jsonEncodeFloat64(buffer, "bytes", d.Bytes(), ",")
+	jsonEncode(buffer, "breakdown", d.Breakdown, ",")
+	jsonEncodeFloat64(buffer, "adjustment", d.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", d.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer via the package-level toString helper.
func (d *Disk) String() string {
	return toString(d)
}
+
+// Bytes returns the number of bytes belonging to the disk. This could be
+// fractional because it's the number of byte*hours divided by the number of
+// hours running; e.g. the sum of a 100GiB disk running for the first 10 hours
+// and a 30GiB disk running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (100*10 + 30*20) / 24 = 66.667GiB
+// However, any number of disks running for the full span of a window will
+// report the actual number of bytes of the static disk; e.g. the above
+// scenario for one entire 24-hour window:
+//   (100*24 + 30*24) / 24 = (100 + 30) = 130GiB
+func (d *Disk) Bytes() float64 {
+	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
+	return d.ByteHours * (60.0 / d.Minutes())
+}
+
// Breakdown describes a resource's use as a percentage of various usage types.
// Fields are combined as cost-weighted averages when assets are summed (see
// Disk.add / Node.add), so they are presumably fractions in [0, 1] — confirm.
type Breakdown struct {
	Idle   float64 `json:"idle"`
	Other  float64 `json:"other"`
	System float64 `json:"system"`
	User   float64 `json:"user"`
}
+
+// Clone returns a cloned instance of the Breakdown
+func (b *Breakdown) Clone() *Breakdown {
+	if b == nil {
+		return nil
+	}
+
+	return &Breakdown{
+		Idle:   b.Idle,
+		Other:  b.Other,
+		System: b.System,
+		User:   b.User,
+	}
+}
+
+// Equal returns true if the two Breakdowns are exact matches
+func (b *Breakdown) Equal(that *Breakdown) bool {
+	if b == nil || that == nil {
+		return false
+	}
+
+	if b.Idle != that.Idle {
+		return false
+	}
+	if b.Other != that.Other {
+		return false
+	}
+	if b.System != that.System {
+		return false
+	}
+	if b.User != that.User {
+		return false
+	}
+
+	return true
+}
+
// Network is an Asset representing a single node's network costs
type Network struct {
	properties *AssetProperties // identifying properties (name, cluster, provider ID, ...)
	labels     AssetLabels      // provider/user labels attached to the asset
	start      time.Time        // precise start within the window
	end        time.Time        // precise end within the window
	window     Window           // the window the asset is scoped to
	adjustment float64          // reconciliation delta added to Cost by TotalCost
	Cost       float64          // base cost, before adjustment
}
+
+// NewNetwork creates and returns a new Network Asset
+func NewNetwork(name, cluster, providerID string, start, end time.Time, window Window) *Network {
+	properties := &AssetProperties{
+		Category:   NetworkCategory,
+		Name:       name,
+		Cluster:    cluster,
+		ProviderID: providerID,
+		Service:    KubernetesService,
+	}
+
+	return &Network{
+		properties: properties,
+		labels:     AssetLabels{},
+		start:      start,
+		end:        end,
+		window:     window.Clone(),
+	}
+}
+
// Type returns the AssetType of the Asset; always NetworkAssetType.
func (n *Network) Type() AssetType {
	return NetworkAssetType
}
+
// Properties returns the Asset's properties. The returned pointer is the
// internal value, not a copy.
func (n *Network) Properties() *AssetProperties {
	return n.properties
}
+
// SetProperties replaces the Asset's properties with the given value.
func (n *Network) SetProperties(props *AssetProperties) {
	n.properties = props
}
+
// Labels returns the Asset's labels. The returned map is the internal value,
// not a copy.
func (n *Network) Labels() AssetLabels {
	return n.labels
}
+
// SetLabels replaces the Asset's labels with the given value.
func (n *Network) SetLabels(labels AssetLabels) {
	n.labels = labels
}
+
// Adjustment returns the Asset's cost adjustment, the delta that TotalCost
// adds on top of Cost.
func (n *Network) Adjustment() float64 {
	return n.adjustment
}
+
// SetAdjustment sets the Asset's cost adjustment.
func (n *Network) SetAdjustment(adj float64) {
	n.adjustment = adj
}
+
// TotalCost returns the Asset's total cost: base Cost plus adjustment.
func (n *Network) TotalCost() float64 {
	return n.Cost + n.adjustment
}
+
// Start returns the precise start time of the Asset within the window.
func (n *Network) Start() time.Time {
	return n.start
}
+
// End returns the precise end time of the Asset within the window.
func (n *Network) End() time.Time {
	return n.end
}
+
+// Minutes returns the number of minutes the Asset ran within the window
+func (n *Network) Minutes() float64 {
+	netMins := n.end.Sub(n.start).Minutes()
+	windowMins := n.window.Minutes()
+
+	if netMins > windowMins {
+		log.Warningf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
+		netMins = windowMins
+	}
+
+	if netMins < 0 {
+		netMins = 0
+	}
+
+	return netMins
+}
+
// Window returns the window within which the Asset ran.
func (n *Network) Window() Window {
	return n.window
}
+
// ExpandWindow expands the Asset's window to also cover the given window.
func (n *Network) ExpandWindow(window Window) {
	n.window = n.window.Expand(window)
}
+
+// SetStartEnd sets the Asset's Start and End fields
+func (n *Network) SetStartEnd(start, end time.Time) {
+	if n.Window().Contains(start) {
+		n.start = start
+	} else {
+		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
+	}
+
+	if n.Window().Contains(end) {
+		n.end = end
+	} else {
+		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (n *Network) Add(a Asset) Asset {
+	// Network + Network = Network
+	if that, ok := a.(*Network); ok {
+		this := n.Clone().(*Network)
+		this.add(that)
+		return this
+	}
+
+	props := n.Properties().Merge(a.Properties())
+	labels := n.Labels().Merge(a.Labels())
+
+	start := n.Start()
+	if a.Start().Before(start) {
+		start = a.Start()
+	}
+	end := n.End()
+	if a.End().After(end) {
+		end = a.End()
+	}
+	window := n.Window().Expand(a.Window())
+
+	// Network + !Network = Any
+	any := NewAsset(start, end, window)
+	any.SetProperties(props)
+	any.SetLabels(labels)
+	any.adjustment = n.Adjustment() + a.Adjustment()
+	any.Cost = (n.TotalCost() - n.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+func (n *Network) add(that *Network) {
+	if n == nil {
+		n = that
+		return
+	}
+
+	props := n.Properties().Merge(that.Properties())
+	labels := n.Labels().Merge(that.Labels())
+	n.SetProperties(props)
+	n.SetLabels(labels)
+
+	start := n.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := n.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := n.Window().Expand(that.Window())
+	n.start = start
+	n.end = end
+	n.window = window
+
+	n.Cost += that.Cost
+	n.adjustment += that.adjustment
+}
+
+// Clone returns a deep copy of the given Network
+func (n *Network) Clone() Asset {
+	if n == nil {
+		return nil
+	}
+
+	return &Network{
+		properties: n.properties.Clone(),
+		labels:     n.labels.Clone(),
+		start:      n.start,
+		end:        n.end,
+		window:     n.window.Clone(),
+		adjustment: n.adjustment,
+		Cost:       n.Cost,
+	}
+}
+
+// Equal returns true if the tow Assets match exactly
+func (n *Network) Equal(a Asset) bool {
+	that, ok := a.(*Network)
+	if !ok {
+		return false
+	}
+
+	if !n.Labels().Equal(that.Labels()) {
+		return false
+	}
+	if !n.Properties().Equal(that.Properties()) {
+		return false
+	}
+
+	if !n.Start().Equal(that.Start()) {
+		return false
+	}
+	if !n.End().Equal(that.End()) {
+		return false
+	}
+	if !n.window.Equal(that.window) {
+		return false
+	}
+
+	if n.adjustment != that.adjustment {
+		return false
+	}
+	if n.Cost != that.Cost {
+		return false
+	}
+
+	return true
+}
+
+// MarshalJSON implements json.Marshal interface
+func (n *Network) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", n.Type().String(), ",")
+	jsonEncode(buffer, "properties", n.Properties(), ",")
+	jsonEncode(buffer, "labels", n.Labels(), ",")
+	jsonEncodeString(buffer, "window", n.Window().String(), ",")
+	jsonEncodeString(buffer, "start", n.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", n.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", n.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer via the package-level toString helper.
func (n *Network) String() string {
	return toString(n)
}
+
// Node is an Asset representing a single node in a cluster
type Node struct {
	properties   *AssetProperties // identifying properties (name, cluster, provider ID, ...)
	labels       AssetLabels      // provider/user labels attached to the asset
	start        time.Time        // precise start within the window
	end          time.Time        // precise end within the window
	window       Window           // the window the asset is scoped to
	adjustment   float64          // reconciliation delta added by TotalCost
	NodeType     string           // node/instance type; blanked when mixed types are summed (see add)
	CPUCoreHours float64          // cumulative CPU capacity over time (cores * hours)
	RAMByteHours float64          // cumulative RAM capacity over time (bytes * hours)
	CPUBreakdown *Breakdown       // CPU usage breakdown (idle/other/system/user)
	RAMBreakdown *Breakdown       // RAM usage breakdown (idle/other/system/user)
	CPUCost      float64          // CPU cost; discounted by Discount in TotalCost
	GPUCost      float64          // GPU cost; not discounted in TotalCost
	RAMCost      float64          // RAM cost; discounted by Discount in TotalCost
	Discount     float64          // discount applied to (CPUCost + RAMCost) in TotalCost
	Preemptible  float64          // presumably the fraction of runtime that was preemptible — TODO confirm
}
+
+// NewNode creates and returns a new Node Asset
+func NewNode(name, cluster, providerID string, start, end time.Time, window Window) *Node {
+	properties := &AssetProperties{
+		Category:   ComputeCategory,
+		Name:       name,
+		Cluster:    cluster,
+		ProviderID: providerID,
+		Service:    KubernetesService,
+	}
+
+	return &Node{
+		properties:   properties,
+		labels:       AssetLabels{},
+		start:        start,
+		end:          end,
+		window:       window.Clone(),
+		CPUBreakdown: &Breakdown{},
+		RAMBreakdown: &Breakdown{},
+	}
+}
+
// Type returns the AssetType of the Asset; always NodeAssetType.
func (n *Node) Type() AssetType {
	return NodeAssetType
}
+
// Properties returns the Asset's properties. The returned pointer is the
// internal value, not a copy.
func (n *Node) Properties() *AssetProperties {
	return n.properties
}
+
// SetProperties replaces the Asset's properties with the given value.
func (n *Node) SetProperties(props *AssetProperties) {
	n.properties = props
}
+
// Labels returns the Asset's labels. The returned map is the internal value,
// not a copy.
func (n *Node) Labels() AssetLabels {
	return n.labels
}
+
// SetLabels replaces the Asset's labels with the given value.
func (n *Node) SetLabels(labels AssetLabels) {
	n.labels = labels
}
+
// Adjustment returns the Asset's cost adjustment, the delta that TotalCost
// adds on top of the discounted resource costs.
func (n *Node) Adjustment() float64 {
	return n.adjustment
}
+
// SetAdjustment sets the Asset's cost adjustment.
func (n *Node) SetAdjustment(adj float64) {
	n.adjustment = adj
}
+
// TotalCost returns the Asset's total cost: the discount applies to CPU and
// RAM only, then GPU cost and the adjustment are added undiscounted.
func (n *Node) TotalCost() float64 {
	return ((n.CPUCost + n.RAMCost) * (1.0 - n.Discount)) + n.GPUCost + n.adjustment
}
+
// Start returns the precise start time of the Asset within the window.
func (n *Node) Start() time.Time {
	return n.start
}
+
// End returns the precise end time of the Asset within the window.
func (n *Node) End() time.Time {
	return n.end
}
+
+// Minutes returns the number of minutes the Asset ran within the window
+func (n *Node) Minutes() float64 {
+	nodeMins := n.end.Sub(n.start).Minutes()
+	windowMins := n.window.Minutes()
+
+	if nodeMins > windowMins {
+		log.Warningf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
+		nodeMins = windowMins
+	}
+
+	if nodeMins < 0 {
+		nodeMins = 0
+	}
+
+	return nodeMins
+}
+
// Window returns the window within which the Asset ran.
func (n *Node) Window() Window {
	return n.window
}
+
// ExpandWindow expands the Asset's window to also cover the given window.
func (n *Node) ExpandWindow(window Window) {
	n.window = n.window.Expand(window)
}
+
+// SetStartEnd sets the Asset's Start and End fields
+func (n *Node) SetStartEnd(start, end time.Time) {
+	if n.Window().Contains(start) {
+		n.start = start
+	} else {
+		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, n.Window())
+	}
+
+	if n.Window().Contains(end) {
+		n.end = end
+	} else {
+		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, n.Window())
+	}
+}
+
+// Add sums the Asset with the given Asset to produce a new Asset, maintaining
+// as much relevant information as possible (i.e. type, properties, labels).
+func (n *Node) Add(a Asset) Asset {
+	// Node + Node = Node
+	if that, ok := a.(*Node); ok {
+		this := n.Clone().(*Node)
+		this.add(that)
+		return this
+	}
+
+	props := n.Properties().Merge(a.Properties())
+	labels := n.Labels().Merge(a.Labels())
+
+	start := n.Start()
+	if a.Start().Before(start) {
+		start = a.Start()
+	}
+	end := n.End()
+	if a.End().After(end) {
+		end = a.End()
+	}
+	window := n.Window().Expand(a.Window())
+
+	// Node + !Node = Any
+	any := NewAsset(start, end, window)
+	any.SetProperties(props)
+	any.SetLabels(labels)
+	any.adjustment = n.Adjustment() + a.Adjustment()
+	any.Cost = (n.TotalCost() - n.Adjustment()) + (a.TotalCost() - a.Adjustment())
+
+	return any
+}
+
+func (n *Node) add(that *Node) {
+	if n == nil {
+		n = that
+		return
+	}
+
+	props := n.Properties().Merge(that.Properties())
+	labels := n.Labels().Merge(that.Labels())
+	n.SetProperties(props)
+	n.SetLabels(labels)
+
+	if n.NodeType != that.NodeType {
+		n.NodeType = ""
+	}
+
+	start := n.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := n.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := n.Window().Expand(that.Window())
+	n.start = start
+	n.end = end
+	n.window = window
+
+	// Order of operations for node costs is:
+	//   Discount(CPU + RAM) + GPU + Adjustment
+
+	// Combining discounts, then involves weighting each discount by each
+	// respective (CPU + RAM) cost. Combining preemptible, on the other
+	// hand, is done with all three (but not Adjustment, which can change
+	// without triggering a re-computation of Preemtible).
+
+	disc := (n.CPUCost+n.RAMCost)*(1.0-n.Discount) + (that.CPUCost+that.RAMCost)*(1.0-that.Discount)
+	nonDisc := (n.CPUCost + n.RAMCost) + (that.CPUCost + that.RAMCost)
+	if nonDisc > 0 {
+		n.Discount = 1.0 - (disc / nonDisc)
+	} else {
+		n.Discount = (n.Discount + that.Discount) / 2.0
+	}
+
+	nNoAdj := n.TotalCost() - n.Adjustment()
+	thatNoAdj := that.TotalCost() - that.Adjustment()
+	if (nNoAdj + thatNoAdj) > 0 {
+		n.Preemptible = (nNoAdj*n.Preemptible + thatNoAdj*that.Preemptible) / (nNoAdj + thatNoAdj)
+	} else {
+		n.Preemptible = (n.Preemptible + that.Preemptible) / 2.0
+	}
+
+	totalCPUCost := n.CPUCost + that.CPUCost
+	if totalCPUCost > 0.0 {
+		n.CPUBreakdown.Idle = (n.CPUBreakdown.Idle*n.CPUCost + that.CPUBreakdown.Idle*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.Other = (n.CPUBreakdown.Other*n.CPUCost + that.CPUBreakdown.Other*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.System = (n.CPUBreakdown.System*n.CPUCost + that.CPUBreakdown.System*that.CPUCost) / totalCPUCost
+		n.CPUBreakdown.User = (n.CPUBreakdown.User*n.CPUCost + that.CPUBreakdown.User*that.CPUCost) / totalCPUCost
+	}
+
+	totalRAMCost := n.RAMCost + that.RAMCost
+	if totalRAMCost > 0.0 {
+		n.RAMBreakdown.Idle = (n.RAMBreakdown.Idle*n.RAMCost + that.RAMBreakdown.Idle*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.Other = (n.RAMBreakdown.Other*n.RAMCost + that.RAMBreakdown.Other*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.System = (n.RAMBreakdown.System*n.RAMCost + that.RAMBreakdown.System*that.RAMCost) / totalRAMCost
+		n.RAMBreakdown.User = (n.RAMBreakdown.User*n.RAMCost + that.RAMBreakdown.User*that.RAMCost) / totalRAMCost
+	}
+
+	n.CPUCoreHours += that.CPUCoreHours
+	n.RAMByteHours += that.RAMByteHours
+
+	n.CPUCost += that.CPUCost
+	n.GPUCost += that.GPUCost
+	n.RAMCost += that.RAMCost
+	n.adjustment += that.adjustment
+}
+
// Clone returns a deep copy of the given Node. A nil Node clones to nil.
func (n *Node) Clone() Asset {
	if n == nil {
		return nil
	}

	return &Node{
		// Reference-typed fields are cloned; value fields are copied.
		properties:   n.properties.Clone(),
		labels:       n.labels.Clone(),
		start:        n.start,
		end:          n.end,
		window:       n.window.Clone(),
		adjustment:   n.adjustment,
		NodeType:     n.NodeType,
		CPUCoreHours: n.CPUCoreHours,
		RAMByteHours: n.RAMByteHours,
		CPUBreakdown: n.CPUBreakdown.Clone(),
		RAMBreakdown: n.RAMBreakdown.Clone(),
		CPUCost:      n.CPUCost,
		GPUCost:      n.GPUCost,
		RAMCost:      n.RAMCost,
		Preemptible:  n.Preemptible,
		Discount:     n.Discount,
	}
}
+
// Equal returns true if the two Assets match exactly: same concrete type
// (*Node) and identical fields, including float cost fields compared with ==.
func (n *Node) Equal(a Asset) bool {
	that, ok := a.(*Node)
	if !ok {
		return false
	}

	if !n.Labels().Equal(that.Labels()) {
		return false
	}
	if !n.Properties().Equal(that.Properties()) {
		return false
	}

	if !n.Start().Equal(that.Start()) {
		return false
	}
	if !n.End().Equal(that.End()) {
		return false
	}
	if !n.window.Equal(that.window) {
		return false
	}

	if n.adjustment != that.adjustment {
		return false
	}

	if n.NodeType != that.NodeType {
		return false
	}
	if n.CPUCoreHours != that.CPUCoreHours {
		return false
	}
	if n.RAMByteHours != that.RAMByteHours {
		return false
	}
	if !n.CPUBreakdown.Equal(that.CPUBreakdown) {
		return false
	}
	if !n.RAMBreakdown.Equal(that.RAMBreakdown) {
		return false
	}
	if n.CPUCost != that.CPUCost {
		return false
	}
	if n.GPUCost != that.GPUCost {
		return false
	}
	if n.RAMCost != that.RAMCost {
		return false
	}
	if n.Discount != that.Discount {
		return false
	}
	if n.Preemptible != that.Preemptible {
		return false
	}

	return true
}
+
// MarshalJSON implements the json.Marshaler interface. Fields are encoded
// by hand (rather than with struct tags) so that the key order and the
// derived fields (minutes, cpuCores, ramBytes, totalCost) are stable.
func (n *Node) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncodeString(buffer, "type", n.Type().String(), ",")
	jsonEncode(buffer, "properties", n.Properties(), ",")
	jsonEncode(buffer, "labels", n.Labels(), ",")
	jsonEncodeString(buffer, "window", n.Window().String(), ",")
	jsonEncodeString(buffer, "start", n.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", n.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", n.Minutes(), ",")
	jsonEncodeString(buffer, "nodeType", n.NodeType, ",")
	jsonEncodeFloat64(buffer, "cpuCores", n.CPUCores(), ",")
	jsonEncodeFloat64(buffer, "ramBytes", n.RAMBytes(), ",")
	jsonEncodeFloat64(buffer, "cpuCoreHours", n.CPUCoreHours, ",")
	jsonEncodeFloat64(buffer, "ramByteHours", n.RAMByteHours, ",")
	jsonEncode(buffer, "cpuBreakdown", n.CPUBreakdown, ",")
	jsonEncode(buffer, "ramBreakdown", n.RAMBreakdown, ",")
	jsonEncodeFloat64(buffer, "preemptible", n.Preemptible, ",")
	jsonEncodeFloat64(buffer, "discount", n.Discount, ",")
	jsonEncodeFloat64(buffer, "cpuCost", n.CPUCost, ",")
	jsonEncodeFloat64(buffer, "gpuCost", n.GPUCost, ",")
	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment(), ",")
	// Last entry omits the trailing comma to close the object legally.
	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}
+
// String implements fmt.Stringer
func (n *Node) String() string {
	return toString(n)
}

// IsPreemptible returns true if the node is 100% preemptible. It's possible
// to be "partially preemptible" by adding a preemptible node with a
// non-preemptible node, in which case Preemptible is a cost-weighted
// fraction in (0, 1) and this returns false.
func (n *Node) IsPreemptible() bool {
	return n.Preemptible == 1.0
}
+
+// CPUCores returns the number of cores belonging to the node. This could be
+// fractional because it's the number of core*hours divided by the number of
+// hours running; e.g. the sum of a 4-core node running for the first 10 hours
+// and a 3-core node running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (4*10 + 3*20) / 24 = 4.167 cores
+// However, any number of cores running for the full span of a window will
+// report the actual number of cores of the static node; e.g. the above
+// scenario for one entire 24-hour window:
+//   (4*24 + 3*24) / 24 = (4 + 3) = 7 cores
+func (n *Node) CPUCores() float64 {
+	// [core*hr]*([min/hr]*[1/min]) = [core*hr]/[hr] = core
+	return n.CPUCoreHours * (60.0 / n.Minutes())
+}
+
+// RAMBytes returns the amount of RAM belonging to the node. This could be
+// fractional because it's the number of byte*hours divided by the number of
+// hours running; e.g. the sum of a 12GiB-RAM node running for the first 10 hours
+// and a 16GiB-RAM node running for the last 20 hours of the same 24-hour window
+// would produce:
+//   (12*10 + 16*20) / 24 = 18.333GiB RAM
+// However, any number of cores running for the full span of a window will
+// report the actual number of cores of the static node; e.g. the above
+// scenario for one entire 24-hour window:
+//   (12*24 + 16*24) / 24 = (12 + 16) = 28 cores
+func (n *Node) RAMBytes() float64 {
+	// [b*hr]*([min/hr]*[1/min]) = [b*hr]/[hr] = b
+	return n.RAMByteHours * (60.0 / n.Minutes())
+}
+
// LoadBalancer is an Asset representing a single load balancer in a cluster
// TODO: add GB of ingress processed, numForwardingRules once we start recording those to prometheus metric
type LoadBalancer struct {
	properties *AssetProperties // identifying metadata (name, cluster, provider ID, ...)
	labels     AssetLabels      // arbitrary key/value labels
	start      time.Time        // precise start of the asset within the window
	end        time.Time        // precise end of the asset within the window
	window     Window           // the query window this asset belongs to
	adjustment float64          // reconciliation adjustment applied on top of Cost
	Cost       float64          // pre-adjustment cost
}

// NewLoadBalancer instantiates and returns a new LoadBalancer with the given
// identifying properties, categorized under NetworkCategory / KubernetesService.
func NewLoadBalancer(name, cluster, providerID string, start, end time.Time, window Window) *LoadBalancer {
	properties := &AssetProperties{
		Category:   NetworkCategory,
		Name:       name,
		Cluster:    cluster,
		ProviderID: providerID,
		Service:    KubernetesService,
	}

	return &LoadBalancer{
		properties: properties,
		labels:     AssetLabels{},
		start:      start,
		end:        end,
		window:     window,
	}
}
+
// Type returns the AssetType of the Asset
func (lb *LoadBalancer) Type() AssetType {
	return LoadBalancerAssetType
}

// Properties returns the Asset's properties
func (lb *LoadBalancer) Properties() *AssetProperties {
	return lb.properties
}

// SetProperties sets the Asset's properties
func (lb *LoadBalancer) SetProperties(props *AssetProperties) {
	lb.properties = props
}

// Labels returns the Asset's labels
func (lb *LoadBalancer) Labels() AssetLabels {
	return lb.labels
}

// SetLabels sets the Asset's labels
func (lb *LoadBalancer) SetLabels(labels AssetLabels) {
	lb.labels = labels
}

// Adjustment returns the Asset's cost adjustment
func (lb *LoadBalancer) Adjustment() float64 {
	return lb.adjustment
}

// SetAdjustment sets the Asset's cost adjustment
func (lb *LoadBalancer) SetAdjustment(adj float64) {
	lb.adjustment = adj
}

// TotalCost returns the total cost of the Asset, i.e. Cost plus adjustment
func (lb *LoadBalancer) TotalCost() float64 {
	return lb.Cost + lb.adjustment
}

// Start returns the precise start point of the Asset within the window
func (lb *LoadBalancer) Start() time.Time {
	return lb.start
}

// End returns the precise end point of the Asset within the window
func (lb *LoadBalancer) End() time.Time {
	return lb.end
}

// Minutes returns the number of minutes the Asset ran within the window
func (lb *LoadBalancer) Minutes() float64 {
	return lb.end.Sub(lb.start).Minutes()
}

// Window returns the window within which the Asset ran
func (lb *LoadBalancer) Window() Window {
	return lb.window
}

// ExpandWindow expands the Asset's window by the given window
func (lb *LoadBalancer) ExpandWindow(w Window) {
	lb.window = lb.window.Expand(w)
}
+
+// SetStartEnd sets the Asset's Start and End fields
+func (lb *LoadBalancer) SetStartEnd(start, end time.Time) {
+	if lb.Window().Contains(start) {
+		lb.start = start
+	} else {
+		log.Warningf("Disk.SetStartEnd: start %s not in %s", start, lb.Window())
+	}
+
+	if lb.Window().Contains(end) {
+		lb.end = end
+	} else {
+		log.Warningf("Disk.SetStartEnd: end %s not in %s", end, lb.Window())
+	}
+}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
func (lb *LoadBalancer) Add(a Asset) Asset {
	// LoadBalancer + LoadBalancer = LoadBalancer: clone and merge in place.
	if that, ok := a.(*LoadBalancer); ok {
		this := lb.Clone().(*LoadBalancer)
		this.add(that)
		return this
	}

	// Merging across asset types: keep only what both share.
	props := lb.Properties().Merge(a.Properties())
	labels := lb.Labels().Merge(a.Labels())

	// Expand to min(start), max(end) across the two assets.
	start := lb.Start()
	if a.Start().Before(start) {
		start = a.Start()
	}
	end := lb.End()
	if a.End().After(end) {
		end = a.End()
	}
	window := lb.Window().Expand(a.Window())

	// LoadBalancer + !LoadBalancer = Any
	any := NewAsset(start, end, window)
	any.SetProperties(props)
	any.SetLabels(labels)
	// Sum adjustments separately from pre-adjustment costs so the result's
	// TotalCost equals the sum of both TotalCosts.
	any.adjustment = lb.Adjustment() + a.Adjustment()
	any.Cost = (lb.TotalCost() - lb.Adjustment()) + (a.TotalCost() - a.Adjustment())

	return any
}
+
+func (lb *LoadBalancer) add(that *LoadBalancer) {
+	if lb == nil {
+		lb = that
+		return
+	}
+
+	props := lb.Properties().Merge(that.Properties())
+	labels := lb.Labels().Merge(that.Labels())
+	lb.SetProperties(props)
+	lb.SetLabels(labels)
+
+	start := lb.Start()
+	if that.Start().Before(start) {
+		start = that.Start()
+	}
+	end := lb.End()
+	if that.End().After(end) {
+		end = that.End()
+	}
+	window := lb.Window().Expand(that.Window())
+	lb.start = start
+	lb.end = end
+	lb.window = window
+
+	lb.Cost += that.Cost
+	lb.adjustment += that.adjustment
+}
+
+// Clone returns a cloned instance of the given Asset
+func (lb *LoadBalancer) Clone() Asset {
+	return &LoadBalancer{
+		properties: lb.properties.Clone(),
+		labels:     lb.labels.Clone(),
+		start:      lb.start,
+		end:        lb.end,
+		window:     lb.window.Clone(),
+		adjustment: lb.adjustment,
+		Cost:       lb.Cost,
+	}
+}
+
// Equal returns true if the two Assets match precisely: same concrete type
// (*LoadBalancer) and identical fields.
func (lb *LoadBalancer) Equal(a Asset) bool {
	that, ok := a.(*LoadBalancer)
	if !ok {
		return false
	}

	if !lb.Labels().Equal(that.Labels()) {
		return false
	}
	if !lb.Properties().Equal(that.Properties()) {
		return false
	}

	if !lb.Start().Equal(that.Start()) {
		return false
	}
	if !lb.End().Equal(that.End()) {
		return false
	}
	if !lb.window.Equal(that.window) {
		return false
	}

	if lb.adjustment != that.adjustment {
		return false
	}
	if lb.Cost != that.Cost {
		return false
	}

	return true
}
+
// MarshalJSON implements the json.Marshaler interface. Fields are encoded
// by hand so key order and derived fields (minutes, totalCost) are stable.
func (lb *LoadBalancer) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString("{")
	jsonEncodeString(buffer, "type", lb.Type().String(), ",")
	jsonEncode(buffer, "properties", lb.Properties(), ",")
	jsonEncode(buffer, "labels", lb.Labels(), ",")
	jsonEncodeString(buffer, "window", lb.Window().String(), ",")
	jsonEncodeString(buffer, "start", lb.Start().Format(timeFmt), ",")
	jsonEncodeString(buffer, "end", lb.End().Format(timeFmt), ",")
	jsonEncodeFloat64(buffer, "minutes", lb.Minutes(), ",")
	jsonEncodeFloat64(buffer, "adjustment", lb.Adjustment(), ",")
	// Last entry omits the trailing comma to close the object legally.
	jsonEncodeFloat64(buffer, "totalCost", lb.TotalCost(), "")
	buffer.WriteString("}")
	return buffer.Bytes(), nil
}

// String implements fmt.Stringer
func (lb *LoadBalancer) String() string {
	return toString(lb)
}
+
// SharedAsset is an Asset representing a shared cost. It has no precise
// start/end of its own; it spans its entire window, and it carries no
// adjustment.
type SharedAsset struct {
	properties *AssetProperties
	labels     AssetLabels
	window     Window
	Cost       float64
}

// NewSharedAsset creates and returns a new SharedAsset with the given name,
// spanning the given window.
func NewSharedAsset(name string, window Window) *SharedAsset {
	properties := &AssetProperties{
		Name:     name,
		Category: SharedCategory,
		// NOTE(review): OtherCategory (a category constant) is assigned to
		// the Service field here — confirm this is intentional and not
		// meant to be an "other service" constant.
		Service: OtherCategory,
	}

	return &SharedAsset{
		properties: properties,
		labels:     AssetLabels{},
		window:     window.Clone(),
	}
}
+
// Type returns the AssetType of the Asset
func (sa *SharedAsset) Type() AssetType {
	return SharedAssetType
}

// Properties returns the Asset's properties
func (sa *SharedAsset) Properties() *AssetProperties {
	return sa.properties
}

// SetProperties sets the Asset's properties
func (sa *SharedAsset) SetProperties(props *AssetProperties) {
	sa.properties = props
}

// Labels returns the Asset's labels
func (sa *SharedAsset) Labels() AssetLabels {
	return sa.labels
}

// SetLabels sets the Asset's labels
func (sa *SharedAsset) SetLabels(labels AssetLabels) {
	sa.labels = labels
}

// Adjustment is not relevant to SharedAsset, but required to implement
// Asset; it always returns 0.0.
func (sa *SharedAsset) Adjustment() float64 {
	return 0.0
}

// SetAdjustment is not relevant to SharedAsset, but required to implement
// Asset; it is a no-op.
func (sa *SharedAsset) SetAdjustment(float64) {
	return
}

// TotalCost returns the Asset's total cost (no adjustment is ever applied)
func (sa *SharedAsset) TotalCost() float64 {
	return sa.Cost
}

// Start returns the start time of the Asset, which is its window's start
func (sa *SharedAsset) Start() time.Time {
	return *sa.window.start
}

// End returns the end time of the Asset, which is its window's end
func (sa *SharedAsset) End() time.Time {
	return *sa.window.end
}

// Minutes returns the number of minutes the SharedAsset ran within the window
func (sa *SharedAsset) Minutes() float64 {
	return sa.window.Minutes()
}

// Window returns the window within the SharedAsset ran
func (sa *SharedAsset) Window() Window {
	return sa.window
}

// ExpandWindow expands the Asset's window
func (sa *SharedAsset) ExpandWindow(w Window) {
	sa.window = sa.window.Expand(w)
}

// SetStartEnd sets the Asset's Start and End fields; not applicable to a
// SharedAsset, whose start/end are defined by its window, so it is a no-op.
func (sa *SharedAsset) SetStartEnd(start, end time.Time) {
	return
}
+
// Add sums the Asset with the given Asset to produce a new Asset, maintaining
// as much relevant information as possible (i.e. type, properties, labels).
func (sa *SharedAsset) Add(a Asset) Asset {
	// SharedAsset + SharedAsset = SharedAsset: clone and merge in place.
	if that, ok := a.(*SharedAsset); ok {
		this := sa.Clone().(*SharedAsset)
		this.add(that)
		return this
	}

	// Merging across asset types: keep only what both share.
	props := sa.Properties().Merge(a.Properties())
	labels := sa.Labels().Merge(a.Labels())

	// Expand to min(start), max(end) across the two assets.
	start := sa.Start()
	if a.Start().Before(start) {
		start = a.Start()
	}
	end := sa.End()
	if a.End().After(end) {
		end = a.End()
	}
	window := sa.Window().Expand(a.Window())

	// SharedAsset + !SharedAsset = Any
	any := NewAsset(start, end, window)
	any.SetProperties(props)
	any.SetLabels(labels)
	// SharedAsset.Adjustment() is always 0.0, so only a's adjustment
	// contributes here.
	any.adjustment = sa.Adjustment() + a.Adjustment()
	any.Cost = (sa.TotalCost() - sa.Adjustment()) + (a.TotalCost() - a.Adjustment())

	return any
}
+
+func (sa *SharedAsset) add(that *SharedAsset) {
+	if sa == nil {
+		sa = that
+		return
+	}
+
+	props := sa.Properties().Merge(that.Properties())
+	labels := sa.Labels().Merge(that.Labels())
+	sa.SetProperties(props)
+	sa.SetLabels(labels)
+
+	window := sa.Window().Expand(that.Window())
+	sa.window = window
+
+	sa.Cost += that.Cost
+}
+
// Clone returns a deep copy of the given SharedAsset. A nil SharedAsset
// clones to nil.
func (sa *SharedAsset) Clone() Asset {
	if sa == nil {
		return nil
	}

	return &SharedAsset{
		properties: sa.properties.Clone(),
		labels:     sa.labels.Clone(),
		window:     sa.window.Clone(),
		Cost:       sa.Cost,
	}
}
+
// Equal returns true if the two Assets are exact matches: same concrete type
// (*SharedAsset) and identical fields.
func (sa *SharedAsset) Equal(a Asset) bool {
	that, ok := a.(*SharedAsset)
	if !ok {
		return false
	}

	if !sa.Labels().Equal(that.Labels()) {
		return false
	}
	if !sa.Properties().Equal(that.Properties()) {
		return false
	}

	if !sa.Start().Equal(that.Start()) {
		return false
	}
	if !sa.End().Equal(that.End()) {
		return false
	}
	if !sa.window.Equal(that.window) {
		return false
	}

	if sa.Cost != that.Cost {
		return false
	}

	return true
}
+
+// MarshalJSON implements json.Marshaler
+func (sa *SharedAsset) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	jsonEncodeString(buffer, "type", sa.Type().String(), ",")
+	jsonEncode(buffer, "properties", sa.Properties(), ",")
+	jsonEncode(buffer, "labels", sa.Labels(), ",")
+	jsonEncode(buffer, "properties", sa.Properties(), ",")
+	jsonEncode(buffer, "labels", sa.Labels(), ",")
+	jsonEncodeString(buffer, "window", sa.Window().String(), ",")
+	jsonEncodeString(buffer, "start", sa.Start().Format(timeFmt), ",")
+	jsonEncodeString(buffer, "end", sa.End().Format(timeFmt), ",")
+	jsonEncodeFloat64(buffer, "minutes", sa.Minutes(), ",")
+	jsonEncodeFloat64(buffer, "totalCost", sa.TotalCost(), "")
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
// String implements fmt.Stringer
func (sa *SharedAsset) String() string {
	return toString(sa)
}
+
// AssetSet stores a set of Assets, each with a unique name, that share
// a window. An AssetSet is mutable, so treat it like a threadsafe map.
type AssetSet struct {
	sync.RWMutex
	assets map[string]Asset // keyed by key(asset, props)
	props  []AssetProperty  // the properties this set is aggregated by, if any
	Window Window
}

// NewAssetSet instantiates a new AssetSet spanning the given [start, end)
// and, optionally, inserts the given list of Assets.
func NewAssetSet(start, end time.Time, assets ...Asset) *AssetSet {
	as := &AssetSet{
		assets: map[string]Asset{},
		Window: NewWindow(&start, &end),
	}

	for _, a := range assets {
		as.Insert(a)
	}

	return as
}
+
// AggregateBy aggregates the Assets in the AssetSet by the given list of
// AssetProperties, such that each asset is binned by a key determined by its
// relevant property values. Shared hourly costs from opts are inserted as
// SharedAssets, and assets failing any FilterFunc are dropped before
// aggregation. Mutates the receiver.
func (as *AssetSet) AggregateBy(props []AssetProperty, opts *AssetAggregationOptions) error {
	if opts == nil {
		opts = &AssetAggregationOptions{}
	}

	// Nothing to aggregate and nothing shared to insert: no-op.
	if as.IsEmpty() && len(opts.SharedHourlyCosts) == 0 {
		return nil
	}

	as.Lock()
	defer as.Unlock()

	// aggSet re-keys assets by the requested props as they are inserted.
	aggSet := NewAssetSet(as.Start(), as.End())
	aggSet.props = props

	// Compute hours of the given AssetSet, and if it ends in the future,
	// adjust the hours accordingly (diff is negative in that case).
	hours := as.Window.Minutes() / 60.0
	diff := time.Now().Sub(as.End())
	if diff < 0.0 {
		hours += diff.Hours()
	}

	// Insert a shared asset for each shared cost
	for name, hourlyCost := range opts.SharedHourlyCosts {
		sa := NewSharedAsset(name, as.Window.Clone())
		sa.Cost = hourlyCost * hours

		aggSet.Insert(sa)
	}

	// Delete the Assets that don't pass each filter.
	// (Deleting from a map during range is safe in Go.)
	for _, ff := range opts.FilterFuncs {
		for key, asset := range as.assets {
			if !ff(asset) {
				delete(as.assets, key)
			}
		}
	}

	// Insert each asset into the new set, which will be keyed by the props
	// on aggSet, resulting in aggregation.
	for _, asset := range as.assets {
		aggSet.Insert(asset)
	}

	// Assign the aggregated values back to the original set
	as.assets = aggSet.assets
	as.props = props

	return nil
}
+
// Clone returns a new AssetSet with a deep copy of the given
// AssetSet's assets, props, and window. A nil AssetSet clones to nil.
func (as *AssetSet) Clone() *AssetSet {
	if as == nil {
		return nil
	}

	as.RLock()
	defer as.RUnlock()

	assets := map[string]Asset{}
	for k, v := range as.assets {
		assets[k] = v.Clone()
	}

	// props stays nil if it was nil, so nil-ness round-trips.
	var props []AssetProperty
	if as.props != nil {
		props = []AssetProperty{}
		for _, p := range as.props {
			props = append(props, p)
		}
	}

	s := as.Start()
	e := as.End()

	return &AssetSet{
		Window: NewWindow(&s, &e),
		assets: assets,
		props:  props,
	}
}
+
// Each invokes the given function for each Asset in the set.
// NOTE(review): no lock is held during iteration — the caller must ensure
// the set is not concurrently modified; confirm against call sites.
func (as *AssetSet) Each(f func(string, Asset)) {
	if as == nil {
		return
	}

	for k, a := range as.assets {
		f(k, a)
	}
}

// End returns the end time of the AssetSet's window
func (as *AssetSet) End() time.Time {
	return *as.Window.End()
}
+
// FindMatch attempts to find a match in the AssetSet for the given Asset on
// the provided properties. If a match is not found, FindMatch
// returns nil and a Not Found error.
func (as *AssetSet) FindMatch(query Asset, props []AssetProperty) (Asset, error) {
	as.RLock()
	defer as.RUnlock()

	// Linear scan: compare each asset's key over props to the query's key.
	matchKey := key(query, props)
	for _, asset := range as.assets {
		if key(asset, props) == matchKey {
			return asset, nil
		}
	}

	return nil, fmt.Errorf("Asset not found to match %s on %v", query, props)
}
+
// ReconciliationMatch attempts to find an exact match in the AssetSet on
// (Category, ProviderID). If a match is found, it returns the Asset with the
// intent to adjust it, along with true. If no such match exists, it attempts
// to find one on only (ProviderID). If that match is found, it returns the
// Asset with the intent to insert the associated Cloud cost, along with
// false.
func (as *AssetSet) ReconciliationMatch(query Asset) (Asset, bool, error) {
	as.RLock()
	defer as.RUnlock()

	// Full match means matching on (Category, ProviderID)
	fullMatchProps := []AssetProperty{AssetCategoryProp, AssetProviderIDProp}
	fullMatchKey := key(query, fullMatchProps)

	// Partial match means matching only on (ProviderID)
	providerIDMatchProps := []AssetProperty{AssetProviderIDProp}
	providerIDMatchKey := key(query, providerIDMatchProps)

	var providerIDMatch Asset
	for _, asset := range as.assets {
		if key(asset, fullMatchProps) == fullMatchKey {
			return asset, true, nil
		}
		if key(asset, providerIDMatchProps) == providerIDMatchKey {
			// Found a partial match. Save it until after all other options
			// have been checked for full matches.
			providerIDMatch = asset
		}
	}

	// No full match was found, so return partial match, if found.
	if providerIDMatch != nil {
		return providerIDMatch, false, nil
	}

	return nil, false, fmt.Errorf("Asset not found to match %s", query)
}
+
// Get returns the Asset in the AssetSet at the given key, or nil and false
// if no Asset exists for the given key
func (as *AssetSet) Get(key string) (Asset, bool) {
	as.RLock()
	defer as.RUnlock()

	if a, ok := as.assets[key]; ok {
		return a, true
	}
	return nil, false
}
+
+// Insert inserts the given Asset into the AssetSet, using the AssetSet's
+// configured properties to determine the key under which the Asset will
+// be inserted.
+func (as *AssetSet) Insert(asset Asset) error {
+	if as.IsEmpty() {
+		as.Lock()
+		as.assets = map[string]Asset{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// Determine key into which to Insert the Asset.
+	k := key(asset, as.props)
+
+	// Add the given Asset to the existing entry, if there is one;
+	// otherwise just set directly into assets
+	if _, ok := as.assets[k]; !ok {
+		as.assets[k] = asset
+	} else {
+		as.assets[k] = as.assets[k].Add(asset)
+	}
+
+	// Expand the window, just to be safe. It's possible that the asset will
+	// be set into the map without expanding it to the AssetSet's window.
+	as.assets[k].ExpandWindow(as.Window)
+
+	return nil
+}
+
+// IsEmpty returns true if the AssetSet is nil, or if it contains
+// zero assets.
+func (as *AssetSet) IsEmpty() bool {
+	if as == nil || len(as.assets) == 0 {
+		return true
+	}
+
+	as.RLock()
+	defer as.RUnlock()
+	return as.assets == nil || len(as.assets) == 0
+}
+
// Length returns the number of Assets in the set; 0 for a nil AssetSet.
func (as *AssetSet) Length() int {
	if as == nil {
		return 0
	}

	as.RLock()
	defer as.RUnlock()
	return len(as.assets)
}

// Map clones and returns a map of the AssetSet's Assets. The clone means
// callers may mutate the result without affecting the set.
func (as *AssetSet) Map() map[string]Asset {
	if as.IsEmpty() {
		return map[string]Asset{}
	}

	return as.Clone().assets
}
+
// MarshalJSON JSON-encodes the AssetSet as its underlying key-to-Asset map,
// holding the read lock for the duration of the encode.
func (as *AssetSet) MarshalJSON() ([]byte, error) {
	as.RLock()
	defer as.RUnlock()
	return json.Marshal(as.assets)
}
+
+func (as *AssetSet) Set(asset Asset, props []AssetProperty) {
+	if as.IsEmpty() {
+		as.Lock()
+		as.assets = map[string]Asset{}
+		as.Unlock()
+	}
+
+	as.Lock()
+	defer as.Unlock()
+
+	// Expand the window to match the AssetSet, then set it
+	asset.ExpandWindow(as.Window)
+	as.assets[key(asset, props)] = asset
+}
+
// Start returns the start time of the AssetSet's window.
func (as *AssetSet) Start() time.Time {
	return *as.Window.Start()
}
+
+func (as *AssetSet) TotalCost() float64 {
+	tc := 0.0
+
+	as.Lock()
+	defer as.Unlock()
+
+	for _, a := range as.assets {
+		tc += a.TotalCost()
+	}
+
+	return tc
+}
+
// UTCOffset returns the timezone offset of the set's start time from UTC,
// as a Duration.
func (as *AssetSet) UTCOffset() time.Duration {
	_, zone := as.Start().Zone()
	return time.Duration(zone) * time.Second
}
+
// accumulate combines the receiver with that into a new AssetSet whose
// window spans both, inserting (and thereby summing) every asset from each.
// A nil argument on either side returns the other unchanged.
func (as *AssetSet) accumulate(that *AssetSet) (*AssetSet, error) {
	if as == nil {
		return that, nil
	}

	if that == nil {
		return as, nil
	}

	// In the case of an AssetSetRange with empty entries, we may end up with
	// an incoming as without props, even though we are trying to aggregate
	// by props. This handles that case, assigning the correct props.
	if !propsEqual(as.props, that.props) {
		if len(as.props) == 0 {
			as.props = that.props
		}
	}

	// Set start, end to min(start), max(end)
	start := as.Start()
	end := as.End()
	if that.Start().Before(start) {
		start = that.Start()
	}
	if that.End().After(end) {
		end = that.End()
	}

	if as.IsEmpty() && that.IsEmpty() {
		return NewAssetSet(start, end), nil
	}

	acc := NewAssetSet(start, end)
	acc.props = as.props

	// Both sets are read-locked for the duration; acc is local, so its
	// Inserts cannot deadlock against these locks.
	as.RLock()
	defer as.RUnlock()

	that.RLock()
	defer that.RUnlock()

	for _, asset := range as.assets {
		err := acc.Insert(asset)
		if err != nil {
			return nil, err
		}
	}

	for _, asset := range that.assets {
		err := acc.Insert(asset)
		if err != nil {
			return nil, err
		}
	}

	return acc, nil
}
+
// AssetSetRange is an ordered sequence of AssetSets, typically one per
// contiguous time step. Treat it like a threadsafe slice.
type AssetSetRange struct {
	sync.RWMutex
	assets []*AssetSet
}

// NewAssetSetRange instantiates a new AssetSetRange containing the given
// AssetSets, in order.
func NewAssetSetRange(assets ...*AssetSet) *AssetSetRange {
	return &AssetSetRange{
		assets: assets,
	}
}
+
// Accumulate sums each AssetSet in the given range, returning a single cumulative
// AssetSet for the entire range. Returns nil for an empty range.
func (asr *AssetSetRange) Accumulate() (*AssetSet, error) {
	var assetSet *AssetSet
	var err error

	asr.RLock()
	defer asr.RUnlock()

	// Fold left over the range; accumulate handles the initial nil set.
	for _, as := range asr.assets {
		assetSet, err = assetSet.accumulate(as)
		if err != nil {
			return nil, err
		}
	}

	return assetSet, nil
}
+
// AssetAggregationOptions configures AggregateBy: SharedHourlyCosts maps a
// name to an hourly cost to insert as a SharedAsset, and FilterFuncs drop
// any asset for which any func returns false.
type AssetAggregationOptions struct {
	SharedHourlyCosts map[string]float64
	FilterFuncs       []AssetMatchFunc
}
+
// AggregateBy aggregates each AssetSet in the range by the given props and
// options, mutating the range in place. Returns the first error encountered.
func (asr *AssetSetRange) AggregateBy(props []AssetProperty, opts *AssetAggregationOptions) error {
	aggRange := &AssetSetRange{assets: []*AssetSet{}}

	asr.Lock()
	defer asr.Unlock()

	for _, as := range asr.assets {
		// AggregateBy mutates as in place; it is re-appended for clarity.
		err := as.AggregateBy(props, opts)
		if err != nil {
			return err
		}

		aggRange.assets = append(aggRange.assets, as)
	}

	asr.assets = aggRange.assets

	return nil
}
+
// Append adds the given AssetSet to the end of the range.
func (asr *AssetSetRange) Append(that *AssetSet) {
	asr.Lock()
	defer asr.Unlock()
	asr.assets = append(asr.assets, that)
}

// Each invokes the given function for each AssetSet in the range.
// NOTE(review): no lock is held during iteration — the caller must ensure
// the range is not concurrently modified; confirm against call sites.
func (asr *AssetSetRange) Each(f func(int, *AssetSet)) {
	if asr == nil {
		return
	}

	for i, as := range asr.assets {
		f(i, as)
	}
}
+
+func (asr *AssetSetRange) Get(i int) (*AssetSet, error) {
+	if i < 0 || i >= len(asr.assets) {
+		return nil, fmt.Errorf("AssetSetRange: index out of range: %d", i)
+	}
+
+	asr.RLock()
+	defer asr.RUnlock()
+	return asr.assets[i], nil
+}
+
// Length returns the number of AssetSets in the range; 0 for a nil range.
func (asr *AssetSetRange) Length() int {
	if asr == nil || asr.assets == nil {
		return 0
	}

	asr.RLock()
	defer asr.RUnlock()
	return len(asr.assets)
}
+
+func (asr *AssetSetRange) MarshalJSON() ([]byte, error) {
+	asr.RLock()
+	asr.RUnlock()
+	return json.Marshal(asr.assets)
+}
+
// UTCOffset returns the UTC offset of the first AssetSet in the range, or
// zero if the range is empty.
func (asr *AssetSetRange) UTCOffset() time.Duration {
	if asr.Length() == 0 {
		return 0
	}

	as, err := asr.Get(0)
	if err != nil {
		return 0
	}
	return as.UTCOffset()
}
+
// Window returns the full window that the AssetSetRange spans, from the
// start of the first AssetSet to the end of the last one. Assumes the sets
// are stored in chronological order; returns an open window for an empty
// or nil range.
func (asr *AssetSetRange) Window() Window {
	if asr == nil || asr.Length() == 0 {
		return NewWindow(nil, nil)
	}

	start := asr.assets[0].Start()
	end := asr.assets[asr.Length()-1].End()

	return NewWindow(&start, &end)
}
+
+// TODO move everything below to a separate package
+
+func jsonEncodeFloat64(buffer *bytes.Buffer, name string, val float64, comma string) {
+	var encoding string
+	if math.IsNaN(val) {
+		encoding = fmt.Sprintf("\"%s\":null%s", name, comma)
+	} else {
+		encoding = fmt.Sprintf("\"%s\":%f%s", name, val, comma)
+	}
+
+	buffer.WriteString(encoding)
+}
+
+func jsonEncodeString(buffer *bytes.Buffer, name, val, comma string) {
+	buffer.WriteString(fmt.Sprintf("\"%s\":\"%s\"%s", name, val, comma))
+}
+
+func jsonEncode(buffer *bytes.Buffer, name string, obj interface{}, comma string) {
+	buffer.WriteString(fmt.Sprintf("\"%s\":", name))
+	if bytes, err := json.Marshal(obj); err != nil {
+		buffer.WriteString("null")
+	} else {
+		buffer.Write(bytes)
+	}
+	buffer.WriteString(comma)
+}

+ 1013 - 0
pkg/kubecost/asset_test.go

@@ -0,0 +1,1013 @@
+package kubecost
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"testing"
+	"time"
+)
+
+// Three consecutive one-day test windows starting 2020-01-01 UTC.
+var start1 = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
+var start2 = start1.Add(day)
+var start3 = start2.Add(day)
+// Fix: start4 must be one day after start3. The original used
+// start2.Add(day), making start4 == start3 and windows[2] zero-length.
+var start4 = start3.Add(day)
+
+var windows = []Window{
+	NewWindow(&start1, &start2),
+	NewWindow(&start2, &start3),
+	NewWindow(&start3, &start4),
+}
+
+// delta is the tolerance for approximate float64 comparisons in these tests.
+const delta = 0.00001
+// gb is one gibibyte (2^30) in bytes.
+const gb = 1024 * 1024 * 1024
+
+// approx reports whether a and b differ by strictly less than delta.
+func approx(a, b, delta float64) bool {
+	return math.Abs(a-b) < delta
+}
+
+func TestAny_Add(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	any2 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any2.SetProperties(&AssetProperties{
+		Name:       "any2",
+		Cluster:    "cluster1",
+		ProviderID: "any2",
+	})
+	any2.Cost = 4.0
+	any2.SetAdjustment(1.0)
+
+	any3 := any1.Add(any2)
+
+	// Check that the sums and properties are correct
+	if any3.TotalCost() != 15.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 15.0, any3.TotalCost())
+	}
+	if any3.Adjustment() != 2.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 2.0, any3.Adjustment())
+	}
+	if any3.Properties().Cluster != "cluster1" {
+		t.Fatalf("Any.Add: expected %s; got %s", "cluster1", any3.Properties().Cluster)
+	}
+	if any3.Type() != AnyAssetType {
+		t.Fatalf("Any.Add: expected %s; got %s", AnyAssetType, any3.Type())
+	}
+	if any3.Properties().ProviderID != "" {
+		t.Fatalf("Any.Add: expected %s; got %s", "", any3.Properties().ProviderID)
+	}
+	if any3.Properties().Name != "" {
+		t.Fatalf("Any.Add: expected %s; got %s", "", any3.Properties().Name)
+	}
+
+	// Check that the original assets are unchanged
+	if any1.TotalCost() != 10.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 10.0, any1.TotalCost())
+	}
+	if any1.Adjustment() != 1.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 1.0, any1.Adjustment())
+	}
+	if any2.TotalCost() != 5.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 5.0, any2.TotalCost())
+	}
+	if any2.Adjustment() != 1.0 {
+		t.Fatalf("Any.Add: expected %f; got %f", 1.0, any2.Adjustment())
+	}
+}
+
+func TestAny_Clone(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	any2 := any1.Clone()
+
+	any1.Cost = 18.0
+	any1.SetAdjustment(2.0)
+
+	// any2 should match any1, even after mutating any1
+	if any2.TotalCost() != 10.0 {
+		t.Fatalf("Any.Clone: expected %f; got %f", 10.0, any2.TotalCost())
+	}
+	if any2.Adjustment() != 1.0 {
+		t.Fatalf("Any.Clone: expected %f; got %f", 1.0, any2.Adjustment())
+	}
+}
+
+func TestAny_MarshalJSON(t *testing.T) {
+	any1 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any1.SetProperties(&AssetProperties{
+		Name:       "any1",
+		Cluster:    "cluster1",
+		ProviderID: "any1",
+	})
+	any1.Cost = 9.0
+	any1.SetAdjustment(1.0)
+
+	_, err := json.Marshal(any1)
+	if err != nil {
+		t.Fatalf("Any.MarshalJSON: unexpected error: %s", err)
+	}
+
+	any2 := NewAsset(*windows[0].start, *windows[0].end, windows[0])
+	any2.SetProperties(&AssetProperties{
+		Name:       "any2",
+		Cluster:    "cluster1",
+		ProviderID: "any2",
+	})
+	any2.Cost = math.NaN()
+	any2.SetAdjustment(1.0)
+
+	_, err = json.Marshal(any2)
+	if err != nil {
+		t.Fatalf("Any.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+// TestDisk_Add covers aggregation (two disks, one window: bytes and Local
+// combine) and accumulation (one disk, two windows: bytes do not add).
+// Review fixes: Type-mismatch messages now print DiskAssetType (they
+// printed AnyAssetType), and the accumulated-bytes failure message reports
+// diskAT (it read the unrelated diskT).
+func TestDisk_Add(t *testing.T) {
+	// 1. aggregate: add size, local
+	// 2. accumulate: don't add size, local
+
+	hours := windows[0].Duration().Hours()
+
+	// Aggregate: two disks, one window
+	disk1 := NewDisk("disk1", "cluster1", "disk1", *windows[0].start, *windows[0].end, windows[0])
+	disk1.ByteHours = 100.0 * gb * hours
+	disk1.Cost = 9.0
+	disk1.SetAdjustment(1.0)
+
+	if disk1.Bytes() != 100.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 100.0*gb, disk1.Bytes())
+	}
+
+	disk2 := NewDisk("disk2", "cluster1", "disk2", *windows[0].start, *windows[0].end, windows[0])
+	disk2.ByteHours = 60.0 * gb * hours
+	disk2.Cost = 4.0
+	disk2.Local = 1.0
+	disk2.SetAdjustment(1.0)
+
+	if disk2.Bytes() != 60.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 60.0*gb, disk2.Bytes())
+	}
+
+	diskT := disk1.Add(disk2).(*Disk)
+
+	// Check that the sums and properties are correct
+	if diskT.TotalCost() != 15.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 15.0, diskT.TotalCost())
+	}
+	if diskT.Adjustment() != 2.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 2.0, diskT.Adjustment())
+	}
+	if diskT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "cluster1", diskT.Properties().Cluster)
+	}
+	if diskT.Type() != DiskAssetType {
+		t.Fatalf("Disk.Add: expected %s; got %s", DiskAssetType, diskT.Type())
+	}
+	if diskT.Properties().ProviderID != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskT.Properties().ProviderID)
+	}
+	if diskT.Properties().Name != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskT.Properties().Name)
+	}
+	if diskT.Bytes() != 160.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 160.0*gb, diskT.Bytes())
+	}
+	// Local is cost-weighted: (0.0*10 + 1.0*5) / 15 = 1/3
+	if !approx(diskT.Local, 0.333333, delta) {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.333333, diskT.Local)
+	}
+
+	// Check that the original assets are unchanged
+	if disk1.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, disk1.TotalCost())
+	}
+	if disk1.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk1.Adjustment())
+	}
+	if disk1.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, disk1.Local)
+	}
+	if disk2.TotalCost() != 5.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 5.0, disk2.TotalCost())
+	}
+	if disk2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk2.Adjustment())
+	}
+	if disk2.Local != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, disk2.Local)
+	}
+
+	// Zero-cost disks: Local must average without dividing by zero
+	disk3 := NewDisk("disk3", "cluster1", "disk3", *windows[0].start, *windows[0].end, windows[0])
+	disk3.ByteHours = 0.0 * hours
+	disk3.Cost = 0.0
+	disk3.Local = 0.0
+	disk3.SetAdjustment(0.0)
+
+	disk4 := NewDisk("disk4", "cluster1", "disk4", *windows[0].start, *windows[0].end, windows[0])
+	disk4.ByteHours = 0.0 * hours
+	disk4.Cost = 0.0
+	disk4.Local = 1.0
+	disk4.SetAdjustment(0.0)
+
+	diskT = disk3.Add(disk4).(*Disk)
+
+	if diskT.TotalCost() != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskT.TotalCost())
+	}
+	if diskT.Local != 0.5 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.5, diskT.Local)
+	}
+
+	// Accumulate: one disks, two windows
+	diskA1 := NewDisk("diskA1", "cluster1", "diskA1", *windows[0].start, *windows[0].end, windows[0])
+	diskA1.ByteHours = 100 * gb * hours
+	diskA1.Cost = 9.0
+	diskA1.SetAdjustment(1.0)
+
+	diskA2 := NewDisk("diskA2", "cluster1", "diskA2", *windows[1].start, *windows[1].end, windows[1])
+	diskA2.ByteHours = 100 * gb * hours
+	diskA2.Cost = 9.0
+	diskA2.SetAdjustment(1.0)
+
+	diskAT := diskA1.Add(diskA2).(*Disk)
+
+	// Check that the sums and properties are correct
+	if diskAT.TotalCost() != 20.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 20.0, diskAT.TotalCost())
+	}
+	if diskAT.Adjustment() != 2.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 2.0, diskAT.Adjustment())
+	}
+	if diskAT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "cluster1", diskAT.Properties().Cluster)
+	}
+	if diskAT.Type() != DiskAssetType {
+		t.Fatalf("Disk.Add: expected %s; got %s", DiskAssetType, diskAT.Type())
+	}
+	if diskAT.Properties().ProviderID != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskAT.Properties().ProviderID)
+	}
+	if diskAT.Properties().Name != "" {
+		t.Fatalf("Disk.Add: expected %s; got %s", "", diskAT.Properties().Name)
+	}
+	if diskAT.Bytes() != 100.0*gb {
+		t.Fatalf("Disk.Add: expected %f; got %f", 100.0*gb, diskAT.Bytes())
+	}
+	if diskAT.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskAT.Local)
+	}
+
+	// Check that the original assets are unchanged
+	if diskA1.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, diskA1.TotalCost())
+	}
+	if diskA1.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, diskA1.Adjustment())
+	}
+	if diskA1.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskA1.Local)
+	}
+	if diskA2.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 10.0, diskA2.TotalCost())
+	}
+	if diskA2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 1.0, diskA2.Adjustment())
+	}
+	if diskA2.Local != 0.0 {
+		t.Fatalf("Disk.Add: expected %f; got %f", 0.0, diskA2.Local)
+	}
+}
+
+// TestDisk_Clone verifies that a cloned Disk is fully detached: mutating
+// the original (or the clone's Local) must not leak across. Review fix:
+// failure messages previously said "Any.Clone"/"Disk.Add"; they now name
+// the test under scrutiny, Disk.Clone.
+func TestDisk_Clone(t *testing.T) {
+	disk1 := NewDisk("disk1", "cluster1", "disk1", *windows[0].start, *windows[0].end, windows[0])
+	disk1.Local = 0.0
+	disk1.Cost = 9.0
+	disk1.SetAdjustment(1.0)
+
+	disk2 := disk1.Clone().(*Disk)
+
+	disk2.Local = 1.0
+	disk1.Cost = 18.0
+	disk1.SetAdjustment(2.0)
+
+	// disk2 should match disk1's pre-mutation state
+	if disk2.TotalCost() != 10.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 10.0, disk2.TotalCost())
+	}
+	if disk2.Adjustment() != 1.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 1.0, disk2.Adjustment())
+	}
+	if disk2.Local != 1.0 {
+		t.Fatalf("Disk.Clone: expected %f; got %f", 1.0, disk2.Local)
+	}
+}
+
+func TestDisk_MarshalJSON(t *testing.T) {
+	disk := NewDisk("disk", "cluster", "providerID", *windows[0].start, *windows[0].end, windows[0])
+	disk.SetLabels(AssetLabels{
+		"label": "value",
+	})
+	disk.Cost = 9.0
+	disk.SetAdjustment(1.0)
+
+	_, err := json.Marshal(disk)
+	if err != nil {
+		t.Fatalf("Disk.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+// TestNode_Add covers aggregation (two nodes, one window: cores/RAM add)
+// and accumulation (one node, two windows: cores/RAM do not add).
+// Review fixes: node2's CPU/RAM breakdowns were mistakenly assigned to
+// node1 (clobbering node1's own breakdowns and leaving node2's nil);
+// Type-mismatch messages printed AnyAssetType instead of NodeAssetType;
+// two adjustment failure messages printed 1.0 where 1.6 was expected.
+func TestNode_Add(t *testing.T) {
+	// 1. aggregate: add size, local
+	// 2. accumulate: don't add size, local
+
+	hours := windows[0].Duration().Hours()
+
+	// Aggregate: two nodes, one window
+	node1 := NewNode("node1", "cluster1", "node1", *windows[0].start, *windows[0].end, windows[0])
+	node1.CPUCoreHours = 1.0 * hours
+	node1.RAMByteHours = 2.0 * gb * hours
+	node1.GPUCost = 0.0
+	node1.CPUCost = 8.0
+	node1.RAMCost = 4.0
+	node1.Discount = 0.3
+	node1.CPUBreakdown = &Breakdown{
+		Idle:   0.6,
+		System: 0.2,
+		User:   0.2,
+		Other:  0.0,
+	}
+	node1.RAMBreakdown = &Breakdown{
+		Idle:   0.6,
+		System: 0.2,
+		User:   0.2,
+		Other:  0.0,
+	}
+	node1.SetAdjustment(1.6)
+
+	node2 := NewNode("node2", "cluster1", "node2", *windows[0].start, *windows[0].end, windows[0])
+	node2.CPUCoreHours = 1.0 * hours
+	node2.RAMByteHours = 2.0 * gb * hours
+	node2.GPUCost = 0.0
+	node2.CPUCost = 3.0
+	node2.RAMCost = 1.0
+	node2.Discount = 0.0
+	node2.CPUBreakdown = &Breakdown{
+		Idle:   0.9,
+		System: 0.05,
+		User:   0.0,
+		Other:  0.05,
+	}
+	node2.RAMBreakdown = &Breakdown{
+		Idle:   0.9,
+		System: 0.05,
+		User:   0.0,
+		Other:  0.05,
+	}
+	node2.SetAdjustment(1.0)
+
+	nodeT := node1.Add(node2).(*Node)
+
+	// Check that the sums and properties are correct
+	if !approx(nodeT.TotalCost(), 15.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 15.0, nodeT.TotalCost())
+	}
+	if nodeT.Adjustment() != 2.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.6, nodeT.Adjustment())
+	}
+	if nodeT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Node.Add: expected %s; got %s", "cluster1", nodeT.Properties().Cluster)
+	}
+	if nodeT.Type() != NodeAssetType {
+		t.Fatalf("Node.Add: expected %s; got %s", NodeAssetType, nodeT.Type())
+	}
+	if nodeT.Properties().ProviderID != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeT.Properties().ProviderID)
+	}
+	if nodeT.Properties().Name != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeT.Properties().Name)
+	}
+	if nodeT.CPUCores() != 2.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.0, nodeT.CPUCores())
+	}
+	if nodeT.RAMBytes() != 4.0*gb {
+		t.Fatalf("Node.Add: expected %f; got %f", 4.0*gb, nodeT.RAMBytes())
+	}
+
+	// Check that the original assets are unchanged
+	if !approx(node1.TotalCost(), 10.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 10.0, node1.TotalCost())
+	}
+	if node1.Adjustment() != 1.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.6, node1.Adjustment())
+	}
+	if !approx(node2.TotalCost(), 5.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 5.0, node2.TotalCost())
+	}
+	if node2.Adjustment() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, node2.Adjustment())
+	}
+
+	// Check that we don't divide by zero computing Local
+	node3 := NewNode("node3", "cluster1", "node3", *windows[0].start, *windows[0].end, windows[0])
+	node3.CPUCoreHours = 0 * hours
+	node3.RAMByteHours = 0 * hours
+	node3.GPUCost = 0
+	node3.CPUCost = 0.0
+	node3.RAMCost = 0.0
+	node3.Discount = 0.3
+	node3.SetAdjustment(0.0)
+
+	node4 := NewNode("node4", "cluster1", "node4", *windows[0].start, *windows[0].end, windows[0])
+	node4.CPUCoreHours = 0 * hours
+	node4.RAMByteHours = 0 * hours
+	node4.GPUCost = 0
+	node4.CPUCost = 0.0
+	node4.RAMCost = 0.0
+	node4.Discount = 0.1
+	node4.SetAdjustment(0.0)
+
+	nodeT = node3.Add(node4).(*Node)
+
+	// Check that the sums and properties are correct and without NaNs
+	if nodeT.TotalCost() != 0.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 0.0, nodeT.TotalCost())
+	}
+	if nodeT.Discount != 0.2 {
+		t.Fatalf("Node.Add: expected %f; got %f", 0.2, nodeT.Discount)
+	}
+
+	// Accumulate: one nodes, two window
+	nodeA1 := NewNode("nodeA1", "cluster1", "nodeA1", *windows[0].start, *windows[0].end, windows[0])
+	nodeA1.CPUCoreHours = 1.0 * hours
+	nodeA1.RAMByteHours = 2.0 * gb * hours
+	nodeA1.GPUCost = 0.0
+	nodeA1.CPUCost = 8.0
+	nodeA1.RAMCost = 4.0
+	nodeA1.Discount = 0.3
+	nodeA1.SetAdjustment(1.6)
+
+	nodeA2 := NewNode("nodeA2", "cluster1", "nodeA2", *windows[1].start, *windows[1].end, windows[1])
+	nodeA2.CPUCoreHours = 1.0 * hours
+	nodeA2.RAMByteHours = 2.0 * gb * hours
+	nodeA2.GPUCost = 0.0
+	nodeA2.CPUCost = 3.0
+	nodeA2.RAMCost = 1.0
+	nodeA2.Discount = 0.0
+	nodeA2.SetAdjustment(1.0)
+
+	nodeAT := nodeA1.Add(nodeA2).(*Node)
+
+	// Check that the sums and properties are correct
+	if !approx(nodeAT.TotalCost(), 15.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 15.0, nodeAT.TotalCost())
+	}
+	if nodeAT.Adjustment() != 2.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.6, nodeAT.Adjustment())
+	}
+	if nodeAT.Properties().Cluster != "cluster1" {
+		t.Fatalf("Node.Add: expected %s; got %s", "cluster1", nodeAT.Properties().Cluster)
+	}
+	if nodeAT.Type() != NodeAssetType {
+		t.Fatalf("Node.Add: expected %s; got %s", NodeAssetType, nodeAT.Type())
+	}
+	if nodeAT.Properties().ProviderID != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeAT.Properties().ProviderID)
+	}
+	if nodeAT.Properties().Name != "" {
+		t.Fatalf("Node.Add: expected %s; got %s", "", nodeAT.Properties().Name)
+	}
+	if nodeAT.CPUCores() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, nodeAT.CPUCores())
+	}
+	if nodeAT.RAMBytes() != 2.0*gb {
+		t.Fatalf("Node.Add: expected %f; got %f", 2.0*gb, nodeAT.RAMBytes())
+	}
+
+	// Check that the original assets are unchanged
+	if !approx(nodeA1.TotalCost(), 10.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 10.0, nodeA1.TotalCost())
+	}
+	if nodeA1.Adjustment() != 1.6 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.6, nodeA1.Adjustment())
+	}
+	if !approx(nodeA2.TotalCost(), 5.0, delta) {
+		t.Fatalf("Node.Add: expected %f; got %f", 5.0, nodeA2.TotalCost())
+	}
+	if nodeA2.Adjustment() != 1.0 {
+		t.Fatalf("Node.Add: expected %f; got %f", 1.0, nodeA2.Adjustment())
+	}
+}
+
+// TestNode_Clone is not yet implemented; the intended pattern is the one
+// in TestDisk_Clone: clone, mutate the original, assert clone unchanged.
+func TestNode_Clone(t *testing.T) {
+	// TODO
+}
+
+func TestNode_MarshalJSON(t *testing.T) {
+	node := NewNode("node", "cluster", "providerID", *windows[0].start, *windows[0].end, windows[0])
+	node.SetLabels(AssetLabels{
+		"label": "value",
+	})
+	node.CPUCost = 9.0
+	node.RAMCost = 0.0
+	node.CPUCoreHours = 123.0
+	node.RAMByteHours = 13323.0
+	node.SetAdjustment(1.0)
+
+	_, err := json.Marshal(node)
+	if err != nil {
+		t.Fatalf("Node.MarshalJSON: unexpected error: %s", err)
+	}
+}
+
+func TestClusterManagement_Add(t *testing.T) {
+	cm1 := NewClusterManagement("gcp", "cluster1", windows[0])
+	cm1.Cost = 9.0
+
+	cm2 := NewClusterManagement("gcp", "cluster1", windows[0])
+	cm2.Cost = 4.0
+
+	cm3 := cm1.Add(cm2)
+
+	// Check that the sums and properties are correct
+	if cm3.TotalCost() != 13.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 13.0, cm3.TotalCost())
+	}
+	if cm3.Properties().Cluster != "cluster1" {
+		t.Fatalf("ClusterManagement.Add: expected %s; got %s", "cluster1", cm3.Properties().Cluster)
+	}
+	if cm3.Type() != ClusterManagementAssetType {
+		t.Fatalf("ClusterManagement.Add: expected %s; got %s", ClusterManagementAssetType, cm3.Type())
+	}
+
+	// Check that the original assets are unchanged
+	if cm1.TotalCost() != 9.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 9.0, cm1.TotalCost())
+	}
+	if cm2.TotalCost() != 4.0 {
+		t.Fatalf("ClusterManagement.Add: expected %f; got %f", 4.0, cm2.TotalCost())
+	}
+}
+
+// TestClusterManagement_Clone is not yet implemented; follow the pattern
+// in TestDisk_Clone: clone, mutate the original, assert clone unchanged.
+func TestClusterManagement_Clone(t *testing.T) {
+	// TODO
+}
+
+// TestCloudAny_Add verifies adding two Cloud assets of different categories:
+// costs and adjustments sum, the result is of CloudAssetType, and the
+// operands are left unmodified. Review fix: failure messages previously
+// said "Any.Add"; they now name this test, CloudAny.Add.
+func TestCloudAny_Add(t *testing.T) {
+	ca1 := NewCloud(ComputeCategory, "ca1", *windows[0].start, *windows[0].end, windows[0])
+	ca1.Cost = 9.0
+	ca1.SetAdjustment(1.0)
+
+	ca2 := NewCloud(StorageCategory, "ca2", *windows[0].start, *windows[0].end, windows[0])
+	ca2.Cost = 4.0
+	ca2.SetAdjustment(1.0)
+
+	ca3 := ca1.Add(ca2)
+
+	// Check that the sums and properties are correct
+	if ca3.TotalCost() != 15.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 15.0, ca3.TotalCost())
+	}
+	if ca3.Adjustment() != 2.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 2.0, ca3.Adjustment())
+	}
+	if ca3.Type() != CloudAssetType {
+		t.Fatalf("CloudAny.Add: expected %s; got %s", CloudAssetType, ca3.Type())
+	}
+
+	// Check that the original assets are unchanged
+	if ca1.TotalCost() != 10.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 10.0, ca1.TotalCost())
+	}
+	if ca1.Adjustment() != 1.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 1.0, ca1.Adjustment())
+	}
+	if ca2.TotalCost() != 5.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 5.0, ca2.TotalCost())
+	}
+	if ca2.Adjustment() != 1.0 {
+		t.Fatalf("CloudAny.Add: expected %f; got %f", 1.0, ca2.Adjustment())
+	}
+}
+
+// TestCloudAny_Clone is not yet implemented; follow the pattern in
+// TestDisk_Clone: clone, mutate the original, assert clone unchanged.
+func TestCloudAny_Clone(t *testing.T) {
+	// TODO
+}
+
+// TestAssetSet_AggregateBy exercises single-property, multi-property,
+// empty, and nil aggregations, plus shared-cost aggregation. Review fix:
+// the shared-cost assertion (scenario 3a) was mislabeled "1a", which would
+// make its failures point at the wrong scenario.
+func TestAssetSet_AggregateBy(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	window := NewWindow(&startYesterday, &endYesterday)
+
+	// Scenarios to test:
+
+	// 1  Single-aggregation
+	// 1a []AssetProperty=[Cluster]
+	// 1b []AssetProperty=[Type]
+	// 1c []AssetProperty=[Nil]
+	// 1d []AssetProperty=nil
+
+	// 2  Multi-aggregation
+	// 2a []AssetProperty=[Cluster,Type]
+
+	// 3  Share resources
+	// 3a Shared hourly cost > 0.0
+
+	// Definitions and set-up:
+
+	var as *AssetSet
+	var err error
+
+	// Tests:
+
+	// 1  Single-aggregation
+
+	// 1a []AssetProperty=[Cluster]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetClusterProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1a", window, map[string]float64{
+		"cluster1": 26.0,
+		"cluster2": 15.0,
+		"cluster3": 19.0,
+	}, nil)
+
+	// 1b []AssetProperty=[Type]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1b", window, map[string]float64{
+		"Node":              49.0,
+		"Disk":              8.0,
+		"ClusterManagement": 3.0,
+	}, nil)
+
+	// 1c []AssetProperty=[Nil]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"": 60.0,
+	}, nil)
+
+	// 1d []AssetProperty=nil
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy(nil, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1d", window, map[string]float64{
+		"Compute/cluster1/Node/Kubernetes/gcp-node1/node1":     7.00,
+		"Compute/cluster1/Node/Kubernetes/gcp-node2/node2":     5.50,
+		"Compute/cluster1/Node/Kubernetes/gcp-node3/node3":     6.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk1/disk1":     2.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk2/disk2":     1.50,
+		"GCP/Management/cluster1/ClusterManagement/Kubernetes": 3.00,
+		"Compute/cluster2/Node/Kubernetes/gcp-node4/node4":     11.00,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk3/disk3":     2.50,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk4/disk4":     1.50,
+		"GCP/Management/cluster2/ClusterManagement/Kubernetes": 0.00,
+		"Compute/cluster3/Node/Kubernetes/aws-node5/node5":     19.00,
+	}, nil)
+
+	// 2  Multi-aggregation
+
+	// 2a []AssetProperty=[Cluster,Type]
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetClusterProp, AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "2a", window, map[string]float64{
+		"cluster1/Node":              19.0,
+		"cluster1/Disk":              4.0,
+		"cluster1/ClusterManagement": 3.0,
+		"cluster2/Node":              11.0,
+		"cluster2/Disk":              4.0,
+		"cluster2/ClusterManagement": 0.0,
+		"cluster3/Node":              19.0,
+	}, nil)
+
+	// 3  Share resources
+
+	// 3a Shared hourly cost > 0.0 (0.5/hr over 24h => 12.0 shared)
+	as = generateAssetSet(startYesterday)
+	err = as.AggregateBy([]AssetProperty{AssetTypeProp}, &AssetAggregationOptions{
+		SharedHourlyCosts: map[string]float64{"shared1": 0.5},
+	})
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "3a", window, map[string]float64{
+		"Node":              49.0,
+		"Disk":              8.0,
+		"ClusterManagement": 3.0,
+		"Shared":            12.0,
+	}, nil)
+}
+
+func TestAssetSet_FindMatch(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	s, e := startYesterday, endYesterday
+	w := NewWindow(&s, &e)
+
+	var query, match Asset
+	var as *AssetSet
+	var err error
+
+	// Assert success of a simple match of Type and ProviderID
+	as = generateAssetSet(startYesterday)
+	query = NewNode("", "", "gcp-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err != nil {
+		t.Fatalf("AssetSet.FindMatch: unexpected error: %s", err)
+	}
+
+	// Assert error of a simple non-match of Type and ProviderID
+	as = generateAssetSet(startYesterday)
+	query = NewNode("", "", "aws-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err == nil {
+		t.Fatalf("AssetSet.FindMatch: expected error (no match); found %s", match)
+	}
+
+	// Assert error of matching ProviderID, but not Type
+	as = generateAssetSet(startYesterday)
+	query = NewCloud(ComputeCategory, "gcp-node3", s, e, w)
+	match, err = as.FindMatch(query, []AssetProperty{AssetTypeProp, AssetProviderIDProp})
+	if err == nil {
+		t.Fatalf("AssetSet.FindMatch: expected error (no match); found %s", match)
+	}
+}
+
+// TestAssetSetRange_Accumulate verifies that a three-day range aggregates
+// and then accumulates into one AssetSet with tripled costs. Review fix:
+// in three of the five segments the error from AggregateBy was silently
+// overwritten by the subsequent `as, err = asr.Accumulate()`; every
+// AggregateBy error is now checked before accumulating.
+func TestAssetSetRange_Accumulate(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	window := NewWindow(&startD0, &endYesterday)
+
+	var asr *AssetSetRange
+	var as *AssetSet
+	var err error
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy(nil, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1a", window, map[string]float64{
+		"Compute/cluster1/Node/Kubernetes/gcp-node1/node1":     21.00,
+		"Compute/cluster1/Node/Kubernetes/gcp-node2/node2":     16.50,
+		"Compute/cluster1/Node/Kubernetes/gcp-node3/node3":     19.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk1/disk1":     7.50,
+		"Storage/cluster1/Disk/Kubernetes/gcp-disk2/disk2":     4.50,
+		"GCP/Management/cluster1/ClusterManagement/Kubernetes": 9.00,
+		"Compute/cluster2/Node/Kubernetes/gcp-node4/node4":     33.00,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk3/disk3":     7.50,
+		"Storage/cluster2/Disk/Kubernetes/gcp-disk4/disk4":     4.50,
+		"GCP/Management/cluster2/ClusterManagement/Kubernetes": 0.00,
+		"Compute/cluster3/Node/Kubernetes/aws-node5/node5":     57.00,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1b", window, map[string]float64{
+		"": 180.00,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"Node":              147.0,
+		"Disk":              24.0,
+		"ClusterManagement": 9.0,
+	}, nil)
+
+	asr = NewAssetSetRange(
+		generateAssetSet(startD0),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetClusterProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1c", window, map[string]float64{
+		"cluster1": 78.0,
+		"cluster2": 45.0,
+		"cluster3": 57.0,
+	}, nil)
+
+	// Accumulation with aggregation should work, even when the first AssetSet
+	// is empty (this was previously an issue)
+	asr = NewAssetSetRange(
+		NewAssetSet(startD0, startD1),
+		generateAssetSet(startD1),
+		generateAssetSet(startD2),
+	)
+	err = asr.AggregateBy([]AssetProperty{AssetTypeProp}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
+	as, err = asr.Accumulate()
+	if err != nil {
+		t.Fatalf("AssetSetRange.Accumulate: unexpected error: %s", err)
+	}
+	assertAssetSet(t, as, "1d", window, map[string]float64{
+		"Node":              98.00,
+		"Disk":              16.00,
+		"ClusterManagement": 6.00,
+	}, nil)
+}
+
+// assertAssetSet fails the test (with msg identifying the scenario) unless
+// the AssetSet has exactly the expected aggregation keys, each asset's
+// total cost matches the expected value to the nearest cent, and every
+// window (set-level and per-asset) equals the expected window.
+func assertAssetSet(t *testing.T, as *AssetSet, msg string, window Window, exps map[string]float64, err error) {
+	if err != nil {
+		t.Fatalf("AssetSet.AggregateBy[%s]: unexpected error: %s", msg, err)
+	}
+	if as.Length() != len(exps) {
+		t.Fatalf("AssetSet.AggregateBy[%s]: expected set of length %d, actual %d", msg, len(exps), as.Length())
+	}
+	if !as.Window.Equal(window) {
+		t.Fatalf("AssetSet.AggregateBy[%s]: expected window %s, actual %s", msg, window, as.Window)
+	}
+	as.Each(func(key string, a Asset) {
+		// Compare costs rounded to cents to avoid float noise
+		if exp, ok := exps[key]; ok {
+			if math.Round(a.TotalCost()*100) != math.Round(exp*100) {
+				t.Fatalf("AssetSet.AggregateBy[%s]: key %s expected total cost %.2f, actual %.2f", msg, key, exp, a.TotalCost())
+			}
+			if !a.Window().Equal(window) {
+				t.Fatalf("AssetSet.AggregateBy[%s]: key %s expected window %s, actual %s", msg, key, window, as.Window)
+			}
+		} else {
+			t.Fatalf("AssetSet.AggregateBy[%s]: unexpected asset: %s", msg, key)
+		}
+	})
+}
+
+// generateAssetSet generates the following topology:
+//
+// | Asset                        | Cost |  Adj |
+// +------------------------------+------+------+
+//   cluster1:
+//     node1:                        6.00   1.00
+//     node2:                        4.00   1.50
+//     node3:                        7.00  -0.50
+//     disk1:                        2.50   0.00
+//     disk2:                        1.50   0.00
+//     clusterManagement1:           3.00   0.00
+// +------------------------------+------+------+
+//   cluster1 subtotal              24.00   2.00
+// +------------------------------+------+------+
+//   cluster2:
+//     node4:                       12.00  -1.00
+//     disk3:                        2.50   0.00
+//     disk4:                        1.50   0.00
+//     clusterManagement2:           0.00   0.00
+// +------------------------------+------+------+
+//   cluster2 subtotal              16.00  -1.00
+// +------------------------------+------+------+
+//   cluster3:
+//     node5:                       17.00   2.00
+// +------------------------------+------+------+
+//   cluster3 subtotal              17.00   2.00
+// +------------------------------+------+------+
+//   total                          57.00   3.00
+// +------------------------------+------+------+
+func generateAssetSet(start time.Time) *AssetSet {
+	end := start.Add(day)
+	window := NewWindow(&start, &end)
+
+	hours := window.Duration().Hours()
+
+	node1 := NewNode("node1", "cluster1", "gcp-node1", *window.Clone().start, *window.Clone().end, window.Clone())
+	node1.CPUCost = 4.0
+	node1.RAMCost = 4.0
+	node1.GPUCost = 2.0
+	node1.Discount = 0.5
+	node1.CPUCoreHours = 2.0 * hours
+	node1.RAMByteHours = 4.0 * gb * hours
+	node1.SetAdjustment(1.0)
+
+	node2 := NewNode("node2", "cluster1", "gcp-node2", *window.Clone().start, *window.Clone().end, window.Clone())
+	node2.CPUCost = 4.0
+	node2.RAMCost = 4.0
+	node2.GPUCost = 0.0
+	node2.Discount = 0.5
+	node2.CPUCoreHours = 2.0 * hours
+	node2.RAMByteHours = 4.0 * gb * hours
+	node2.SetAdjustment(1.5)
+
+	node3 := NewNode("node3", "cluster1", "gcp-node3", *window.Clone().start, *window.Clone().end, window.Clone())
+	node3.CPUCost = 4.0
+	node3.RAMCost = 4.0
+	node3.GPUCost = 3.0
+	node3.Discount = 0.5
+	node3.CPUCoreHours = 2.0 * hours
+	node3.RAMByteHours = 4.0 * gb * hours
+	node3.SetAdjustment(-0.5)
+
+	node4 := NewNode("node4", "cluster2", "gcp-node4", *window.Clone().start, *window.Clone().end, window.Clone())
+	node4.CPUCost = 10.0
+	node4.RAMCost = 6.0
+	node4.GPUCost = 0.0
+	node4.Discount = 0.25
+	node4.CPUCoreHours = 4.0 * hours
+	node4.RAMByteHours = 12.0 * gb * hours
+	node4.SetAdjustment(-1.0)
+
+	node5 := NewNode("node5", "cluster3", "aws-node5", *window.Clone().start, *window.Clone().end, window.Clone())
+	node5.CPUCost = 10.0
+	node5.RAMCost = 7.0
+	node5.GPUCost = 0.0
+	node5.Discount = 0.0
+	node5.CPUCoreHours = 8.0 * hours
+	node5.RAMByteHours = 24.0 * gb * hours
+	node5.SetAdjustment(2.0)
+
+	disk1 := NewDisk("disk1", "cluster1", "gcp-disk1", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk1.Cost = 2.5
+	disk1.ByteHours = 100 * gb * hours
+
+	disk2 := NewDisk("disk2", "cluster1", "gcp-disk2", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk2.Cost = 1.5
+	disk2.ByteHours = 60 * gb * hours
+
+	disk3 := NewDisk("disk3", "cluster2", "gcp-disk3", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk3.Cost = 2.5
+	disk3.ByteHours = 100 * gb * hours
+
+	disk4 := NewDisk("disk4", "cluster2", "gcp-disk4", *window.Clone().start, *window.Clone().end, window.Clone())
+	disk4.Cost = 1.5
+	disk4.ByteHours = 100 * gb * hours
+
+	cm1 := NewClusterManagement("gcp", "cluster1", window.Clone())
+	cm1.Cost = 3.0
+
+	cm2 := NewClusterManagement("gcp", "cluster2", window.Clone())
+	cm2.Cost = 0.0
+
+	return NewAssetSet(
+		start, end,
+		// cluster 1
+		node1, node2, node3, disk1, disk2, cm1,
+		// cluster 2
+		node4, disk3, disk4, cm2,
+		// cluster 3
+		node5,
+	)
+}
+
+// printAssetSet dumps every asset in the set to stdout under a msg header.
+// Debugging aid; not invoked by any of the tests above.
+func printAssetSet(msg string, as *AssetSet) {
+	fmt.Printf("--- %s ---\n", msg)
+	as.Each(func(key string, a Asset) {
+		fmt.Printf(" > %s: %s\n", key, a)
+	})
+}

+ 343 - 0
pkg/kubecost/assetprops.go

@@ -0,0 +1,343 @@
+package kubecost
+
+import (
+	"fmt"
+	"strings"
+)
+
// AssetProperty is a kind of property belonging to an Asset; values are the
// lowercase names used when selecting or aggregating assets (see
// ParseAssetProperty and AssetProperties.Keys).
type AssetProperty string

const (
	// AssetNilProp is the zero-value of AssetProperty
	AssetNilProp AssetProperty = ""

	// AssetAccountProp describes the account of the Asset
	AssetAccountProp AssetProperty = "account"

	// AssetCategoryProp describes the category of the Asset
	AssetCategoryProp AssetProperty = "category"

	// AssetClusterProp describes the cluster of the Asset
	AssetClusterProp AssetProperty = "cluster"

	// AssetNameProp describes the name of the Asset
	AssetNameProp AssetProperty = "name"

	// AssetProjectProp describes the project of the Asset
	AssetProjectProp AssetProperty = "project"

	// AssetProviderProp describes the provider of the Asset
	AssetProviderProp AssetProperty = "provider"

	// AssetProviderIDProp describes the providerID of the Asset
	AssetProviderIDProp AssetProperty = "providerID"

	// AssetServiceProp describes the service of the Asset
	AssetServiceProp AssetProperty = "service"

	// AssetTypeProp describes the type of the Asset
	AssetTypeProp AssetProperty = "type"
)
+
+// ParseAssetProperty attempts to parse a string into an AssetProperty
+func ParseAssetProperty(text string) (AssetProperty, error) {
+	switch strings.TrimSpace(strings.ToLower(text)) {
+	case "account":
+		return AssetAccountProp, nil
+	case "category":
+		return AssetCategoryProp, nil
+	case "cluster":
+		return AssetClusterProp, nil
+	case "name":
+		return AssetNameProp, nil
+	case "project":
+		return AssetProjectProp, nil
+	case "provider":
+		return AssetProviderProp, nil
+	case "providerID":
+		return AssetProviderIDProp, nil
+	case "service":
+		return AssetServiceProp, nil
+	case "type":
+		return AssetTypeProp, nil
+	}
+	return AssetNilProp, fmt.Errorf("invalid asset property: %s", text)
+}
+
+func propsEqual(p1, p2 []AssetProperty) bool {
+	if len(p1) != len(p2) {
+		return false
+	}
+
+	for _, p := range p1 {
+		if !hasProp(p2, p) {
+			return false
+		}
+	}
+
+	return true
+}
+
// Category options

// ComputeCategory signifies the Compute Category
const ComputeCategory = "Compute"

// StorageCategory signifies the Storage Category
const StorageCategory = "Storage"

// NetworkCategory signifies the Network Category
const NetworkCategory = "Network"

// ManagementCategory signifies the Management Category
const ManagementCategory = "Management"

// SharedCategory signifies an unassigned Category
const SharedCategory = "Shared"

// OtherCategory signifies an unassigned Category
const OtherCategory = "Other"

// Provider options

// AWSProvider describes the provider AWS
const AWSProvider = "AWS"

// GCPProvider describes the provider GCP
const GCPProvider = "GCP"

// AzureProvider describes the provider Azure
const AzureProvider = "Azure"

// NilProvider describes unknown provider
const NilProvider = "-"

// Service options

// KubernetesService describes the Kubernetes service.
const KubernetesService = "Kubernetes"
+
+// ParseProvider attempts to parse and return a known provider, given a string
+func ParseProvider(str string) string {
+	switch strings.ToLower(strings.TrimSpace(str)) {
+	case "aws", "eks", "amazon":
+		return AWSProvider
+	case "gcp", "gke", "google":
+		return GCPProvider
+	case "azure":
+		return AzureProvider
+	default:
+		return NilProvider
+	}
+}
+
// AssetProperties describes all properties assigned to an Asset. An empty
// string value means the property is unset; Keys, Merge, and String all skip
// or blank empty fields.
type AssetProperties struct {
	Category   string `json:"category,omitempty"`
	Provider   string `json:"provider,omitempty"`
	Account    string `json:"account,omitempty"`
	Project    string `json:"project,omitempty"`
	Service    string `json:"service,omitempty"`
	Cluster    string `json:"cluster,omitempty"`
	Name       string `json:"name,omitempty"`
	ProviderID string `json:"providerID,omitempty"`
}
+
+// Clone returns a cloned instance of the given AssetProperties
+func (ap *AssetProperties) Clone() *AssetProperties {
+	if ap == nil {
+		return nil
+	}
+
+	clone := &AssetProperties{}
+	clone.Category = ap.Category
+	clone.Provider = ap.Provider
+	clone.Account = ap.Account
+	clone.Project = ap.Project
+	clone.Service = ap.Service
+	clone.Cluster = ap.Cluster
+	clone.Name = ap.Name
+	clone.ProviderID = ap.ProviderID
+
+	return clone
+}
+
+// Equal returns true only if both AssetProperties are non-nil exact matches
+func (ap *AssetProperties) Equal(that *AssetProperties) bool {
+	if ap == nil || that == nil {
+		return false
+	}
+
+	if ap.Category != that.Category {
+		return false
+	}
+
+	if ap.Provider != that.Provider {
+		return false
+	}
+
+	if ap.Account != that.Account {
+		return false
+	}
+
+	if ap.Project != that.Project {
+		return false
+	}
+
+	if ap.Service != that.Service {
+		return false
+	}
+
+	if ap.Cluster != that.Cluster {
+		return false
+	}
+
+	if ap.Name != that.Name {
+		return false
+	}
+
+	if ap.ProviderID != that.ProviderID {
+		return false
+	}
+
+	return true
+}
+
+// Keys returns the list of string values used to key the Asset based on the
+// list of properties provided.
+func (ap *AssetProperties) Keys(props []AssetProperty) []string {
+	keys := []string{}
+
+	if ap == nil {
+		return keys
+	}
+
+	if (props == nil || hasProp(props, AssetCategoryProp)) && ap.Category != "" {
+		keys = append(keys, ap.Category)
+	}
+
+	if (props == nil || hasProp(props, AssetProviderProp)) && ap.Provider != "" {
+		keys = append(keys, ap.Provider)
+	}
+
+	if (props == nil || hasProp(props, AssetAccountProp)) && ap.Account != "" {
+		keys = append(keys, ap.Account)
+	}
+
+	if (props == nil || hasProp(props, AssetProjectProp)) && ap.Project != "" {
+		keys = append(keys, ap.Project)
+	}
+
+	if (props == nil || hasProp(props, AssetServiceProp)) && ap.Service != "" {
+		keys = append(keys, ap.Service)
+	}
+
+	if (props == nil || hasProp(props, AssetClusterProp)) && ap.Cluster != "" {
+		keys = append(keys, ap.Cluster)
+	}
+
+	if (props == nil || hasProp(props, AssetNameProp)) && ap.Name != "" {
+		keys = append(keys, ap.Name)
+	}
+
+	if (props == nil || hasProp(props, AssetProviderIDProp)) && ap.ProviderID != "" {
+		keys = append(keys, ap.ProviderID)
+	}
+
+	return keys
+}
+
+// Merge retains only the properties shared with the given AssetProperties
+func (ap *AssetProperties) Merge(that *AssetProperties) *AssetProperties {
+	if ap == nil || that == nil {
+		return nil
+	}
+
+	result := &AssetProperties{}
+
+	if ap.Category == that.Category {
+		result.Category = ap.Category
+	}
+
+	if ap.Provider == that.Provider {
+		result.Provider = ap.Provider
+	}
+
+	if ap.Account == that.Account {
+		result.Account = ap.Account
+	}
+
+	if ap.Project == that.Project {
+		result.Project = ap.Project
+	}
+
+	if ap.Service == that.Service {
+		result.Service = ap.Service
+	}
+
+	if ap.Cluster == that.Cluster {
+		result.Cluster = ap.Cluster
+	}
+
+	if ap.Name == that.Name {
+		result.Name = ap.Name
+	}
+
+	if ap.ProviderID == that.ProviderID {
+		result.ProviderID = ap.ProviderID
+	}
+
+	return result
+}
+
+// String represents the properties as a string
+func (ap *AssetProperties) String() string {
+	if ap == nil {
+		return "<nil>"
+	}
+
+	strs := []string{}
+
+	if ap.Category != "" {
+		strs = append(strs, "Category:"+ap.Category)
+	}
+
+	if ap.Provider != "" {
+		strs = append(strs, "Provider:"+ap.Provider)
+	}
+
+	if ap.Account != "" {
+		strs = append(strs, "Account:"+ap.Account)
+	}
+
+	if ap.Project != "" {
+		strs = append(strs, "Project:"+ap.Project)
+	}
+
+	if ap.Service != "" {
+		strs = append(strs, "Service:"+ap.Service)
+	}
+
+	if ap.Cluster != "" {
+		strs = append(strs, "Cluster:"+ap.Cluster)
+	}
+
+	if ap.Name != "" {
+		strs = append(strs, "Name:"+ap.Name)
+	}
+
+	if ap.ProviderID != "" {
+		strs = append(strs, "ProviderID:"+ap.ProviderID)
+	}
+
+	return strings.Join(strs, ",")
+}
+
+func hasProp(props []AssetProperty, prop AssetProperty) bool {
+	for _, p := range props {
+		if p == prop {
+			return true
+		}
+	}
+	return false
+}

+ 24 - 0
pkg/kubecost/bingen.go

@@ -0,0 +1,24 @@
+package kubecost
+
+// @bingen:generate:Any
+// @bingen:generate:Asset
+// @bingen:generate:AssetLabels
+// @bingen:generate:AssetProperties
+// @bingen:generate:AssetProperty
+// @bingen:generate:AssetSet
+// @bingen:generate:AssetSetRange
+// @bingen:generate:Breakdown
+// @bingen:generate:Cloud
+// @bingen:generate:ClusterManagement
+// @bingen:generate:Disk
+// @bingen:generate:LoadBalancer
+// @bingen:generate:Network
+// @bingen:generate:Node
+// @bingen:generate:SharedAsset
+// @bingen:generate:Window
+
+// @bingen:generate:Allocation
+// @bingen:generate:AllocationSet
+// @bingen:generate:AllocationSetRange
+
+//go:generate bingen -package=kubecost -version=3 -buffer=github.com/kubecost/cost-model/pkg/util

+ 2299 - 0
pkg/kubecost/kubecost_codecs.go

@@ -0,0 +1,2299 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+//                             DO NOT MODIFY
+//
+//                          ┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻
+//
+//
+//            This source file was automatically generated by bingen.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+package kubecost
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	util "github.com/kubecost/cost-model/pkg/util"
+)
+
// GeneratorPackageName is the package the generator is targetting
const GeneratorPackageName string = "kubecost"

//--------------------------------------------------------------------------
//  Type Map
//--------------------------------------------------------------------------

// Generated type map for resolving interface implementations to
// concrete types (keys are bare type names; see resolveType).
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
var typeMap map[string]reflect.Type = map[string]reflect.Type{
	"Allocation":         reflect.TypeOf((*Allocation)(nil)).Elem(),
	"AllocationSet":      reflect.TypeOf((*AllocationSet)(nil)).Elem(),
	"AllocationSetRange": reflect.TypeOf((*AllocationSetRange)(nil)).Elem(),
	"Any":                reflect.TypeOf((*Any)(nil)).Elem(),
	"AssetProperties":    reflect.TypeOf((*AssetProperties)(nil)).Elem(),
	"AssetSet":           reflect.TypeOf((*AssetSet)(nil)).Elem(),
	"AssetSetRange":      reflect.TypeOf((*AssetSetRange)(nil)).Elem(),
	"Breakdown":          reflect.TypeOf((*Breakdown)(nil)).Elem(),
	"Cloud":              reflect.TypeOf((*Cloud)(nil)).Elem(),
	"ClusterManagement":  reflect.TypeOf((*ClusterManagement)(nil)).Elem(),
	"Disk":               reflect.TypeOf((*Disk)(nil)).Elem(),
	"LoadBalancer":       reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
	"Network":            reflect.TypeOf((*Network)(nil)).Elem(),
	"Node":               reflect.TypeOf((*Node)(nil)).Elem(),
	"SharedAsset":        reflect.TypeOf((*SharedAsset)(nil)).Elem(),
	"Window":             reflect.TypeOf((*Window)(nil)).Elem(),
}
+
+//--------------------------------------------------------------------------
+//  Type Helpers
+//--------------------------------------------------------------------------
+
// typeToString determines the basic properties of the type, the qualifier, package path, and
// type name, and returns the qualified type, e.g. "*path/to/pkg.Node".
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func typeToString(f interface{}) string {
	qual := ""
	t := reflect.TypeOf(f)
	if t.Kind() == reflect.Ptr {
		// Unwrap the pointer so PkgPath/Name resolve; keep "*" as qualifier.
		t = t.Elem()
		qual = "*"
	}

	return fmt.Sprintf("%s%s.%s", qual, t.PkgPath(), t.Name())
}
+
// resolveType uses the name of a type and returns the package, base type name, and whether
// or not it's a pointer. The package is blanked when it matches
// GeneratorPackageName (i.e. the type is local to this package).
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand. Assumes input of the shape produced by typeToString.
func resolveType(t string) (pkg string, name string, isPtr bool) {
	isPtr = t[:1] == "*"
	if isPtr {
		t = t[1:]
	}

	// Drop the leading import path, keeping only "pkg.Type".
	slashIndex := strings.LastIndex(t, "/")
	if slashIndex >= 0 {
		t = t[slashIndex+1:]
	}
	parts := strings.Split(t, ".")
	if parts[0] == GeneratorPackageName {
		parts[0] = ""
	}

	pkg = parts[0]
	name = parts[1]
	return
}
+
+//--------------------------------------------------------------------------
+//  Allocation
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Allocation instance
// into a byte array. Field order here must mirror UnmarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *Allocation) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	buff.WriteString(target.Name) // write string
	// --- [begin][write][reference](Properties) ---
	a, errA := target.Properties.MarshalBinary()
	if errA != nil {
		return nil, errA
	}
	buff.WriteInt(len(a))
	buff.WriteBytes(a)
	// --- [end][write][reference](Properties) ---

	// --- [begin][write][reference](time.Time) ---
	b, errB := target.Start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.End.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	buff.WriteFloat64(target.Minutes) // write float64
	// --- [begin][write][reference](time.Time) ---
	d, errD := target.ActiveStart.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][reference](time.Time) ---

	buff.WriteFloat64(target.CPUCoreHours)    // write float64
	buff.WriteFloat64(target.CPUCost)         // write float64
	buff.WriteFloat64(target.CPUEfficiency)   // write float64
	buff.WriteFloat64(target.GPUHours)        // write float64
	buff.WriteFloat64(target.GPUCost)         // write float64
	buff.WriteFloat64(target.NetworkCost)     // write float64
	buff.WriteFloat64(target.PVByteHours)     // write float64
	buff.WriteFloat64(target.PVCost)          // write float64
	buff.WriteFloat64(target.RAMByteHours)    // write float64
	buff.WriteFloat64(target.RAMCost)         // write float64
	buff.WriteFloat64(target.RAMEfficiency)   // write float64
	buff.WriteFloat64(target.SharedCost)      // write float64
	buff.WriteFloat64(target.TotalCost)       // write float64
	buff.WriteFloat64(target.TotalEfficiency) // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Allocation type. Read order must mirror MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *Allocation) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Allocation. Expected 3, got %d", version)
	}

	a := buff.ReadString() // read string
	target.Name = a

	// --- [begin][read][reference](Properties) ---
	b := &Properties{}
	c := buff.ReadInt()    // byte array length
	d := buff.ReadBytes(c) // byte array
	errA := b.UnmarshalBinary(d)
	if errA != nil {
		return errA
	}
	target.Properties = *b
	// --- [end][read][reference](Properties) ---

	// --- [begin][read][reference](time.Time) ---
	e := &time.Time{}
	f := buff.ReadInt()    // byte array length
	g := buff.ReadBytes(f) // byte array
	errB := e.UnmarshalBinary(g)
	if errB != nil {
		return errB
	}
	target.Start = *e
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	h := &time.Time{}
	l := buff.ReadInt()    // byte array length
	m := buff.ReadBytes(l) // byte array
	errC := h.UnmarshalBinary(m)
	if errC != nil {
		return errC
	}
	target.End = *h
	// --- [end][read][reference](time.Time) ---

	n := buff.ReadFloat64() // read float64
	target.Minutes = n

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errD := o.UnmarshalBinary(q)
	if errD != nil {
		return errD
	}
	target.ActiveStart = *o
	// --- [end][read][reference](time.Time) ---

	r := buff.ReadFloat64() // read float64
	target.CPUCoreHours = r

	s := buff.ReadFloat64() // read float64
	target.CPUCost = s

	t := buff.ReadFloat64() // read float64
	target.CPUEfficiency = t

	u := buff.ReadFloat64() // read float64
	target.GPUHours = u

	w := buff.ReadFloat64() // read float64
	target.GPUCost = w

	x := buff.ReadFloat64() // read float64
	target.NetworkCost = x

	y := buff.ReadFloat64() // read float64
	target.PVByteHours = y

	z := buff.ReadFloat64() // read float64
	target.PVCost = z

	aa := buff.ReadFloat64() // read float64
	target.RAMByteHours = aa

	bb := buff.ReadFloat64() // read float64
	target.RAMCost = bb

	cc := buff.ReadFloat64() // read float64
	target.RAMEfficiency = cc

	dd := buff.ReadFloat64() // read float64
	target.SharedCost = dd

	ee := buff.ReadFloat64() // read float64
	target.TotalCost = ee

	ff := buff.ReadFloat64() // read float64
	target.TotalEfficiency = ff

	return nil
}
+
+//--------------------------------------------------------------------------
+//  AllocationSet
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this AllocationSet instance
// into a byte array. Nil maps are encoded as a single 0 byte; non-nil as a
// 1 byte followed by length-prefixed entries.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AllocationSet) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.allocations == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]*Allocation) ---
		buff.WriteInt(len(target.allocations)) // map length
		for k, v := range target.allocations {
			buff.WriteString(k) // write string
			if v == nil {
				buff.WriteUInt8(uint8(0)) // write nil byte
			} else {
				buff.WriteUInt8(uint8(1)) // write non-nil byte

				// --- [begin][write][struct](Allocation) ---
				a, errA := v.MarshalBinary()
				if errA != nil {
					return nil, errA
				}
				buff.WriteInt(len(a))
				buff.WriteBytes(a)
				// --- [end][write][struct](Allocation) ---

			}
		}
		// --- [end][write][map](map[string]*Allocation) ---

	}
	if target.idleKeys == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]bool) ---
		buff.WriteInt(len(target.idleKeys)) // map length
		for kk, vv := range target.idleKeys {
			buff.WriteString(kk) // write string
			buff.WriteBool(vv)   // write bool
		}
		// --- [end][write][map](map[string]bool) ---

	}
	// --- [begin][write][struct](Window) ---
	b, errB := target.Window.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][struct](Window) ---

	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the AllocationSet type. Read order must mirror MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AllocationSet) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling AllocationSet. Expected 3, got %d", version)
	}

	if buff.ReadUInt8() == uint8(0) {
		target.allocations = nil
	} else {
		// --- [begin][read][map](map[string]*Allocation) ---
		a := make(map[string]*Allocation)
		b := buff.ReadInt() // map len
		for i := 0; i < b; i++ {
			var k string
			c := buff.ReadString() // read string
			k = c

			var v *Allocation
			if buff.ReadUInt8() == uint8(0) {
				v = nil
			} else {
				// --- [begin][read][struct](Allocation) ---
				d := &Allocation{}
				e := buff.ReadInt()    // byte array length
				f := buff.ReadBytes(e) // byte array
				errA := d.UnmarshalBinary(f)
				if errA != nil {
					return errA
				}
				v = d
				// --- [end][read][struct](Allocation) ---

			}
			a[k] = v
		}
		target.allocations = a
		// --- [end][read][map](map[string]*Allocation) ---

	}
	if buff.ReadUInt8() == uint8(0) {
		target.idleKeys = nil
	} else {
		// --- [begin][read][map](map[string]bool) ---
		g := make(map[string]bool)
		h := buff.ReadInt() // map len
		for j := 0; j < h; j++ {
			var kk string
			l := buff.ReadString() // read string
			kk = l

			var vv bool
			m := buff.ReadBool() // read bool
			vv = m

			g[kk] = vv
		}
		target.idleKeys = g
		// --- [end][read][map](map[string]bool) ---

	}
	// --- [begin][read][struct](Window) ---
	n := &Window{}
	o := buff.ReadInt()    // byte array length
	p := buff.ReadBytes(o) // byte array
	errB := n.UnmarshalBinary(p)
	if errB != nil {
		return errB
	}
	target.Window = *n
	// --- [end][read][struct](Window) ---

	return nil
}
+
+//--------------------------------------------------------------------------
+//  AllocationSetRange
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this AllocationSetRange instance
// into a byte array. A nil slice is encoded as a single 0 byte.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AllocationSetRange) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.allocations == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][slice]([]*AllocationSet) ---
		buff.WriteInt(len(target.allocations)) // array length
		for i := 0; i < len(target.allocations); i++ {
			if target.allocations[i] == nil {
				buff.WriteUInt8(uint8(0)) // write nil byte
			} else {
				buff.WriteUInt8(uint8(1)) // write non-nil byte

				// --- [begin][write][struct](AllocationSet) ---
				a, errA := target.allocations[i].MarshalBinary()
				if errA != nil {
					return nil, errA
				}
				buff.WriteInt(len(a))
				buff.WriteBytes(a)
				// --- [end][write][struct](AllocationSet) ---

			}
		}
		// --- [end][write][slice]([]*AllocationSet) ---

	}
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the AllocationSetRange type. Read order must mirror MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AllocationSetRange) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling AllocationSetRange. Expected 3, got %d", version)
	}

	if buff.ReadUInt8() == uint8(0) {
		target.allocations = nil
	} else {
		// --- [begin][read][slice]([]*AllocationSet) ---
		b := buff.ReadInt() // array len
		a := make([]*AllocationSet, b)
		for i := 0; i < b; i++ {
			var c *AllocationSet
			if buff.ReadUInt8() == uint8(0) {
				c = nil
			} else {
				// --- [begin][read][struct](AllocationSet) ---
				d := &AllocationSet{}
				e := buff.ReadInt()    // byte array length
				f := buff.ReadBytes(e) // byte array
				errA := d.UnmarshalBinary(f)
				if errA != nil {
					return errA
				}
				c = d
				// --- [end][read][struct](AllocationSet) ---

			}
			a[i] = c
		}
		target.allocations = a
		// --- [end][read][slice]([]*AllocationSet) ---

	}
	return nil
}
+
+//--------------------------------------------------------------------------
+//  Any
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Any instance
// into a byte array. Field order here must mirror UnmarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *Any) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// --- [begin][write][alias](AssetLabels) ---
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment) // write float64
	buff.WriteFloat64(target.Cost)       // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Any type. Read order must mirror MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *Any) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Any. Expected 3, got %d", version)
	}

	// --- [begin][read][alias](AssetLabels) ---
	var a map[string]string
	if buff.ReadUInt8() == uint8(0) {
		a = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		b := make(map[string]string)
		c := buff.ReadInt() // map len
		for i := 0; i < c; i++ {
			var k string
			d := buff.ReadString() // read string
			k = d

			var v string
			e := buff.ReadString() // read string
			v = e

			b[k] = v
		}
		a = b
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(a)
	// --- [end][read][alias](AssetLabels) ---

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		f := &AssetProperties{}
		g := buff.ReadInt()    // byte array length
		h := buff.ReadBytes(g) // byte array
		errA := f.UnmarshalBinary(h)
		if errA != nil {
			return errA
		}
		target.properties = f
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadFloat64() // read float64
	target.Cost = w

	return nil
}
+
+//--------------------------------------------------------------------------
+//  AssetProperties
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this AssetProperties instance
// into a byte array. Field order here must mirror UnmarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AssetProperties) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	buff.WriteString(target.Category)   // write string
	buff.WriteString(target.Provider)   // write string
	buff.WriteString(target.Account)    // write string
	buff.WriteString(target.Project)    // write string
	buff.WriteString(target.Service)    // write string
	buff.WriteString(target.Cluster)    // write string
	buff.WriteString(target.Name)       // write string
	buff.WriteString(target.ProviderID) // write string
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the AssetProperties type. Read order must mirror MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AssetProperties) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling AssetProperties. Expected 3, got %d", version)
	}

	a := buff.ReadString() // read string
	target.Category = a

	b := buff.ReadString() // read string
	target.Provider = b

	c := buff.ReadString() // read string
	target.Account = c

	d := buff.ReadString() // read string
	target.Project = d

	e := buff.ReadString() // read string
	target.Service = e

	f := buff.ReadString() // read string
	target.Cluster = f

	g := buff.ReadString() // read string
	target.Name = g

	h := buff.ReadString() // read string
	target.ProviderID = h

	return nil
}
+
+//--------------------------------------------------------------------------
+//  AssetSet
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this AssetSet instance
// into a byte array. Interface values are written as a type-name string
// (see typeToString) followed by the length-prefixed payload.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AssetSet) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.assets == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]Asset) ---
		buff.WriteInt(len(target.assets)) // map length
		for k, v := range target.assets {
			buff.WriteString(k) // write string
			if v == nil {
				buff.WriteUInt8(uint8(0)) // write nil byte
			} else {
				buff.WriteUInt8(uint8(1)) // write non-nil byte

				// --- [begin][write][interface](Asset) ---
				a := reflect.ValueOf(v).Interface()
				b, okA := a.(encoding.BinaryMarshaler)
				if !okA {
					return nil, fmt.Errorf("Type: %s does not implement encoding.BinaryMarshaler", typeToString(v))
				}
				c, errA := b.MarshalBinary()
				if errA != nil {
					return nil, errA
				}
				buff.WriteString(typeToString(v))
				buff.WriteInt(len(c))
				buff.WriteBytes(c)
				// --- [end][write][interface](Asset) ---

			}
		}
		// --- [end][write][map](map[string]Asset) ---

	}
	if target.props == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][slice]([]AssetProperty) ---
		buff.WriteInt(len(target.props)) // array length
		for i := 0; i < len(target.props); i++ {
			// --- [begin][write][alias](AssetProperty) ---
			buff.WriteString(string(target.props[i])) // write string
			// --- [end][write][alias](AssetProperty) ---

		}
		// --- [end][write][slice]([]AssetProperty) ---

	}
	// --- [begin][write][struct](Window) ---
	d, errB := target.Window.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the AssetSet type. Interface values are resolved to concrete types via
// typeMap using the serialized type-name string; read order must mirror
// MarshalBinary exactly.
// NOTE(review): bingen-generated (see file header); regenerate rather than
// edit by hand.
func (target *AssetSet) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling AssetSet. Expected 3, got %d", version)
	}

	if buff.ReadUInt8() == uint8(0) {
		target.assets = nil
	} else {
		// --- [begin][read][map](map[string]Asset) ---
		a := make(map[string]Asset)
		b := buff.ReadInt() // map len
		for i := 0; i < b; i++ {
			var k string
			c := buff.ReadString() // read string
			k = c

			var v Asset
			if buff.ReadUInt8() == uint8(0) {
				v = nil
			} else {
				// --- [begin][read][interface](Asset) ---
				d := buff.ReadString()
				_, e, _ := resolveType(d)
				if _, ok := typeMap[e]; !ok {
					return fmt.Errorf("Unknown Type: %s", e)
				}
				f, okA := reflect.New(typeMap[e]).Interface().(interface{ UnmarshalBinary([]byte) error })
				if !okA {
					return fmt.Errorf("Type: %s does not implement UnmarshalBinary([]byte) error", e)
				}
				g := buff.ReadInt()    // byte array length
				h := buff.ReadBytes(g) // byte array
				errA := f.UnmarshalBinary(h)
				if errA != nil {
					return errA
				}
				v = f.(Asset)
				// --- [end][read][interface](Asset) ---

			}
			a[k] = v
		}
		target.assets = a
		// --- [end][read][map](map[string]Asset) ---

	}
	if buff.ReadUInt8() == uint8(0) {
		target.props = nil
	} else {
		// --- [begin][read][slice]([]AssetProperty) ---
		m := buff.ReadInt() // array len
		l := make([]AssetProperty, m)
		for j := 0; j < m; j++ {
			// --- [begin][read][alias](AssetProperty) ---
			var o string
			p := buff.ReadString() // read string
			o = p

			n := AssetProperty(o)
			// --- [end][read][alias](AssetProperty) ---

			l[j] = n
		}
		target.props = l
		// --- [end][read][slice]([]AssetProperty) ---

	}
	// --- [begin][read][struct](Window) ---
	q := &Window{}
	r := buff.ReadInt()    // byte array length
	s := buff.ReadBytes(r) // byte array
	errB := q.UnmarshalBinary(s)
	if errB != nil {
		return errB
	}
	target.Window = *q
	// --- [end][read][struct](Window) ---

	return nil
}
+
+//--------------------------------------------------------------------------
+//  AssetSetRange
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this AssetSetRange instance
+// into a byte array
+func (target *AssetSetRange) MarshalBinary() (data []byte, err error) {
+	buff := util.NewBuffer()
+	buff.WriteUInt8(3) // version
+
+	if target.assets == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]*AssetSet) ---
+		buff.WriteInt(len(target.assets)) // array length
+		for i := 0; i < len(target.assets); i++ {
+			if target.assets[i] == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](AssetSet) ---
+				a, errA := target.assets[i].MarshalBinary()
+				if errA != nil {
+					return nil, errA
+				}
+				buff.WriteInt(len(a))
+				buff.WriteBytes(a)
+				// --- [end][write][struct](AssetSet) ---
+
+			}
+		}
+		// --- [end][write][slice]([]*AssetSet) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the AssetSetRange type
+func (target *AssetSetRange) UnmarshalBinary(data []byte) error {
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != 3 {
+		return fmt.Errorf("Invalid Version Unmarshaling AssetSetRange. Expected 3, got %d", version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.assets = nil
+	} else {
+		// --- [begin][read][slice]([]*AssetSet) ---
+		b := buff.ReadInt() // array len
+		a := make([]*AssetSet, b)
+		for i := 0; i < b; i++ {
+			var c *AssetSet
+			if buff.ReadUInt8() == uint8(0) {
+				c = nil
+			} else {
+				// --- [begin][read][struct](AssetSet) ---
+				d := &AssetSet{}
+				e := buff.ReadInt()    // byte array length
+				f := buff.ReadBytes(e) // byte array
+				errA := d.UnmarshalBinary(f)
+				if errA != nil {
+					return errA
+				}
+				c = d
+				// --- [end][read][struct](AssetSet) ---
+
+			}
+			a[i] = c
+		}
+		target.assets = a
+		// --- [end][read][slice]([]*AssetSet) ---
+
+	}
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Breakdown
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Breakdown instance
+// into a byte array
+func (target *Breakdown) MarshalBinary() (data []byte, err error) {
+	buff := util.NewBuffer()
+	buff.WriteUInt8(3) // version
+
+	buff.WriteFloat64(target.Idle)   // write float64
+	buff.WriteFloat64(target.Other)  // write float64
+	buff.WriteFloat64(target.System) // write float64
+	buff.WriteFloat64(target.User)   // write float64
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Breakdown type
+func (target *Breakdown) UnmarshalBinary(data []byte) error {
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != 3 {
+		return fmt.Errorf("Invalid Version Unmarshaling Breakdown. Expected 3, got %d", version)
+	}
+
+	a := buff.ReadFloat64() // read float64
+	target.Idle = a
+
+	b := buff.ReadFloat64() // read float64
+	target.Other = b
+
+	c := buff.ReadFloat64() // read float64
+	target.System = c
+
+	d := buff.ReadFloat64() // read float64
+	target.User = d
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Cloud
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Cloud instance
// into a byte array. Layout: version byte (3), labels map (nil marker, then
// length and key/value strings), properties (nil marker, then length-prefixed
// payload), start and end times (length-prefixed time.Time encodings), the
// length-prefixed window, then the adjustment and Cost float64s.
// UnmarshalBinary must read fields back in exactly this order.
func (target *Cloud) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment) // write float64
	buff.WriteFloat64(target.Cost)       // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Cloud type, reading fields in exactly the order written by
// Cloud.MarshalBinary: version, labels, properties, start, end, window,
// adjustment, Cost. Returns an error on a version mismatch or a nested
// decode failure.
func (target *Cloud) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Cloud. Expected 3, got %d", version)
	}

	// --- [begin][read][alias](AssetLabels) ---
	var a map[string]string
	if buff.ReadUInt8() == uint8(0) {
		// nil marker: the labels map was nil when encoded
		a = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		b := make(map[string]string)
		c := buff.ReadInt() // map len
		for i := 0; i < c; i++ {
			var k string
			d := buff.ReadString() // read string
			k = d

			var v string
			e := buff.ReadString() // read string
			v = e

			b[k] = v
		}
		a = b
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(a)
	// --- [end][read][alias](AssetLabels) ---

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		f := &AssetProperties{}
		g := buff.ReadInt()    // byte array length
		h := buff.ReadBytes(g) // byte array
		errA := f.UnmarshalBinary(h)
		if errA != nil {
			return errA
		}
		target.properties = f
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadFloat64() // read float64
	target.Cost = w

	return nil
}
+
+//--------------------------------------------------------------------------
+//  ClusterManagement
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this ClusterManagement instance
// into a byte array. Layout: version byte (3), labels map (nil marker, then
// length and key/value strings), properties (nil marker, then length-prefixed
// payload), the length-prefixed window, then the Cost float64.
// UnmarshalBinary must read fields back in exactly this order.
func (target *ClusterManagement) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][struct](Window) ---
	b, errB := target.window.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.Cost) // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the ClusterManagement type, reading fields in exactly the order written by
// ClusterManagement.MarshalBinary: version, labels, properties, window, Cost.
// Returns an error on a version mismatch or a nested decode failure.
func (target *ClusterManagement) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling ClusterManagement. Expected 3, got %d", version)
	}

	// --- [begin][read][alias](AssetLabels) ---
	var a map[string]string
	if buff.ReadUInt8() == uint8(0) {
		// nil marker: the labels map was nil when encoded
		a = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		b := make(map[string]string)
		c := buff.ReadInt() // map len
		for i := 0; i < c; i++ {
			var k string
			d := buff.ReadString() // read string
			k = d

			var v string
			e := buff.ReadString() // read string
			v = e

			b[k] = v
		}
		a = b
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(a)
	// --- [end][read][alias](AssetLabels) ---

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		f := &AssetProperties{}
		g := buff.ReadInt()    // byte array length
		h := buff.ReadBytes(g) // byte array
		errA := f.UnmarshalBinary(h)
		if errA != nil {
			return errA
		}
		target.properties = f
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][struct](Window) ---
	l := &Window{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.window = *l
	// --- [end][read][struct](Window) ---

	o := buff.ReadFloat64() // read float64
	target.Cost = o

	return nil
}
+
+//--------------------------------------------------------------------------
+//  Disk
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Disk instance
// into a byte array. Layout: version byte (3), labels map, properties,
// start and end times, window, then the adjustment, Cost, ByteHours and
// Local float64s, and finally the optional Breakdown (nil marker plus
// length-prefixed payload). UnmarshalBinary must read fields back in
// exactly this order.
func (target *Disk) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment) // write float64
	buff.WriteFloat64(target.Cost)       // write float64
	buff.WriteFloat64(target.ByteHours)  // write float64
	buff.WriteFloat64(target.Local)      // write float64
	if target.Breakdown == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](Breakdown) ---
		e, errE := target.Breakdown.MarshalBinary()
		if errE != nil {
			return nil, errE
		}
		buff.WriteInt(len(e))
		buff.WriteBytes(e)
		// --- [end][write][struct](Breakdown) ---

	}
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Disk type, reading fields in exactly the order written by
// Disk.MarshalBinary: version, labels, properties, start, end, window,
// adjustment, Cost, ByteHours, Local, Breakdown. Returns an error on a
// version mismatch or a nested decode failure.
func (target *Disk) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Disk. Expected 3, got %d", version)
	}

	// --- [begin][read][alias](AssetLabels) ---
	var a map[string]string
	if buff.ReadUInt8() == uint8(0) {
		// nil marker: the labels map was nil when encoded
		a = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		b := make(map[string]string)
		c := buff.ReadInt() // map len
		for i := 0; i < c; i++ {
			var k string
			d := buff.ReadString() // read string
			k = d

			var v string
			e := buff.ReadString() // read string
			v = e

			b[k] = v
		}
		a = b
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(a)
	// --- [end][read][alias](AssetLabels) ---

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		f := &AssetProperties{}
		g := buff.ReadInt()    // byte array length
		h := buff.ReadBytes(g) // byte array
		errA := f.UnmarshalBinary(h)
		if errA != nil {
			return errA
		}
		target.properties = f
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadFloat64() // read float64
	target.Cost = w

	x := buff.ReadFloat64() // read float64
	target.ByteHours = x

	y := buff.ReadFloat64() // read float64
	target.Local = y

	if buff.ReadUInt8() == uint8(0) {
		target.Breakdown = nil
	} else {
		// --- [begin][read][struct](Breakdown) ---
		z := &Breakdown{}
		aa := buff.ReadInt()     // byte array length
		bb := buff.ReadBytes(aa) // byte array
		errE := z.UnmarshalBinary(bb)
		if errE != nil {
			return errE
		}
		target.Breakdown = z
		// --- [end][read][struct](Breakdown) ---

	}
	return nil
}
+
+//--------------------------------------------------------------------------
+//  LoadBalancer
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this LoadBalancer instance
// into a byte array. Layout: version byte (3), properties (nil marker, then
// length-prefixed payload), labels map (nil marker, then length and key/value
// strings), start and end times, the length-prefixed window, then the
// adjustment and Cost float64s. Note that, unlike Cloud/Disk, properties are
// written BEFORE labels here; UnmarshalBinary must read in the same order.
func (target *LoadBalancer) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment) // write float64
	buff.WriteFloat64(target.Cost)       // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the LoadBalancer type, reading fields in exactly the order written by
// LoadBalancer.MarshalBinary: version, properties, labels, start, end,
// window, adjustment, Cost. Returns an error on a version mismatch or a
// nested decode failure.
func (target *LoadBalancer) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling LoadBalancer. Expected 3, got %d", version)
	}

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		a := &AssetProperties{}
		b := buff.ReadInt()    // byte array length
		c := buff.ReadBytes(b) // byte array
		errA := a.UnmarshalBinary(c)
		if errA != nil {
			return errA
		}
		target.properties = a
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][alias](AssetLabels) ---
	var d map[string]string
	if buff.ReadUInt8() == uint8(0) {
		// nil marker: the labels map was nil when encoded
		d = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := make(map[string]string)
		f := buff.ReadInt() // map len
		for i := 0; i < f; i++ {
			var k string
			g := buff.ReadString() // read string
			k = g

			var v string
			h := buff.ReadString() // read string
			v = h

			e[k] = v
		}
		d = e
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(d)
	// --- [end][read][alias](AssetLabels) ---

	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadFloat64() // read float64
	target.Cost = w

	return nil
}
+
+//--------------------------------------------------------------------------
+//  Network
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Network instance
// into a byte array. Layout: version byte (3), properties (nil marker, then
// length-prefixed payload), labels map (nil marker, then length and key/value
// strings), start and end times, the length-prefixed window, then the
// adjustment and Cost float64s. Properties are written BEFORE labels here;
// UnmarshalBinary must read in the same order.
func (target *Network) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment) // write float64
	buff.WriteFloat64(target.Cost)       // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Network type, reading fields in exactly the order written by
// Network.MarshalBinary: version, properties, labels, start, end, window,
// adjustment, Cost. Returns an error on a version mismatch or a nested
// decode failure.
func (target *Network) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Network. Expected 3, got %d", version)
	}

	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		a := &AssetProperties{}
		b := buff.ReadInt()    // byte array length
		c := buff.ReadBytes(b) // byte array
		errA := a.UnmarshalBinary(c)
		if errA != nil {
			return errA
		}
		target.properties = a
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][alias](AssetLabels) ---
	var d map[string]string
	if buff.ReadUInt8() == uint8(0) {
		// nil marker: the labels map was nil when encoded
		d = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := make(map[string]string)
		f := buff.ReadInt() // map len
		for i := 0; i < f; i++ {
			var k string
			g := buff.ReadString() // read string
			k = g

			var v string
			h := buff.ReadString() // read string
			v = h

			e[k] = v
		}
		d = e
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(d)
	// --- [end][read][alias](AssetLabels) ---

	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadFloat64() // read float64
	target.Cost = w

	return nil
}
+
+//--------------------------------------------------------------------------
+//  Node
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this Node instance
// into a byte array. Layout: version byte (3), properties, labels, start and
// end times, window, adjustment, NodeType string, CPUCoreHours and
// RAMByteHours float64s, the optional CPUBreakdown and RAMBreakdown
// (each a nil marker plus length-prefixed payload), then the CPUCost,
// GPUCost, RAMCost, Discount and Preemptible float64s. UnmarshalBinary must
// read fields back in exactly this order.
func (target *Node) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// --- [begin][write][alias](AssetLabels) ---
	// NOTE: Go map iteration order is randomized, so repeated encodings of the
	// same labels may differ byte-for-byte (decoding is unaffected).
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	// --- [begin][write][reference](time.Time) ---
	b, errB := target.start.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][reference](time.Time) ---
	c, errC := target.end.MarshalBinary()
	if errC != nil {
		return nil, errC
	}
	buff.WriteInt(len(c))
	buff.WriteBytes(c)
	// --- [end][write][reference](time.Time) ---

	// --- [begin][write][struct](Window) ---
	d, errD := target.window.MarshalBinary()
	if errD != nil {
		return nil, errD
	}
	buff.WriteInt(len(d))
	buff.WriteBytes(d)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.adjustment)   // write float64
	buff.WriteString(target.NodeType)      // write string
	buff.WriteFloat64(target.CPUCoreHours) // write float64
	buff.WriteFloat64(target.RAMByteHours) // write float64
	if target.CPUBreakdown == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](Breakdown) ---
		e, errE := target.CPUBreakdown.MarshalBinary()
		if errE != nil {
			return nil, errE
		}
		buff.WriteInt(len(e))
		buff.WriteBytes(e)
		// --- [end][write][struct](Breakdown) ---

	}
	if target.RAMBreakdown == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](Breakdown) ---
		f, errF := target.RAMBreakdown.MarshalBinary()
		if errF != nil {
			return nil, errF
		}
		buff.WriteInt(len(f))
		buff.WriteBytes(f)
		// --- [end][write][struct](Breakdown) ---

	}
	buff.WriteFloat64(target.CPUCost)     // write float64
	buff.WriteFloat64(target.GPUCost)     // write float64
	buff.WriteFloat64(target.RAMCost)     // write float64
	buff.WriteFloat64(target.Discount)    // write float64
	buff.WriteFloat64(target.Preemptible) // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the Node type.
//
// NOTE(review): generated codec code. The read sequence below must mirror the
// write sequence in Node.MarshalBinary exactly; both are gated on codec
// version 3.
func (target *Node) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling Node. Expected 3, got %d", version)
	}

	// A 0 marker byte means properties was nil at encode time; otherwise a
	// length-prefixed AssetProperties payload follows.
	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		a := &AssetProperties{}
		b := buff.ReadInt()    // byte array length
		c := buff.ReadBytes(b) // byte array
		errA := a.UnmarshalBinary(c)
		if errA != nil {
			return errA
		}
		target.properties = a
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][alias](AssetLabels) ---
	var d map[string]string
	if buff.ReadUInt8() == uint8(0) {
		d = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := make(map[string]string)
		f := buff.ReadInt() // map len
		for i := 0; i < f; i++ {
			var k string
			g := buff.ReadString() // read string
			k = g

			var v string
			h := buff.ReadString() // read string
			v = h

			e[k] = v
		}
		d = e
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(d)
	// --- [end][read][alias](AssetLabels) ---

	// start: length-prefixed time.Time binary payload.
	// --- [begin][read][reference](time.Time) ---
	l := &time.Time{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.start = *l
	// --- [end][read][reference](time.Time) ---

	// end: length-prefixed time.Time binary payload.
	// --- [begin][read][reference](time.Time) ---
	o := &time.Time{}
	p := buff.ReadInt()    // byte array length
	q := buff.ReadBytes(p) // byte array
	errC := o.UnmarshalBinary(q)
	if errC != nil {
		return errC
	}
	target.end = *o
	// --- [end][read][reference](time.Time) ---

	// --- [begin][read][struct](Window) ---
	r := &Window{}
	s := buff.ReadInt()    // byte array length
	t := buff.ReadBytes(s) // byte array
	errD := r.UnmarshalBinary(t)
	if errD != nil {
		return errD
	}
	target.window = *r
	// --- [end][read][struct](Window) ---

	u := buff.ReadFloat64() // read float64
	target.adjustment = u

	w := buff.ReadString() // read string
	target.NodeType = w

	x := buff.ReadFloat64() // read float64
	target.CPUCoreHours = x

	y := buff.ReadFloat64() // read float64
	target.RAMByteHours = y

	// Breakdowns are nilable pointers: 0 marker byte means nil, otherwise a
	// length-prefixed Breakdown payload follows.
	if buff.ReadUInt8() == uint8(0) {
		target.CPUBreakdown = nil
	} else {
		// --- [begin][read][struct](Breakdown) ---
		z := &Breakdown{}
		aa := buff.ReadInt()     // byte array length
		bb := buff.ReadBytes(aa) // byte array
		errE := z.UnmarshalBinary(bb)
		if errE != nil {
			return errE
		}
		target.CPUBreakdown = z
		// --- [end][read][struct](Breakdown) ---

	}
	if buff.ReadUInt8() == uint8(0) {
		target.RAMBreakdown = nil
	} else {
		// --- [begin][read][struct](Breakdown) ---
		cc := &Breakdown{}
		dd := buff.ReadInt()     // byte array length
		ee := buff.ReadBytes(dd) // byte array
		errF := cc.UnmarshalBinary(ee)
		if errF != nil {
			return errF
		}
		target.RAMBreakdown = cc
		// --- [end][read][struct](Breakdown) ---

	}
	ff := buff.ReadFloat64() // read float64
	target.CPUCost = ff

	gg := buff.ReadFloat64() // read float64
	target.GPUCost = gg

	hh := buff.ReadFloat64() // read float64
	target.RAMCost = hh

	ll := buff.ReadFloat64() // read float64
	target.Discount = ll

	mm := buff.ReadFloat64() // read float64
	target.Preemptible = mm

	return nil
}
+
+//--------------------------------------------------------------------------
+//  SharedAsset
+//--------------------------------------------------------------------------
+
// MarshalBinary serializes the internal properties of this SharedAsset instance
// into a byte array.
//
// NOTE(review): generated codec code (version 3). Field write order here must
// match the read order in SharedAsset.UnmarshalBinary exactly.
func (target *SharedAsset) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// Nilable pointer fields are prefixed with a 0 (nil) or 1 (present) byte.
	if target.properties == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][struct](AssetProperties) ---
		a, errA := target.properties.MarshalBinary()
		if errA != nil {
			return nil, errA
		}
		buff.WriteInt(len(a))
		buff.WriteBytes(a)
		// --- [end][write][struct](AssetProperties) ---

	}
	// AssetLabels is an alias of map[string]string; encoded as nil marker,
	// then length-prefixed key/value string pairs.
	// --- [begin][write][alias](AssetLabels) ---
	if map[string]string(target.labels) == nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte

		// --- [begin][write][map](map[string]string) ---
		buff.WriteInt(len(map[string]string(target.labels))) // map length
		for k, v := range map[string]string(target.labels) {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
		// --- [end][write][map](map[string]string) ---

	}
	// --- [end][write][alias](AssetLabels) ---

	// --- [begin][write][struct](Window) ---
	b, errB := target.window.MarshalBinary()
	if errB != nil {
		return nil, errB
	}
	buff.WriteInt(len(b))
	buff.WriteBytes(b)
	// --- [end][write][struct](Window) ---

	buff.WriteFloat64(target.Cost) // write float64
	return buff.Bytes(), nil
}
+
// UnmarshalBinary uses the data passed byte array to set all the internal properties of
// the SharedAsset type.
//
// NOTE(review): generated codec code. The read sequence below must mirror the
// write sequence in SharedAsset.MarshalBinary exactly; both are gated on
// codec version 3.
func (target *SharedAsset) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)

	// Codec Version Check
	version := buff.ReadUInt8()
	if version != 3 {
		return fmt.Errorf("Invalid Version Unmarshaling SharedAsset. Expected 3, got %d", version)
	}

	// A 0 marker byte means properties was nil at encode time; otherwise a
	// length-prefixed AssetProperties payload follows.
	if buff.ReadUInt8() == uint8(0) {
		target.properties = nil
	} else {
		// --- [begin][read][struct](AssetProperties) ---
		a := &AssetProperties{}
		b := buff.ReadInt()    // byte array length
		c := buff.ReadBytes(b) // byte array
		errA := a.UnmarshalBinary(c)
		if errA != nil {
			return errA
		}
		target.properties = a
		// --- [end][read][struct](AssetProperties) ---

	}
	// --- [begin][read][alias](AssetLabels) ---
	var d map[string]string
	if buff.ReadUInt8() == uint8(0) {
		d = nil
	} else {
		// --- [begin][read][map](map[string]string) ---
		e := make(map[string]string)
		f := buff.ReadInt() // map len
		for i := 0; i < f; i++ {
			var k string
			g := buff.ReadString() // read string
			k = g

			var v string
			h := buff.ReadString() // read string
			v = h

			e[k] = v
		}
		d = e
		// --- [end][read][map](map[string]string) ---

	}
	target.labels = AssetLabels(d)
	// --- [end][read][alias](AssetLabels) ---

	// --- [begin][read][struct](Window) ---
	l := &Window{}
	m := buff.ReadInt()    // byte array length
	n := buff.ReadBytes(m) // byte array
	errB := l.UnmarshalBinary(n)
	if errB != nil {
		return errB
	}
	target.window = *l
	// --- [end][read][struct](Window) ---

	o := buff.ReadFloat64() // read float64
	target.Cost = o

	return nil
}
+
+//--------------------------------------------------------------------------
+//  Window
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Window instance
+// into a byte array
+func (target *Window) MarshalBinary() (data []byte, err error) {
+	buff := util.NewBuffer()
+	buff.WriteUInt8(3) // version
+
+	if target.start == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][reference](time.Time) ---
+		a, errA := target.start.MarshalBinary()
+		if errA != nil {
+			return nil, errA
+		}
+		buff.WriteInt(len(a))
+		buff.WriteBytes(a)
+		// --- [end][write][reference](time.Time) ---
+
+	}
+	if target.end == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][reference](time.Time) ---
+		b, errB := target.end.MarshalBinary()
+		if errB != nil {
+			return nil, errB
+		}
+		buff.WriteInt(len(b))
+		buff.WriteBytes(b)
+		// --- [end][write][reference](time.Time) ---
+
+	}
+	return buff.Bytes(), nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Window type
+func (target *Window) UnmarshalBinary(data []byte) error {
+	buff := util.NewBufferFromBytes(data)
+
+	// Codec Version Check
+	version := buff.ReadUInt8()
+	if version != 3 {
+		return fmt.Errorf("Invalid Version Unmarshaling Window. Expected 3, got %d", version)
+	}
+
+	if buff.ReadUInt8() == uint8(0) {
+		target.start = nil
+	} else {
+		// --- [begin][read][reference](time.Time) ---
+		a := &time.Time{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.start = a
+		// --- [end][read][reference](time.Time) ---
+
+	}
+	if buff.ReadUInt8() == uint8(0) {
+		target.end = nil
+	} else {
+		// --- [begin][read][reference](time.Time) ---
+		d := &time.Time{}
+		e := buff.ReadInt()    // byte array length
+		f := buff.ReadBytes(e) // byte array
+		errB := d.UnmarshalBinary(f)
+		if errB != nil {
+			return errB
+		}
+		target.end = d
+		// --- [end][read][reference](time.Time) ---
+
+	}
+	return nil
+}

+ 638 - 0
pkg/kubecost/kubecost_codecs_test.go

@@ -0,0 +1,638 @@
+package kubecost
+
+import (
+	"testing"
+	"time"
+)
+
// TestAllocation_BinaryEncoding is a placeholder for Allocation codec
// round-trip coverage.
func TestAllocation_BinaryEncoding(t *testing.T) {
	// TODO niko/etl
}
+
// TestAllocationSet_BinaryEncoding is a placeholder for AllocationSet codec
// round-trip coverage.
func TestAllocationSet_BinaryEncoding(t *testing.T) {
	// TODO niko/etl
}
+
+func BenchmarkAllocationSetRange_BinaryEncoding(b *testing.B) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+	startD2 := startYesterday
+	startD1 := startD2.Add(-day)
+	startD0 := startD1.Add(-day)
+
+	var asr0, asr1 *AllocationSetRange
+	var bs []byte
+	var err error
+
+	asr0 = NewAllocationSetRange(
+		generateAllocationSet(startD0),
+		generateAllocationSet(startD1),
+		generateAllocationSet(startD2),
+	)
+
+	for it := 0; it < b.N; it++ {
+		bs, err = asr0.MarshalBinary()
+		if err != nil {
+			b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			return
+		}
+
+		asr1 = &AllocationSetRange{}
+		err = asr1.UnmarshalBinary(bs)
+		if err != nil {
+			b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			return
+		}
+
+		if asr0.Length() != asr1.Length() {
+			b.Fatalf("AllocationSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
+		}
+		if !asr0.Window().Equal(asr1.Window()) {
+			b.Fatalf("AllocationSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
+		}
+
+		asr0.Each(func(i int, as0 *AllocationSet) {
+			as1, err := asr1.Get(i)
+			if err != nil {
+				b.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
+			}
+
+			if as0.Length() != as1.Length() {
+				b.Fatalf("AllocationSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
+			}
+			if !as0.Window.Equal(as1.Window) {
+				b.Fatalf("AllocationSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
+			}
+
+			as0.Each(func(k string, a0 *Allocation) {
+				a1 := as1.Get(k)
+				if a1 == nil {
+					b.Fatalf("AllocationSetRange.Binary: missing Allocation: %s", a0)
+				}
+
+				if !a0.Equal(a1) {
+					b.Fatalf("AllocationSetRange.Binary: unequal Allocations \"%s\": expected %s; found %s", k, a0, a1)
+				}
+			})
+		})
+	}
+}
+
// TestAllocationSetRange_BinaryEncoding round-trips a three-day range of
// generated AllocationSets through MarshalBinary/UnmarshalBinary, asserting
// that range length, windows, and every individual Allocation survive intact.
func TestAllocationSetRange_BinaryEncoding(t *testing.T) {
	// Build three consecutive day-long set windows ending yesterday (UTC).
	endYesterday := time.Now().UTC().Truncate(day)
	startYesterday := endYesterday.Add(-day)
	startD2 := startYesterday
	startD1 := startD2.Add(-day)
	startD0 := startD1.Add(-day)

	var asr0, asr1 *AllocationSetRange
	var bs []byte
	var err error

	asr0 = NewAllocationSetRange(
		generateAllocationSet(startD0),
		generateAllocationSet(startD1),
		generateAllocationSet(startD2),
	)

	// Encode the original range.
	bs, err = asr0.MarshalBinary()
	if err != nil {
		t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
		return
	}

	// Decode into a fresh range.
	asr1 = &AllocationSetRange{}
	err = asr1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
		return
	}

	if asr0.Length() != asr1.Length() {
		t.Fatalf("AllocationSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
	}
	if !asr0.Window().Equal(asr1.Window()) {
		t.Fatalf("AllocationSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
	}

	// Compare each decoded AllocationSet, and each Allocation within it,
	// against its original counterpart.
	asr0.Each(func(i int, as0 *AllocationSet) {
		as1, err := asr1.Get(i)
		if err != nil {
			t.Fatalf("AllocationSetRange.Binary: unexpected error: %s", err)
		}

		if as0.Length() != as1.Length() {
			t.Fatalf("AllocationSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
		}
		if !as0.Window.Equal(as1.Window) {
			t.Fatalf("AllocationSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
		}

		as0.Each(func(k string, a0 *Allocation) {
			a1 := as1.Get(k)
			if a1 == nil {
				t.Fatalf("AllocationSetRange.Binary: missing Allocation: %s", a0)
			}

			if !a0.Equal(a1) {
				t.Fatalf("AllocationSetRange.Binary: unequal Allocations \"%s\": expected %s; found %s", k, a0, a1)
			}
		})
	})
}
+
// TestAny_BinaryEncoding round-trips a populated Any asset through its binary
// codec and verifies each encoded field individually. The ProviderID value
// uses multi-byte UTF-8 to exercise string encoding.
func TestAny_BinaryEncoding(t *testing.T) {
	start := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	window := NewWindow(&start, &end)

	var a0, a1 *Any
	var bs []byte
	var err error

	a0 = NewAsset(*window.start, *window.end, window)
	a0.SetProperties(&AssetProperties{
		Name:       "any1",
		Cluster:    "cluster1",
		ProviderID: "世界",
	})
	a0.Cost = 123.45
	a0.SetAdjustment(1.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("Any.Binary: unexpected error: %s", err)
	}

	a1 = &Any{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Any.Binary: unexpected error: %s", err)
	}

	// Field-by-field comparison of the decoded asset against the original.
	if a1.Properties().Name != a0.Properties().Name {
		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().Name, a1.Properties().Name)
	}
	if a1.Properties().Cluster != a0.Properties().Cluster {
		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().Cluster, a1.Properties().Cluster)
	}
	if a1.Properties().ProviderID != a0.Properties().ProviderID {
		t.Fatalf("Any.Binary: expected %s, found %s", a0.Properties().ProviderID, a1.Properties().ProviderID)
	}
	if a1.Adjustment() != a0.Adjustment() {
		t.Fatalf("Any.Binary: expected %f, found %f", a0.Adjustment(), a1.Adjustment())
	}
	if a1.TotalCost() != a0.TotalCost() {
		t.Fatalf("Any.Binary: expected %f, found %f", a0.TotalCost(), a1.TotalCost())
	}
	if !a1.Window().Equal(a0.Window()) {
		t.Fatalf("Any.Binary: expected %s, found %s", a0.Window(), a1.Window())
	}
}
+
// TestAsset_BinaryEncoding is a placeholder for Asset codec round-trip
// coverage.
func TestAsset_BinaryEncoding(t *testing.T) {
	// TODO niko/etl
}
+
// TestAssetSet_BinaryEncoding is a placeholder for AssetSet codec round-trip
// coverage.
func TestAssetSet_BinaryEncoding(t *testing.T) {
	// TODO niko/etl
}
+
// TestAssetSetRange_BinaryEncoding round-trips a three-day range of generated
// AssetSets through MarshalBinary/UnmarshalBinary, asserting that range
// length, windows, and every individual Asset survive intact.
func TestAssetSetRange_BinaryEncoding(t *testing.T) {
	// Build three consecutive day-long set windows ending yesterday (UTC).
	endYesterday := time.Now().UTC().Truncate(day)
	startYesterday := endYesterday.Add(-day)
	startD2 := startYesterday
	startD1 := startD2.Add(-day)
	startD0 := startD1.Add(-day)

	var asr0, asr1 *AssetSetRange
	var bs []byte
	var err error

	asr0 = NewAssetSetRange(
		generateAssetSet(startD0),
		generateAssetSet(startD1),
		generateAssetSet(startD2),
	)

	// Encode the original range.
	bs, err = asr0.MarshalBinary()
	if err != nil {
		t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
		return
	}

	// Decode into a fresh range.
	asr1 = &AssetSetRange{}
	err = asr1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
		return
	}

	if asr0.Length() != asr1.Length() {
		t.Fatalf("AssetSetRange.Binary: expected %d; found %d", asr0.Length(), asr1.Length())
	}
	if !asr0.Window().Equal(asr1.Window()) {
		t.Fatalf("AssetSetRange.Binary: expected %s; found %s", asr0.Window(), asr1.Window())
	}

	// Compare each decoded AssetSet, and each Asset within it, against its
	// original counterpart.
	asr0.Each(func(i int, as0 *AssetSet) {
		as1, err := asr1.Get(i)
		if err != nil {
			t.Fatalf("AssetSetRange.Binary: unexpected error: %s", err)
		}

		if as0.Length() != as1.Length() {
			t.Fatalf("AssetSetRange.Binary: expected %d; found %d", as0.Length(), as1.Length())
		}
		if !as0.Window.Equal(as1.Window) {
			t.Fatalf("AssetSetRange.Binary: expected %s; found %s", as0.Window, as1.Window)
		}

		as0.Each(func(k string, a0 Asset) {
			a1, ok := as1.Get(k)
			if !ok {
				t.Fatalf("AssetSetRange.Binary: missing Asset: %s", a0)
			}

			if !a0.Equal(a1) {
				t.Fatalf("AssetSetRange.Binary: unequal Assets \"%s\": expected %s; found %s", k, a0, a1)
			}
		})
	})
}
+
+func TestBreakdown_BinaryEncoding(t *testing.T) {
+	var b0, b1 *Breakdown
+	var bs []byte
+	var err error
+
+	b0 = &Breakdown{
+		Idle:   0.75,
+		Other:  0.1,
+		System: 0.0,
+		User:   0.15,
+	}
+
+	bs, err = b0.MarshalBinary()
+	if err != nil {
+		t.Fatalf("Breakdown.Binary: unexpected error: %s", err)
+	}
+
+	b1 = &Breakdown{}
+	err = b1.UnmarshalBinary(bs)
+	if err != nil {
+		t.Fatalf("Breakdown.Binary: unexpected error: %s", err)
+	}
+
+	if b1.Idle != b0.Idle {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.Idle, b1.Idle)
+	}
+	if b1.Other != b0.Other {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.Other, b1.Other)
+	}
+	if b1.System != b0.System {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.System, b1.System)
+	}
+	if b1.User != b0.User {
+		t.Fatalf("Breakdown.Binary: expected %f, found %f", b0.User, b1.User)
+	}
+}
+
// TestCloudAny_BinaryEncoding round-trips a Cloud asset (with a negative
// adjustment) through its binary codec and compares via Cloud.Equal.
func TestCloudAny_BinaryEncoding(t *testing.T) {
	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	we := ws.Add(24 * time.Hour)
	window := NewWindow(&ws, &we)

	var a0, a1 *Cloud
	var bs []byte
	var err error

	a0 = NewCloud(ComputeCategory, "providerid1", *window.start, *window.end, window)
	a0.Cost = 6.09
	a0.SetAdjustment(-1.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("CloudAny.Binary: unexpected error: %s", err)
	}

	a1 = &Cloud{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("CloudAny.Binary: unexpected error: %s", err)
	}

	if !a0.Equal(a1) {
		t.Fatalf("CloudAny.Binary: expected %v, found %v", a0, a1)
	}
}
+
// TestClusterManagement_BinaryEncoding round-trips a ClusterManagement asset
// through its binary codec and compares via ClusterManagement.Equal.
func TestClusterManagement_BinaryEncoding(t *testing.T) {
	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	we := ws.Add(24 * time.Hour)
	window := NewWindow(&ws, &we)

	var a0, a1 *ClusterManagement
	var bs []byte
	var err error

	a0 = NewClusterManagement("aws", "cluster1", window)
	a0.Cost = 4.003
	a0.SetAdjustment(-3.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("ClusterManagement.Binary: unexpected error: %s", err)
	}

	a1 = &ClusterManagement{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("ClusterManagement.Binary: unexpected error: %s", err)
	}

	if !a0.Equal(a1) {
		t.Fatalf("ClusterManagement.Binary: expected %v, found %v", a0, a1)
	}
}
+
// TestDisk_BinaryEncoding round-trips a fully populated Disk asset (including
// its usage Breakdown and a run interval narrower than the window) through
// its binary codec and compares via Disk.Equal.
func TestDisk_BinaryEncoding(t *testing.T) {
	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	we := ws.Add(24 * time.Hour)
	window := NewWindow(&ws, &we)
	hours := window.Duration().Hours()

	// The disk's observed start/end lie inside the window.
	start := time.Date(2020, time.September, 16, 3, 0, 0, 0, time.UTC)
	end := time.Date(2020, time.September, 16, 15, 12, 0, 0, time.UTC)

	var a0, a1 *Disk
	var bs []byte
	var err error

	a0 = NewDisk("any1", "cluster1", "世界", start, end, window)
	a0.ByteHours = 100 * 1024 * 1024 * 1024 * hours
	a0.Cost = 4.003
	a0.Local = 0.4
	a0.Breakdown = &Breakdown{
		Idle:   0.9,
		Other:  0.05,
		System: 0.05,
		User:   0.0,
	}
	a0.SetAdjustment(-3.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("Disk.Binary: unexpected error: %s", err)
	}

	a1 = &Disk{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Disk.Binary: unexpected error: %s", err)
	}

	if !a0.Equal(a1) {
		t.Fatalf("Disk.Binary: expected %v, found %v", a0, a1)
	}
}
+
// TestNode_BinaryEncoding round-trips a fully populated Node asset (costs,
// both CPU and RAM Breakdowns, discount, preemptible flag, adjustment)
// through its binary codec and compares via Node.Equal.
func TestNode_BinaryEncoding(t *testing.T) {
	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	we := ws.Add(24 * time.Hour)
	window := NewWindow(&ws, &we)
	hours := window.Duration().Hours()

	// The node's observed start/end lie inside the window.
	start := time.Date(2020, time.September, 16, 3, 0, 0, 0, time.UTC)
	end := time.Date(2020, time.September, 16, 15, 12, 0, 0, time.UTC)

	var a0, a1 *Node
	var bs []byte
	var err error

	a0 = NewNode("any1", "cluster1", "世界", start, end, window)
	a0.NodeType = "n2-standard"
	a0.Preemptible = 1.0
	a0.CPUCoreHours = 2.0 * hours
	a0.RAMByteHours = 12.0 * gb * hours
	a0.CPUCost = 1.50
	a0.GPUCost = 30.44
	a0.RAMCost = 15.0
	a0.Discount = 0.9
	a0.CPUBreakdown = &Breakdown{
		Idle:   0.9,
		Other:  0.05,
		System: 0.05,
		User:   0.0,
	}
	a0.RAMBreakdown = &Breakdown{
		Idle:   0.4,
		Other:  0.05,
		System: 0.05,
		User:   0.5,
	}
	a0.SetAdjustment(1.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("Node.Binary: unexpected error: %s", err)
	}

	a1 = &Node{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Node.Binary: unexpected error: %s", err)
	}

	if !a0.Equal(a1) {
		t.Fatalf("Node.Binary: expected %v, found %v", a0, a1)
	}
}
+
// TestProperties_BinaryEncoding round-trips Properties through the binary
// codec in three scenarios — empty, fully populated, and partially populated
// — comparing via Properties.Equal each time.
func TestProperties_BinaryEncoding(t *testing.T) {
	var p0, p1 *Properties
	var bs []byte
	var err error

	// empty properties
	p0 = &Properties{}
	bs, err = p0.MarshalBinary()
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	p1 = &Properties{}
	err = p1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	if !p0.Equal(p1) {
		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
	}

	// complete properties
	p0 = &Properties{}
	p0.SetCluster("cluster1")
	p0.SetContainer("container-abc-1")
	p0.SetController("daemonset-abc")
	p0.SetControllerKind("daemonset")
	p0.SetNamespace("namespace1")
	p0.SetNode("node1")
	p0.SetPod("daemonset-abc-123")
	p0.SetLabels(map[string]string{
		"app":  "cost-analyzer",
		"tier": "frontend",
	})
	p0.SetServices([]string{"kubecost-frontend"})
	bs, err = p0.MarshalBinary()
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	p1 = &Properties{}
	err = p1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	if !p0.Equal(p1) {
		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
	}

	// incomplete properties (note: services is set but empty)
	p0 = &Properties{}
	p0.SetCluster("cluster1")
	p0.SetController("daemonset-abc")
	p0.SetControllerKind("daemonset")
	p0.SetNamespace("namespace1")
	p0.SetServices([]string{})
	bs, err = p0.MarshalBinary()
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	p1 = &Properties{}
	err = p1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Properties.Binary: unexpected error: %s", err)
	}

	if !p0.Equal(p1) {
		t.Fatalf("Properties.Binary: expected %s; found %s", p0, p1)
	}
}
+
// TestShared_BinaryEncoding round-trips a SharedAsset through its binary
// codec and compares via SharedAsset.Equal.
func TestShared_BinaryEncoding(t *testing.T) {
	ws := time.Date(2020, time.September, 16, 0, 0, 0, 0, time.UTC)
	we := ws.Add(24 * time.Hour)
	window := NewWindow(&ws, &we)

	var a0, a1 *SharedAsset
	var bs []byte
	var err error

	a0 = NewSharedAsset("any1", window)
	a0.Cost = 4.04
	a0.SetAdjustment(1.23)

	bs, err = a0.MarshalBinary()
	if err != nil {
		t.Fatalf("SharedAsset.Binary: unexpected error: %s", err)
	}

	a1 = &SharedAsset{}
	err = a1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("SharedAsset.Binary: unexpected error: %s", err)
	}

	if !a0.Equal(a1) {
		t.Fatalf("SharedAsset.Binary: expected %v, found %v", a0, a1)
	}
}
+
// TestWindow_BinaryEncoding round-trips a Window through its binary codec for
// all four nil/non-nil endpoint combinations, asserting nil endpoints stay
// nil and non-nil endpoints compare equal by time.Time.Equal.
func TestWindow_BinaryEncoding(t *testing.T) {
	var w0, w1 Window
	var bs []byte
	var err error

	// Window (nil, nil)
	w0 = NewWindow(nil, nil)
	bs, err = w0.MarshalBinary()
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	err = w1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	// Both endpoints nil: pointer equality (nil == nil) is the correct check.
	if w1.Start() != w0.Start() {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
	}
	if w1.End() != w0.End() {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
	}

	// Window (time, nil)
	ts := time.Now()
	w0 = NewWindow(&ts, nil)
	bs, err = w0.MarshalBinary()
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	err = w1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	if !w1.Start().Equal(*w0.Start()) {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
	}
	if w1.End() != w0.End() {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
	}

	// Window (nil, time)
	te := time.Now()
	w0 = NewWindow(nil, &te)
	bs, err = w0.MarshalBinary()
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	err = w1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	if w1.Start() != w0.Start() {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
	}
	if !w1.End().Equal(*w0.End()) {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
	}

	// Window (time, time)
	ts, te = time.Now(), time.Now()
	w0 = NewWindow(&ts, &te)
	bs, err = w0.MarshalBinary()
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	err = w1.UnmarshalBinary(bs)
	if err != nil {
		t.Fatalf("Window.Binary: unexpected error: %s", err)
	}

	if !w1.Start().Equal(*w0.Start()) {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.Start(), w1.Start())
	}
	if !w1.End().Equal(*w0.End()) {
		t.Fatalf("Window.Binary: expected %v; found %v", w0.End(), w1.End())
	}
}

+ 686 - 0
pkg/kubecost/properties.go

@@ -0,0 +1,686 @@
+package kubecost
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/kubecost/cost-model/pkg/util"
+)
+
// Property identifies a single dimension of a Kubernetes object (cluster,
// namespace, pod, etc.) by which allocation data can be keyed or aggregated.
type Property string

const (
	NilProp            Property = ""
	ClusterProp        Property = "cluster"
	NodeProp           Property = "node"
	ContainerProp      Property = "container"
	ControllerProp     Property = "controller"
	ControllerKindProp Property = "controllerKind"
	LabelProp          Property = "label"
	NamespaceProp      Property = "namespace"
	PodProp            Property = "pod"
	ServiceProp        Property = "service"
)

// availableProperties enumerates every recognized Property; ParseProperty
// searches this list when resolving user-supplied strings.
var availableProperties []Property = []Property{
	NilProp,
	ClusterProp,
	NodeProp,
	ContainerProp,
	ControllerProp,
	ControllerKindProp,
	LabelProp,
	NamespaceProp,
	PodProp,
	ServiceProp,
}
+
+func ParseProperty(prop string) Property {
+	for _, property := range availableProperties {
+		if strings.ToLower(string(property)) == strings.ToLower(prop) {
+			return property
+		}
+	}
+	return NilProp
+}
+
// String returns the raw string value of the Property.
func (p Property) String() string {
	return string(p)
}
+
// PropertyValue pairs a Property with an arbitrary value for that property.
type PropertyValue struct {
	Property Property
	Value    interface{}
}

// Properties describes a set of Kubernetes objects.
type Properties map[Property]interface{}
+
+// TODO niko/etl make sure Services deep copy works correctly
+func (p *Properties) Clone() Properties {
+	if p == nil {
+		return nil
+	}
+
+	clone := Properties{}
+	for k, v := range *p {
+		clone[k] = v
+	}
+	return clone
+}
+
+func (p *Properties) Equal(that *Properties) bool {
+	if p == nil || that == nil {
+		return false
+	}
+
+	if p.Length() != that.Length() {
+		return false
+	}
+
+	pCluster, _ := p.GetCluster()
+	thatCluster, _ := that.GetCluster()
+	if pCluster != thatCluster {
+		return false
+	}
+
+	pNode, _ := p.GetNode()
+	thatNode, _ := that.GetNode()
+	if pNode != thatNode {
+		return false
+	}
+
+	pContainer, _ := p.GetContainer()
+	thatContainer, _ := that.GetContainer()
+	if pContainer != thatContainer {
+		return false
+	}
+
+	pController, _ := p.GetController()
+	thatController, _ := that.GetController()
+	if pController != thatController {
+		return false
+	}
+
+	pControllerKind, _ := p.GetControllerKind()
+	thatControllerKind, _ := that.GetControllerKind()
+	if pControllerKind != thatControllerKind {
+		return false
+	}
+
+	pNamespace, _ := p.GetNamespace()
+	thatNamespace, _ := that.GetNamespace()
+	if pNamespace != thatNamespace {
+		return false
+	}
+
+	pPod, _ := p.GetPod()
+	thatPod, _ := that.GetPod()
+	if pPod != thatPod {
+		return false
+	}
+
+	pLabels, _ := p.GetLabels()
+	thatLabels, _ := that.GetLabels()
+	if len(pLabels) != len(thatLabels) {
+		for k, pv := range pLabels {
+			tv, ok := thatLabels[k]
+			if !ok || tv != pv {
+				return false
+			}
+		}
+		return false
+	}
+
+	pServices, _ := p.GetServices()
+	thatServices, _ := that.GetServices()
+	if len(pServices) != len(thatServices) {
+		sort.Strings(pServices)
+		sort.Strings(thatServices)
+		for i, pv := range pServices {
+			tv := thatServices[i]
+			if tv != pv {
+				return false
+			}
+		}
+		return false
+	}
+
+	return true
+}
+
+func (p *Properties) Intersection(that Properties) Properties {
+	spec := &Properties{}
+
+	sCluster, sErr := p.GetCluster()
+	tCluster, tErr := that.GetCluster()
+	if sErr == nil && tErr == nil && sCluster == tCluster {
+		spec.SetCluster(sCluster)
+	}
+
+	sNode, sErr := p.GetNode()
+	tNode, tErr := that.GetNode()
+	if sErr == nil && tErr == nil && sNode == tNode {
+		spec.SetNode(sNode)
+	}
+
+	sContainer, sErr := p.GetContainer()
+	tContainer, tErr := that.GetContainer()
+	if sErr == nil && tErr == nil && sContainer == tContainer {
+		spec.SetContainer(sContainer)
+	}
+
+	sController, sErr := p.GetController()
+	tController, tErr := that.GetController()
+	if sErr == nil && tErr == nil && sController == tController {
+		spec.SetController(sController)
+	}
+
+	sControllerKind, sErr := p.GetControllerKind()
+	tControllerKind, tErr := that.GetControllerKind()
+	if sErr == nil && tErr == nil && sControllerKind == tControllerKind {
+		spec.SetControllerKind(sControllerKind)
+	}
+
+	sNamespace, sErr := p.GetNamespace()
+	tNamespace, tErr := that.GetNamespace()
+	if sErr == nil && tErr == nil && sNamespace == tNamespace {
+		spec.SetNamespace(sNamespace)
+	}
+
+	sPod, sErr := p.GetPod()
+	tPod, tErr := that.GetPod()
+	if sErr == nil && tErr == nil && sPod == tPod {
+		spec.SetPod(sPod)
+	}
+
+	// TODO niko/etl intersection of services and labels
+
+	return *spec
+}
+
+// Length returns the number of Properties
+func (p *Properties) Length() int {
+	if p == nil {
+		return 0
+	}
+	return len(*p)
+}
+
+func (p *Properties) Matches(that Properties) bool {
+	// The only Properties that a nil Properties matches is an empty one
+	if p == nil {
+		return that.Length() == 0
+	}
+
+	// Matching on cluster, namespace, controller, controller kind, pod,
+	// and container are simple string equality comparisons. By default,
+	// we assume a match. For each Property given to match, we say that the
+	// match fails if we don't have that Property, or if we have it but the
+	// strings are not equal.
+
+	if thatCluster, thatErr := that.GetCluster(); thatErr == nil {
+		if thisCluster, thisErr := p.GetCluster(); thisErr != nil || thisCluster != thatCluster {
+			return false
+		}
+	}
+
+	if thatNode, thatErr := that.GetNode(); thatErr == nil {
+		if thisNode, thisErr := p.GetNode(); thisErr != nil || thisNode != thatNode {
+			return false
+		}
+	}
+
+	if thatNamespace, thatErr := that.GetNamespace(); thatErr == nil {
+		if thisNamespace, thisErr := p.GetNamespace(); thisErr != nil || thisNamespace != thatNamespace {
+			return false
+		}
+	}
+
+	if thatController, thatErr := that.GetController(); thatErr == nil {
+		if thisController, thisErr := p.GetController(); thisErr != nil || thisController != thatController {
+			return false
+		}
+	}
+
+	if thatControllerKind, thatErr := that.GetControllerKind(); thatErr == nil {
+		if thisControllerKind, thisErr := p.GetControllerKind(); thisErr != nil || thisControllerKind != thatControllerKind {
+			return false
+		}
+	}
+
+	if thatPod, thatErr := that.GetPod(); thatErr == nil {
+		if thisPod, thisErr := p.GetPod(); thisErr != nil || thisPod != thatPod {
+			return false
+		}
+	}
+
+	if thatContainer, thatErr := that.GetContainer(); thatErr == nil {
+		if thisContainer, thisErr := p.GetContainer(); thisErr != nil || thisContainer != thatContainer {
+			return false
+		}
+	}
+
+	// Matching on Services only occurs if a non-zero length slice of strings
+	// is given. The comparison fails if there exists a string to match that is
+	// not present in our slice of services.
+	if thatServices, thatErr := that.GetServices(); thatErr == nil && len(thatServices) > 0 {
+		thisServices, thisErr := p.GetServices()
+		if thisErr != nil {
+			return false
+		}
+
+		for _, service := range thatServices {
+			match := false
+			for _, s := range thisServices {
+				if s == service {
+					match = true
+					break
+				}
+			}
+			if !match {
+				return false
+			}
+		}
+	}
+
+	// Matching on Labels only occurs if a non-zero length map of strings is
+	// given. The comparison fails if there exists a key/value pair to match
+	// that is not present in our set of labels.
+	if thatServices, thatErr := that.GetServices(); thatErr == nil && len(thatServices) > 0 {
+		thisServices, thisErr := p.GetServices()
+		if thisErr != nil {
+			return false
+		}
+
+		for _, service := range thatServices {
+			match := false
+			for _, s := range thisServices {
+				if s == service {
+					match = true
+					break
+				}
+			}
+			if !match {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+func (p *Properties) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+
+	strs := []string{}
+	for key, prop := range *p {
+		strs = append(strs, fmt.Sprintf("%s:%s", key, prop))
+	}
+	return fmt.Sprintf("{%s}", strings.Join(strs, "; "))
+}
+
// Get returns the value of the given Property as a string, or an error if
// the Property is unset or its value is not a string.
func (p *Properties) Get(prop Property) (string, error) {
	if raw, ok := (*p)[prop]; ok {
		if result, ok := raw.(string); ok {
			return result, nil
		}
		return "", fmt.Errorf("%s is not a string", prop)
	}
	return "", fmt.Errorf("%s not set", prop)
}

// Has returns true if the given Property has been set, regardless of its type.
func (p *Properties) Has(prop Property) bool {
	_, ok := (*p)[prop]
	return ok
}

// Set assigns the given string value to the given Property.
func (p *Properties) Set(prop Property, value string) {
	(*p)[prop] = value
}
+
// GetCluster returns the cluster name, or an error if ClusterProp is unset
// or not a string.
func (p *Properties) GetCluster() (string, error) {
	if raw, ok := (*p)[ClusterProp]; ok {
		if cluster, ok := raw.(string); ok {
			return cluster, nil
		}
		return "", fmt.Errorf("ClusterProp is not a string")
	}
	return "", fmt.Errorf("ClusterProp not set")
}

// HasCluster returns true if ClusterProp has been set.
func (p *Properties) HasCluster() bool {
	_, ok := (*p)[ClusterProp]
	return ok
}

// SetCluster assigns the cluster name.
func (p *Properties) SetCluster(cluster string) {
	(*p)[ClusterProp] = cluster
}

// GetNode returns the node name, or an error if NodeProp is unset or not a
// string.
func (p *Properties) GetNode() (string, error) {
	if raw, ok := (*p)[NodeProp]; ok {
		if node, ok := raw.(string); ok {
			return node, nil
		}
		return "", fmt.Errorf("NodeProp is not a string")
	}
	return "", fmt.Errorf("NodeProp not set")
}

// HasNode returns true if NodeProp has been set.
func (p *Properties) HasNode() bool {
	_, ok := (*p)[NodeProp]
	return ok
}

// SetNode assigns the node name.
func (p *Properties) SetNode(node string) {
	(*p)[NodeProp] = node
}
+
// GetContainer returns the container name, or an error if ContainerProp is
// unset or not a string.
func (p *Properties) GetContainer() (string, error) {
	if raw, ok := (*p)[ContainerProp]; ok {
		if container, ok := raw.(string); ok {
			return container, nil
		}
		return "", fmt.Errorf("ContainerProp is not a string")
	}
	return "", fmt.Errorf("ContainerProp not set")
}

// HasContainer returns true if ContainerProp has been set.
func (p *Properties) HasContainer() bool {
	_, ok := (*p)[ContainerProp]
	return ok
}

// SetContainer assigns the container name.
func (p *Properties) SetContainer(container string) {
	(*p)[ContainerProp] = container
}

// GetController returns the controller name, or an error if ControllerProp
// is unset or not a string.
func (p *Properties) GetController() (string, error) {
	if raw, ok := (*p)[ControllerProp]; ok {
		if controller, ok := raw.(string); ok {
			return controller, nil
		}
		return "", fmt.Errorf("ControllerProp is not a string")
	}
	return "", fmt.Errorf("ControllerProp not set")
}

// HasController returns true if ControllerProp has been set.
func (p *Properties) HasController() bool {
	_, ok := (*p)[ControllerProp]
	return ok
}

// SetController assigns the controller name.
func (p *Properties) SetController(controller string) {
	(*p)[ControllerProp] = controller
}

// GetControllerKind returns the controller kind (e.g. "deployment"), or an
// error if ControllerKindProp is unset or not a string.
func (p *Properties) GetControllerKind() (string, error) {
	if raw, ok := (*p)[ControllerKindProp]; ok {
		if controllerKind, ok := raw.(string); ok {
			return controllerKind, nil
		}
		return "", fmt.Errorf("ControllerKindProp is not a string")
	}
	return "", fmt.Errorf("ControllerKindProp not set")
}

// HasControllerKind returns true if ControllerKindProp has been set.
func (p *Properties) HasControllerKind() bool {
	_, ok := (*p)[ControllerKindProp]
	return ok
}

// SetControllerKind assigns the controller kind.
func (p *Properties) SetControllerKind(controllerKind string) {
	(*p)[ControllerKindProp] = controllerKind
}
+
// GetLabels returns the label set, or an error if LabelProp is unset or not
// a map[string]string. On error, an empty (non-nil) map is returned.
func (p *Properties) GetLabels() (map[string]string, error) {
	if raw, ok := (*p)[LabelProp]; ok {
		if labels, ok := raw.(map[string]string); ok {
			return labels, nil
		}
		return map[string]string{}, fmt.Errorf("LabelProp is not a map[string]string")
	}
	return map[string]string{}, fmt.Errorf("LabelProp not set")
}

// HasLabel returns true if LabelProp has been set.
func (p *Properties) HasLabel() bool {
	_, ok := (*p)[LabelProp]
	return ok
}

// SetLabels assigns the label set. The map is stored by reference, not copied.
func (p *Properties) SetLabels(labels map[string]string) {
	(*p)[LabelProp] = labels
}

// GetNamespace returns the namespace, or an error if NamespaceProp is unset
// or not a string.
func (p *Properties) GetNamespace() (string, error) {
	if raw, ok := (*p)[NamespaceProp]; ok {
		if namespace, ok := raw.(string); ok {
			return namespace, nil
		}
		return "", fmt.Errorf("NamespaceProp is not a string")
	}
	return "", fmt.Errorf("NamespaceProp not set")
}

// HasNamespace returns true if NamespaceProp has been set.
func (p *Properties) HasNamespace() bool {
	_, ok := (*p)[NamespaceProp]
	return ok
}

// SetNamespace assigns the namespace.
func (p *Properties) SetNamespace(namespace string) {
	(*p)[NamespaceProp] = namespace
}

// GetPod returns the pod name, or an error if PodProp is unset or not a
// string.
func (p *Properties) GetPod() (string, error) {
	if raw, ok := (*p)[PodProp]; ok {
		if pod, ok := raw.(string); ok {
			return pod, nil
		}
		return "", fmt.Errorf("PodProp is not a string")
	}
	return "", fmt.Errorf("PodProp not set")
}

// HasPod returns true if PodProp has been set.
func (p *Properties) HasPod() bool {
	_, ok := (*p)[PodProp]
	return ok
}

// SetPod assigns the pod name.
func (p *Properties) SetPod(pod string) {
	(*p)[PodProp] = pod
}
+
+func (p *Properties) GetServices() ([]string, error) {
+	if raw, ok := (*p)[ServiceProp]; ok {
+		if services, ok := raw.([]string); ok {
+			return services, nil
+		}
+		return []string{}, fmt.Errorf("ServiceProp is not a string")
+	}
+	return []string{}, fmt.Errorf("ServiceProp not set")
+}
+
+func (p *Properties) HasService() bool {
+	_, ok := (*p)[ServiceProp]
+	return ok
+}
+
+func (p *Properties) SetServices(services []string) {
+	(*p)[ServiceProp] = services
+}
+
// MarshalBinary serializes the Properties into a compact binary format:
// a version byte (currently 3) followed by each property in a fixed order
// (cluster, node, container, controller, controller kind, namespace, pod,
// labels, services). Each property is written as a presence byte (0 when
// unset, 1 when set) followed, when present, by its encoded value. The
// field order and version must stay in sync with UnmarshalBinary.
func (p *Properties) MarshalBinary() (data []byte, err error) {
	buff := util.NewBuffer()
	buff.WriteUInt8(3) // version

	// ClusterProp
	cluster, err := p.GetCluster()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		buff.WriteString(cluster) // write string
	}

	// NodeProp
	node, err := p.GetNode()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		buff.WriteString(node)    // write string
	}

	// ContainerProp
	container, err := p.GetContainer()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))   // write non-nil byte
		buff.WriteString(container) // write string
	}

	// ControllerProp
	controller, err := p.GetController()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))    // write non-nil byte
		buff.WriteString(controller) // write string
	}

	// ControllerKindProp
	controllerKind, err := p.GetControllerKind()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))        // write non-nil byte
		buff.WriteString(controllerKind) // write string
	}

	// NamespaceProp
	namespace, err := p.GetNamespace()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))   // write non-nil byte
		buff.WriteString(namespace) // write string
	}

	// PodProp
	pod, err := p.GetPod()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1)) // write non-nil byte
		buff.WriteString(pod)     // write string
	}

	// LabelProp: length-prefixed sequence of key/value string pairs
	labels, err := p.GetLabels()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))  // write non-nil byte
		buff.WriteInt(len(labels)) // map length
		for k, v := range labels {
			buff.WriteString(k) // write string
			buff.WriteString(v) // write string
		}
	}

	// ServiceProp: length-prefixed sequence of strings
	services, err := p.GetServices()
	if err != nil {
		buff.WriteUInt8(uint8(0)) // write nil byte
	} else {
		buff.WriteUInt8(uint8(1))    // write non-nil byte
		buff.WriteInt(len(services)) // slice length
		for _, v := range services {
			buff.WriteString(v) // write string
		}
	}

	return buff.Bytes(), nil
}
+
// UnmarshalBinary decodes Properties from the binary format produced by
// MarshalBinary, replacing the receiver's contents. It returns an error if
// the leading version byte is not 3. The field order must stay in sync with
// MarshalBinary.
func (p *Properties) UnmarshalBinary(data []byte) error {
	buff := util.NewBufferFromBytes(data)
	v := buff.ReadUInt8() // version
	if v != 3 {
		return fmt.Errorf("Invalid Version. Expected 3, got %d", v)
	}

	// reset the receiver before populating it
	*p = Properties{}

	// ClusterProp
	if buff.ReadUInt8() == 1 { // read nil byte
		cluster := buff.ReadString() // read string
		p.SetCluster(cluster)
	}

	// NodeProp
	if buff.ReadUInt8() == 1 { // read nil byte
		node := buff.ReadString() // read string
		p.SetNode(node)
	}

	// ContainerProp
	if buff.ReadUInt8() == 1 { // read nil byte
		container := buff.ReadString() // read string
		p.SetContainer(container)
	}

	// ControllerProp
	if buff.ReadUInt8() == 1 { // read nil byte
		controller := buff.ReadString() // read string
		p.SetController(controller)
	}

	// ControllerKindProp
	if buff.ReadUInt8() == 1 { // read nil byte
		controllerKind := buff.ReadString() // read string
		p.SetControllerKind(controllerKind)
	}

	// NamespaceProp
	if buff.ReadUInt8() == 1 { // read nil byte
		namespace := buff.ReadString() // read string
		p.SetNamespace(namespace)
	}

	// PodProp
	if buff.ReadUInt8() == 1 { // read nil byte
		pod := buff.ReadString() // read string
		p.SetPod(pod)
	}

	// LabelProp: length-prefixed key/value string pairs
	if buff.ReadUInt8() == 1 { // read nil byte
		labels := map[string]string{}
		length := buff.ReadInt() // read map len
		for idx := 0; idx < length; idx++ {
			key := buff.ReadString()
			val := buff.ReadString()
			labels[key] = val
		}
		p.SetLabels(labels)
	}

	// ServiceProp: length-prefixed strings
	if buff.ReadUInt8() == 1 { // read nil byte
		services := []string{}
		length := buff.ReadInt() // read slice len
		for idx := 0; idx < length; idx++ {
			val := buff.ReadString()
			services = append(services, val)
		}
		p.SetServices(services)
	}

	return nil
}

+ 241 - 0
pkg/kubecost/properties_test.go

@@ -0,0 +1,241 @@
+package kubecost
+
+import (
+	"testing"
+)
+
+// TODO niko/etl
+// func TestParseProperty(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperty_String(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_Clone(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_Intersection(t *testing.T) {}
+
+func TestProperties_Matches(t *testing.T) {
+	// nil Properties should match empty Properties
+	var p *Properties
+	propsEmpty := Properties{}
+
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect nil to match empty")
+	}
+
+	// Empty Properties should match empty Properties
+	p = &Properties{}
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect nil to match empty")
+	}
+
+	p.SetCluster("cluster-one")
+	p.SetNamespace("kubecost")
+	p.SetController("kubecost-deployment")
+	p.SetControllerKind("deployment")
+	p.SetPod("kubecost-deployment-abc123")
+	p.SetContainer("kubecost-cost-model")
+	p.SetServices([]string{"kubecost-frontend"})
+	p.SetLabels(map[string]string{
+		"app":  "kubecost",
+		"tier": "frontend",
+	})
+
+	// Non-empty Properties should match empty Properties, but not vice-a-versa
+	if !p.Matches(propsEmpty) {
+		t.Fatalf("Properties.Matches: expect nil to match empty")
+	}
+	if propsEmpty.Matches(*p) {
+		t.Fatalf("Properties.Matches: expect empty to not match non-empty")
+	}
+
+	// Non-empty Properties should match itself
+	if !p.Matches(*p) {
+		t.Fatalf("Properties.Matches: expect non-empty to match itself")
+	}
+
+	// Match on all
+	if !p.Matches(Properties{
+		ClusterProp:        "cluster-one",
+		NamespaceProp:      "kubecost",
+		ControllerProp:     "kubecost-deployment",
+		ControllerKindProp: "deployment",
+		PodProp:            "kubecost-deployment-abc123",
+		ContainerProp:      "kubecost-cost-model",
+		ServiceProp:        []string{"kubecost-frontend"},
+		LabelProp: map[string]string{
+			"app":  "kubecost",
+			"tier": "frontend",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on all")
+	}
+
+	// Match on cluster
+	if !p.Matches(Properties{
+		ClusterProp: "cluster-one",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on cluster")
+	}
+
+	// No match on cluster
+	if p.Matches(Properties{
+		ClusterProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on cluster")
+	}
+
+	// Match on namespace
+	if !p.Matches(Properties{
+		NamespaceProp: "kubecost",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on namespace")
+	}
+
+	// No match on namespace
+	if p.Matches(Properties{
+		NamespaceProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on namespace")
+	}
+
+	// Match on controller
+	if !p.Matches(Properties{
+		ControllerProp: "kubecost-deployment",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on controller")
+	}
+
+	// No match on controller
+	if p.Matches(Properties{
+		ControllerProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on controller")
+	}
+
+	// Match on controller kind
+	if !p.Matches(Properties{
+		ControllerKindProp: "deployment",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on controller kind")
+	}
+
+	// No match on controller kind
+	if p.Matches(Properties{
+		ControllerKindProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on controller kind")
+	}
+
+	// Match on pod
+	if !p.Matches(Properties{
+		PodProp: "kubecost-deployment-abc123",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on pod")
+	}
+
+	// No match on pod
+	if p.Matches(Properties{
+		PodProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on pod")
+	}
+
+	// Match on container
+	if !p.Matches(Properties{
+		ContainerProp: "kubecost-cost-model",
+	}) {
+		t.Fatalf("Properties.Matches: expect match on container")
+	}
+
+	// No match on container
+	if p.Matches(Properties{
+		ContainerProp: "miss",
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on container")
+	}
+
+	// Match on single service
+	if !p.Matches(Properties{
+		ServiceProp: []string{"kubecost-frontend"},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on service")
+	}
+
+	// No match on one missing service
+	if p.Matches(Properties{
+		ServiceProp: []string{"missing-service", "kubecost-frontend"},
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on 1 of 2 services")
+	}
+
+	// Match on single label
+	if !p.Matches(Properties{
+		LabelProp: map[string]string{
+			"app": "kubecost",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect match on label")
+	}
+
+	// No match on one missing label
+	if !p.Matches(Properties{
+		LabelProp: map[string]string{
+			"app":   "kubecost",
+			"tier":  "frontend",
+			"label": "missing",
+		},
+	}) {
+		t.Fatalf("Properties.Matches: expect no match on 2 of 3 labels")
+	}
+}
+
+// TODO niko/etl
+// func TestProperties_GetCluster(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetCluster(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetContainer(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetContainer(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetController(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetController(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetControllerKind(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetControllerKind(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetLabels(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetLabels(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetNamespace(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetNamespace(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetPod(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetPod(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_GetServices(t *testing.T) {}
+
+// TODO niko/etl
+// func TestProperties_SetServices(t *testing.T) {}

+ 30 - 0
pkg/kubecost/status.go

@@ -0,0 +1,30 @@
+package kubecost
+
+import "time"
+
// ETLStatus describes ETL metadata
type ETLStatus struct {
	Coverage    Window           `json:"coverage"`    // time range the ETL data covers
	Progress    float64          `json:"progress"`    // completion fraction; presumably in [0, 1] — TODO confirm
	RefreshRate string           `json:"refreshRate"` // refresh cadence, as a duration string — TODO confirm format
	StartTime   time.Time        `json:"startTime"`
	UTCOffset   string           `json:"utcOffset"`
	Backup      *DirectoryStatus `json:"backup,omitempty"` // optional backup directory metadata
}

// DirectoryStatus describes metadata of a directory of files
type DirectoryStatus struct {
	Path         string       `json:"path"`
	Size         string       `json:"size"` // human-readable size string, not bytes
	LastModified time.Time    `json:"lastModified"`
	FileCount    int          `json:"fileCount"`
	Files        []FileStatus `json:"files"`
}

// FileStatus describes the metadata of a single file
type FileStatus struct {
	Name         string            `json:"name"`
	Size         string            `json:"size"` // human-readable size string, not bytes
	LastModified time.Time         `json:"lastModified"`
	Details      map[string]string `json:"details,omitempty"` // free-form extra metadata
}

+ 483 - 0
pkg/kubecost/window.go

@@ -0,0 +1,483 @@
+package kubecost
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"time"
+)
+
// Time-unit conversion factors used when formatting durations and offsets.
const (
	minutesPerDay  = 60 * 24
	minutesPerHour = 60
	hoursPerDay    = 24
)
+
+// RoundBack rounds the given time back to a multiple of the given resolution
+// in the given time's timezone.
+// e.g. 2020-01-01T12:37:48-0700, 24h = 2020-01-01T00:00:00-0700
+func RoundBack(t time.Time, resolution time.Duration) time.Time {
+	_, offSec := t.Zone()
+	return t.Add(time.Duration(offSec) * time.Second).Truncate(resolution).Add(-time.Duration(offSec) * time.Second)
+}
+
+// RoundForward rounds the given time forward to a multiple of the given resolution
+// in the given time's timezone.
+// e.g. 2020-01-01T12:37:48-0700, 24h = 2020-01-02T00:00:00-0700
+func RoundForward(t time.Time, resolution time.Duration) time.Time {
+	back := RoundBack(t, resolution)
+	if back.Equal(t) {
+		// The given time is exactly a multiple of the given resolution
+		return t
+	}
+	return back.Add(resolution)
+}
+
// Window defines a period of time with a start and an end. If either start or
// end are nil it indicates an open time period.
type Window struct {
	start *time.Time // nil means "open" (unbounded) start
	end   *time.Time // nil means "open" (unbounded) end
}

// NewWindow creates and returns a new Window instance from the given times.
// The pointers are stored directly, not copied.
func NewWindow(start, end *time.Time) Window {
	return Window{
		start: start,
		end:   end,
	}
}
+
+// ParseWindowUTC attempts to parse the given string into a valid Window. It
+// accepts several formats, returning an error if the given string does not
+// match one of the following:
+// - named intervals: "today", "yesterday", "week", "month", "lastweek", "lastmonth"
+// - durations: "24h", "7d", etc.
+// - date ranges: "2020-04-01T00:00:00Z,2020-04-03T00:00:00Z", etc.
+// - timestamp ranges: "1586822400,1586908800", etc.
+func ParseWindowUTC(window string) (Window, error) {
+	return parseWindow(window, time.Now().UTC())
+}
+
+// ParseWindowWithOffsetString parses the given window string within the context of
+// the timezone defined by the UTC offset string of format -07:00, +01:30, etc.
+func ParseWindowWithOffsetString(window string, offset string) (Window, error) {
+	if offset == "UTC" || offset == "" {
+		return ParseWindowUTC(window)
+	}
+
+	regex := regexp.MustCompile(`^(\+|-)(\d\d):(\d\d)$`)
+	match := regex.FindStringSubmatch(offset)
+	if match == nil {
+		return Window{}, fmt.Errorf("illegal UTC offset: '%s'; should be of form '-07:00'", offset)
+	}
+
+	sig := 1
+	if match[1] == "-" {
+		sig = -1
+	}
+
+	hrs64, _ := strconv.ParseInt(match[2], 10, 64)
+	hrs := sig * int(hrs64)
+
+	mins64, _ := strconv.ParseInt(match[3], 10, 64)
+	mins := sig * int(mins64)
+
+	loc := time.FixedZone(fmt.Sprintf("UTC%s", offset), (hrs*60*60)+(mins*60))
+	now := time.Now().In(loc)
+	return parseWindow(window, now)
+}
+
+// ParseWindowWithOffset parses the given window string within the context of
+// the timezone defined by the UTC offset.
+func ParseWindowWithOffset(window string, offset time.Duration) (Window, error) {
+	loc := time.FixedZone("", int(offset.Seconds()))
+	now := time.Now().In(loc)
+	return parseWindow(window, now)
+}
+
+// parseWindow generalizes the parsing of window strings, relative to a given
+// moment in time, defined as "now".
+func parseWindow(window string, now time.Time) (Window, error) {
+	// compute UTC offset in terms of minutes
+	offHr := now.UTC().Hour() - now.Hour()
+	offMin := (now.UTC().Minute() - now.Minute()) + (offHr * 60)
+	offset := time.Duration(offMin) * time.Minute
+
+	if window == "today" {
+		start := now
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+
+		end := start.Add(time.Hour * 24)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "yesterday" {
+		start := now
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		start = start.Add(time.Hour * -24)
+
+		end := start.Add(time.Hour * 24)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "week" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 Sunday of the current week
+		start = start.Add(-24 * time.Hour * time.Duration(start.Weekday()))
+
+		end := now
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "lastweek" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 Sunday of last week
+		start = start.Add(-24 * time.Hour * time.Duration(start.Weekday()+7))
+
+		end := start.Add(7 * 24 * time.Hour)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "month" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 1st of this month
+		start = start.Add(-24 * time.Hour * time.Duration(start.Day()-1))
+
+		end := now
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "month" {
+		// now
+		start := now
+		// 00:00 today, accounting for timezone offset
+		start = start.Truncate(time.Hour * 24)
+		start = start.Add(offset)
+		// 00:00 1st of this month
+		start = start.Add(-24 * time.Hour * time.Duration(start.Day()-1))
+
+		end := now
+
+		return NewWindow(&start, &end), nil
+	}
+
+	if window == "lastmonth" {
+		// now
+		end := now
+		// 00:00 today, accounting for timezone offset
+		end = end.Truncate(time.Hour * 24)
+		end = end.Add(offset)
+		// 00:00 1st of this month
+		end = end.Add(-24 * time.Hour * time.Duration(end.Day()-1))
+
+		// 00:00 last day of last month
+		start := end.Add(-24 * time.Hour)
+		// 00:00 1st of last month
+		start = start.Add(-24 * time.Hour * time.Duration(start.Day()-1))
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match duration strings; e.g. "45m", "24h", "7d"
+	regex := regexp.MustCompile(`^(\d+)(m|h|d)$`)
+	match := regex.FindStringSubmatch(window)
+	if match != nil {
+		dur := time.Minute
+		if match[2] == "h" {
+			dur = time.Hour
+		}
+		if match[2] == "d" {
+			dur = 24 * time.Hour
+		}
+
+		num, _ := strconv.ParseInt(match[1], 10, 64)
+
+		end := now
+		start := end.Add(-time.Duration(num) * dur)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match duration strings with offset; e.g. "45m offset 15m", etc.
+	regex = regexp.MustCompile(`^(\d+)(m|h|d) offset (\d+)(m|h|d)$`)
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		end := now
+
+		offUnit := time.Minute
+		if match[4] == "h" {
+			offUnit = time.Hour
+		}
+		if match[4] == "d" {
+			offUnit = 24 * time.Hour
+		}
+
+		offNum, _ := strconv.ParseInt(match[3], 10, 64)
+
+		end = end.Add(-time.Duration(offNum) * offUnit)
+
+		durUnit := time.Minute
+		if match[2] == "h" {
+			durUnit = time.Hour
+		}
+		if match[2] == "d" {
+			durUnit = 24 * time.Hour
+		}
+
+		durNum, _ := strconv.ParseInt(match[1], 10, 64)
+
+		start := end.Add(-time.Duration(durNum) * durUnit)
+
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match timestamp pairs, e.g. "1586822400,1586908800" or "1586822400-1586908800"
+	regex = regexp.MustCompile(`^(\d+)[,|-](\d+)$`)
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		s, _ := strconv.ParseInt(match[1], 10, 64)
+		e, _ := strconv.ParseInt(match[2], 10, 64)
+		start := time.Unix(s, 0)
+		end := time.Unix(e, 0)
+		return NewWindow(&start, &end), nil
+	}
+
+	// Match RFC3339 pairs, e.g. "2020-04-01T00:00:00Z,2020-04-03T00:00:00Z"
+	rfc3339 := `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ`
+	regex = regexp.MustCompile(fmt.Sprintf(`(%s),(%s)`, rfc3339, rfc3339))
+	match = regex.FindStringSubmatch(window)
+	if match != nil {
+		start, _ := time.Parse(time.RFC3339, match[1])
+		end, _ := time.Parse(time.RFC3339, match[2])
+		return NewWindow(&start, &end), nil
+	}
+
+	return Window{nil, nil}, fmt.Errorf("illegal window: %s", window)
+}
+
+// ApproximatelyEqual returns true if the start and end times of the two windows,
+// respectively, are within the given threshold of each other.
+func (w Window) ApproximatelyEqual(that Window, threshold time.Duration) bool {
+	return approxEqual(w.start, that.start, threshold) && approxEqual(w.end, that.end, threshold)
+}
+
+func approxEqual(x *time.Time, y *time.Time, threshold time.Duration) bool {
+	// both times are nil, so they are equal
+	if x == nil && y == nil {
+		return true
+	}
+
+	// one time is nil, but the other is not, so they are not equal
+	if x == nil || y == nil {
+		return false
+	}
+
+	// neither time is nil, so they are approximately close if their times are
+	// within the given threshold
+	delta := math.Abs((*x).Sub(*y).Seconds())
+	return delta < threshold.Seconds()
+}
+
+func (w Window) Clone() Window {
+	var start, end *time.Time
+	var s, e time.Time
+
+	if w.start != nil {
+		s = *w.start
+		start = &s
+	}
+
+	if w.end != nil {
+		e = *w.end
+		end = &e
+	}
+
+	return NewWindow(start, end)
+}
+
+func (w Window) Contains(t time.Time) bool {
+	if w.start != nil && t.Before(*w.start) {
+		return false
+	}
+
+	if w.end != nil && t.After(*w.end) {
+		return false
+	}
+
+	return true
+}
+
+func (w Window) Duration() time.Duration {
+	if w.start != nil && w.end != nil {
+		return w.end.Sub(*w.start)
+	}
+
+	return 0
+}
+
// End returns the window's end time; nil indicates an open end.
func (w Window) End() *time.Time {
	return w.end
}
+
+func (w Window) Equal(that Window) bool {
+	if w.start != nil && that.start != nil && !w.start.Equal(*that.start) {
+		// starts are not nil, but not equal
+		return false
+	}
+
+	if w.end != nil && that.end != nil && !w.end.Equal(*that.end) {
+		// ends are not nil, but not equal
+		return false
+	}
+
+	if (w.start == nil && that.start != nil) || (w.start != nil && that.start == nil) {
+		// one start is nil, the other is not
+		return false
+	}
+
+	if (w.end == nil && that.end != nil) || (w.end != nil && that.end == nil) {
+		// one end is nil, the other is not
+		return false
+	}
+
+	// either both starts are nil, or they match; likewise for the ends
+	return true
+}
+
// ExpandStart returns the window with its start moved back to the given time
// if that time is earlier; a nil (open) start is replaced by the given time.
func (w Window) ExpandStart(start time.Time) Window {
	if w.start == nil || start.Before(*w.start) {
		w.start = &start
	}
	return w
}

// ExpandEnd returns the window with its end moved forward to the given time
// if that time is later; a nil (open) end is replaced by the given time.
func (w Window) ExpandEnd(end time.Time) Window {
	if w.end == nil || end.After(*w.end) {
		w.end = &end
	}
	return w
}
+
+func (w Window) Expand(that Window) Window {
+	return w.ExpandStart(*that.start).ExpandEnd(*that.end)
+}
+
+func (w Window) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("{")
+	buffer.WriteString(fmt.Sprintf("\"start\":\"%s\",", w.start.Format("2006-01-02T15:04:05-0700")))
+	buffer.WriteString(fmt.Sprintf("\"end\":\"%s\"", w.end.Format("2006-01-02T15:04:05-0700")))
+	buffer.WriteString("}")
+	return buffer.Bytes(), nil
+}
+
+func (w Window) Minutes() float64 {
+	if w.start == nil || w.end == nil {
+		return math.Inf(1)
+	}
+
+	return w.end.Sub(*w.start).Minutes()
+}
+
+// Shift adds the given duration to both the start and end times of the window
+func (w Window) Shift(dur time.Duration) Window {
+	if w.start != nil {
+		s := w.start.Add(dur)
+		w.start = &s
+	}
+
+	if w.end != nil {
+		e := w.end.Add(dur)
+		w.end = &e
+	}
+
+	return w
+}
+
// Start returns the window's start time; nil indicates an open start.
func (w Window) Start() *time.Time {
	return w.start
}
+
+func (w Window) String() string {
+	if w.start == nil && w.end == nil {
+		return "[nil, nil)"
+	}
+	if w.start == nil {
+		return fmt.Sprintf("[nil, %s)", w.end.Format("2006-01-02T15:04:05-0700"))
+	}
+	if w.end == nil {
+		return fmt.Sprintf("[%s, nil)", w.start.Format("2006-01-02T15:04:05-0700"))
+	}
+	return fmt.Sprintf("[%s, %s)", w.start.Format("2006-01-02T15:04:05-0700"), w.end.Format("2006-01-02T15:04:05-0700"))
+}
+
+// ToDurationOffset returns formatted strings representing the duration and
+// offset of the window in terms of minutes; e.g. ("30m", "1m")
+func (w Window) ToDurationOffset() (string, string) {
+	durMins := int(w.Duration().Minutes())
+
+	offStr := ""
+	if w.End() != nil {
+		offMins := int(time.Now().Sub(*w.End()).Minutes())
+		if offMins > 1 {
+			offStr = fmt.Sprintf("%dm", int(offMins))
+		} else if offMins < -1 {
+			durMins += offMins
+		}
+	}
+
+	// default to formatting in terms of minutes
+	durStr := fmt.Sprintf("%dm", durMins)
+	if (durMins >= minutesPerDay) && (durMins%minutesPerDay == 0) {
+		// convert to days
+		durStr = fmt.Sprintf("%dd", durMins/minutesPerDay)
+	} else if (durMins >= minutesPerHour) && (durMins%minutesPerHour == 0) {
+		// convert to hours
+		durStr = fmt.Sprintf("%dh", durMins/minutesPerHour)
+	}
+
+	return durStr, offStr
+}
+
// BoundaryError is returned when a requested Window falls outside the
// boundaries of the Window that the data source can actually supply.
type BoundaryError struct {
	Requested Window // the window the caller asked for
	Supported Window // the window the data source can provide
	Message   string // human-readable explanation
}

// NewBoundaryError creates a BoundaryError from the requested and supported
// windows plus an explanatory message.
func NewBoundaryError(req, sup Window, msg string) *BoundaryError {
	return &BoundaryError{
		Requested: req,
		Supported: sup,
		Message:   msg,
	}
}

// Error implements the error interface; a nil receiver renders as "<nil>".
func (be *BoundaryError) Error() string {
	if be == nil {
		return "<nil>"
	}

	return fmt.Sprintf("boundary error: requested %s; supported %s: %s", be.Requested, be.Supported, be.Message)
}

+ 626 - 0
pkg/kubecost/window_test.go

@@ -0,0 +1,626 @@
+package kubecost
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+func TestRoundBack(t *testing.T) {
+	boulder := time.FixedZone("Boulder", -7*60*60)
+	beijing := time.FixedZone("Beijing", 8*60*60)
+
+	to := time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)
+	tb := RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, boulder)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, boulder)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 37, 48, 0, boulder)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, beijing)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, beijing)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 59, 59, 0, beijing)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, time.UTC)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, time.UTC)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 59, 0, 0, time.UTC)
+	tb = RoundBack(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundBack: expected 2020-01-01T00:00:00Z; actual %s", tb)
+	}
+}
+
+func TestRoundForward(t *testing.T) {
+	boulder := time.FixedZone("Boulder", -7*60*60)
+	beijing := time.FixedZone("Beijing", 8*60*60)
+
+	to := time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)
+	tb := RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundForward: expected 2020-01-01T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, boulder)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, boulder)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 37, 48, 0, boulder)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, boulder)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00-07:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundForward: expected 2020-01-01T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, beijing)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, beijing)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 59, 59, 0, beijing)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, beijing)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00+08:00; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundForward: expected 2020-01-01T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 0, 0, 1, 0, time.UTC)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 12, 37, 48, 0, time.UTC)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00Z; actual %s", tb)
+	}
+
+	to = time.Date(2020, time.January, 1, 23, 59, 0, 0, time.UTC)
+	tb = RoundForward(to, 24*time.Hour)
+	if !tb.Equal(time.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC)) {
+		t.Fatalf("RoundForward: expected 2020-01-02T00:00:00Z; actual %s", tb)
+	}
+}
+
+func TestParseWindowUTC(t *testing.T) {
+	now := time.Now().UTC()
+
+	// "today" should span Now() and not produce an error
+	today, err := ParseWindowUTC("today")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "today": %s`, err)
+	}
+	if today.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "today" to have duration 24 hour; actual: %f hours`, today.Duration().Hours())
+	}
+	if !today.Contains(time.Now().UTC()) {
+		t.Fatalf(`expect: window "today" to contain now; actual: %s`, today)
+	}
+
+	// "yesterday" should span Now() and not produce an error
+	yesterday, err := ParseWindowUTC("yesterday")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
+	}
+	if yesterday.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "yesterday" to have duration 24 hour; actual: %f hours`, yesterday.Duration().Hours())
+	}
+	if !yesterday.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "yesterday" to end before now; actual: %s ends after %s`, yesterday, time.Now().UTC())
+	}
+
+	week, err := ParseWindowUTC("week")
+	hoursThisWeek := float64(time.Now().UTC().Weekday()) * 24.0
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "week": %s`, err)
+	}
+	if week.Duration().Hours() < hoursThisWeek {
+		t.Fatalf(`expect: window "week" to have at least %f hours; actual: %f hours`, hoursThisWeek, week.Duration().Hours())
+	}
+	if !week.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "week" to end before now; actual: %s ends after %s`, week, time.Now().UTC())
+	}
+
+	month, err := ParseWindowUTC("month")
+	hoursThisMonth := float64(time.Now().UTC().Day()) * 24.0
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "month": %s`, err)
+	}
+	if month.Duration().Hours() > hoursThisMonth || month.Duration().Hours() < (hoursThisMonth-24.0) {
+		t.Fatalf(`expect: window "month" to have approximately %f hours; actual: %f hours`, hoursThisMonth, month.Duration().Hours())
+	}
+	if !month.End().Before(time.Now().UTC()) {
+		t.Fatalf(`expect: window "month" to end before now; actual: %s ends after %s`, month, time.Now().UTC())
+	}
+
+	// TODO niko/etl lastweek
+
+	lastmonth, err := ParseWindowUTC("lastmonth")
+	monthMinHours := float64(24 * 28)
+	monthMaxHours := float64(24 * 31)
+	firstOfMonth := now.Truncate(time.Hour * 24).Add(-24 * time.Hour * time.Duration(now.Day()-1))
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
+	}
+	if lastmonth.Duration().Hours() > monthMaxHours || lastmonth.Duration().Hours() < monthMinHours {
+		t.Fatalf(`expect: window "lastmonth" to have approximately %f hours; actual: %f hours`, hoursThisMonth, lastmonth.Duration().Hours())
+	}
+	if !lastmonth.End().Equal(firstOfMonth) {
+		t.Fatalf(`expect: window "lastmonth" to end on the first of the current month; actual: %s doesn't end on %s`, lastmonth, firstOfMonth)
+	}
+
+	ago12h := time.Now().UTC().Add(-12 * time.Hour)
+	ago36h := time.Now().UTC().Add(-36 * time.Hour)
+	ago60h := time.Now().UTC().Add(-60 * time.Hour)
+
+	// "24h" should have 24 hour duration and not produce an error
+	dur24h, err := ParseWindowUTC("24h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h": %s`, err)
+	}
+	if dur24h.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "24h" to have duration 24 hour; actual: %f hours`, dur24h.Duration().Hours())
+	}
+	if !dur24h.Contains(ago12h) {
+		t.Fatalf(`expect: window "24h" to contain 12 hours ago; actual: %s doesn't contain %s`, dur24h, ago12h)
+	}
+	if dur24h.Contains(ago36h) {
+		t.Fatalf(`expect: window "24h" to not contain 36 hours ago; actual: %s contains %s`, dur24h, ago36h)
+	}
+
+	// "2d" should have 2 day duration and not produce an error
+	dur2d, err := ParseWindowUTC("2d")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2d": %s`, err)
+	}
+	if dur2d.Duration().Hours() != 48 {
+		t.Fatalf(`expect: window "2d" to have duration 48 hour; actual: %f hours`, dur2d.Duration().Hours())
+	}
+	if !dur2d.Contains(ago36h) {
+		t.Fatalf(`expect: window "2d" to contain 36 hours ago; actual: %s doesn't contain %s`, dur2d, ago36h)
+	}
+	if dur2d.Contains(ago60h) {
+		t.Fatalf(`expect: window "2d" to not contain 60 hours ago; actual: %s contains %s`, dur2d, ago60h)
+	}
+
+	// "24h offset 14h" should have 24 hour duration and not produce an error
+	dur24hOff14h, err := ParseWindowUTC("24h offset 14h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "24h offset 14h": %s`, err)
+	}
+	if dur24hOff14h.Duration().Hours() != 24 {
+		t.Fatalf(`expect: window "24h offset 14h" to have duration 24 hour; actual: %f hours`, dur24hOff14h.Duration().Hours())
+	}
+	if dur24hOff14h.Contains(ago12h) {
+		t.Fatalf(`expect: window "24h offset 14h" not to contain 12 hours ago; actual: %s contains %s`, dur24hOff14h, ago12h)
+	}
+	if !dur24hOff14h.Contains(ago36h) {
+		t.Fatalf(`expect: window "24h offset 14h" to contain 36 hours ago; actual: %s does not contain %s`, dur24hOff14h, ago36h)
+	}
+
+	april152020, _ := time.Parse(time.RFC3339, "2020-04-15T00:00:00Z")
+	april102020, _ := time.Parse(time.RFC3339, "2020-04-10T00:00:00Z")
+	april052020, _ := time.Parse(time.RFC3339, "2020-04-05T00:00:00Z")
+
+	// "2020-04-08T00:00:00Z,2020-04-12T00:00:00Z" should have 96 hour duration and not produce an error
+	april8to12, err := ParseWindowUTC("2020-04-08T00:00:00Z,2020-04-12T00:00:00Z")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2020-04-08T00:00:00Z,2020-04-12T00:00:00Z": %s`, err)
+	}
+	if april8to12.Duration().Hours() != 96 {
+		t.Fatalf(`expect: window %s to have duration 96 hour; actual: %f hours`, april8to12, april8to12.Duration().Hours())
+	}
+	if !april8to12.Contains(april102020) {
+		t.Fatalf(`expect: window April 8-12 to contain April 10; actual: %s doesn't contain %s`, april8to12, april102020)
+	}
+	if april8to12.Contains(april052020) {
+		t.Fatalf(`expect: window April 8-12 to not contain April 5; actual: %s contains %s`, april8to12, april052020)
+	}
+	if april8to12.Contains(april152020) {
+		t.Fatalf(`expect: window April 8-12 to not contain April 15; actual: %s contains %s`, april8to12, april152020)
+	}
+
+	march152020, _ := time.Parse(time.RFC3339, "2020-03-15T00:00:00Z")
+	march102020, _ := time.Parse(time.RFC3339, "2020-03-10T00:00:00Z")
+	march052020, _ := time.Parse(time.RFC3339, "2020-03-05T00:00:00Z")
+
+	// "1583712000,1583884800" should have 48 hour duration and not produce an error
+	march9to11, err := ParseWindowUTC("1583712000,1583884800")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2020-04-08T00:00:00Z,2020-04-12T00:00:00Z": %s`, err)
+	}
+	if march9to11.Duration().Hours() != 48 {
+		t.Fatalf(`expect: window %s to have duration 48 hour; actual: %f hours`, march9to11, march9to11.Duration().Hours())
+	}
+	if !march9to11.Contains(march102020) {
+		t.Fatalf(`expect: window March 9-11 to contain March 10; actual: %s doesn't contain %s`, march9to11, march102020)
+	}
+	if march9to11.Contains(march052020) {
+		t.Fatalf(`expect: window March 9-11 to not contain March 5; actual: %s contains %s`, march9to11, march052020)
+	}
+	if march9to11.Contains(march152020) {
+		t.Fatalf(`expect: window March 9-11 to not contain March 15; actual: %s contains %s`, march9to11, march152020)
+	}
+}
+
// TestParseWindowWithOffsetString verifies ParseWindowWithOffsetString in two
// stages: (1) with offset "UTC" it must agree with ParseWindowUTC for every
// supported window format; (2) with non-zero offsets, duration windows must
// describe the same absolute interval (shifted representation only), and
// calendar words ("today", "yesterday") must follow the local calendar day
// of the given offset.
func TestParseWindowWithOffsetString(t *testing.T) {
	// ParseWindowWithOffsetString should equal ParseWindowUTC when location == "UTC"
	// for all window string formats

	todayUTC, err := ParseWindowUTC("today")
	if err != nil {
		t.Fatalf(`unexpected error parsing "today": %s`, err)
	}
	todayTZ, err := ParseWindowWithOffsetString("today", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "today": %s`, err)
	}
	if !todayUTC.ApproximatelyEqual(todayTZ, time.Millisecond) {
		t.Fatalf(`expect: window "today" UTC to equal "today" with timezone "UTC"; actual: %s not equal %s`, todayUTC, todayTZ)
	}

	yesterdayUTC, err := ParseWindowUTC("yesterday")
	if err != nil {
		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
	}
	yesterdayTZ, err := ParseWindowWithOffsetString("yesterday", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
	}
	if !yesterdayUTC.ApproximatelyEqual(yesterdayTZ, time.Millisecond) {
		t.Fatalf(`expect: window "yesterday" UTC to equal "yesterday" with timezone "UTC"; actual: %s not equal %s`, yesterdayUTC, yesterdayTZ)
	}

	weekUTC, err := ParseWindowUTC("week")
	if err != nil {
		t.Fatalf(`unexpected error parsing "week": %s`, err)
	}
	weekTZ, err := ParseWindowWithOffsetString("week", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "week": %s`, err)
	}
	if !weekUTC.ApproximatelyEqual(weekTZ, time.Millisecond) {
		t.Fatalf(`expect: window "week" UTC to equal "week" with timezone "UTC"; actual: %s not equal %s`, weekUTC, weekTZ)
	}

	monthUTC, err := ParseWindowUTC("month")
	if err != nil {
		t.Fatalf(`unexpected error parsing "month": %s`, err)
	}
	monthTZ, err := ParseWindowWithOffsetString("month", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "month": %s`, err)
	}
	if !monthUTC.ApproximatelyEqual(monthTZ, time.Millisecond) {
		t.Fatalf(`expect: window "month" UTC to equal "month" with timezone "UTC"; actual: %s not equal %s`, monthUTC, monthTZ)
	}

	lastweekUTC, err := ParseWindowUTC("lastweek")
	if err != nil {
		t.Fatalf(`unexpected error parsing "lastweek": %s`, err)
	}
	lastweekTZ, err := ParseWindowWithOffsetString("lastweek", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "lastweek": %s`, err)
	}
	if !lastweekUTC.ApproximatelyEqual(lastweekTZ, time.Millisecond) {
		t.Fatalf(`expect: window "lastweek" UTC to equal "lastweek" with timezone "UTC"; actual: %s not equal %s`, lastweekUTC, lastweekTZ)
	}

	lastmonthUTC, err := ParseWindowUTC("lastmonth")
	if err != nil {
		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
	}
	lastmonthTZ, err := ParseWindowWithOffsetString("lastmonth", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "lastmonth": %s`, err)
	}
	if !lastmonthUTC.ApproximatelyEqual(lastmonthTZ, time.Millisecond) {
		t.Fatalf(`expect: window "lastmonth" UTC to equal "lastmonth" with timezone "UTC"; actual: %s not equal %s`, lastmonthUTC, lastmonthTZ)
	}

	dur10mUTC, err := ParseWindowUTC("10m")
	if err != nil {
		t.Fatalf(`unexpected error parsing "10m": %s`, err)
	}
	dur10mTZ, err := ParseWindowWithOffsetString("10m", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "10m": %s`, err)
	}
	if !dur10mUTC.ApproximatelyEqual(dur10mTZ, time.Millisecond) {
		t.Fatalf(`expect: window "10m" UTC to equal "10m" with timezone "UTC"; actual: %s not equal %s`, dur10mUTC, dur10mTZ)
	}

	dur24hUTC, err := ParseWindowUTC("24h")
	if err != nil {
		t.Fatalf(`unexpected error parsing "24h": %s`, err)
	}
	dur24hTZ, err := ParseWindowWithOffsetString("24h", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "24h": %s`, err)
	}
	if !dur24hUTC.ApproximatelyEqual(dur24hTZ, time.Millisecond) {
		t.Fatalf(`expect: window "24h" UTC to equal "24h" with timezone "UTC"; actual: %s not equal %s`, dur24hUTC, dur24hTZ)
	}

	dur37dUTC, err := ParseWindowUTC("37d")
	if err != nil {
		t.Fatalf(`unexpected error parsing "37d": %s`, err)
	}
	dur37dTZ, err := ParseWindowWithOffsetString("37d", "UTC")
	if err != nil {
		t.Fatalf(`unexpected error parsing "37d": %s`, err)
	}
	if !dur37dUTC.ApproximatelyEqual(dur37dTZ, time.Millisecond) {
		t.Fatalf(`expect: window "37d" UTC to equal "37d" with timezone "UTC"; actual: %s not equal %s`, dur37dUTC, dur37dTZ)
	}

	// ParseWindowWithOffsetString should be the correct relative to ParseWindowUTC; i.e.
	// - for durations, the times should match, but the representations should differ
	//   by the number of hours offset
	// - for words like "today" and "yesterday", the times may not match, in which
	//   case, for example, "today" UTC-08:00 might equal "yesterday" UTC

	// fmtWindow only compares date and time to the minute, not second or
	// timezone. Helper for comparing timezone shifted windows.
	fmtWindow := func(w Window) string {
		s := "nil"
		if w.start != nil {
			s = w.start.Format("2006-01-02T15:04")
		}

		e := "nil"
		if w.end != nil {
			e = w.end.Format("2006-01-02T15:04")
		}
		return fmt.Sprintf("[%s, %s]", s, e)
	}

	// Test UTC-08:00 (California), UTC+03:00 (Moscow), UTC+12:00 (New Zealand), and UTC itself
	for _, offsetHrs := range []int{-8, 3, 12, 0} {
		// build an offset string like "+03:00" / "-08:00" for the parser,
		// and the equivalent time.Duration for shifting windows back
		offStr := fmt.Sprintf("+%02d:00", offsetHrs)
		if offsetHrs < 0 {
			offStr = fmt.Sprintf("-%02d:00", -offsetHrs)
		}
		off := time.Duration(offsetHrs) * time.Hour

		dur10mTZ, err = ParseWindowWithOffsetString("10m", offStr)
		if err != nil {
			t.Fatalf(`unexpected error parsing "10m": %s`, err)
		}
		if !dur10mTZ.ApproximatelyEqual(dur10mUTC, time.Second) {
			t.Fatalf(`expect: window "10m" UTC to equal "10m" with timezone "%s"; actual: %s not equal %s`, offStr, dur10mUTC, dur10mTZ)
		}
		if fmtWindow(dur10mTZ.Shift(-off)) != fmtWindow(dur10mUTC) {
			t.Fatalf(`expect: date, hour, and minute of window "10m" UTC to equal that of "10m" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur10mUTC), fmtWindow(dur10mTZ.Shift(-off)))
		}

		dur24hTZ, err = ParseWindowWithOffsetString("24h", offStr)
		if err != nil {
			t.Fatalf(`unexpected error parsing "24h": %s`, err)
		}
		if !dur24hTZ.ApproximatelyEqual(dur24hUTC, time.Second) {
			t.Fatalf(`expect: window "24h" UTC to equal "24h" with timezone "%s"; actual: %s not equal %s`, offStr, dur24hUTC, dur24hTZ)
		}
		if fmtWindow(dur24hTZ.Shift(-off)) != fmtWindow(dur24hUTC) {
			t.Fatalf(`expect: date, hour, and minute of window "24h" UTC to equal that of "24h" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur24hUTC), fmtWindow(dur24hTZ.Shift(-off)))
		}

		dur37dTZ, err = ParseWindowWithOffsetString("37d", offStr)
		if err != nil {
			t.Fatalf(`unexpected error parsing "37d": %s`, err)
		}
		if !dur37dTZ.ApproximatelyEqual(dur37dUTC, time.Second) {
			t.Fatalf(`expect: window "37d" UTC to equal "37d" with timezone "%s"; actual: %s not equal %s`, offStr, dur37dUTC, dur37dTZ)
		}
		if fmtWindow(dur37dTZ.Shift(-off)) != fmtWindow(dur37dUTC) {
			t.Fatalf(`expect: date, hour, and minute of window "37d" UTC to equal that of "37d" %s shifted by %s; actual: %s not equal %s`, offStr, off, fmtWindow(dur37dUTC), fmtWindow(dur37dTZ.Shift(-off)))
		}

		// "today" and "yesterday" should comply with the current day in each
		// respective timezone, depending on if it is ahead of, equal to, or
		// behind UTC at the given moment.

		todayTZ, err = ParseWindowWithOffsetString("today", offStr)
		if err != nil {
			t.Fatalf(`unexpected error parsing "today": %s`, err)
		}

		yesterdayTZ, err = ParseWindowWithOffsetString("yesterday", offStr)
		if err != nil {
			t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
		}

		// determine whether the zone at offsetHrs is currently on a
		// different calendar day than UTC
		hoursSinceYesterdayUTC := time.Now().UTC().Sub(time.Now().UTC().Truncate(24.0 * time.Hour)).Hours()
		hoursUntilTomorrowUTC := 24.0 - hoursSinceYesterdayUTC
		aheadOfUTC := float64(offsetHrs)-hoursUntilTomorrowUTC > 0
		behindUTC := float64(offsetHrs)+hoursSinceYesterdayUTC < 0

		// yesterday in this timezone should equal today UTC
		if aheadOfUTC {
			if fmtWindow(yesterdayTZ) != fmtWindow(todayUTC) {
				t.Fatalf(`expect: window "today" UTC to equal "yesterday" with timezone "%s"; actual: %s not equal %s`, offStr, yesterdayTZ, todayUTC)
			}
		}

		// today in this timezone should equal yesterday UTC
		if behindUTC {
			if fmtWindow(todayTZ) != fmtWindow(yesterdayUTC) {
				t.Fatalf(`expect: window "today" UTC to equal "yesterday" with timezone "%s"; actual: %s not equal %s`, offStr, todayTZ, yesterdayUTC)
			}
		}

		// today in this timezone should equal today UTC, likewise for yesterday
		if !aheadOfUTC && !behindUTC {
			if fmtWindow(todayTZ) != fmtWindow(todayUTC) {
				t.Fatalf(`expect: window "today" UTC to equal "today" with timezone "%s"; actual: %s not equal %s`, offStr, todayTZ, todayUTC)
			}
			// yesterday in this timezone should equal yesterday UTC
			if fmtWindow(yesterdayTZ) != fmtWindow(yesterdayUTC) {
				t.Fatalf(`expect: window "yesterday" UTC to equal "yesterday" with timezone "%s"; actual: %s not equal %s`, offStr, yesterdayTZ, yesterdayUTC)
			}
		}
	}

}
+
+// TODO niko/etl
+// func TestWindow_Contains(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Duration(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_End(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Equal(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_ExpandStart(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_ExpandEnd(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_Start(t *testing.T) {}
+
+// TODO niko/etl
+// func TestWindow_String(t *testing.T) {}
+
+func TestWindow_ToDurationOffset(t *testing.T) {
+	w, err := ParseWindowUTC("1d")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1d": %s`, err)
+	}
+	dur, off := w.ToDurationOffset()
+	if dur != "1d" {
+		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("3h")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1d": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "3h" {
+		t.Fatalf(`expect: window to be "3h"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("10m")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1d": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "10m" {
+		t.Fatalf(`expect: window to be "10m"; actual: "%s"`, dur)
+	}
+	if off != "" {
+		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("1589448338,1589534798")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1589448338,1589534798": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "1441m" {
+		t.Fatalf(`expect: window to be "1441m"; actual: "%s"`, dur)
+	}
+	if off == "" {
+		t.Fatalf(`expect: offset to not be ""; actual: "%s"`, off)
+	}
+
+	w, err = ParseWindowUTC("yesterday")
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "1589448338,1589534798": %s`, err)
+	}
+	dur, off = w.ToDurationOffset()
+	if dur != "1d" {
+		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
+	}
+}

+ 400 - 0
pkg/util/buffer.go

@@ -0,0 +1,400 @@
+package util
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io"
+	"math"
+	"reflect"
+	"unsafe"
+)
+
// NonPrimitiveTypeError represents an error where the user provided a non-primitive data type for reading/writing.
// NOTE(review): by Go convention this would be named ErrNonPrimitiveType with a
// lowercase, unpunctuated message; renaming would break existing callers, so it
// is left as-is.
var NonPrimitiveTypeError error = errors.New("Type provided to read/write does not fit inside 8 bytes.")

// Buffer is a utility type which implements a very basic binary protocol for
// writing core go types. All values are encoded little-endian; see the
// Write*/Read* methods for the per-type wire formats.
type Buffer struct {
	b *bytes.Buffer
}
+
+// NewBuffer creates a new Buffer instance using LittleEndian ByteOrder.
+func NewBuffer() *Buffer {
+	var b bytes.Buffer
+	return &Buffer{
+		b: &b,
+	}
+}
+
// NewBufferFromBytes creates a new Buffer instance using the provided byte slice.
// The new buffer assumes ownership of the byte slice; the caller must not
// modify it afterwards.
func NewBufferFromBytes(b []byte) *Buffer {
	return &Buffer{
		b: bytes.NewBuffer(b),
	}
}

// NewBufferFrom creates a new Buffer instance using the remaining unread data from the
// provided Buffer instance. The new buffer assumes ownership of the underlying data
// (Bytes returns a view, not a copy, so both buffers share storage).
func NewBufferFrom(b *Buffer) *Buffer {
	bb := b.Bytes()
	return &Buffer{
		b: bytes.NewBuffer(bb),
	}
}
+
// WriteBool writes a bool value to the buffer as a single byte (1 for true,
// 0 for false; see write).
func (b *Buffer) WriteBool(t bool) {
	write(b.b, t)
}

// WriteInt writes an int value to the buffer.
// NOTE: the value is narrowed to int32 on the wire, so values outside the
// int32 range are truncated; ReadInt performs the symmetric decode.
func (b *Buffer) WriteInt(i int) {
	write(b.b, int32(i))
}

// WriteInt8 writes an int8 value to the buffer (1 byte).
func (b *Buffer) WriteInt8(i int8) {
	write(b.b, i)
}

// WriteInt16 writes an int16 value to the buffer (2 bytes, little-endian).
func (b *Buffer) WriteInt16(i int16) {
	write(b.b, i)
}

// WriteInt32 writes an int32 value to the buffer (4 bytes, little-endian).
func (b *Buffer) WriteInt32(i int32) {
	write(b.b, i)
}

// WriteInt64 writes an int64 value to the buffer (8 bytes, little-endian).
func (b *Buffer) WriteInt64(i int64) {
	write(b.b, i)
}
+
+// WriteUInt writes a uint value to the buffer.
+func (b *Buffer) WriteUInt(i uint) {
+	write(b.b, i)
+}
+
// WriteUInt8 writes a uint8 value to the buffer (1 byte).
func (b *Buffer) WriteUInt8(i uint8) {
	write(b.b, i)
}

// WriteUInt16 writes a uint16 value to the buffer (2 bytes, little-endian).
func (b *Buffer) WriteUInt16(i uint16) {
	write(b.b, i)
}

// WriteUInt32 writes a uint32 value to the buffer (4 bytes, little-endian).
func (b *Buffer) WriteUInt32(i uint32) {
	write(b.b, i)
}

// WriteUInt64 writes a uint64 value to the buffer (8 bytes, little-endian).
func (b *Buffer) WriteUInt64(i uint64) {
	write(b.b, i)
}

// WriteFloat32 writes a float32 value to the buffer (4 bytes, IEEE 754 bits,
// little-endian).
func (b *Buffer) WriteFloat32(i float32) {
	write(b.b, i)
}

// WriteFloat64 writes a float64 value to the buffer (8 bytes, IEEE 754 bits,
// little-endian).
func (b *Buffer) WriteFloat64(i float64) {
	write(b.b, i)
}

// WriteString writes the string's length as a uint16 followed by the string contents.
// NOTE: strings longer than 65535 bytes will have their length truncated by
// the uint16 conversion, corrupting the framing for subsequent reads.
func (b *Buffer) WriteString(i string) {
	s := stringToBytes(i)
	write(b.b, uint16(len(s)))
	b.b.Write(s)
}

// WriteBytes writes the contents of the byte slice to the buffer verbatim —
// no length prefix is written, so the reader must know the length (see
// ReadBytes).
func (b *Buffer) WriteBytes(bytes []byte) {
	b.b.Write(bytes)
}
+
// ReadBool reads a bool value from the buffer (any non-zero byte is true).
func (b *Buffer) ReadBool() bool {
	var i bool
	read(b.b, &i)
	return i
}

// ReadInt reads an int value from the buffer. It decodes the int32 written
// by WriteInt and widens it back to int.
func (b *Buffer) ReadInt() int {
	var i int32
	read(b.b, &i)
	return int(i)
}

// ReadInt8 reads an int8 value from the buffer.
func (b *Buffer) ReadInt8() int8 {
	var i int8
	read(b.b, &i)
	return i
}

// ReadInt16 reads an int16 value from the buffer.
func (b *Buffer) ReadInt16() int16 {
	var i int16
	read(b.b, &i)
	return i
}

// ReadInt32 reads an int32 value from the buffer.
func (b *Buffer) ReadInt32() int32 {
	var i int32
	read(b.b, &i)
	return i
}

// ReadInt64 reads an int64 value from the buffer.
func (b *Buffer) ReadInt64() int64 {
	var i int64
	read(b.b, &i)
	return i
}
+
+// ReadUInt reads a uint value from the buffer.
+func (b *Buffer) ReadUInt() uint {
+	var i uint
+	read(b.b, &i)
+	return i
+}
+
// ReadUInt8 reads a uint8 value from the buffer.
func (b *Buffer) ReadUInt8() uint8 {
	var i uint8
	read(b.b, &i)
	return i
}

// ReadUInt16 reads a uint16 value from the buffer.
func (b *Buffer) ReadUInt16() uint16 {
	var i uint16
	read(b.b, &i)
	return i
}

// ReadUInt32 reads a uint32 value from the buffer.
func (b *Buffer) ReadUInt32() uint32 {
	var i uint32
	read(b.b, &i)
	return i
}

// ReadUInt64 reads a uint64 value from the buffer.
func (b *Buffer) ReadUInt64() uint64 {
	var i uint64
	read(b.b, &i)
	return i
}

// ReadFloat32 reads a float32 value from the buffer.
func (b *Buffer) ReadFloat32() float32 {
	var i float32
	read(b.b, &i)
	return i
}

// ReadFloat64 reads a float64 value from the buffer.
func (b *Buffer) ReadFloat64() float64 {
	var i float64
	read(b.b, &i)
	return i
}

// ReadString reads a uint16 value from the buffer representing the string's length,
// then uses the length to extract the exact length []byte representing the string.
// NOTE(review): the result aliases the buffer's internal storage via
// bytesToString (no copy); confirm callers copy the string if they retain it
// past further buffer mutation.
func (b *Buffer) ReadString() string {
	var l uint16
	read(b.b, &l)
	return bytesToString(b.b.Next(int(l)))
}

// ReadBytes reads the specified length from the buffer and returns the byte slice.
// The returned slice is a view into the buffer's storage (bytes.Buffer.Next).
func (b *Buffer) ReadBytes(length int) []byte {
	return b.b.Next(length)
}

// Bytes returns the unread portion of the underlying buffer storage.
func (b *Buffer) Bytes() []byte {
	return b.b.Bytes()
}
+
// read reads little-endian structured binary data from r into data, which
// must be a pointer to one of the primitive types listed in intDataSize.
// It returns NonPrimitiveTypeError for any other type. This mirrors
// encoding/binary.Read's fast path without the reflection fallback.
func read(r *bytes.Buffer, data interface{}) error {
	order := binary.LittleEndian

	// stack-allocated scratch space; at most 8 bytes for any supported type
	var b [8]byte
	if n := intDataSize(data); n != 0 {
		bs := b[:n]

		if _, err := readFull(r, bs); err != nil {
			return err
		}

		switch data := data.(type) {
		case *bool:
			*data = bs[0] != 0
		case *int8:
			*data = int8(bs[0])
		case *uint8:
			*data = bs[0]
		case *int16:
			*data = int16(order.Uint16(bs))
		case *uint16:
			*data = order.Uint16(bs)
		case *int32:
			*data = int32(order.Uint32(bs))
		case *uint32:
			*data = order.Uint32(bs)
		case *int64:
			*data = int64(order.Uint64(bs))
		case *uint64:
			*data = order.Uint64(bs)
		case *float32:
			*data = math.Float32frombits(order.Uint32(bs))
		case *float64:
			*data = math.Float64frombits(order.Uint64(bs))
		default:
			// non-pointer values of supported types reach here (they have a
			// size but cannot be written into)
			n = 0 // fast path doesn't apply
		}

		if n != 0 {
			return nil
		}
	}

	return NonPrimitiveTypeError
}
+
// readFull is a bytes.Buffer specific implementation of io.ReadFull which
// avoids escaping our stack allocated scratch bytes. It reads exactly
// len(buf) bytes, converting a partial-read EOF into io.ErrUnexpectedEOF.
func readFull(r *bytes.Buffer, buf []byte) (n int, err error) {
	min := len(buf)
	for n < min && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	if n >= min {
		err = nil
	} else if n > 0 && err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return
}
+
// write writes the little-endian binary representation of data into w.
// data may be a value or pointer of any primitive type listed in
// intDataSize; any other type yields NonPrimitiveTypeError with nothing
// written. This mirrors encoding/binary.Write's fast path without the
// reflection fallback.
func write(w *bytes.Buffer, data interface{}) error {
	order := binary.LittleEndian

	// stack-allocated scratch space; at most 8 bytes for any supported type
	var b [8]byte
	if n := intDataSize(data); n != 0 {
		bs := b[:n]

		switch v := data.(type) {
		case *bool:
			if *v {
				bs[0] = 1
			} else {
				bs[0] = 0
			}
		case bool:
			if v {
				bs[0] = 1
			} else {
				bs[0] = 0
			}
		case *int8:
			bs[0] = byte(*v)
		case int8:
			bs[0] = byte(v)
		case *uint8:
			bs[0] = *v
		case uint8:
			bs[0] = v
		case *int16:
			order.PutUint16(bs, uint16(*v))
		case int16:
			order.PutUint16(bs, uint16(v))
		case *uint16:
			order.PutUint16(bs, *v)
		case uint16:
			order.PutUint16(bs, v)
		case *int32:
			order.PutUint32(bs, uint32(*v))
		case int32:
			order.PutUint32(bs, uint32(v))
		case *uint32:
			order.PutUint32(bs, *v)
		case uint32:
			order.PutUint32(bs, v)
		case *int64:
			order.PutUint64(bs, uint64(*v))
		case int64:
			order.PutUint64(bs, uint64(v))
		case *uint64:
			order.PutUint64(bs, *v)
		case uint64:
			order.PutUint64(bs, v)
		case *float32:
			order.PutUint32(bs, math.Float32bits(*v))
		case float32:
			order.PutUint32(bs, math.Float32bits(v))
		case *float64:
			order.PutUint64(bs, math.Float64bits(*v))
		case float64:
			order.PutUint64(bs, math.Float64bits(v))
		}

		_, err := w.Write(bs)
		return err
	}

	return NonPrimitiveTypeError
}
+
// intDataSize returns the number of bytes required to encode data, or zero
// when the type cannot be handled by the fast path in read or write.
func intDataSize(data interface{}) int {
	switch data.(type) {
	case bool, int8, uint8, *bool, *int8, *uint8:
		return 1
	case int16, uint16, *int16, *uint16:
		return 2
	case int32, uint32, *int32, *uint32, float32, *float32:
		return 4
	case int64, uint64, *int64, *uint64, float64, *float64:
		return 8
	default:
		return 0
	}
}
+
// bytesToString is a direct byte-to-string conversion that doesn't allocate.
// NOTE: the result aliases b's storage; mutating b afterwards breaks string
// immutability guarantees, so callers must not modify b while the string lives.
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// stringToBytes is a direct string-to-byte conversion that doesn't allocate.
// NOTE: the result aliases the (immutable) string data and must never be
// written to. NOTE(review): reflect.StringHeader/SliceHeader are the
// pre-unsafe.Slice idiom; fine for the Go version this file targets.
func stringToBytes(s string) (b []byte) {
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Data = strh.Data
	sh.Len = strh.Len
	sh.Cap = strh.Len
	return b
}

+ 65 - 0
pkg/util/strings.go

@@ -0,0 +1,65 @@
+package util
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
// init seeds the global math/rand source so that RandSeq output differs
// between process runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
+
// alpha is the character set drawn from by RandSeq.
var alpha = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// alphanumeric extends alpha with the decimal digits.
// NOTE(review): unused within this file; presumably consumed elsewhere in the
// package — confirm before removing.
var alphanumeric = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

// Binary (IEC) byte-size constants: each step is a factor of 1024.
const (
	_ = 1 << (10 * iota)
	// KiB is bytes per Kibibyte
	KiB
	// MiB is bytes per Mebibyte
	MiB
	// GiB is bytes per Gibibyte
	GiB
	// TiB is bytes per Tebibyte
	TiB
)
+
+// RandSeq generates a pseudo-random alphabetic string of the given length
+func RandSeq(n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = alpha[rand.Intn(len(alpha))]
+	}
+	return string(b)
+}
+
+// FormatBytes takes a number of bytes and formats it as a string
+func FormatBytes(numBytes int64) string {
+	if numBytes > TiB {
+		return fmt.Sprintf("%.2fTiB", float64(numBytes)/TiB)
+	}
+	if numBytes > GiB {
+		return fmt.Sprintf("%.2fGiB", float64(numBytes)/GiB)
+	}
+	if numBytes > MiB {
+		return fmt.Sprintf("%.2fMiB", float64(numBytes)/MiB)
+	}
+	if numBytes > KiB {
+		return fmt.Sprintf("%.2fKiB", float64(numBytes)/KiB)
+	}
+	return fmt.Sprintf("%dB", numBytes)
+}
+
+// FormatUTCOffset converts a duration to a string of format "-07:00"
+func FormatUTCOffset(dur time.Duration) string {
+	utcOffSig := "+"
+	if dur.Hours() < 0 {
+		utcOffSig = "-"
+	}
+	utcOffHrs := int(math.Trunc(math.Abs(dur.Hours())))
+	utcOffMin := int(math.Abs(dur.Minutes())) - (utcOffHrs * 60)
+
+	return fmt.Sprintf("%s%02d:%02d", utcOffSig, utcOffHrs, utcOffMin)
+}