فهرست منبع

Draft: add ProportionalAssetResourceCosts to Allocation API

Signed-off-by: Niko Kovacevic <nikovacevic@gmail.com>
Signed-off-by: Alex Meijer <ameijer@kubecost.com>
Niko Kovacevic 3 سال پیش
والد
کامیت
c8e9cf474a
5فایلهای تغییر یافته به همراه433 افزوده شده و 68 حذف شده
  1. 1 0
      pkg/cmd/costmodel/costmodel.go
  2. 14 48
      pkg/costmodel/aggregation.go
  3. 170 0
      pkg/costmodel/assets.go
  4. 121 0
      pkg/costmodel/costmodel.go
  5. 127 20
      pkg/kubecost/allocation.go

+ 1 - 0
pkg/cmd/costmodel/costmodel.go

@@ -30,6 +30,7 @@ func Execute(opts *CostModelOpts) error {
 
 	rootMux := http.NewServeMux()
 	a.Router.GET("/healthz", Healthz)
+	a.Router.GET("/allocation", a.ComputeAllocationHandler)
 	a.Router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
 	rootMux.Handle("/", a.Router)
 	rootMux.Handle("/metrics", promhttp.Handler())

+ 14 - 48
pkg/costmodel/aggregation.go

@@ -1076,7 +1076,7 @@ func (a *Accesses) ComputeAggregateCostModel(promClient prometheusClient.Client,
 		if durMins%60 != 0 || durMins < 3*60 { // not divisible by 1h or less than 3h
 			resolution = time.Minute
 		}
-	} else {                    // greater than 1d
+	} else { // greater than 1d
 		if durMins >= 7*24*60 { // greater than (or equal to) 7 days
 			resolution = 24.0 * time.Hour
 		} else if durMins >= 2*24*60 { // greater than (or equal to) 2 days
@@ -2221,15 +2221,15 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 		http.Error(w, fmt.Sprintf("Invalid 'window' parameter: %s", err), http.StatusBadRequest)
 	}
 
+	// Resolution is an optional parameter, defaulting to the configured ETL
+	// resolution.
+	resolution := qp.GetDuration("resolution", env.GetETLResolution())
+
 	// Step is an optional parameter that defines the duration per-set, i.e.
 	// the window for an AllocationSet, of the AllocationSetRange to be
 	// computed. Defaults to the window size, making one set.
 	step := qp.GetDuration("step", window.Duration())
 
-	// Resolution is an optional parameter, defaulting to the configured ETL
-	// resolution.
-	resolution := qp.GetDuration("resolution", env.GetETLResolution())
-
 	// Aggregation is an optional comma-separated list of fields by which to
 	// aggregate results. Some fields allow a sub-field, which is distinguished
 	// with a colon; e.g. "label:app".
@@ -2239,52 +2239,18 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 		http.Error(w, fmt.Sprintf("Invalid 'aggregate' parameter: %s", err), http.StatusBadRequest)
 	}
 
-	// Accumulate is an optional parameter, defaulting to false, which if true
-	// sums each Set in the Range, producing one Set.
-	accumulate := qp.GetBool("accumulate", false)
-
-	// Accumulate is an optional parameter that accumulates an AllocationSetRange
-	// by the resolution of the given time duration.
-	// Defaults to 0. If a value is not passed then the parameter is not used.
-	accumulateBy := kubecost.AccumulateOption(qp.Get("accumulateBy", ""))
-
-	// if accumulateBy is not explicitly set, and accumulate is true, ensure result is accumulated
-	if accumulateBy == kubecost.AccumulateOptionNone && accumulate {
-		accumulateBy = kubecost.AccumulateOptionAll
-	}
-
-	// Query for AllocationSets in increments of the given step duration,
-	// appending each to the AllocationSetRange.
-	asr := kubecost.NewAllocationSetRange()
-	stepStart := *window.Start()
-	for window.End().After(stepStart) {
-		stepEnd := stepStart.Add(step)
-		stepWindow := kubecost.NewWindow(&stepStart, &stepEnd)
-
-		as, err := a.Model.ComputeAllocation(*stepWindow.Start(), *stepWindow.End(), resolution)
-		if err != nil {
-			WriteError(w, InternalServerError(err.Error()))
-			return
-		}
-		asr.Append(as)
+	// IncludeIdle, if true, uses Asset data to incorporate Idle Allocation
+	includeIdle := qp.GetBool("includeIdle", false)
 
-		stepStart = stepEnd
-	}
+	// IdleByNode, if true, computes idle allocations at the node level.
+	// Otherwise it is computed at the cluster level. (Not relevant if idle
+	// is not included.)
+	idleByNode := qp.GetBool("idleByNode", false)
 
-	// Aggregate, if requested
-	if len(aggregateBy) > 0 {
-		err = asr.AggregateBy(aggregateBy, nil)
-		if err != nil {
-			WriteError(w, InternalServerError(err.Error()))
-			return
-		}
-	}
-
-	// Accumulate, if requested
-	if accumulateBy != kubecost.AccumulateOptionNone {
-		asr, err = asr.Accumulate(accumulateBy)
-	}
+	// IncludeProportionalAssetResourceCosts, if true, attaches to each
+	// allocation its proportional share of the underlying asset's
+	// per-resource costs.
+	includeProportionalAssetResourceCosts := qp.GetBool("includeProportionalAssetResourceCosts", false)
 
+	asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts)
 	if err != nil {
 		WriteError(w, InternalServerError(err.Error()))
 		return

+ 170 - 0
pkg/costmodel/assets.go

@@ -0,0 +1,170 @@
+package costmodel
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// ComputeAssets queries cluster nodes, load balancers, and disks for the
+// given [start, end] range and converts each into a kubecost Asset,
+// returning them all in a single AssetSet. Any resource whose recorded
+// start or end falls outside the window is clamped to the window bounds
+// (logged at debug level), and hour-scaled quantities use the clamped span.
+func (cm *CostModel) ComputeAssets(start, end time.Time) (*kubecost.AssetSet, error) {
+	assetSet := kubecost.NewAssetSet(start, end)
+
+	nodeMap, err := cm.ClusterNodes(start, end)
+	if err != nil {
+		return nil, fmt.Errorf("error computing node assets for %s: %w", kubecost.NewClosedWindow(start, end), err)
+
+	}
+
+	lbMap, err := cm.ClusterLoadBalancers(start, end)
+	if err != nil {
+		return nil, fmt.Errorf("error computing load balancer assets for %s: %w", kubecost.NewClosedWindow(start, end), err)
+	}
+
+	diskMap, err := cm.ClusterDisks(start, end)
+	if err != nil {
+		return nil, fmt.Errorf("error computing disk assets for %s: %w", kubecost.NewClosedWindow(start, end), err)
+	}
+
+	// Convert each queried disk into a kubecost.Disk asset; byte-hours are
+	// scaled by the clamped run time, and the used/max pointers are copied
+	// only when present so absent data stays nil in the output.
+	for _, d := range diskMap {
+		s := d.Start
+		if s.Before(start) || s.After(end) {
+			log.Debugf("CostModel.ComputeAssets: disk '%s' start outside window: %s not in [%s, %s]", d.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			s = start
+		}
+
+		e := d.End
+		if e.Before(start) || e.After(end) {
+			log.Debugf("CostModel.ComputeAssets: disk '%s' end outside window: %s not in [%s, %s]", d.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			e = end
+		}
+
+		hours := e.Sub(s).Hours()
+
+		disk := kubecost.NewDisk(d.Name, d.Cluster, d.ProviderID, s, e, kubecost.NewWindow(&start, &end))
+		cm.propertiesFromCluster(disk.Properties)
+		disk.Cost = d.Cost
+		disk.ByteHours = d.Bytes * hours
+		if d.BytesUsedAvgPtr != nil {
+			byteHours := *d.BytesUsedAvgPtr * hours
+			disk.ByteHoursUsed = &byteHours
+		}
+		if d.BytesUsedMaxPtr != nil {
+			usageMax := *d.BytesUsedMaxPtr
+			disk.ByteUsageMax = &usageMax
+		}
+
+		// Local is a float on the asset (1.0 = local disk, 0.0 = attached).
+		if d.Local {
+			disk.Local = 1.0
+		}
+		disk.Breakdown = &kubecost.Breakdown{
+			Idle:   d.Breakdown.Idle,
+			System: d.Breakdown.System,
+			User:   d.Breakdown.User,
+			Other:  d.Breakdown.Other,
+		}
+		disk.StorageClass = d.StorageClass
+		disk.VolumeName = d.VolumeName
+		disk.ClaimName = d.ClaimName
+		disk.ClaimNamespace = d.ClaimNamespace
+		assetSet.Insert(disk, nil)
+	}
+
+	// Convert each load balancer into a kubecost.LoadBalancer asset; only
+	// the window-clamped run time and total cost carry over.
+	for _, lb := range lbMap {
+		s := lb.Start
+		if s.Before(start) || s.After(end) {
+			log.Debugf("CostModel.ComputeAssets: load balancer '%s' start outside window: %s not in [%s, %s]", lb.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			s = start
+		}
+
+		e := lb.End
+		if e.Before(start) || e.After(end) {
+			log.Debugf("CostModel.ComputeAssets: load balancer '%s' end outside window: %s not in [%s, %s]", lb.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			e = end
+		}
+
+		loadBalancer := kubecost.NewLoadBalancer(lb.Name, lb.Cluster, lb.ProviderID, s, e, kubecost.NewWindow(&start, &end))
+		cm.propertiesFromCluster(loadBalancer.Properties)
+		loadBalancer.Cost = lb.Cost
+		assetSet.Insert(loadBalancer, nil)
+	}
+
+	// Convert each node into a kubecost.Node asset, carrying per-resource
+	// costs, breakdowns, and labels; resource-hours scale by clamped hours.
+	for _, n := range nodeMap {
+		s := n.Start
+		if s.Before(start) || s.After(end) {
+			log.Debugf("CostModel.ComputeAssets: node '%s' start outside window: %s not in [%s, %s]", n.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			s = start
+		}
+
+		e := n.End
+		if e.Before(start) || e.After(end) {
+			log.Debugf("CostModel.ComputeAssets: node '%s' end outside window: %s not in [%s, %s]", n.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
+			e = end
+		}
+
+		hours := e.Sub(s).Hours()
+
+		node := kubecost.NewNode(n.Name, n.Cluster, n.ProviderID, s, e, kubecost.NewWindow(&start, &end))
+		cm.propertiesFromCluster(node.Properties)
+		node.NodeType = n.NodeType
+		node.CPUCoreHours = n.CPUCores * hours
+		node.RAMByteHours = n.RAMBytes * hours
+		node.GPUHours = n.GPUCount * hours
+		node.CPUBreakdown = &kubecost.Breakdown{
+			Idle:   n.CPUBreakdown.Idle,
+			System: n.CPUBreakdown.System,
+			User:   n.CPUBreakdown.User,
+			Other:  n.CPUBreakdown.Other,
+		}
+		node.RAMBreakdown = &kubecost.Breakdown{
+			Idle:   n.RAMBreakdown.Idle,
+			System: n.RAMBreakdown.System,
+			User:   n.RAMBreakdown.User,
+			Other:  n.RAMBreakdown.Other,
+		}
+		node.CPUCost = n.CPUCost
+		node.GPUCost = n.GPUCost
+		node.GPUCount = n.GPUCount
+		node.RAMCost = n.RAMCost
+		node.Discount = n.Discount
+		// Preemptible is a float on the asset (1.0 = preemptible/spot node).
+		if n.Preemptible {
+			node.Preemptible = 1.0
+		}
+		node.SetLabels(kubecost.AssetLabels(n.Labels))
+		assetSet.Insert(node, nil)
+	}
+
+	return assetSet, nil
+}
+
+// ClusterDisks returns per-disk cost and usage data for the given range by
+// delegating to the package-level ClusterDisks query against Prometheus.
+func (cm *CostModel) ClusterDisks(start, end time.Time) (map[DiskIdentifier]*Disk, error) {
+	return ClusterDisks(cm.PrometheusClient, cm.Provider, start, end)
+}
+
+// ClusterLoadBalancers returns per-load-balancer cost data for the given
+// range by delegating to the package-level ClusterLoadBalancers query.
+func (cm *CostModel) ClusterLoadBalancers(start, end time.Time) (map[LoadBalancerIdentifier]*LoadBalancer, error) {
+	return ClusterLoadBalancers(cm.PrometheusClient, start, end)
+}
+
+// ClusterNodes returns per-node cost and resource data for the given range
+// by delegating to the package-level ClusterNodes query.
+func (cm *CostModel) ClusterNodes(start, end time.Time) (map[NodeIdentifier]*Node, error) {
+	return ClusterNodes(cm.Provider, cm.PrometheusClient, start, end)
+}
+
+// propertiesFromCluster populates static cluster properties (project,
+// account, provider) onto an individual asset's properties, looked up by
+// the asset's cluster ID. It is a no-op when the cluster is unset or not
+// found in the ClusterMap.
+func (cm *CostModel) propertiesFromCluster(props *kubecost.AssetProperties) {
+	// If properties does not have cluster value, do nothing
+	if props.Cluster == "" {
+		return
+	}
+
+	clusterMap := cm.ClusterMap.AsMap()
+	ci, ok := clusterMap[props.Cluster]
+	if !ok {
+		// Fixed typo in the log prefix: "CostMode." -> "CostModel.",
+		// matching the prefixes used elsewhere in this file.
+		log.Debugf("CostModel.propertiesFromCluster: cluster '%s' was not found in ClusterMap", props.Cluster)
+		return
+	}
+
+	props.Project = ci.Project
+	props.Account = ci.Account
+	props.Provider = ci.Provider
+}

+ 121 - 0
pkg/costmodel/costmodel.go

@@ -2293,3 +2293,124 @@ func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch
 		ch <- fmt.Sprintf("%s took %s", name, time.Since(start))
 	}
 }
+
+// QueryAllocation computes an AllocationSetRange for the given window, one
+// AllocationSet per step, optionally inserting idle allocations (derived
+// from Asset data) and proportional asset resource costs, then aggregates
+// the result by the given fields.
+func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts bool) (*kubecost.AllocationSetRange, error) {
+	// Validate window is legal
+	if window.IsOpen() || window.IsNegative() {
+		return nil, fmt.Errorf("illegal window: %s", window)
+	}
+
+	// Proportional asset resource costs are derived from idle coefficients,
+	// so idle computation is required whenever they are requested.
+	if includeProportionalAssetResourceCosts {
+		includeIdle = true
+	}
+
+	// Begin with empty response
+	asr := kubecost.NewAllocationSetRange()
+
+	// Query for AllocationSets in increments of the given step duration,
+	// appending each to the response. The final step is clamped to the
+	// window's end so a window not evenly divisible by step does not
+	// query beyond the requested range.
+	stepStart := *window.Start()
+	stepEnd := stepStart.Add(step)
+	for window.End().After(stepStart) {
+		if stepEnd.After(*window.End()) {
+			stepEnd = *window.End()
+		}
+
+		allocSet, err := cm.ComputeAllocation(stepStart, stepEnd, resolution)
+		if err != nil {
+			return nil, fmt.Errorf("error computing allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
+		}
+
+		if includeIdle {
+			assetSet, err := cm.ComputeAssets(stepStart, stepEnd)
+			if err != nil {
+				return nil, fmt.Errorf("error computing assets for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
+			}
+
+			// Pass the caller's idleByNode preference through. It was
+			// previously hard-coded to true, which silently ignored the
+			// parameter and always produced node-level idle.
+			idleSet, err := computeIdleAllocations(allocSet, assetSet, idleByNode)
+			if err != nil {
+				return nil, fmt.Errorf("error computing idle allocations for %s: %w", kubecost.NewClosedWindow(stepStart, stepEnd), err)
+			}
+
+			for _, idleAlloc := range idleSet.Allocations {
+				allocSet.Insert(idleAlloc)
+			}
+		}
+
+		asr.Append(allocSet)
+
+		stepStart = stepEnd
+		stepEnd = stepStart.Add(step)
+	}
+
+	// Set aggregation options and aggregate
+	opts := &kubecost.AllocationAggregationOptions{
+		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
+	}
+
+	err := asr.AggregateBy(aggregate, opts)
+	if err != nil {
+		return nil, fmt.Errorf("error aggregating for %s: %w", window, err)
+	}
+
+	return asr, nil
+}
+
+// computeIdleAllocations derives idle Allocations for the given window as
+// the per-resource difference between total asset cost and total allocated
+// cost, keyed either by node (idleByNode) or by cluster.
+func computeIdleAllocations(allocSet *kubecost.AllocationSet, assetSet *kubecost.AssetSet, idleByNode bool) (*kubecost.AllocationSet, error) {
+	if !allocSet.Window.Equal(assetSet.Window) {
+		return nil, fmt.Errorf("cannot compute idle allocations for mismatched sets: %s does not equal %s", allocSet.Window, assetSet.Window)
+	}
+
+	var allocTotals map[string]*kubecost.AllocationTotals
+	var assetTotals map[string]*kubecost.AssetTotals
+
+	if idleByNode {
+		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationNodeProp)
+		assetTotals = kubecost.ComputeAssetTotals(assetSet, kubecost.AssetNodeProp)
+	} else {
+		// Cluster-level idle. Previously both branches computed node-level
+		// totals, making the idleByNode flag a no-op.
+		allocTotals = kubecost.ComputeAllocationTotals(allocSet, kubecost.AllocationClusterProp)
+		assetTotals = kubecost.ComputeAssetTotals(assetSet, kubecost.AssetClusterProp)
+	}
+
+	start, end := *allocSet.Window.Start(), *allocSet.Window.End()
+	idleSet := kubecost.NewAllocationSet(start, end)
+
+	for key, assetTotal := range assetTotals {
+		allocTotal, ok := allocTotals[key]
+		if !ok {
+			log.Warnf("ETL: did not find allocations for asset key: %s", key)
+
+			// Use a zero-value set of totals. This indicates either (1) an
+			// error computing totals, or (2) that no allocations ran on the
+			// given node for the given window.
+			allocTotal = &kubecost.AllocationTotals{
+				Cluster: assetTotal.Cluster,
+				Node:    assetTotal.Node,
+				Start:   assetTotal.Start,
+				End:     assetTotal.End,
+			}
+		}
+
+		// Insert one idle allocation for each key (whether by node or
+		// by cluster), defined as the difference between the total
+		// asset cost and the allocated cost per-resource.
+		name := fmt.Sprintf("%s/%s", key, kubecost.IdleSuffix)
+		err := idleSet.Insert(&kubecost.Allocation{
+			Name:   name,
+			Window: idleSet.Window.Clone(),
+			Properties: &kubecost.AllocationProperties{
+				Cluster:    assetTotal.Cluster,
+				Node:       assetTotal.Node,
+				ProviderID: assetTotal.Node,
+			},
+			Start:   assetTotal.Start,
+			End:     assetTotal.End,
+			CPUCost: assetTotal.TotalCPUCost() - allocTotal.TotalCPUCost(),
+			GPUCost: assetTotal.TotalGPUCost() - allocTotal.TotalGPUCost(),
+			RAMCost: assetTotal.TotalRAMCost() - allocTotal.TotalRAMCost(),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to insert idle allocation %s: %w", name, err)
+		}
+	}
+
+	return idleSet, nil
+}

+ 127 - 20
pkg/kubecost/allocation.go

@@ -81,6 +81,11 @@ type Allocation struct {
 	// RawAllocationOnly is a pointer so if it is not present it will be
 	// marshalled as null rather than as an object with Go default values.
 	RawAllocationOnly *RawAllocationOnlyData `json:"rawAllocationOnly"`
+	// ProportionalAssetResourceCost represents the per-resource costs of the
+	// allocation as a percentage of the per-resource total cost of the
+	// asset on which the allocation was run. It is optionally computed
+	// and appended to an Allocation, and so by default it is nil.
+	ProportionalAssetResourceCosts ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts"`
 }
 
 // RawAllocationOnlyData is information that only belong in "raw" Allocations,
@@ -240,6 +245,43 @@ func (pva *PVAllocation) Equal(that *PVAllocation) bool {
 		util.IsApproximately(pva.Cost, that.Cost)
 }
 
+// ProportionalAssetResourceCost expresses, per resource (CPU/GPU/RAM), the
+// fraction of the underlying asset's cost attributable to one allocation,
+// keyed by the asset's cluster and node via the embedded key.
+type ProportionalAssetResourceCost struct {
+	ProportionalAssetResourceCostKey
+	CPUPercentage float64 `json:"cpuPercentage"`
+	GPUPercentage float64 `json:"gpuPercentage"`
+	RAMPercentage float64 `json:"ramPercentage"`
+}
+
+// Key returns the cluster/node key identifying the asset this
+// ProportionalAssetResourceCost is measured against.
+func (parc ProportionalAssetResourceCost) Key() ProportionalAssetResourceCostKey {
+	return parc.ProportionalAssetResourceCostKey
+}
+
+// ProportionalAssetResourceCostKey identifies the asset (by cluster and
+// node) that a ProportionalAssetResourceCost refers to.
+type ProportionalAssetResourceCostKey struct {
+	Cluster string `json:"cluster"`
+	Node    string `json:"node"`
+}
+
+// ProportionalAssetResourceCosts maps each asset key to its accumulated
+// proportional per-resource cost percentages.
+type ProportionalAssetResourceCosts map[ProportionalAssetResourceCostKey]ProportionalAssetResourceCost
+
+// Insert adds the given ProportionalAssetResourceCost into the set. If an
+// entry already exists under the same key, the per-resource percentages
+// are summed; otherwise the entry is stored as-is.
+func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourceCost) {
+	if curr, ok := parcs[parc.Key()]; ok {
+		parcs[parc.Key()] = ProportionalAssetResourceCost{
+			ProportionalAssetResourceCostKey: parc.Key(),
+			CPUPercentage:                    curr.CPUPercentage + parc.CPUPercentage,
+			GPUPercentage:                    curr.GPUPercentage + parc.GPUPercentage,
+			RAMPercentage:                    curr.RAMPercentage + parc.RAMPercentage,
+		}
+	} else {
+		parcs[parc.Key()] = parc
+	}
+}
+
+// Add merges every entry of that into the receiver via Insert,
+// accumulating percentages for keys present in both sets.
+func (parcs ProportionalAssetResourceCosts) Add(that ProportionalAssetResourceCosts) {
+	for _, parc := range that {
+		parcs.Insert(parc)
+	}
+}
+
 // GetWindow returns the window of the struct
 func (a *Allocation) GetWindow() Window {
 	return a.Window
@@ -714,6 +756,12 @@ func (a *Allocation) add(that *Allocation) {
 	// Preserve string properties that are matching between the two allocations
 	a.Properties = a.Properties.Intersection(that.Properties)
 
+	// If both Allocations have ProportionalAssetResourceCosts, then
+	// add those from the given Allocation into the receiver.
+	if a.ProportionalAssetResourceCosts != nil && that.ProportionalAssetResourceCosts != nil {
+		a.ProportionalAssetResourceCosts.Add(that.ProportionalAssetResourceCosts)
+	}
+
 	// Overwrite regular intersection logic for the controller name property in the
 	// case that the Allocation keys are the same but the controllers are not.
 	if leftKey == rightKey &&
@@ -845,18 +893,19 @@ func NewAllocationSet(start, end time.Time, allocs ...*Allocation) *AllocationSe
 // succeeds, the allocation is marked as a shared resource. ShareIdle is a
 // simple flag for sharing idle resources.
 type AllocationAggregationOptions struct {
-	AllocationTotalsStore AllocationTotalsStore
-	Filter                AllocationFilter
-	IdleByNode            bool
-	LabelConfig           *LabelConfig
-	MergeUnallocated      bool
-	Reconcile             bool
-	ReconcileNetwork      bool
-	ShareFuncs            []AllocationMatchFunc
-	ShareIdle             string
-	ShareSplit            string
-	SharedHourlyCosts     map[string]float64
-	SplitIdle             bool
+	AllocationTotalsStore                 AllocationTotalsStore
+	Filter                                AllocationFilter
+	IdleByNode                            bool
+	IncludeProportionalAssetResourceCosts bool
+	LabelConfig                           *LabelConfig
+	MergeUnallocated                      bool
+	Reconcile                             bool
+	ReconcileNetwork                      bool
+	ShareFuncs                            []AllocationMatchFunc
+	ShareIdle                             string
+	ShareSplit                            string
+	SharedHourlyCosts                     map[string]float64
+	SplitIdle                             bool
 }
 
 // AggregateBy aggregates the Allocations in the given AllocationSet by the given
@@ -869,14 +918,18 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	//     Also, create the aggSet into which the results will be aggregated.
 	//
 	//  2. Compute sharing coefficients for idle and shared resources
-	//     a) if idle allocation is to be shared, compute idle coefficients
-	//     b) if idle allocation is NOT shared, but filters are present, compute
+	//     a) if idle allocation is to be shared, or if proportional asset
+	//        resource costs are to be included, then compute idle coefficients
+	//        (proportional asset resource costs are derived from idle coefficients)
+	//     b) if proportional asset costs are to be included, derive them from
+	//        idle coefficients and add them to the allocations.
+	//     c) if idle allocation is NOT shared, but filters are present, compute
 	//        idle filtration coefficients for the purpose of only returning the
 	//        portion of idle allocation that would have been shared with the
 	//        unfiltered results. (See unit tests 5.a,b,c)
-	//     c) generate shared allocation for them given shared overhead, which
+	//     d) generate shared allocation for them given shared overhead, which
 	//        must happen after (2a) and (2b)
-	//     d) if there are shared resources, compute share coefficients
+	//     e) if there are shared resources, compute share coefficients
 	//
 	//  3. Drop any allocation that fails any of the filters
 	//
@@ -936,7 +989,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 	shouldAggregate := aggregateBy != nil
 	shouldFilter := options.Filter != nil
 	shouldShare := len(options.SharedHourlyCosts) > 0 || len(options.ShareFuncs) > 0
-	if !shouldAggregate && !shouldFilter && !shouldShare && options.ShareIdle == ShareNone {
+	if !shouldAggregate && !shouldFilter && !shouldShare && options.ShareIdle == ShareNone && !options.IncludeProportionalAssetResourceCosts {
 		// There is nothing for AggregateBy to do, so simply return nil
 		return nil
 	}
@@ -1057,7 +1110,30 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// (2b) If idle costs are not to be shared, but there are filters, then we
+	// (2b) If proportional asset resource costs are to be included, derive them
+	// from idle coefficients and add them to the allocations.
+	if options.IncludeProportionalAssetResourceCosts {
+		if idleCoefficients == nil {
+			return fmt.Errorf("cannot include proportional resource costs")
+		}
+
+		for _, alloc := range as.Allocations {
+			// Create an empty set of proportional asset resource costs,
+			// regardless of whether or not we're successful in deriving them.
+			alloc.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
+
+			// Attempt to derive proportional asset resource costs from idle
+			// coefficients, and insert them into the set if successful.
+			parc, err := deriveProportionalAssetResourceCostsFromIdleCoefficients(idleCoefficients, alloc, options)
+			if err != nil {
+				log.Debugf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients for %s: %s", alloc.Name, err)
+				continue
+			}
+			alloc.ProportionalAssetResourceCosts.Insert(parc)
+		}
+	}
+
+	// (2c) If idle costs are not to be shared, but there are filters, then we
 	// need to track the amount of each idle allocation to "filter" in order to
 	// maintain parity with the results when idle is shared. That is, we want
 	// to return only the idle costs that would have been shared with the given
@@ -1089,7 +1165,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// (2c) Convert SharedHourlyCosts to Allocations in the shareSet. This must
+	// (2d) Convert SharedHourlyCosts to Allocations in the shareSet. This must
 	// come after idle coefficients are computed so that allocations generated
 	// by shared overhead do not skew the idle coefficient computation.
 	for name, cost := range options.SharedHourlyCosts {
@@ -1114,7 +1190,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// (2d) Compute share coefficients for shared resources. These are computed
+	// (2e) Compute share coefficients for shared resources. These are computed
 	// after idle coefficients, and are computed for the aggregated allocations
 	// of the main allocation set. See above for details and an example.
 	var shareCoefficients map[string]float64
@@ -1623,6 +1699,37 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 	return coeffs, totals, nil
 }
 
+// deriveProportionalAssetResourceCostsFromIdleCoefficients converts the
+// idle coefficients computed for one allocation (per idle ID, per resource)
+// into a ProportionalAssetResourceCost keyed by the allocation's cluster
+// and node. It returns an error if the allocation's idle ID cannot be
+// determined or no coefficients exist for it.
+func deriveProportionalAssetResourceCostsFromIdleCoefficients(idleCoeffs map[string]map[string]map[string]float64, allocation *Allocation, options *AllocationAggregationOptions) (ProportionalAssetResourceCost, error) {
+	idleId, err := allocation.getIdleId(options)
+	if err != nil {
+		// Wrap the underlying error; it was previously discarded.
+		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to get idle ID for allocation %s: %w", allocation.Name, err)
+	}
+
+	// Look each level up once instead of re-indexing the nested map.
+	coeffsForId, ok := idleCoeffs[idleId]
+	if !ok {
+		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to find idle coeffs for idle ID %s", idleId)
+	}
+
+	coeffsForAlloc, ok := coeffsForId[allocation.Name]
+	if !ok {
+		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to find idle coeffs for allocation %s", allocation.Name)
+	}
+
+	key := ProportionalAssetResourceCostKey{
+		Cluster: allocation.Properties.Cluster,
+		Node:    allocation.Properties.Node,
+	}
+
+	// Missing resource entries yield the zero value (0.0), matching the
+	// original behavior of indexing the map directly.
+	return ProportionalAssetResourceCost{
+		ProportionalAssetResourceCostKey: key,
+		CPUPercentage:                    coeffsForAlloc["cpu"],
+		GPUPercentage:                    coeffsForAlloc["gpu"],
+		RAMPercentage:                    coeffsForAlloc["ram"],
+	}, nil
+}
+
 // getIdleId returns the providerId or cluster of an Allocation depending on the IdleByNode
 // option in the AllocationAggregationOptions and an error if the respective field is missing
 func (a *Allocation) getIdleId(options *AllocationAggregationOptions) (string, error) {