Browse Source

Add ProportionalAssetResourceCosts for PVs, improve parsing of dates from Prometheus results, and fix mocking and test cases for PARCs

Signed-off-by: Niko Kovacevic <nikovacevic@gmail.com>
Niko Kovacevic 2 years ago
parent
commit
d72e80ea88

+ 3 - 3
pkg/costmodel/allocation.go

@@ -647,14 +647,14 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	// a PVC, we get time running there, so this is only inaccurate
 	// for short-lived, unmounted PVs.)
 	pvMap := map[pvKey]*pv{}
-	buildPVMap(resolution, pvMap, resPVCostPerGiBHour, resPVActiveMins)
+	buildPVMap(resolution, pvMap, resPVCostPerGiBHour, resPVActiveMins, window)
 	applyPVBytes(pvMap, resPVBytes)
 
 	// Build out the map of all PVCs with time running, bytes requested,
 	// and connect to the correct PV from pvMap. (If no PV exists, that
 	// is noted, but does not result in any allocation/cost.)
 	pvcMap := map[pvcKey]*pvc{}
-	buildPVCMap(resolution, pvcMap, pvMap, resPVCInfo)
+	buildPVCMap(resolution, pvcMap, pvMap, resPVCInfo, window)
 	applyPVCBytesRequested(pvcMap, resPVCBytesRequested)
 
 	// Build out the relationships of pods to their PVCs. This step
@@ -671,7 +671,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	applyUnmountedPVs(window, podMap, pvMap, pvcMap)
 
 	lbMap := make(map[serviceKey]*lbCost)
-	getLoadBalancerCosts(lbMap, resLBCostPerHr, resLBActiveMins, resolution)
+	getLoadBalancerCosts(lbMap, resLBCostPerHr, resLBActiveMins, resolution, window)
 	applyLoadBalancersToPods(window, podMap, lbMap, allocsByService)
 
 	// Build out a map of Nodes with resource costs, discounts, and node types
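A reviewer's note on the shape of this change: every helper that derives start/end times from Prometheus results now also receives the query window, so parsed timestamps can be clamped to it. A minimal runnable sketch of that clamping pattern, assuming only the kubecost.NewClosedWindow constructor and Window accessors that appear later in this diff (the timestamps are illustrative):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/opencost/opencost/pkg/kubecost"
    )

    func main() {
    	start := time.Date(2020, time.April, 13, 0, 0, 0, 0, time.UTC)
    	window := kubecost.NewClosedWindow(start, start.Add(time.Hour))

    	// A timestamp parsed from a Prometheus result, one minute before
    	// the queried window; clamp it to the window's closed bounds.
    	t := start.Add(-time.Minute)
    	if t.Before(*window.Start()) {
    		t = *window.Start()
    	}
    	if t.After(*window.End()) {
    		t = *window.End()
    	}
    	fmt.Println(t) // 2020-04-13 00:00:00 +0000 UTC
    }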

+ 35 - 100
pkg/costmodel/allocation_helpers.go

@@ -164,7 +164,7 @@ func applyPodResults(window kubecost.Window, resolution time.Duration, podMap ma
 
 		}
 
-		allocStart, allocEnd := calculateStartEndFromIsRunning(res, resolution, window)
+		allocStart, allocEnd := calculateStartAndEnd(res, resolution, window)
 		if allocStart.IsZero() || allocEnd.IsZero() {
 			continue
 		}
@@ -1343,7 +1343,7 @@ func applyServicesToPods(podMap map[podKey]*pod, podLabels map[podKey]map[string
 	}
 }
 
-func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMins []*prom.QueryResult, resolution time.Duration) {
+func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMins []*prom.QueryResult, resolution time.Duration, window kubecost.Window) {
 	for _, res := range resLBActiveMins {
 		serviceKey, err := resultServiceKey(res, env.GetPromClusterLabel(), "namespace", "service_name")
 		if err != nil || len(res.Values) == 0 {
@@ -1351,7 +1351,7 @@ func getLoadBalancerCosts(lbMap map[serviceKey]*lbCost, resLBCost, resLBActiveMi
 		}
 
 		// load balancers have interpolation for costs, we don't need to offset the resolution
-		lbStart, lbEnd := calculateStartAndEnd(res, resolution, false)
+		lbStart, lbEnd := calculateStartAndEnd(res, resolution, window)
 		if lbStart.IsZero() || lbEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pvc %s has no running time", serviceKey)
 		}
@@ -1774,7 +1774,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*nodePricing, nodeKey no
 
 /* PV/PVC Helpers */
 
-func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHour, resPVActiveMins []*prom.QueryResult) {
+func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHour, resPVActiveMins []*prom.QueryResult, window kubecost.Window) {
 	for _, result := range resPVActiveMins {
 		key, err := resultPVKey(result, env.GetPromClusterLabel(), "persistentvolume")
 		if err != nil {
@@ -1782,7 +1782,7 @@ func buildPVMap(resolution time.Duration, pvMap map[pvKey]*pv, resPVCostPerGiBHo
 			continue
 		}
 
-		pvStart, pvEnd := calculateStartAndEnd(result, resolution, true)
+		pvStart, pvEnd := calculateStartAndEnd(result, resolution, window)
 		if pvStart.IsZero() || pvEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pv %s has no running time", key)
 		}
@@ -1836,7 +1836,7 @@ func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*prom.QueryResult) {
 	}
 }
 
-func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvKey]*pv, resPVCInfo []*prom.QueryResult) {
+func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvKey]*pv, resPVCInfo []*prom.QueryResult, window kubecost.Window) {
 	for _, res := range resPVCInfo {
 		cluster, err := res.GetString(env.GetPromClusterLabel())
 		if err != nil {
@@ -1857,7 +1857,7 @@ func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvK
 		pvKey := newPVKey(cluster, volume)
 		pvcKey := newPVCKey(cluster, namespace, name)
 
-		pvcStart, pvcEnd := calculateStartAndEnd(res, resolution, true)
+		pvcStart, pvcEnd := calculateStartAndEnd(res, resolution, window)
 		if pvcStart.IsZero() || pvcEnd.IsZero() {
 			log.Warnf("CostModel.ComputeAllocation: pvc %s has no running time", pvcKey)
 		}
@@ -2192,100 +2192,35 @@ func getUnmountedPodForNamespace(window kubecost.Window, podMap map[podKey]*pod,
 	return thisPod
 }
 
-func calculateStartAndEnd(result *prom.QueryResult, resolution time.Duration, offsetResolution bool) (time.Time, time.Time) {
+func calculateStartAndEnd(result *prom.QueryResult, resolution time.Duration, window kubecost.Window) (time.Time, time.Time) {
+	// Start and end for a range vector are pulled from the timestamps of the
+	// first and final values in the range. There is no "offsetting" required
+	// of the start or the end, as we used to do. If you query for a duration
+	// of time that is divisible by the given resolution, and set the end time
+	// to be precisely the end of the window, Prometheus should give all the
+	// relevant timestamps.
+	//
+	// E.g. avg(kube_pod_container_status_running{}) by (pod, namespace)[1h:1m]
+	// with time=01:00:00 will return, for a pod running the entire time,
+	// 61 timestamps where the first is 00:00:00 and the last is 01:00:00.
 	s := time.Unix(int64(result.Values[0].Timestamp), 0).UTC()
-	if offsetResolution {
-		// subtract resolution from start time to cover full time period
-		s = s.Add(-resolution)
-	}
 	e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0).UTC()
-	return s, e
-}
 
-// calculateStartEndFromIsRunning Calculates the start and end of a prom result when the values of the datum are 0 for not running and 1 for running
-// the coeffs are used to adjust the start and end when the value is not equal to 1 or 0, which means that pod came up or went down in that window.
-func calculateStartEndFromIsRunning(result *prom.QueryResult, resolution time.Duration, window kubecost.Window) (time.Time, time.Time) {
-	// start and end are the timestamps of the first and last
-	// minutes the pod was running, respectively. We subtract one resolution
-	// from start because this point will actually represent the end
-	// of the first minute. We don't subtract from end because it
-	// already represents the end of the last minute.
-	var start, end time.Time
-	startAdjustmentCoeff, endAdjustmentCoeff := 1.0, 1.0
-	for _, datum := range result.Values {
-		t := time.Unix(int64(datum.Timestamp), 0)
-
-		if start.IsZero() && datum.Value > 0 && window.Contains(t) {
-			// Set the start timestamp to the earliest non-zero timestamp
-			start = t
-
-			// Record adjustment coefficient, i.e. the portion of the start
-			// timestamp to "ignore". That is, sometimes the value will be
-			// 0.5, meaning that we should discount the time running by
-			// half of the resolution the timestamp stands for.
-			startAdjustmentCoeff = (1.0 - datum.Value)
-		}
-
-		if datum.Value > 0 && window.Contains(t) {
-			// Set the end timestamp to the latest non-zero timestamp
-			end = t
-
-			// Record adjustment coefficient, i.e. the portion of the end
-			// timestamp to "ignore". (See explanation above for start.)
-			endAdjustmentCoeff = (1.0 - datum.Value)
-		}
-	}
-
-	// Do not attempt to adjust start if it is zero
-	if !start.IsZero() {
-		// Adjust timestamps according to the resolution and the adjustment
-		// coefficients, as described above. That is, count the start timestamp
-		// from the beginning of the resolution, not the end. Then "reduce" the
-		// start and end by the correct amount, in the case that the "running"
-		// value of the first or last timestamp was not a full 1.0.
-		start = start.Add(-resolution)
-		// Note: the *100 and /100 are necessary because Duration is an int, so
-		// 0.5, for instance, will be truncated, resulting in no adjustment.
-		start = start.Add(time.Duration(startAdjustmentCoeff*100) * resolution / time.Duration(100))
-		end = end.Add(-time.Duration(endAdjustmentCoeff*100) * resolution / time.Duration(100))
-
-		// Ensure that the start is always within the window, adjusting
-		// for the occasions where start falls 1m before the query window.
-		// NOTE: window here will always be closed (so no need to nil check
-		// "start").
-		// TODO:CLEANUP revisit query methodology to figure out why this is
-		// happening on occasion
-		if start.Before(*window.Start()) {
-			start = *window.Start()
-		}
-	}
-
-	// do not attempt to adjust end if it is zero
-	if !end.IsZero() {
-		// If there is only one point with a value <= 0.5 that the start and
-		// end timestamps both share, then we will enter this case because at
-		// least half of a resolution will be subtracted from both the start
-		// and the end. If that is the case, then add back half of each side
-		// so that the pod is said to run for half a resolution total.
-		// e.g. For resolution 1m and a value of 0.5 at one timestamp, we'll
-		//      end up with end == start and each coeff == 0.5. In
-		//      that case, add 0.25m to each side, resulting in 0.5m duration.
-		if !end.After(start) {
-			start = start.Add(-time.Duration(50*startAdjustmentCoeff) * resolution / time.Duration(100))
-			end = end.Add(time.Duration(50*endAdjustmentCoeff) * resolution / time.Duration(100))
-		}
-
-		// Ensure that the allocEnf is always within the window, adjusting
-		// for the occasions where end falls 1m after the query window. This
-		// has not ever happened, but is symmetrical with the start check
-		// above.
-		// NOTE: window here will always be closed (so no need to nil check
-		// "end").
-		// TODO:CLEANUP revisit query methodology to figure out why this is
-		// happening on occasion
-		if end.After(*window.End()) {
-			end = *window.End()
-		}
-	}
-	return start, end
+	// The only corner case is a result with a single timestamp. Here we
+	// still need the resolution: to honor the existence of the pod, we
+	// give it one resolution's worth of duration, half on each side of
+	// the given timestamp, then clamp to the window below.
+	if s.Equal(e) {
+		s = s.Add(-1 * resolution / time.Duration(2))
+		e = e.Add(resolution / time.Duration(2))
+	}
+	if s.Before(*window.Start()) {
+		s = *window.Start()
+	}
+	if e.After(*window.End()) {
+		e = *window.End()
+	}
+
+	return s, e
 }
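To make the new semantics concrete: a hedged sketch of the single-timestamp corner case, written as it might appear in a test inside pkg/costmodel (the window and sample mirror the "1 minute resolution, 1 minute window" test case added below):

    windowStart := time.Date(2020, time.April, 13, 0, 0, 0, 0, time.UTC)
    window := kubecost.NewClosedWindow(windowStart, windowStart.Add(time.Hour))

    // A single sample at windowStart+15m with 1m resolution gets half a
    // resolution on each side of its timestamp.
    res := &prom.QueryResult{Values: []*util.Vector{
    	{Timestamp: float64(windowStart.Add(15 * time.Minute).Unix())},
    }}
    s, e := calculateStartAndEnd(res, time.Minute, window)
    // s == windowStart+14m30s, e == windowStart+15m30s; had the sample
    // sat at windowStart itself, s would clamp to *window.Start().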

+ 79 - 7
pkg/costmodel/allocation_helpers_test.go

@@ -272,6 +272,9 @@ func TestBuildPVMap(t *testing.T) {
 						"persistentvolume": "pv1",
 					},
 					Values: []*util.Vector{
+						{
+							Timestamp: startFloat,
+						},
 						{
 							Timestamp: startFloat + (hour * 6),
 						},
@@ -289,6 +292,9 @@ func TestBuildPVMap(t *testing.T) {
 						"persistentvolume": "pv2",
 					},
 					Values: []*util.Vector{
+						{
+							Timestamp: startFloat,
+						},
 						{
 							Timestamp: startFloat + (hour * 6),
 						},
@@ -309,6 +315,9 @@ func TestBuildPVMap(t *testing.T) {
 						"persistentvolume": "pv3",
 					},
 					Values: []*util.Vector{
+						{
+							Timestamp: startFloat + (hour * 6),
+						},
 						{
 							Timestamp: startFloat + (hour * 12),
 						},
@@ -323,6 +332,9 @@ func TestBuildPVMap(t *testing.T) {
 						"persistentvolume": "pv4",
 					},
 					Values: []*util.Vector{
+						{
+							Timestamp: startFloat,
+						},
 						{
 							Timestamp: startFloat + (hour * 6),
 						},
@@ -342,7 +354,7 @@ func TestBuildPVMap(t *testing.T) {
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
 			pvMap := make(map[pvKey]*pv)
-			buildPVMap(testCase.resolution, pvMap, testCase.resultsPVCostPerGiBHour, testCase.resultsActiveMinutes)
+			buildPVMap(testCase.resolution, pvMap, testCase.resultsPVCostPerGiBHour, testCase.resultsActiveMinutes, window)
 			if len(pvMap) != len(testCase.expected) {
 				t.Errorf("pv map does not have the expected length %d : %d", len(pvMap), len(testCase.expected))
 			}
@@ -353,7 +365,7 @@ func TestBuildPVMap(t *testing.T) {
 					t.Errorf("pv map is missing key %s", thisPVKey)
 				}
 				if !actualPV.equal(expectedPV) {
-					t.Errorf("pv does not match with key %s", thisPVKey)
+					t.Errorf("pv does not match with key %s: %s != %s", thisPVKey, kubecost.NewClosedWindow(actualPV.Start, actualPV.End), kubecost.NewClosedWindow(expectedPV.Start, expectedPV.End))
 				}
 			}
 		})
@@ -456,6 +468,9 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			expectedEnd:   windowStart.Add(time.Hour),
 			result: &prom.QueryResult{
 				Values: []*util.Vector{
+					{
+						Timestamp: startFloat,
+					},
 					{
 						Timestamp: startFloat + (minute * 60),
 					},
@@ -468,6 +483,9 @@ func TestCalculateStartAndEnd(t *testing.T) {
 			expectedEnd:   windowStart.Add(time.Hour),
 			result: &prom.QueryResult{
 				Values: []*util.Vector{
+					{
+						Timestamp: startFloat,
+					},
 					{
 						Timestamp: startFloat + (minute * 30),
 					},
@@ -479,8 +497,8 @@ func TestCalculateStartAndEnd(t *testing.T) {
 		},
 		"15 minute resolution, 45 minute window": {
 			resolution:    time.Minute * 15,
-			expectedStart: windowStart.Add(time.Minute * -15),
-			expectedEnd:   windowStart.Add(time.Minute * 30),
+			expectedStart: windowStart,
+			expectedEnd:   windowStart.Add(time.Minute * 45),
 			result: &prom.QueryResult{
 				Values: []*util.Vector{
 					{
@@ -492,6 +510,60 @@ func TestCalculateStartAndEnd(t *testing.T) {
 					{
 						Timestamp: startFloat + (minute * 30),
 					},
+					{
+						Timestamp: startFloat + (minute * 45),
+					},
+				},
+			},
+		},
+		"1 minute resolution, 5 minute window": {
+			resolution:    time.Minute,
+			expectedStart: windowStart.Add(time.Minute * 15),
+			expectedEnd:   windowStart.Add(time.Minute * 20),
+			result: &prom.QueryResult{
+				Values: []*util.Vector{
+					{
+						Timestamp: startFloat + (minute * 15),
+					},
+					{
+						Timestamp: startFloat + (minute * 16),
+					},
+					{
+						Timestamp: startFloat + (minute * 17),
+					},
+					{
+						Timestamp: startFloat + (minute * 18),
+					},
+					{
+						Timestamp: startFloat + (minute * 19),
+					},
+					{
+						Timestamp: startFloat + (minute * 20),
+					},
+				},
+			},
+		},
+		"1 minute resolution, 1 minute window": {
+			resolution:    time.Minute,
+			expectedStart: windowStart.Add(time.Minute * 14).Add(time.Second * 30),
+			expectedEnd:   windowStart.Add(time.Minute * 15).Add(time.Second * 30),
+			result: &prom.QueryResult{
+				Values: []*util.Vector{
+					{
+						Timestamp: startFloat + (minute * 15),
+					},
+				},
+			},
+		},
+		"1 minute resolution, 1 minute window, at window start": {
+			resolution:    time.Minute,
+			expectedStart: windowStart,
+			expectedEnd:   windowStart.Add(time.Second * 30),
+			result: &prom.QueryResult{
+				Values: []*util.Vector{
+					{
+						Timestamp: startFloat,
+					},
 				},
 			},
 		},
@@ -499,12 +571,12 @@ func TestCalculateStartAndEnd(t *testing.T) {
 
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
-			start, end := calculateStartAndEnd(testCase.result, testCase.resolution, true)
+			start, end := calculateStartAndEnd(testCase.result, testCase.resolution, window)
 			if !start.Equal(testCase.expectedStart) {
-				t.Errorf("start to not match expected %v : %v", start, testCase.expectedStart)
+				t.Errorf("start does not match: expected %v; got %v", testCase.expectedStart, start)
 			}
 			if !end.Equal(testCase.expectedEnd) {
-				t.Errorf("end to not match expected %v : %v", end, testCase.expectedEnd)
+				t.Errorf("end does not match: expected %v; got %v", testCase.expectedEnd, end)
 			}
 		})
 	}

+ 5 - 5
pkg/costmodel/cluster.go

@@ -254,7 +254,7 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 		diskMap[key].ClaimNamespace = claimNamespace
 	}
 
-	pvCosts(diskMap, resolution, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo, provider)
+	pvCosts(diskMap, resolution, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo, provider, kubecost.NewClosedWindow(start, end))
 
 	for _, result := range resLocalStorageCost {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
@@ -630,7 +630,7 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 		return nil, requiredCtx.ErrorCollection()
 	}
 
-	activeDataMap := buildActiveDataMap(resActiveMins, resolution)
+	activeDataMap := buildActiveDataMap(resActiveMins, resolution, kubecost.NewClosedWindow(start, end))
 
 	gpuCountMap := buildGPUCountMap(resNodeGPUCount)
 	preemptibleMap := buildPreemptibleMap(resIsSpot)
@@ -1332,7 +1332,7 @@ func ClusterCostsOverTime(cli prometheus.Client, provider models.Provider, start
 	}, nil
 }
 
-func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo []*prom.QueryResult, cp models.Provider) {
+func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo []*prom.QueryResult, cp models.Provider, window kubecost.Window) {
 	for _, result := range resActiveMins {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
@@ -1357,8 +1357,8 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				Breakdown: &ClusterCostsBreakdown{},
 			}
 		}
-		s := time.Unix(int64(result.Values[0].Timestamp), 0)
-		e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0)
+
+		s, e := calculateStartAndEnd(result, resolution, window)
 		mins := e.Sub(s).Minutes()
 
 		diskMap[key].End = e
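Worth spelling out what this buys over the deleted inline arithmetic: for a result with a single sample, the old first/last-timestamp subtraction gave s == e and therefore mins == 0. A short assumed illustration of the new path:

    s, e := calculateStartAndEnd(result, resolution, window)
    mins := e.Sub(s).Minutes()
    // With one sample and resolution == time.Minute, s and e now sit
    // 30s on either side of the timestamp, so mins == 1.0 (clamped to
    // the window at its edges) instead of 0.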

+ 3 - 3
pkg/costmodel/cluster_helpers.go

@@ -6,6 +6,7 @@ import (
 
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/kubecost"
 
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/log"
@@ -527,7 +528,7 @@ type activeData struct {
 	minutes float64
 }
 
-func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Duration) map[NodeIdentifier]activeData {
+func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Duration, window kubecost.Window) map[NodeIdentifier]activeData {
 
 	m := make(map[NodeIdentifier]activeData)
 
@@ -555,8 +556,7 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 			continue
 		}
 
-		s := time.Unix(int64(result.Values[0].Timestamp), 0)
-		e := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0)
+		s, e := calculateStartAndEnd(result, resolution, window)
 		mins := e.Sub(s).Minutes()
 
 		// TODO niko/assets if mins >= threshold, interpolate for missing data?

+ 18 - 11
pkg/costmodel/cluster_helpers_test.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/util"
 
@@ -891,6 +892,12 @@ func TestBuildGPUCostMap(t *testing.T) {
 
 func TestAssetCustompricing(t *testing.T) {
 
+	windowStart := time.Date(2020, time.April, 13, 0, 0, 0, 0, time.UTC)
+	windowEnd := windowStart.Add(time.Hour)
+	window := kubecost.NewClosedWindow(windowStart, windowEnd)
+
+	startTimestamp := float64(windowStart.Unix())
+
 	nodePromResult := []*prom.QueryResult{
 		{
 			Metric: map[string]interface{}{
@@ -901,7 +908,7 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     0.5,
 				},
 			},
@@ -917,7 +924,7 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1.0,
 				},
 			},
@@ -933,7 +940,7 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1073741824.0,
 				},
 			},
@@ -949,11 +956,11 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1.0,
 				},
 				{
-					Timestamp: 3600.0,
+					Timestamp: startTimestamp + (60.0 * 60.0),
 					Value:     1.0,
 				},
 			},
@@ -969,11 +976,11 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1.0,
 				},
 				{
-					Timestamp: 3600.0,
+					Timestamp: startTimestamp + (60.0 * 60.0),
 					Value:     1.0,
 				},
 			},
@@ -989,11 +996,11 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1.0,
 				},
 				{
-					Timestamp: 3600.0,
+					Timestamp: startTimestamp + (60.0 * 60.0),
 					Value:     1.0,
 				},
 			},
@@ -1010,7 +1017,7 @@ func TestAssetCustompricing(t *testing.T) {
 			},
 			Values: []*util.Vector{
 				{
-					Timestamp: 0,
+					Timestamp: startTimestamp,
 					Value:     1.0,
 				},
 			},
@@ -1081,7 +1088,7 @@ func TestAssetCustompricing(t *testing.T) {
 			gpuResult := gpuMap[nodeKey]
 
 			diskMap := map[DiskIdentifier]*Disk{}
-			pvCosts(diskMap, time.Hour, pvMinsPromResult, pvSizePromResult, pvCostPromResult, pvAvgUsagePromResult, pvMaxUsagePromResult, pvInfoPromResult, testProvider)
+			pvCosts(diskMap, time.Hour, pvMinsPromResult, pvSizePromResult, pvCostPromResult, pvAvgUsagePromResult, pvMaxUsagePromResult, pvInfoPromResult, testProvider, window)
 
 			diskResult := diskMap[DiskIdentifier{"cluster1", "pvc1"}].Cost
 

+ 1 - 0
pkg/costmodel/costmodel.go

@@ -2520,6 +2520,7 @@ func (cm *CostModel) QueryAllocation(window kubecost.Window, resolution, step ti
 					parc.CPUTotalCost = totals.CPUCost
 					parc.GPUTotalCost = totals.GPUCost
 					parc.RAMTotalCost = totals.RAMCost
+					parc.PVTotalCost = totals.PersistentVolumeCost
 					if !isAzure {
 						parc.LoadBalancerTotalCost = totals.LoadBalancerCost
 					} else if len(alloc.LoadBalancers) > 0 {

+ 26 - 12
pkg/kubecost/allocation.go

@@ -278,15 +278,15 @@ func (pva *PVAllocation) Equal(that *PVAllocation) bool {
 }
 
 type ProportionalAssetResourceCost struct {
-	Cluster                string  `json:"cluster"`
-	Name                   string  `json:"name,omitempty"`
-	Type                   string  `json:"type,omitempty"`
-	ProviderID             string  `json:"providerID,omitempty"`
-	CPUPercentage          float64 `json:"cpuPercentage"`
-	GPUPercentage          float64 `json:"gpuPercentage"`
-	RAMPercentage          float64 `json:"ramPercentage"`
-	LoadBalancerPercentage float64 `json:"loadBalancerPercentage"`
-
+	Cluster                      string  `json:"cluster"`
+	Name                         string  `json:"name,omitempty"`
+	Type                         string  `json:"type,omitempty"`
+	ProviderID                   string  `json:"providerID,omitempty"`
+	CPUPercentage                float64 `json:"cpuPercentage"`
+	GPUPercentage                float64 `json:"gpuPercentage"`
+	RAMPercentage                float64 `json:"ramPercentage"`
+	LoadBalancerPercentage       float64 `json:"loadBalancerPercentage"`
+	PVPercentage                 float64 `json:"pvPercentage"`
 	NodeResourceCostPercentage   float64 `json:"nodeResourceCostPercentage"`
 	GPUTotalCost                 float64 `json:"-"`
 	GPUProportionalCost          float64 `json:"-"`
@@ -296,6 +296,8 @@ type ProportionalAssetResourceCost struct {
 	RAMProportionalCost          float64 `json:"-"`
 	LoadBalancerProportionalCost float64 `json:"-"`
 	LoadBalancerTotalCost        float64 `json:"-"`
+	PVProportionalCost           float64 `json:"-"`
+	PVTotalCost                  float64 `json:"-"`
 }
 
 func (parc ProportionalAssetResourceCost) Key(insertByName bool) string {
@@ -324,8 +326,8 @@ func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourc
 		parc.Type = ""
 		parc.ProviderID = ""
 	}
-	if curr, ok := parcs[parc.Key(insertByName)]; ok {
 
+	if curr, ok := parcs[parc.Key(insertByName)]; ok {
 		toInsert := ProportionalAssetResourceCost{
 			Name:                         curr.Name,
 			Type:                         curr.Type,
@@ -334,6 +336,7 @@ func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourc
 			CPUProportionalCost:          curr.CPUProportionalCost + parc.CPUProportionalCost,
 			RAMProportionalCost:          curr.RAMProportionalCost + parc.RAMProportionalCost,
 			GPUProportionalCost:          curr.GPUProportionalCost + parc.GPUProportionalCost,
+			PVProportionalCost:           curr.PVProportionalCost + parc.PVProportionalCost,
 			LoadBalancerProportionalCost: curr.LoadBalancerProportionalCost + parc.LoadBalancerProportionalCost,
 		}
 
@@ -346,7 +349,6 @@ func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourc
 }
 
 func ComputePercentages(toInsert *ProportionalAssetResourceCost) {
-	// compute percentages
 	totalNodeCost := toInsert.RAMTotalCost + toInsert.CPUTotalCost + toInsert.GPUTotalCost
 
 	if toInsert.CPUTotalCost > 0 {
@@ -365,6 +367,10 @@ func ComputePercentages(toInsert *ProportionalAssetResourceCost) {
 		toInsert.RAMPercentage = toInsert.RAMProportionalCost / toInsert.RAMTotalCost
 	}
 
+	if toInsert.PVTotalCost > 0 {
+		toInsert.PVPercentage = toInsert.PVProportionalCost / toInsert.PVTotalCost
+	}
+
 	ramFraction := toInsert.RAMTotalCost / totalNodeCost
 	if ramFraction != ramFraction || ramFraction < 0 {
 		ramFraction = 0
@@ -385,7 +391,6 @@ func ComputePercentages(toInsert *ProportionalAssetResourceCost) {
 }
 
 func (parcs ProportionalAssetResourceCosts) Add(that ProportionalAssetResourceCosts) {
-
 	for _, parc := range that {
 		// if name field is empty, we know this is a cluster level PARC aggregation
 		insertByName := true
@@ -2115,6 +2120,15 @@ func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions,
 			}
 		}
 
+		for name, pvAlloc := range alloc.PVs {
+			// insert a separate PARC for each PV attached
+			alloc.ProportionalAssetResourceCosts.Insert(ProportionalAssetResourceCost{
+				Cluster:            name.Cluster,
+				Name:               name.Name,
+				Type:               "PV",
+				PVProportionalCost: pvAlloc.Cost,
+			}, options.IdleByNode)
+		}
 	}
 
 	return nil
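A worked example of the math these two changes combine into (all values assumed): the per-PV PARC inserted above carries the PV's cost as PVProportionalCost, and ComputePercentages divides it by the PVTotalCost populated from asset totals in costmodel.go:

    parc := ProportionalAssetResourceCost{
    	Cluster:            "cluster1",
    	Name:               "pv-example", // hypothetical PV name
    	Type:               "PV",
    	PVProportionalCost: 6.0,
    	PVTotalCost:        24.0,
    }
    ComputePercentages(&parc)
    // parc.PVPercentage == 6.0 / 24.0 == 0.25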

+ 223 - 7
pkg/kubecost/allocation_test.go

@@ -551,8 +551,9 @@ func assertParcResults(t *testing.T, as *AllocationSet, msg string, exps map[str
 			actualParc.CPUPercentage = roundFloat(actualParc.CPUPercentage)
 			actualParc.RAMPercentage = roundFloat(actualParc.RAMPercentage)
 			actualParc.GPUPercentage = roundFloat(actualParc.GPUPercentage)
+			actualParc.PVPercentage = roundFloat(actualParc.PVPercentage)
 			if !reflect.DeepEqual(expectedParcs[key], actualParc) {
-				t.Fatalf("actual PARC %v did not match expected PARC %v", actualParc, expectedParcs[key])
+				t.Fatalf("actual PARC %+v did not match expected PARC %+v", actualParc, expectedParcs[key])
 			}
 		}
 
@@ -759,9 +760,11 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 
 		// 1a AggregationProperties=(Cluster)
 		"1a": {
-			start:      start,
-			aggBy:      []string{AllocationClusterProp},
-			aggOpts:    nil,
+			start: start,
+			aggBy: []string{AllocationClusterProp},
+			aggOpts: &AllocationAggregationOptions{
+				IncludeProportionalAssetResourceCosts: true,
+			},
 			numResults: numClusters + numIdle,
 			totalCost:  activeTotalCost + idleTotalCost,
 			results: map[string]float64{
@@ -772,6 +775,32 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowStart: startYesterday,
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
+			expectedParcResults: map[string]ProportionalAssetResourceCosts{
+				"cluster1": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 6.0,
+						CPUProportionalCost: 6.0,
+						RAMProportionalCost: 16.0,
+						PVProportionalCost:  6.0,
+					},
+				},
+				"cluster2": {
+					"cluster2": ProportionalAssetResourceCost{
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 6,
+						CPUProportionalCost: 6,
+						RAMProportionalCost: 6,
+						PVProportionalCost:  6,
+					},
+				},
+			},
 		},
 		// 1b AggregationProperties=(Namespace)
 		"1b": {
@@ -792,9 +821,11 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 		},
 		// 1c AggregationProperties=(Pod)
 		"1c": {
-			start:      start,
-			aggBy:      []string{AllocationPodProp},
-			aggOpts:    nil,
+			start: start,
+			aggBy: []string{AllocationPodProp},
+			aggOpts: &AllocationAggregationOptions{
+				IncludeProportionalAssetResourceCosts: true,
+			},
 			numResults: numPods + numIdle,
 			totalCost:  activeTotalCost + idleTotalCost,
 			results: map[string]float64{
@@ -812,6 +843,116 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowStart: startYesterday,
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
+			expectedParcResults: map[string]ProportionalAssetResourceCosts{
+				"pod1": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 11.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-abc": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 1.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-def": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 1.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-ghi": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 2.0,
+						CPUProportionalCost: 2.0,
+						RAMProportionalCost: 2.0,
+						PVProportionalCost:  2.0,
+					},
+				},
+				"pod-jkl": {
+					"cluster1": ProportionalAssetResourceCost{
+						Cluster:             "cluster1",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 1.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-mno": {
+					"cluster2": ProportionalAssetResourceCost{
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 2.0,
+						CPUProportionalCost: 2.0,
+						RAMProportionalCost: 2.0,
+						PVProportionalCost:  2.0,
+					},
+				},
+				"pod-pqr": {
+					"cluster2": ProportionalAssetResourceCost{
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 1.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-stu": {
+					"cluster2": ProportionalAssetResourceCost{
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 1.0,
+						CPUProportionalCost: 1.0,
+						RAMProportionalCost: 1.0,
+						PVProportionalCost:  1.0,
+					},
+				},
+				"pod-vwx": {
+					"cluster2": ProportionalAssetResourceCost{
+						Cluster:             "cluster2",
+						Name:                "",
+						Type:                "",
+						ProviderID:          "",
+						GPUProportionalCost: 2.0,
+						CPUProportionalCost: 2.0,
+						RAMProportionalCost: 2.0,
+						PVProportionalCost:  2.0,
+					},
+				},
+			},
 		},
 		// 1d AggregationProperties=(Container)
 		"1d": {
@@ -1121,6 +1262,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						GPUProportionalCost: 3,
 						CPUProportionalCost: 3,
 						RAMProportionalCost: 13,
+						PVProportionalCost:  3,
 					},
 				},
 				"namespace2": {
@@ -1132,6 +1274,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						GPUProportionalCost: 3,
 						CPUProportionalCost: 3,
 						RAMProportionalCost: 3,
+						PVProportionalCost:  3,
 					},
 					"cluster2": ProportionalAssetResourceCost{
 						Cluster:             "cluster2",
@@ -1141,6 +1284,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						GPUProportionalCost: 3,
 						CPUProportionalCost: 3,
 						RAMProportionalCost: 3,
+						PVProportionalCost:  3,
 					},
 				},
 			},
@@ -1565,6 +1709,24 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						CPUProportionalCost: 3,
 						RAMProportionalCost: 3,
 					},
+					"cluster1,pv-a1111": {
+						Cluster:            "cluster1",
+						Name:               "pv-a1111",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster1,pv-a11abc2": {
+						Cluster:            "cluster1",
+						Name:               "pv-a11abc2",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster1,pv-a11def3": {
+						Cluster:            "cluster1",
+						Name:               "pv-a11def3",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
 				},
 				"namespace2": {
 					"cluster1,c1nodes": ProportionalAssetResourceCost{
@@ -1594,6 +1756,42 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						CPUProportionalCost: 1,
 						RAMProportionalCost: 1,
 					},
+					"cluster1,pv-a12ghi4": {
+						Cluster:            "cluster1",
+						Name:               "pv-a12ghi4",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster1,pv-a12ghi5": {
+						Cluster:            "cluster1",
+						Name:               "pv-a12ghi5",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster1,pv-a12jkl6": {
+						Cluster:            "cluster1",
+						Name:               "pv-a12jkl6",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster2,pv-a22mno4": {
+						Cluster:            "cluster2",
+						Name:               "pv-a22mno4",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster2,pv-a22mno5": {
+						Cluster:            "cluster2",
+						Name:               "pv-a22mno5",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster2,pv-a22pqr6": {
+						Cluster:            "cluster2",
+						Name:               "pv-a22pqr6",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
 				},
 				"namespace3": {
 					"cluster2,node3": ProportionalAssetResourceCost{
@@ -1614,6 +1812,24 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						CPUProportionalCost: 1,
 						RAMProportionalCost: 1,
 					},
+					"cluster2,pv-a23stu7": {
+						Cluster:            "cluster2",
+						Name:               "pv-a23stu7",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster2,pv-a23vwx8": {
+						Cluster:            "cluster2",
+						Name:               "pv-a23vwx8",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
+					"cluster2,pv-a23vwx9": {
+						Cluster:            "cluster2",
+						Name:               "pv-a23vwx9",
+						Type:               "PV",
+						PVProportionalCost: 1,
+					},
 				},
 			},
 		},

+ 72 - 0
pkg/kubecost/mock.go

@@ -163,6 +163,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		Node:       "c1nodes",
 	})
 	a1111.RAMCost = 11.00
+	a1111.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a1111"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a11abc2 := NewMockUnitAllocation("cluster1/namespace1/pod-abc/container2", start, day, &AllocationProperties{
 		Cluster:    "cluster1",
@@ -172,6 +178,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "c1nodes",
 		Node:       "c1nodes",
 	})
+	a11abc2.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a11abc2"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a11def3 := NewMockUnitAllocation("cluster1/namespace1/pod-def/container3", start, day, &AllocationProperties{
 		Cluster:    "cluster1",
@@ -181,6 +193,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "c1nodes",
 		Node:       "c1nodes",
 	})
+	a11def3.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a11def3"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a12ghi4 := NewMockUnitAllocation("cluster1/namespace2/pod-ghi/container4", start, day, &AllocationProperties{
 		Cluster:    "cluster1",
@@ -190,6 +208,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "c1nodes",
 		Node:       "c1nodes",
 	})
+	a12ghi4.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a12ghi4"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a12ghi5 := NewMockUnitAllocation("cluster1/namespace2/pod-ghi/container5", start, day, &AllocationProperties{
 		Cluster:    "cluster1",
@@ -199,6 +223,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "c1nodes",
 		Node:       "c1nodes",
 	})
+	a12ghi5.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a12ghi5"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a12jkl6 := NewMockUnitAllocation("cluster1/namespace2/pod-jkl/container6", start, day, &AllocationProperties{
 		Cluster:    "cluster1",
@@ -208,6 +238,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "c1nodes",
 		Node:       "c1nodes",
 	})
+	a12jkl6.PVs = PVAllocations{
+		PVKey{Cluster: "cluster1", Name: "pv-a12jkl6"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a22mno4 := NewMockUnitAllocation("cluster2/namespace2/pod-mno/container4", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -217,6 +253,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node1",
 		Node:       "node1",
 	})
+	a22mno4.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a22mno4"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a22mno5 := NewMockUnitAllocation("cluster2/namespace2/pod-mno/container5", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -226,6 +268,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node1",
 		Node:       "node1",
 	})
+	a22mno5.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a22mno5"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a22pqr6 := NewMockUnitAllocation("cluster2/namespace2/pod-pqr/container6", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -235,6 +283,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node2",
 		Node:       "node2",
 	})
+	a22pqr6.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a22pqr6"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a23stu7 := NewMockUnitAllocation("cluster2/namespace3/pod-stu/container7", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -244,6 +298,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node2",
 		Node:       "node2",
 	})
+	a23stu7.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a23stu7"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a23vwx8 := NewMockUnitAllocation("cluster2/namespace3/pod-vwx/container8", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -253,6 +313,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node3",
 		Node:       "node3",
 	})
+	a23vwx8.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a23vwx8"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	a23vwx9 := NewMockUnitAllocation("cluster2/namespace3/pod-vwx/container9", start, day, &AllocationProperties{
 		Cluster:    "cluster2",
@@ -262,6 +328,12 @@ func GenerateMockAllocationSet(start time.Time) *AllocationSet {
 		ProviderID: "node3",
 		Node:       "node3",
 	})
+	a23vwx9.PVs = PVAllocations{
+		PVKey{Cluster: "cluster2", Name: "pv-a23vwx9"}: {
+			ByteHours: 1,
+			Cost:      1,
+		},
+	}
 
 	// Controllers
 
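These mock PVs are exactly what the new PARC expectations in allocation_test.go sum over; a quick sanity check of the arithmetic under the mock's one-PV-per-container, Cost-1 convention:

    nsPVCost := 0.0
    for _, alloc := range []*Allocation{a1111, a11abc2, a11def3} {
    	for _, pvAlloc := range alloc.PVs {
    		nsPVCost += pvAlloc.Cost
    	}
    }
    // nsPVCost == 3.0: namespace1's three containers each carry one PV
    // at Cost 1, matching the expected PVProportionalCost of 3.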

+ 2 - 3
pkg/kubecost/totals.go

@@ -478,10 +478,9 @@ func ComputeAssetTotals(as *AssetSet, byAsset bool) map[string]*AssetTotals {
 			arts[key].Count++
 			arts[key].AttachedVolumeCost += disk.Cost
 			arts[key].AttachedVolumeCostAdjustment += disk.Adjustment
-		} else if !byAsset {
+		} else {
 			// Here, we're looking at a PersistentVolume because we're not
-			// looking at an AttachedVolume. Only record PersistentVolume data
-			// at the cluster level (i.e. prop == AssetClusterProp).
+			// looking at an AttachedVolume.
 			arts[key].Count++
 			arts[key].PersistentVolumeCost += disk.Cost
 			arts[key].PersistentVolumeCostAdjustment += disk.Adjustment
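For context on why costmodel.go can now read a non-zero totals.PersistentVolumeCost at any aggregation level: with the !byAsset guard gone, PV costs are recorded in both modes. A condensed, assumed simplification of the surrounding branch:

    if isAttachedVolume { // hypothetical condition standing in for the real check
    	arts[key].AttachedVolumeCost += disk.Cost
    } else {
    	// PersistentVolume: recorded whether aggregating by asset or by
    	// cluster, so PVTotalCost is populated for the PV PARCs above.
    	arts[key].PersistentVolumeCost += disk.Cost
    }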