Pārlūkot izejas kodu

Replace local storage cost queries with equivalent basic math (#3755)

Signed-off-by: Matt Bolt <mbolt35@gmail.com>
Co-authored-by: Warwick <warwick.peatey@ibm.com>
Co-authored-by: Matt Bolt <mbolt35@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Niko Kovacevic 2 nedēļas atpakaļ
vecāks
revīzija
8c823a0871

+ 0 - 2
core/pkg/source/datasource.go

@@ -16,8 +16,6 @@ type MetricsQuerier interface {
 
 	// Local Cluster Disks
 	QueryLocalStorageActiveMinutes(start, end time.Time) *Future[LocalStorageActiveMinutesResult]
-	QueryLocalStorageCost(start, end time.Time) *Future[LocalStorageCostResult]
-	QueryLocalStorageUsedCost(start, end time.Time) *Future[LocalStorageUsedCostResult]
 	QueryLocalStorageUsedAvg(start, end time.Time) *Future[LocalStorageUsedAvgResult]
 	QueryLocalStorageUsedMax(start, end time.Time) *Future[LocalStorageUsedMaxResult]
 	QueryLocalStorageBytes(start, end time.Time) *Future[LocalStorageBytesResult]

+ 0 - 2
modules/collector-source/pkg/collector/collector.go

@@ -235,8 +235,6 @@ func NewPVActiveMinutesMetricCollector() *metric.MetricCollector {
 //	) by (instance, device, cluster_id)[%s:%dm]
 //
 // ) / 1024 / 1024 / 1024 * %f * %f`
-// NewLocalStorageUsedActiveMinutesMetricCollector does not have an associated query end point but is used in the results
-// of QueryLocalStorageUsedCost
 func NewLocalStorageUsedActiveMinutesMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(
 		metric.LocalStorageUsedActiveMinutesID,

+ 0 - 96
modules/collector-source/pkg/collector/metricsquerier.go

@@ -74,102 +74,6 @@ func (c *collectorMetricsQuerier) QueryLocalStorageActiveMinutes(start, end time
 	return queryCollector(c, start, end, metric.LocalStorageActiveMinutesID, source.DecodeLocalStorageActiveMinutesResult)
 }
 
-func (c *collectorMetricsQuerier) QueryLocalStorageCost(start, end time.Time) *source.Future[source.LocalStorageCostResult] {
-	queryResults := source.NewQueryResults("LocalStorageCost")
-	collector := c.collectorProvider.GetStore(start, end)
-	if collector != nil {
-		minutesResults, err := collector.Query(metric.LocalStorageActiveMinutesID)
-		if err != nil {
-			queryResults.Error = err
-		}
-		minutesByNode := map[string]float64{}
-		for _, result := range minutesResults {
-			node := result.MetricLabels[source.NodeLabel]
-			if node == "" || len(result.Values) == 0 {
-				continue
-			}
-			nodeStart := result.Values[0].Timestamp
-			nodeEnd := result.Values[len(result.Values)-1].Timestamp
-			if nodeStart == nil || nodeEnd == nil {
-				continue
-			}
-			minutesByNode[node] = nodeEnd.Sub(*nodeStart).Minutes()
-
-		}
-		bytesResults, err := collector.Query(metric.LocalStorageBytesID)
-		if err != nil {
-			queryResults.Error = err
-		}
-		for _, result := range bytesResults {
-			instance := result.MetricLabels[source.InstanceLabel]
-			if instance == "" || len(result.Values) == 0 {
-				continue
-			}
-			mintues, ok := minutesByNode[instance]
-			if !ok {
-				continue
-			}
-			queryResult := result.ToQueryResult()
-			bytes := queryResult.Values[0].Value
-			GiBs := bytes / GiB
-			hours := mintues / 60
-			queryResult.Values[0].Value = GiBs * hours * LocalStorageCostPerGiBHr
-			queryResults.Results = append(queryResults.Results, queryResult)
-		}
-	}
-	ch := make(source.QueryResultsChan, 1)
-	ch <- queryResults
-	return source.NewFuture(source.DecodeLocalStorageCostResult, ch)
-}
-
-func (c *collectorMetricsQuerier) QueryLocalStorageUsedCost(start, end time.Time) *source.Future[source.LocalStorageUsedCostResult] {
-	queryResults := source.NewQueryResults("LocalStorageUsedCost")
-	collector := c.collectorProvider.GetStore(start, end)
-	if collector != nil {
-		minutesResults, err := collector.Query(metric.LocalStorageUsedActiveMinutesID)
-		if err != nil {
-			queryResults.Error = err
-		}
-		minutesByNode := map[string]float64{}
-		for _, result := range minutesResults {
-			node := result.MetricLabels[source.InstanceLabel]
-			if node == "" || len(result.Values) == 0 {
-				continue
-			}
-			nodeStart := result.Values[0].Timestamp
-			nodeEnd := result.Values[len(result.Values)-1].Timestamp
-			if nodeStart == nil || nodeEnd == nil {
-				continue
-			}
-			minutesByNode[node] = nodeEnd.Sub(*nodeStart).Minutes()
-
-		}
-		bytesResults, err := collector.Query(metric.LocalStorageUsedAverageID)
-		if err != nil {
-			queryResults.Error = err
-		}
-		for _, result := range bytesResults {
-			instance := result.MetricLabels[source.InstanceLabel]
-			if instance == "" || len(result.Values) == 0 {
-				continue
-			}
-			mintues, ok := minutesByNode[instance]
-			if !ok {
-				continue
-			}
-			queryResult := result.ToQueryResult()
-			bytes := queryResult.Values[0].Value
-			GiBs := bytes / GiB
-			hours := mintues / 60
-			queryResult.Values[0].Value = GiBs * hours * LocalStorageCostPerGiBHr
-			queryResults.Results = append(queryResults.Results, queryResult)
-		}
-	}
-	ch := make(source.QueryResultsChan, 1)
-	ch <- queryResults
-	return source.NewFuture(source.DecodeLocalStorageUsedCostResult, ch)
-}
-
 func (c *collectorMetricsQuerier) QueryLocalStorageUsedAvg(start, end time.Time) *source.Future[source.LocalStorageUsedAvgResult] {
 	return queryCollector(c, start, end, metric.LocalStorageUsedAverageID, source.DecodeLocalStorageUsedAvgResult)
 }

+ 0 - 68
modules/collector-source/pkg/collector/metricsquerier_test.go

@@ -182,74 +182,6 @@ func GetMockCollectorProvider() StoreProvider {
 	}
 }
 
-func TestCollectorMetricsQuerier_QueryLocalStorageCost(t *testing.T) {
-	start1, _ := time.Parse(time.RFC3339, Start1Str)
-	end1, _ := time.Parse(time.RFC3339, End1Str)
-
-	c := collectorMetricsQuerier{
-		collectorProvider: GetMockCollectorProvider(),
-	}
-	resCh := c.QueryLocalStorageCost(start1, end1)
-	res, err := resCh.Await()
-	if err != nil {
-		t.Errorf("unexpected error: %v", err.Error())
-	}
-	expected := []*source.LocalStorageCostResult{
-		{
-			Cluster:  "",
-			Instance: "node1",
-			Device:   "local",
-			Data: []*util.Vector{
-				{
-					Value: LocalStorageCostPerGiBHr * 2,
-				},
-			},
-		},
-	}
-	if len(res) != len(expected) {
-		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
-	}
-	for i, got := range res {
-		if !reflect.DeepEqual(got, expected[i]) {
-			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
-		}
-	}
-}
-
-func TestCollectorMetricsQuerier_QueryLocalStorageUsedCost(t *testing.T) {
-	start1, _ := time.Parse(time.RFC3339, Start1Str)
-	end1, _ := time.Parse(time.RFC3339, End1Str)
-
-	c := collectorMetricsQuerier{
-		collectorProvider: GetMockCollectorProvider(),
-	}
-	resCh := c.QueryLocalStorageUsedCost(start1, end1)
-	res, err := resCh.Await()
-	if err != nil {
-		t.Errorf("unexpected error: %v", err.Error())
-	}
-	expected := []*source.LocalStorageUsedCostResult{
-		{
-			Cluster:  "",
-			Instance: "node1",
-			Device:   "local",
-			Data: []*util.Vector{
-				{
-					Value: LocalStorageCostPerGiBHr,
-				},
-			},
-		},
-	}
-	if len(res) != len(expected) {
-		t.Errorf("length of result was not as expected: got = %d, want %d", len(res), len(expected))
-	}
-	for i, got := range res {
-		if !reflect.DeepEqual(got, expected[i]) {
-			t.Errorf("result at index %d did not match: got = %v, want %v", i, got, expected[i])
-		}
-	}
-}
-
 func TestCollectorMetricsQuerier_QueryNodeActiveMinutes(t *testing.T) {
 	start1, _ := time.Parse(time.RFC3339, Start1Str)
 	end1, _ := time.Parse(time.RFC3339, End1Str)

+ 0 - 50
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -129,56 +129,6 @@ func (pds *PrometheusMetricsQuerier) QueryPVActiveMinutes(start, end time.Time)
 	return source.NewFuture(source.DecodePVActiveMinutesResult, ctx.QueryAtTime(queryPVActiveMins, end))
 }
 
-func (pds *PrometheusMetricsQuerier) QueryLocalStorageCost(start, end time.Time) *source.Future[source.LocalStorageCostResult] {
-	const queryName = "QueryLocalStorageCost"
-	const localStorageCostQuery = `sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, uid, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
-
-	cfg := pds.promConfig
-	minsPerResolution := cfg.DataResolutionMinutes
-
-	durStr := pds.durationStringFor(start, end, minsPerResolution, false)
-	if durStr == "" {
-		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
-	}
-
-	// hourlyToCumulative is a scaling factor that, when multiplied by an
-	// hourly value, converts it to a cumulative value; i.e. [$/hr] *
-	// [min/res]*[hr/min] = [$/res]
-	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
-	costPerGBHr := 0.04 / 730.0
-
-	queryLocalStorageCost := fmt.Sprintf(localStorageCostQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryLocalStorageCost)
-
-	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
-	return source.NewFuture(source.DecodeLocalStorageCostResult, ctx.QueryAtTime(queryLocalStorageCost, end))
-}
-
-func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedCost(start, end time.Time) *source.Future[source.LocalStorageUsedCostResult] {
-	const queryName = "QueryLocalStorageUsedCost"
-	const localStorageUsedCostQuery = `sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, uid, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`
-
-	cfg := pds.promConfig
-	minsPerResolution := cfg.DataResolutionMinutes
-
-	durStr := pds.durationStringFor(start, end, minsPerResolution, false)
-	if durStr == "" {
-		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
-	}
-
-	// hourlyToCumulative is a scaling factor that, when multiplied by an
-	// hourly value, converts it to a cumulative value; i.e. [$/hr] *
-	// [min/res]*[hr/min] = [$/res]
-	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
-	costPerGBHr := 0.04 / 730.0
-
-	queryLocalStorageUsedCost := fmt.Sprintf(localStorageUsedCostQuery, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryLocalStorageUsedCost)
-
-	ctx := pds.promContexts.NewNamedContext(ClusterContextName)
-	return source.NewFuture(source.DecodeLocalStorageUsedCostResult, ctx.QueryAtTime(queryLocalStorageUsedCost, end))
-}
-
 func (pds *PrometheusMetricsQuerier) QueryLocalStorageUsedAvg(start, end time.Time) *source.Future[source.LocalStorageUsedAvgResult] {
 	const queryName = "QueryLocalStorageUsedAvg"
 	const localStorageUsedAvgQuery = `avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, uid, %s, job)) by (instance, device, uid, %s)`

+ 0 - 2
modules/prometheus-source/pkg/prom/metricsquerier_test.go

@@ -100,8 +100,6 @@ func TestQueryLogs(t *testing.T) {
 		"QueryPVUsedAverage":                            func(s, e time.Time) { querier.QueryPVUsedAverage(s, e) },
 		"QueryPVUsedMax":                                func(s, e time.Time) { querier.QueryPVUsedMax(s, e) },
 		"QueryLocalStorageActiveMinutes":                func(s, e time.Time) { querier.QueryLocalStorageActiveMinutes(s, e) },
-		"QueryLocalStorageCost":                         func(s, e time.Time) { querier.QueryLocalStorageCost(s, e) },
-		"QueryLocalStorageUsedCost":                     func(s, e time.Time) { querier.QueryLocalStorageUsedCost(s, e) },
 		"QueryLocalStorageUsedAvg":                      func(s, e time.Time) { querier.QueryLocalStorageUsedAvg(s, e) },
 		"QueryLocalStorageUsedMax":                      func(s, e time.Time) { querier.QueryLocalStorageUsedMax(s, e) },
 		"QueryLocalStorageBytes":                        func(s, e time.Time) { querier.QueryLocalStorageBytes(s, e) },

+ 47 - 32
pkg/costmodel/assets.go

@@ -8,6 +8,35 @@ import (
 	"github.com/opencost/opencost/core/pkg/opencost"
 )
 
+// clampTimeToRange does not permit timestamps to exceed a given start, end
+// range, inclusive of start and end times. For example:
+//
+// If time is within (start, end) inclusive, return that time:
+//
+// >      S----T-------------E      => T
+//
+// If time is before start, return start:
+//
+// >   T  S------------------E      => S
+//
+// If time is after end, return end:
+//
+// >      S------------------E   T  => E
+//
+// Note: if this function encounters a "zero" time (either the time.Time zero value or Unix
+// timestamp 0) the time returned will be the given start time.
+func clampTimeToRange(t time.Time, start, end time.Time) time.Time {
+	if t.Before(start) {
+		return start
+	}
+
+	if t.After(end) {
+		return end
+	}
+
+	return t
+}
+
 func (cm *CostModel) ComputeAssets(start, end time.Time) (*opencost.AssetSet, error) {
 	assetSet := opencost.NewAssetSet(start, end)
 
@@ -32,17 +61,12 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*opencost.AssetSet, er
 	}
 
 	for _, d := range diskMap {
-		s := d.Start
-		if s.Before(start) || s.After(end) {
-			log.Debugf("CostModel.ComputeAssets: disk '%s' start outside window: %s not in [%s, %s]", d.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			s = start
-		}
-
-		e := d.End
-		if e.Before(start) || e.After(end) {
-			log.Debugf("CostModel.ComputeAssets: disk '%s' end outside window: %s not in [%s, %s]", d.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			e = end
-		}
+		// Clamp the start and end fields to the start and end of the window.
+		// In the case that start and end are missing (e.g. due to the "active
+		// minutes" metric being absent), both times will be set to the start
+		// of the window -- representing zero "runtime" within the window.
+		s := clampTimeToRange(d.Start, start, end)
+		e := clampTimeToRange(d.End, start, end)
 
 		hours := e.Sub(s).Hours()
 
@@ -76,17 +100,12 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*opencost.AssetSet, er
 	}
 
 	for _, lb := range lbMap {
-		s := lb.Start
-		if s.Before(start) || s.After(end) {
-			log.Debugf("CostModel.ComputeAssets: load balancer '%s' start outside window: %s not in [%s, %s]", lb.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			s = start
-		}
-
-		e := lb.End
-		if e.Before(start) || e.After(end) {
-			log.Debugf("CostModel.ComputeAssets: load balancer '%s' end outside window: %s not in [%s, %s]", lb.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			e = end
-		}
+		// Clamp the start and end fields to the start and end of the window.
+		// In the case that start and end are missing (e.g. due to the "active
+		// minutes" metric being absent), both times will be set to the start
+		// of the window -- representing zero "runtime" within the window.
+		s := clampTimeToRange(lb.Start, start, end)
+		e := clampTimeToRange(lb.End, start, end)
 
 		loadBalancer := opencost.NewLoadBalancer(lb.Name, lb.Cluster, lb.ProviderID, s, e, opencost.NewWindow(&start, &end), lb.Private, lb.Ip)
 		cm.PropertiesFromCluster(loadBalancer.Properties)
@@ -110,17 +129,13 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*opencost.AssetSet, er
 				continue
 			}
 		}
-		s := n.Start
-		if s.Before(start) || s.After(end) {
-			log.Debugf("CostModel.ComputeAssets: node '%s' start outside window: %s not in [%s, %s]", n.Name, s.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			s = start
-		}
 
-		e := n.End
-		if e.Before(start) || e.After(end) {
-			log.Debugf("CostModel.ComputeAssets: node '%s' end outside window: %s not in [%s, %s]", n.Name, e.Format("2006-01-02T15:04:05"), start.Format("2006-01-02T15:04:05"), end.Format("2006-01-02T15:04:05"))
-			e = end
-		}
+		// Clamp the start and end fields to the start and end of the window.
+		// In the case that start and end are missing (e.g. due to the "active
+		// minutes" metric being absent), both times will be set to the start
+		// of the window -- representing zero "runtime" within the window.
+		s := clampTimeToRange(n.Start, start, end)
+		e := clampTimeToRange(n.End, start, end)
 
 		hours := e.Sub(s).Hours()
 

+ 16 - 63
pkg/costmodel/cluster.go

@@ -19,6 +19,8 @@ import (
 
 const MAX_LOCAL_STORAGE_SIZE = 1024 * 1024 * 1024 * 1024
 
+const localStoragePricePerGBHr = 0.04 / 730.0
+
 // When ASSET_INCLUDE_LOCAL_DISK_COST is set to false, local storage
 // provisioned by sig-storage-local-static-provisioner is excluded
 // by checking if the volume is prefixed by "local-pv-".
@@ -124,23 +126,17 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/RootDeviceStorage.html
 	// https://learn.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview#temporary-disk
 	// https://cloud.google.com/compute/docs/disks/local-ssd
-	resLocalStorageCost := []*source.LocalStorageCostResult{}
-	resLocalStorageUsedCost := []*source.LocalStorageUsedCostResult{}
 	resLocalStorageUsedAvg := []*source.LocalStorageUsedAvgResult{}
 	resLocalStorageUsedMax := []*source.LocalStorageUsedMaxResult{}
 	resLocalStorageBytes := []*source.LocalStorageBytesResult{}
 	resLocalActiveMins := []*source.LocalStorageActiveMinutesResult{}
 
 	if env.IsAssetIncludeLocalDiskCost() {
-		resChLocalStorageCost := source.WithGroup(grp, mq.QueryLocalStorageCost(start, end))
-		resChLocalStorageUsedCost := source.WithGroup(grp, mq.QueryLocalStorageUsedCost(start, end))
 		resChLocalStoreageUsedAvg := source.WithGroup(grp, mq.QueryLocalStorageUsedAvg(start, end))
 		resChLocalStoreageUsedMax := source.WithGroup(grp, mq.QueryLocalStorageUsedMax(start, end))
 		resChLocalStorageBytes := source.WithGroup(grp, mq.QueryLocalStorageBytes(start, end))
 		resChLocalActiveMins := source.WithGroup(grp, mq.QueryLocalStorageActiveMinutes(start, end))
 
-		resLocalStorageCost, _ = resChLocalStorageCost.Await()
-		resLocalStorageUsedCost, _ = resChLocalStorageUsedCost.Await()
 		resLocalStorageUsedAvg, _ = resChLocalStoreageUsedAvg.Await()
 		resLocalStorageUsedMax, _ = resChLocalStoreageUsedMax.Await()
 		resLocalStorageBytes, _ = resChLocalStorageBytes.Await()
@@ -206,61 +202,6 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 		}
 	}
 
-	for _, result := range resLocalStorageCost {
-		cluster := result.Cluster
-		if cluster == "" {
-			cluster = coreenv.GetClusterID()
-		}
-
-		name := result.Instance
-		if name == "" {
-			log.Warnf("ClusterDisks: local storage data missing instance")
-			continue
-		}
-
-		device := result.Device
-		if device == "" {
-			log.Warnf("ClusterDisks: local storage data missing device")
-			continue
-		}
-
-		cost := result.Data[0].Value
-		key := DiskIdentifier{cluster, name}
-		ls, ok := localStorageDisks[key]
-		if !ok || ls.device != device {
-			continue
-		}
-		ls.disk.Cost = cost
-
-	}
-
-	for _, result := range resLocalStorageUsedCost {
-		cluster := result.Cluster
-		if cluster == "" {
-			cluster = coreenv.GetClusterID()
-		}
-
-		name := result.Instance
-		if name == "" {
-			log.Warnf("ClusterDisks: local storage data missing instance")
-			continue
-		}
-
-		device := result.Device
-		if device == "" {
-			log.Warnf("ClusterDisks: local storage data missing device")
-			continue
-		}
-
-		cost := result.Data[0].Value
-		key := DiskIdentifier{cluster, name}
-		ls, ok := localStorageDisks[key]
-		if !ok || ls.device != device {
-			continue
-		}
-		ls.disk.Breakdown.System = cost / ls.disk.Cost
-	}
-
 	for _, result := range resLocalStorageUsedAvg {
 		cluster := result.Cluster
 		if cluster == "" {
@@ -349,11 +290,23 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 		e := time.Unix(int64(result.Data[len(result.Data)-1].Timestamp), 0)
 		mins := e.Sub(s).Minutes()
 
-		// TODO niko/assets if mins >= threshold, interpolate for missing data?
-
 		ls.disk.End = e
 		ls.disk.Start = s
 		ls.disk.Minutes = mins
+
+		// Cost = GiB * hours * $-per-GB-hour
+		ls.disk.Cost = (ls.disk.Bytes / 1024 / 1024 / 1024) * (ls.disk.Minutes / 60) * localStoragePricePerGBHr
+
+		bytesUsedAvg := 0.0
+		if ls.disk.BytesUsedAvgPtr != nil {
+			bytesUsedAvg = *ls.disk.BytesUsedAvgPtr
+		}
+		// Used Cost = Used GiB * hours * $-per-GB-hour
+		if ls.disk.Cost > 0 {
+			ls.disk.Breakdown.System = ((bytesUsedAvg / 1024 / 1024 / 1024) * (ls.disk.Minutes / 60) * localStoragePricePerGBHr) / ls.disk.Cost
+		} else {
+			ls.disk.Breakdown.System = 0
+		}
 	}
 
 	// move local storage disks to main disk map