Browse Source

Merge pull request #1438 from opencost/mmd/disk-byte-usage-optional

Make usage of Disk resilient to absence of metrics
Michael Dresser 3 years ago
parent
commit
1397decb11

+ 22 - 11
pkg/costmodel/cluster.go

@@ -116,13 +116,24 @@ type Disk struct {
 	ClaimNamespace string
 	Cost           float64
 	Bytes          float64
-	BytesUsedAvg   float64
-	BytesUsedMax   float64
-	Local          bool
-	Start          time.Time
-	End            time.Time
-	Minutes        float64
-	Breakdown      *ClusterCostsBreakdown
+
+	// These two fields may not be available at all times because they rely on
+	// a new set of metrics that may or may not be available. Thus, they must
+	// be nilable to represent the complete absence of the data.
+	//
+	// In other words, nilability here lets us distinguish between
+	// "metric is not available" and "metric is available but is 0".
+	//
+	// They end in "Ptr" to distinguish from an earlier version in order to
+	// ensure that all usages are checked for nil.
+	BytesUsedAvgPtr *float64
+	BytesUsedMaxPtr *float64
+
+	Local     bool
+	Start     time.Time
+	End       time.Time
+	Minutes   float64
+	Breakdown *ClusterCostsBreakdown
 }
 
 type DiskIdentifier struct {
@@ -321,7 +332,7 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 				Local:     true,
 			}
 		}
-		diskMap[key].BytesUsedAvg = bytesAvg
+		diskMap[key].BytesUsedAvgPtr = &bytesAvg
 	}
 
 	for _, result := range resLocalStorageUsedMax {
@@ -346,7 +357,7 @@ func ClusterDisks(client prometheus.Client, provider cloud.Provider, start, end
 				Local:     true,
 			}
 		}
-		diskMap[key].BytesUsedMax = bytesMax
+		diskMap[key].BytesUsedMaxPtr = &bytesMax
 	}
 
 	for _, result := range resLocalStorageBytes {
@@ -1456,7 +1467,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				Breakdown: &ClusterCostsBreakdown{},
 			}
 		}
-		diskMap[key].BytesUsedAvg = usage
+		diskMap[key].BytesUsedAvgPtr = &usage
 	}
 
 	for _, result := range resPVUsedMax {
@@ -1517,6 +1528,6 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				Breakdown: &ClusterCostsBreakdown{},
 			}
 		}
-		diskMap[key].BytesUsedMax = usage
+		diskMap[key].BytesUsedMaxPtr = &usage
 	}
 }

+ 29 - 4
pkg/kubecost/asset.go

@@ -1072,7 +1072,7 @@ type Disk struct {
 	Local          float64
 	Breakdown      *Breakdown
 	StorageClass   string   // @bingen:field[version=17]
-	ByteHoursUsed  float64  // @bingen:field[version=18]
+	ByteHoursUsed  *float64 // @bingen:field[version=18]
 	ByteUsageMax   *float64 // @bingen:field[version=18]
 	VolumeName     string   // @bingen:field[version=18]
 	ClaimName      string   // @bingen:field[version=18]
@@ -1268,7 +1268,21 @@ func (d *Disk) add(that *Disk) {
 	d.Cost += that.Cost
 
 	d.ByteHours += that.ByteHours
-	d.ByteHoursUsed += that.ByteHoursUsed
+
+	if d.ByteHoursUsed == nil && that.ByteHoursUsed != nil {
+		copy := *that.ByteHoursUsed
+		d.ByteHoursUsed = &copy
+	} else if d.ByteHoursUsed != nil && that.ByteHoursUsed == nil {
+		// do nothing
+	} else if d.ByteHoursUsed != nil && that.ByteHoursUsed != nil {
+		sum := *d.ByteHoursUsed
+		sum += *that.ByteHoursUsed
+		d.ByteHoursUsed = &sum
+	}
+
+	// We have to nil out the max because we don't know if we're
+	// aggregating across time or properties. See RawAllocationOnly on
+	// Allocation for further reference.
 	d.ByteUsageMax = nil
 
 	// If storage class don't match default it to empty storage class
@@ -1294,6 +1308,11 @@ func (d *Disk) Clone() Asset {
 		copied := *d.ByteUsageMax
 		max = &copied
 	}
+	var byteHoursUsed *float64
+	if d.ByteHoursUsed != nil {
+		copied := *d.ByteHoursUsed
+		byteHoursUsed = &copied
+	}
 
 	return &Disk{
 		Properties:     d.Properties.Clone(),
@@ -1304,7 +1323,7 @@ func (d *Disk) Clone() Asset {
 		Adjustment:     d.Adjustment,
 		Cost:           d.Cost,
 		ByteHours:      d.ByteHours,
-		ByteHoursUsed:  d.ByteHoursUsed,
+		ByteHoursUsed:  byteHoursUsed,
 		ByteUsageMax:   max,
 		Local:          d.Local,
 		Breakdown:      d.Breakdown.Clone(),
@@ -1346,7 +1365,13 @@ func (d *Disk) Equal(a Asset) bool {
 	if d.ByteHours != that.ByteHours {
 		return false
 	}
-	if d.ByteHoursUsed != that.ByteHoursUsed {
+	if d.ByteHoursUsed != nil && that.ByteHoursUsed == nil {
+		return false
+	}
+	if d.ByteHoursUsed == nil && that.ByteHoursUsed != nil {
+		return false
+	}
+	if (d.ByteHoursUsed != nil && that.ByteHoursUsed != nil) && *d.ByteHoursUsed != *that.ByteHoursUsed {
 		return false
 	}
 	if d.ByteUsageMax != nil && that.ByteUsageMax == nil {

+ 11 - 2
pkg/kubecost/asset_json.go

@@ -259,7 +259,11 @@ func (d *Disk) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "minutes", d.Minutes(), ",")
 	jsonEncodeFloat64(buffer, "byteHours", d.ByteHours, ",")
 	jsonEncodeFloat64(buffer, "bytes", d.Bytes(), ",")
-	jsonEncodeFloat64(buffer, "byteHoursUsed", d.ByteHoursUsed, ",")
+	if d.ByteHoursUsed == nil {
+		jsonEncode(buffer, "byteHoursUsed", nil, ",")
+	} else {
+		jsonEncodeFloat64(buffer, "byteHoursUsed", *d.ByteHoursUsed, ",")
+	}
 	if d.ByteUsageMax == nil {
 		jsonEncode(buffer, "byteUsageMax", nil, ",")
 	} else {
@@ -342,7 +346,12 @@ func (d *Disk) InterfaceToDisk(itf interface{}) error {
 		d.ByteHours = ByteHours.(float64)
 	}
 	if ByteHoursUsed, err := getTypedVal(fmap["byteHoursUsed"]); err == nil {
-		d.ByteHoursUsed = ByteHoursUsed.(float64)
+		if ByteHoursUsed == nil {
+			d.ByteHoursUsed = nil
+		} else {
+			byteHours := ByteHoursUsed.(float64)
+			d.ByteHoursUsed = &byteHours
+		}
 	}
 	if ByteUsageMax, err := getTypedVal(fmap["byteUsageMax"]); err == nil {
 		if ByteUsageMax == nil {

+ 8 - 3
pkg/kubecost/asset_json_test.go

@@ -164,7 +164,8 @@ func TestDisk_Unmarshal(t *testing.T) {
 
 	disk1 := NewDisk("disk1", "cluster1", "disk1", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
 	disk1.ByteHours = 60.0 * gb * hours
-	disk1.ByteHoursUsed = 40.0 * gb * hours
+	used := 40.0 * gb * hours
+	disk1.ByteHoursUsed = &used
 	max := 50.0 * gb * hours
 	disk1.ByteUsageMax = &max
 	disk1.Cost = 4.0
@@ -214,7 +215,7 @@ func TestDisk_Unmarshal(t *testing.T) {
 	if disk1.ByteHours != disk2.ByteHours {
 		t.Fatalf("Disk Unmarshal: ByteHours mutated in unmarshal")
 	}
-	if disk1.ByteHoursUsed != disk2.ByteHoursUsed {
+	if *disk1.ByteHoursUsed != *disk2.ByteHoursUsed {
 		t.Fatalf("Disk Unmarshal: ByteHoursUsed mutated in unmarshal")
 	}
 	if *disk1.ByteUsageMax != *disk2.ByteUsageMax {
@@ -232,7 +233,7 @@ func TestDisk_Unmarshal(t *testing.T) {
 	disk3 := NewDisk("disk3", "cluster1", "disk3", *unmarshalWindow.start, *unmarshalWindow.end, unmarshalWindow)
 
 	disk3.ByteHours = 60.0 * gb * hours
-	disk3.ByteHoursUsed = 40.0 * gb * hours
+	disk3.ByteHoursUsed = nil
 	disk3.ByteUsageMax = nil
 	disk3.Cost = 4.0
 	disk3.Local = 1.0
@@ -256,6 +257,10 @@ func TestDisk_Unmarshal(t *testing.T) {
 		t.Fatalf("Disk Unmarshal: unexpected error: %s", err)
 	}
 
+	// Check that both disks have nil usage
+	if disk3.ByteHoursUsed != disk4.ByteHoursUsed {
+		t.Fatalf("Disk Unmarshal: ByteHoursUsed mutated in unmarshal")
+	}
 	// Check that both disks have nil max usage
 	if disk3.ByteUsageMax != disk4.ByteUsageMax {
 		t.Fatalf("Disk Unmarshal: ByteUsageMax mutated in unmarshal")

+ 15 - 4
pkg/kubecost/kubecost_codecs.go

@@ -4978,7 +4978,13 @@ func (target *Disk) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
 	} else {
 		buff.WriteString(target.StorageClass) // write string
 	}
-	buff.WriteFloat64(target.ByteHoursUsed) // write float64
+	if target.ByteHoursUsed == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		buff.WriteFloat64(*target.ByteHoursUsed) // write float64
+	}
 	if target.ByteUsageMax == nil {
 		buff.WriteUInt8(uint8(0)) // write nil byte
 	} else {
@@ -5191,11 +5197,16 @@ func (target *Disk) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error)
 
 	// field version check
 	if uint8(18) <= version {
-		dd := buff.ReadFloat64() // read float64
-		target.ByteHoursUsed = dd
+		if buff.ReadUInt8() == uint8(0) {
+			target.ByteHoursUsed = nil
+		} else {
+			dd := buff.ReadFloat64() // read float64
+			target.ByteHoursUsed = &dd
 
+		}
 	} else {
-		target.ByteHoursUsed = float64(0) // default
+		target.ByteHoursUsed = nil
+
 	}
 
 	// field version check