Explorar o código

Proposal: Measure node overhead as percentage of capacity (#1912)

* Proposal: Measure node overhead as percentage of capacity

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

* feedback comments - change name, add cost weighted average

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

* fix tests

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

* move order of encoding to avoid conditional comma

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

* change calculation to be over total cost

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

* code review fix - pre-size map

Signed-off-by: Alex Meijer <ameijer@kubecost.com>

---------

Signed-off-by: Alex Meijer <ameijer@kubecost.com>
Alex Meijer hai 3 anos
pai
achega
87b4c630de

+ 7 - 0
pkg/costmodel/assets.go

@@ -133,6 +133,13 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*kubecost.AssetSet, er
 		node.GPUCost = n.GPUCost
 		node.GPUCost = n.GPUCost
 		node.GPUCount = n.GPUCount
 		node.GPUCount = n.GPUCount
 		node.RAMCost = n.RAMCost
 		node.RAMCost = n.RAMCost
+
+		node.Overhead = &kubecost.NodeOverhead{
+			RamOverheadFraction: n.Overhead.RamOverheadFraction,
+			CpuOverheadFraction: n.Overhead.CpuOverheadFraction,
+			OverheadCostFraction: ((n.Overhead.CpuOverheadFraction * n.CPUCost) +
+				(n.Overhead.RamOverheadFraction * n.RAMCost)) / node.TotalCost(),
+		}
 		node.Discount = n.Discount
 		node.Discount = n.Discount
 		if n.Preemptible {
 		if n.Preemptible {
 			node.Preemptible = 1.0
 			node.Preemptible = 1.0

+ 27 - 11
pkg/costmodel/cluster.go

@@ -472,6 +472,10 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	return diskMap, nil
 	return diskMap, nil
 }
 }
 
 
+type NodeOverhead struct {
+	CpuOverheadFraction float64
+	RamOverheadFraction float64
+}
 type Node struct {
 type Node struct {
 	Cluster         string
 	Cluster         string
 	Name            string
 	Name            string
@@ -494,6 +498,7 @@ type Node struct {
 	CostPerCPUHr    float64
 	CostPerCPUHr    float64
 	CostPerRAMGiBHr float64
 	CostPerRAMGiBHr float64
 	CostPerGPUHr    float64
 	CostPerGPUHr    float64
+	Overhead        *NodeOverhead
 }
 }
 
 
 // GKE lies about the number of cores e2 nodes have. This table
 // GKE lies about the number of cores e2 nodes have. This table
@@ -567,9 +572,11 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
 	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
 
 
 	queryNodeCPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeCPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
-	queryNodeCPUCores := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeCPUCoresCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeCPUCoresAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
 	queryNodeRAMHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`, durStr, env.GetPromClusterLabel())
 	queryNodeRAMHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`, durStr, env.GetPromClusterLabel())
-	queryNodeRAMBytes := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeRAMBytesCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeRAMBytesAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUCount := fmt.Sprintf(`avg(avg_over_time(node_gpu_count[%s])) by (%s, node, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUCount := fmt.Sprintf(`avg(avg_over_time(node_gpu_count[%s])) by (%s, node, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeCPUModeTotal := fmt.Sprintf(`sum(rate(node_cpu_seconds_total[%s:%dm])) by (kubernetes_node, %s, mode)`, durStr, minsPerResolution, env.GetPromClusterLabel())
 	queryNodeCPUModeTotal := fmt.Sprintf(`sum(rate(node_cpu_seconds_total[%s:%dm])) by (kubernetes_node, %s, mode)`, durStr, minsPerResolution, env.GetPromClusterLabel())
@@ -581,9 +588,11 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 
 
 	// Return errors if these fail
 	// Return errors if these fail
 	resChNodeCPUHourlyCost := requiredCtx.QueryAtTime(queryNodeCPUHourlyCost, t)
 	resChNodeCPUHourlyCost := requiredCtx.QueryAtTime(queryNodeCPUHourlyCost, t)
-	resChNodeCPUCores := requiredCtx.QueryAtTime(queryNodeCPUCores, t)
+	resChNodeCPUCoresCapacity := requiredCtx.QueryAtTime(queryNodeCPUCoresCapacity, t)
+	resChNodeCPUCoresAllocatable := requiredCtx.QueryAtTime(queryNodeCPUCoresAllocatable, t)
 	resChNodeRAMHourlyCost := requiredCtx.QueryAtTime(queryNodeRAMHourlyCost, t)
 	resChNodeRAMHourlyCost := requiredCtx.QueryAtTime(queryNodeRAMHourlyCost, t)
-	resChNodeRAMBytes := requiredCtx.QueryAtTime(queryNodeRAMBytes, t)
+	resChNodeRAMBytesCapacity := requiredCtx.QueryAtTime(queryNodeRAMBytesCapacity, t)
+	resChNodeRAMBytesAllocatable := requiredCtx.QueryAtTime(queryNodeRAMBytesAllocatable, t)
 	resChNodeGPUCount := requiredCtx.QueryAtTime(queryNodeGPUCount, t)
 	resChNodeGPUCount := requiredCtx.QueryAtTime(queryNodeGPUCount, t)
 	resChNodeGPUHourlyCost := requiredCtx.QueryAtTime(queryNodeGPUHourlyCost, t)
 	resChNodeGPUHourlyCost := requiredCtx.QueryAtTime(queryNodeGPUHourlyCost, t)
 	resChActiveMins := requiredCtx.QueryAtTime(queryActiveMins, t)
 	resChActiveMins := requiredCtx.QueryAtTime(queryActiveMins, t)
@@ -596,11 +605,13 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	resChLabels := optionalCtx.QueryAtTime(queryLabels, t)
 	resChLabels := optionalCtx.QueryAtTime(queryLabels, t)
 
 
 	resNodeCPUHourlyCost, _ := resChNodeCPUHourlyCost.Await()
 	resNodeCPUHourlyCost, _ := resChNodeCPUHourlyCost.Await()
-	resNodeCPUCores, _ := resChNodeCPUCores.Await()
+	resNodeCPUCoresCapacity, _ := resChNodeCPUCoresCapacity.Await()
+	resNodeCPUCoresAllocatable, _ := resChNodeCPUCoresAllocatable.Await()
 	resNodeGPUCount, _ := resChNodeGPUCount.Await()
 	resNodeGPUCount, _ := resChNodeGPUCount.Await()
 	resNodeGPUHourlyCost, _ := resChNodeGPUHourlyCost.Await()
 	resNodeGPUHourlyCost, _ := resChNodeGPUHourlyCost.Await()
 	resNodeRAMHourlyCost, _ := resChNodeRAMHourlyCost.Await()
 	resNodeRAMHourlyCost, _ := resChNodeRAMHourlyCost.Await()
-	resNodeRAMBytes, _ := resChNodeRAMBytes.Await()
+	resNodeRAMBytesCapacity, _ := resChNodeRAMBytesCapacity.Await()
+	resNodeRAMBytesAllocatable, _ := resChNodeRAMBytesAllocatable.Await()
 	resIsSpot, _ := resChIsSpot.Await()
 	resIsSpot, _ := resChIsSpot.Await()
 	resNodeCPUModeTotal, _ := resChNodeCPUModeTotal.Await()
 	resNodeCPUModeTotal, _ := resChNodeCPUModeTotal.Await()
 	resNodeRAMSystemPct, _ := resChNodeRAMSystemPct.Await()
 	resNodeRAMSystemPct, _ := resChNodeRAMSystemPct.Await()
@@ -633,8 +644,12 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	clusterAndNameToTypeIntermediate := mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2)
 	clusterAndNameToTypeIntermediate := mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2)
 	clusterAndNameToType := mergeTypeMaps(clusterAndNameToTypeIntermediate, clusterAndNameToType3)
 	clusterAndNameToType := mergeTypeMaps(clusterAndNameToTypeIntermediate, clusterAndNameToType3)
 
 
-	cpuCoresMap := buildCPUCoresMap(resNodeCPUCores)
-	ramBytesMap := buildRAMBytesMap(resNodeRAMBytes)
+	cpuCoresCapacityMap := buildCPUCoresMap(resNodeCPUCoresCapacity)
+	ramBytesCapacityMap := buildRAMBytesMap(resNodeRAMBytesCapacity)
+
+	cpuCoresAllocatableMap := buildCPUCoresMap(resNodeCPUCoresAllocatable)
+	ramBytesAllocatableMap := buildRAMBytesMap(resNodeRAMBytesAllocatable)
+	overheadMap := buildOverheadMap(ramBytesCapacityMap, ramBytesAllocatableMap, cpuCoresCapacityMap, cpuCoresAllocatableMap)
 
 
 	ramUserPctMap := buildRAMUserPctMap(resNodeRAMUserPct)
 	ramUserPctMap := buildRAMUserPctMap(resNodeRAMUserPct)
 	ramSystemPctMap := buildRAMSystemPctMap(resNodeRAMSystemPct)
 	ramSystemPctMap := buildRAMSystemPctMap(resNodeRAMSystemPct)
@@ -643,13 +658,13 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 
 
 	labelsMap := buildLabelsMap(resLabels)
 	labelsMap := buildLabelsMap(resLabels)
 
 
-	costTimesMinuteAndCount(activeDataMap, cpuCostMap, cpuCoresMap)
-	costTimesMinuteAndCount(activeDataMap, ramCostMap, ramBytesMap)
+	costTimesMinuteAndCount(activeDataMap, cpuCostMap, cpuCoresCapacityMap)
+	costTimesMinuteAndCount(activeDataMap, ramCostMap, ramBytesCapacityMap)
 	costTimesMinute(activeDataMap, gpuCostMap) // there's no need to do a weird "nodeIdentifierNoProviderID" type match since gpuCounts have a providerID
 	costTimesMinute(activeDataMap, gpuCostMap) // there's no need to do a weird "nodeIdentifierNoProviderID" type match since gpuCounts have a providerID
 
 
 	nodeMap := buildNodeMap(
 	nodeMap := buildNodeMap(
 		cpuCostMap, ramCostMap, gpuCostMap, gpuCountMap,
 		cpuCostMap, ramCostMap, gpuCostMap, gpuCountMap,
-		cpuCoresMap, ramBytesMap, ramUserPctMap,
+		cpuCoresCapacityMap, ramBytesCapacityMap, ramUserPctMap,
 		ramSystemPctMap,
 		ramSystemPctMap,
 		cpuBreakdownMap,
 		cpuBreakdownMap,
 		activeDataMap,
 		activeDataMap,
@@ -657,6 +672,7 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 		labelsMap,
 		labelsMap,
 		clusterAndNameToType,
 		clusterAndNameToType,
 		resolution,
 		resolution,
+		overheadMap,
 	)
 	)
 
 
 	c, err := cp.GetConfig()
 	c, err := cp.GetConfig()

+ 43 - 0
pkg/costmodel/cluster_helpers.go

@@ -426,6 +426,43 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*prom.QueryResult) map[nodeIdent
 	return cpuBreakdownMap
 	return cpuBreakdownMap
 }
 }
 
 
+func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoProviderID]float64) map[nodeIdentifierNoProviderID]*NodeOverhead {
+	m := make(map[nodeIdentifierNoProviderID]*NodeOverhead, len(capRam))
+
+	for identifier, ramCapacity := range capRam {
+		allocatableRam, ok := allocRam[identifier]
+		if !ok {
+			log.Warnf("Could not find allocatable ram for node %s", identifier.Name)
+			continue
+		}
+		overheadBytes := ramCapacity - allocatableRam
+		m[identifier] = &NodeOverhead{
+			RamOverheadFraction: overheadBytes / ramCapacity,
+		}
+	}
+
+	for identifier, cpuCapacity := range capCPU {
+		allocatableCPU, ok := allocCPU[identifier]
+		if !ok {
+			log.Warnf("Could not find allocatable cpu for node %s", identifier.Name)
+			continue
+		}
+
+		overhead := cpuCapacity - allocatableCPU
+
+		if _, found := m[identifier]; found {
+			m[identifier].CpuOverheadFraction = overhead / cpuCapacity
+		} else {
+			m[identifier] = &NodeOverhead{
+				CpuOverheadFraction: overhead / cpuCapacity,
+			}
+		}
+
+	}
+
+	return m
+}
+
 func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
 	m := make(map[nodeIdentifierNoProviderID]float64)
@@ -707,6 +744,7 @@ func buildNodeMap(
 	labelsMap map[nodeIdentifierNoProviderID]map[string]string,
 	labelsMap map[nodeIdentifierNoProviderID]map[string]string,
 	clusterAndNameToType map[nodeIdentifierNoProviderID]string,
 	clusterAndNameToType map[nodeIdentifierNoProviderID]string,
 	res time.Duration,
 	res time.Duration,
+	overheadMap map[nodeIdentifierNoProviderID]*NodeOverhead,
 ) map[NodeIdentifier]*Node {
 ) map[NodeIdentifier]*Node {
 
 
 	nodeMap := make(map[NodeIdentifier]*Node)
 	nodeMap := make(map[NodeIdentifier]*Node)
@@ -784,6 +822,11 @@ func buildNodeMap(
 		if labels, ok := labelsMap[clusterAndNameID]; ok {
 		if labels, ok := labelsMap[clusterAndNameID]; ok {
 			nodePtr.Labels = labels
 			nodePtr.Labels = labels
 		}
 		}
+
+		if overhead, ok := overheadMap[clusterAndNameID]; ok {
+			nodePtr.Overhead = overhead
+		}
+
 	}
 	}
 
 
 	return nodeMap
 	return nodeMap

+ 15 - 0
pkg/costmodel/cluster_helpers_test.go

@@ -150,6 +150,7 @@ func TestBuildNodeMap(t *testing.T) {
 		labelsMap            map[nodeIdentifierNoProviderID]map[string]string
 		labelsMap            map[nodeIdentifierNoProviderID]map[string]string
 		clusterAndNameToType map[nodeIdentifierNoProviderID]string
 		clusterAndNameToType map[nodeIdentifierNoProviderID]string
 		expected             map[NodeIdentifier]*Node
 		expected             map[NodeIdentifier]*Node
+		overheadMap          map[nodeIdentifierNoProviderID]*NodeOverhead
 	}{
 	}{
 		{
 		{
 			name:     "empty",
 			name:     "empty",
@@ -657,6 +658,15 @@ func TestBuildNodeMap(t *testing.T) {
 					Name:    "node1",
 					Name:    "node1",
 				}: "e2-medium", // for this node type
 				}: "e2-medium", // for this node type
 			},
 			},
+			overheadMap: map[nodeIdentifierNoProviderID]*NodeOverhead{
+				{
+					Cluster: "cluster1",
+					Name:    "node1",
+				}: {
+					CpuOverheadFraction: 0.5,
+					RamOverheadFraction: 0.25,
+				}, // for this node type
+			},
 			expected: map[NodeIdentifier]*Node{
 			expected: map[NodeIdentifier]*Node{
 				{
 				{
 					Cluster:    "cluster1",
 					Cluster:    "cluster1",
@@ -671,6 +681,10 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCores:     partialCPUMap["e2-medium"],
 					CPUCores:     partialCPUMap["e2-medium"],
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead: &NodeOverhead{
+						CpuOverheadFraction: 0.5,
+						RamOverheadFraction: 0.25,
+					},
 				},
 				},
 			},
 			},
 		},
 		},
@@ -688,6 +702,7 @@ func TestBuildNodeMap(t *testing.T) {
 				testCase.labelsMap,
 				testCase.labelsMap,
 				testCase.clusterAndNameToType,
 				testCase.clusterAndNameToType,
 				time.Minute,
 				time.Minute,
+				testCase.overheadMap,
 			)
 			)
 
 
 			if !reflect.DeepEqual(result, testCase.expected) {
 			if !reflect.DeepEqual(result, testCase.expected) {

+ 16 - 0
pkg/kubecost/asset.go

@@ -1753,6 +1753,14 @@ func (n *Network) String() string {
 	return toString(n)
 	return toString(n)
 }
 }
 
 
+// NodeOverhead represents the delta between the allocatable resources
+// of the node and the node nameplate capacity
+type NodeOverhead struct {
+	CpuOverheadFraction  float64
+	RamOverheadFraction  float64
+	OverheadCostFraction float64
+}
+
 // Node is an Asset representing a single node in a cluster
 // Node is an Asset representing a single node in a cluster
 type Node struct {
 type Node struct {
 	Properties   *AssetProperties
 	Properties   *AssetProperties
@@ -1773,6 +1781,7 @@ type Node struct {
 	RAMCost      float64
 	RAMCost      float64
 	Discount     float64
 	Discount     float64
 	Preemptible  float64
 	Preemptible  float64
+	Overhead     *NodeOverhead // @bingen:field[version=19]
 }
 }
 
 
 // NewNode creates and returns a new Node Asset
 // NewNode creates and returns a new Node Asset
@@ -2001,6 +2010,13 @@ func (n *Node) add(that *Node) {
 	n.GPUCost += that.GPUCost
 	n.GPUCost += that.GPUCost
 	n.RAMCost += that.RAMCost
 	n.RAMCost += that.RAMCost
 	n.Adjustment += that.Adjustment
 	n.Adjustment += that.Adjustment
+
+	if n.Overhead != nil && that.Overhead != nil {
+
+		n.Overhead.RamOverheadFraction = (n.Overhead.RamOverheadFraction*n.RAMCost + that.Overhead.RamOverheadFraction*that.RAMCost) / totalRAMCost
+		n.Overhead.CpuOverheadFraction = (n.Overhead.CpuOverheadFraction*n.CPUCost + that.Overhead.CpuOverheadFraction*that.CPUCost) / totalCPUCost
+		n.Overhead.OverheadCostFraction = ((n.Overhead.CpuOverheadFraction * n.CPUCost) + (n.Overhead.RamOverheadFraction * n.RAMCost)) / n.TotalCost()
+	}
 }
 }
 
 
 // Clone returns a deep copy of the given Node
 // Clone returns a deep copy of the given Node

+ 4 - 0
pkg/kubecost/asset_json.go

@@ -494,7 +494,11 @@ func (n *Node) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "gpuCount", n.GPUs(), ",")
 	jsonEncodeFloat64(buffer, "gpuCount", n.GPUs(), ",")
 	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
 	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
 	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment, ",")
 	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment, ",")
+	if n.Overhead != nil {
+		jsonEncode(buffer, "overhead", n.Overhead, ",")
+	}
 	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
 	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
+
 	buffer.WriteString("}")
 	buffer.WriteString("}")
 	return buffer.Bytes(), nil
 	return buffer.Bytes(), nil
 }
 }

+ 2 - 1
pkg/kubecost/bingen.go

@@ -26,7 +26,7 @@ package kubecost
 // @bingen:generate:CoverageSet
 // @bingen:generate:CoverageSet
 
 
 // Asset Version Set: Includes Asset pipeline specific resources
 // Asset Version Set: Includes Asset pipeline specific resources
-// @bingen:set[name=Assets,version=18]
+// @bingen:set[name=Assets,version=19]
 // @bingen:generate:Any
 // @bingen:generate:Any
 // @bingen:generate:Asset
 // @bingen:generate:Asset
 // @bingen:generate:AssetLabels
 // @bingen:generate:AssetLabels
@@ -41,6 +41,7 @@ package kubecost
 // @bingen:generate:LoadBalancer
 // @bingen:generate:LoadBalancer
 // @bingen:generate:Network
 // @bingen:generate:Network
 // @bingen:generate:Node
 // @bingen:generate:Node
+// @bingen:generate:NodeOverhead
 // @bingen:generate:SharedAsset
 // @bingen:generate:SharedAsset
 // @bingen:end
 // @bingen:end
 
 

+ 150 - 2
pkg/kubecost/kubecost_codecs.go

@@ -40,10 +40,10 @@ const (
 	CloudCostCodecVersion uint8 = 1
 	CloudCostCodecVersion uint8 = 1
 
 
 	// DefaultCodecVersion is used for any resources listed in the Default version set
 	// DefaultCodecVersion is used for any resources listed in the Default version set
-	DefaultCodecVersion uint8 = 17
+	DefaultCodecVersion uint8 = 19
 
 
 	// AssetsCodecVersion is used for any resources listed in the Assets version set
 	// AssetsCodecVersion is used for any resources listed in the Assets version set
-	AssetsCodecVersion uint8 = 18
+	AssetsCodecVersion uint8 = 19
 
 
 	// AllocationCodecVersion is used for any resources listed in the Allocation version set
 	// AllocationCodecVersion is used for any resources listed in the Allocation version set
 	AllocationCodecVersion uint8 = 16
 	AllocationCodecVersion uint8 = 16
@@ -86,6 +86,7 @@ var typeMap map[string]reflect.Type = map[string]reflect.Type{
 	"LoadBalancer":                  reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
 	"LoadBalancer":                  reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
 	"Network":                       reflect.TypeOf((*Network)(nil)).Elem(),
 	"Network":                       reflect.TypeOf((*Network)(nil)).Elem(),
 	"Node":                          reflect.TypeOf((*Node)(nil)).Elem(),
 	"Node":                          reflect.TypeOf((*Node)(nil)).Elem(),
+	"NodeOverhead":                  reflect.TypeOf((*NodeOverhead)(nil)).Elem(),
 	"PVAllocation":                  reflect.TypeOf((*PVAllocation)(nil)).Elem(),
 	"PVAllocation":                  reflect.TypeOf((*PVAllocation)(nil)).Elem(),
 	"PVKey":                         reflect.TypeOf((*PVKey)(nil)).Elem(),
 	"PVKey":                         reflect.TypeOf((*PVKey)(nil)).Elem(),
 	"RawAllocationOnlyData":         reflect.TypeOf((*RawAllocationOnlyData)(nil)).Elem(),
 	"RawAllocationOnlyData":         reflect.TypeOf((*RawAllocationOnlyData)(nil)).Elem(),
@@ -7730,6 +7731,20 @@ func (target *Node) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
 	buff.WriteFloat64(target.RAMCost)     // write float64
 	buff.WriteFloat64(target.RAMCost)     // write float64
 	buff.WriteFloat64(target.Discount)    // write float64
 	buff.WriteFloat64(target.Discount)    // write float64
 	buff.WriteFloat64(target.Preemptible) // write float64
 	buff.WriteFloat64(target.Preemptible) // write float64
+	if target.Overhead == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](NodeOverhead) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errG := target.Overhead.MarshalBinaryWithContext(ctx)
+		if errG != nil {
+			return errG
+		}
+		// --- [end][write][struct](NodeOverhead) ---
+
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -7941,6 +7956,139 @@ func (target *Node) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error)
 	ll := buff.ReadFloat64() // read float64
 	ll := buff.ReadFloat64() // read float64
 	target.Preemptible = ll
 	target.Preemptible = ll
 
 
+	// field version check
+	if uint8(19) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Overhead = nil
+		} else {
+			// --- [begin][read][struct](NodeOverhead) ---
+			mm := &NodeOverhead{}
+			buff.ReadInt() // [compatibility, unused]
+			errG := mm.UnmarshalBinaryWithContext(ctx)
+			if errG != nil {
+				return errG
+			}
+			target.Overhead = mm
+			// --- [end][read][struct](NodeOverhead) ---
+
+		}
+	} else {
+		target.Overhead = nil
+
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  NodeOverhead
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this NodeOverhead instance
+// into a byte array
+func (target *NodeOverhead) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this NodeOverhead instance
+// into a byte array leveraging a predefined context.
+func (target *NodeOverhead) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(AssetsCodecVersion) // version
+
+	buff.WriteFloat64(target.CpuOverheadFraction)  // write float64
+	buff.WriteFloat64(target.RamOverheadFraction)  // write float64
+	buff.WriteFloat64(target.OverheadCostFraction) // write float64
+	return nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the NodeOverhead type
+func (target *NodeOverhead) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the NodeOverhead type
+func (target *NodeOverhead) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > AssetsCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling NodeOverhead. Expected %d or less, got %d", AssetsCodecVersion, version)
+	}
+
+	a := buff.ReadFloat64() // read float64
+	target.CpuOverheadFraction = a
+
+	b := buff.ReadFloat64() // read float64
+	target.RamOverheadFraction = b
+
+	c := buff.ReadFloat64() // read float64
+	target.OverheadCostFraction = c
+
 	return nil
 	return nil
 }
 }