Преглед на файлове

initial kubemodel proposal (#3485)

Signed-off-by: aaliomer-ibm <aaliomer@ibm.com>
Co-authored-by: Alex Meijer <ameijer@users.noreply.github.com>
Co-authored-by: Hunter Metcalfe <106991365+HMetcalfeW@users.noreply.github.com>
Co-authored-by: Kaelan Patel <32113845+kaelanspatel@users.noreply.github.com>
Co-authored-by: Matt Bolt <mbolt35@gmail.com>
Co-authored-by: Nick Curie <32180999+nickcurie@users.noreply.github.com>
Jay (Ayad) Aliomer преди 3 месеца
родител
ревизия
f19e9a45f6

+ 83 - 16
core/pkg/model/kubemodel/container.go

@@ -6,22 +6,89 @@ import (
 )
 )
 
 
 type Container struct {
 type Container struct {
-	PodUID                              string    `json:"podUid"`
-	Name                                string    `json:"name"`
-	Start                               time.Time `json:"start"`
-	End                                 time.Time `json:"end"`
-	CpuMillicoreSecondsAllocated        uint64    `json:"cpuMillicoreSecondsAllocated"`
-	CpuMillicoreRequestAverageAllocated uint64    `json:"cpuMillicoreRequestAverageAllocated"`
-	CpuMillicoreUsageAverage            uint64    `json:"cpuMillicoreUsageAverage"`
-	CpuMillicoreUsageMax                uint64    `json:"cpuMillicoreUsageMax"`
-	RAMByteSecondsAllocated             uint64    `json:"ramByteSecondsAllocated"`
-	RAMByteRequestAverageAllocated      uint64    `json:"ramByteRequestAverageAllocated"`
-	RAMByteUsageAverage                 uint64    `json:"ramByteUsageAverage"`
-	RAMByteUsageMax                     uint64    `json:"ramByteUsageMax"`
-	StorageByteSecondsAllocated         uint64    `json:"storageByteSecondsAllocated"`
-	StorageByteRequestAverageAllocated  uint64    `json:"storageByteRequestAverageAllocated"`
-	StorageByteUsageAverage             uint64    `json:"storageByteUsageAverage"`
-	StorageByteUsageMax                 uint64    `json:"storageByteUsageMax"`
	PodUID                     string                 `json:"podUid"`                              // UID of the Pod this container belongs to
	Name                       string                 `json:"name"`                                // container name within the Pod
	DurationSeconds            Measurement            `json:"durationSeconds"`                     // seconds the container existed in the window; divisor for the *Average() methods
	CpuMillicoreSeconds        Measurement            `json:"cpuMillicoreSeconds"`                 // cumulative CPU usage in millicore-seconds
	CpuMillicoreUsageMax       Measurement            `json:"cpuMillicoreUsageMax"`                // peak CPU usage observed (merged with max())
	CpuMillicoreRequestSeconds Measurement            `json:"cpuMillicoreRequestSeconds"`          // cumulative CPU request in millicore-seconds
	RAMByteSeconds             Measurement            `json:"ramByteSeconds"`                      // cumulative RAM usage in byte-seconds
	RAMByteUsageMax            Measurement            `json:"ramByteUsageMax"`                     // peak RAM usage observed (merged with max())
	RAMByteSecondRequest       Measurement            `json:"ramByteSecondRequest"`                // cumulative RAM request in byte-seconds; NOTE(review): name/tag differ from RAMByteSecondsLimit ("ByteSecond" vs "ByteSeconds") — confirm intended
	VolumeStorageByteSeconds   map[string]Measurement `json:"volumeStorageByteSeconds,omitempty"`  // per-volume storage byte-seconds, keyed by volume UID
	VolumeStorageByteUsageMax  map[string]Measurement `json:"volumeStorageByteUsageMax,omitempty"` // per-volume peak storage bytes, same keys
	CpuMillicoreLimitSeconds   Measurement            `json:"cpuMillicoreLimitSeconds,omitempty"`  // cumulative CPU limit in millicore-seconds
	RAMByteSecondsLimit        Measurement            `json:"ramByteSecondsLimit,omitempty"`       // cumulative RAM limit in byte-seconds
	Start                      time.Time              `json:"start"`                               // first observation of the container in the window
	End                        time.Time              `json:"end"`                                 // last observation of the container in the window
}
+
+func (c *Container) CpuMillicoreUsageAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.CpuMillicoreSeconds / c.DurationSeconds
+}
+
+func (c *Container) RAMByteUsageAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.RAMByteSeconds / c.DurationSeconds
+}
+
+func (c *Container) TotalStorageByteSeconds() Measurement {
+	var total Measurement
+	for _, ByteSeconds := range c.VolumeStorageByteSeconds {
+		total += ByteSeconds
+	}
+	return total
+}
+
+func (c *Container) TotalStorageByteUsageMax() Measurement {
+	var max Measurement
+	for _, usage := range c.VolumeStorageByteUsageMax {
+		if usage > max {
+			max = usage
+		}
+	}
+	return max
+}
+
+func (c *Container) StorageByteUsageAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	totalByteSeconds := c.TotalStorageByteSeconds()
+	return totalByteSeconds / c.DurationSeconds
+}
+
+func (c *Container) CpuMillicoreRequestAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.CpuMillicoreRequestSeconds / c.DurationSeconds
+}
+
+func (c *Container) RAMByteRequestAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.RAMByteSecondRequest / c.DurationSeconds
+}
+
+func (c *Container) CpuMillicoreLimitAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.CpuMillicoreLimitSeconds / c.DurationSeconds
+}
+
+func (c *Container) RAMByteLimitAverage() Measurement {
+	if c.DurationSeconds == 0 {
+		return 0
+	}
+	return c.RAMByteSecondsLimit / c.DurationSeconds
 }
 }
 
 
 func (kms *KubeModelSet) RegisterContainer(uid, name, podUID string) error {
 func (kms *KubeModelSet) RegisterContainer(uid, name, podUID string) error {

+ 92 - 0
core/pkg/model/kubemodel/device.go

@@ -0,0 +1,92 @@
+package kubemodel
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
// Device models a compute device (e.g. GPU/TPU, per the Type examples)
// attached to a node, with availability, memory, and power accumulated
// over the KubeModelSet window.
//
// @bingen:generate:Device
type Device struct {
	UID               string      `json:"uid"`             // Device UUID (hardware identifier)
	Type              string      `json:"type,omitempty"`  // Device type (e.g., "device", "tpu")
	NodeUID           string      `json:"nodeUid"`         // Node hosting this device
	DeviceNumber      int32       `json:"deviceNumber"`    // Ordinal of the device on its node
	ModelName         string      `json:"modelName"`       // Hardware model name
	IsShared          bool        `json:"isShared"`        // Device sharing information
	SharePercentage   float64     `json:"sharePercentage"` // Share of the device allotted, 0-100 (enforced by Validate)
	UsageSeconds      float64     `json:"usageSeconds"`      // Device seconds available
	MemoryByteSeconds Measurement `json:"memoryByteSeconds"` // Device memory capacity in Byte-seconds
	PowerWattSeconds  float64     `json:"powerWattSeconds"`  // Device power consumption in watt-seconds (Joules)
	PowerWattMax      float64     `json:"powerWattMax"`      // Device max power consumption in watts
	// Version 2 fields - Lifecycle tracking
	Start           time.Time   `json:"start,omitempty"` // Device availability start
	End             time.Time   `json:"end,omitempty"`   // Device availability end
	DurationSeconds Measurement `json:"durationSeconds"` // Duration device was available
}
+
+// Validate validates the Device fields
+func (d *Device) Validate() error {
+	if d.UID == "" {
+		return errors.New("UID is required")
+	}
+	if d.NodeUID == "" {
+		return errors.New("NodeUID is required")
+	}
+	if d.SharePercentage < 0 || d.SharePercentage > 100 {
+		return fmt.Errorf("SharePercentage must be 0-100, got %.2f", d.SharePercentage)
+	}
+	if d.PowerWattSeconds < 0 {
+		return fmt.Errorf("PowerWattSeconds cannot be negative, got %.2f", d.PowerWattSeconds)
+	}
+	if d.PowerWattMax < 0 {
+		return fmt.Errorf("PowerWattMax cannot be negative, got %.2f", d.PowerWattMax)
+	}
+	return nil
+}
+
+// Clone creates a deep copy of the Device
+func (d *Device) Clone() *Device {
+	if d == nil {
+		return nil
+	}
+
+	cloned := &Device{
+		UID:               d.UID,
+		Type:              d.Type,
+		NodeUID:           d.NodeUID,
+		DeviceNumber:      d.DeviceNumber,
+		ModelName:         d.ModelName,
+		IsShared:          d.IsShared,
+		SharePercentage:   d.SharePercentage,
+		UsageSeconds:      d.UsageSeconds,
+		MemoryByteSeconds: d.MemoryByteSeconds,
+		PowerWattSeconds:  d.PowerWattSeconds,
+		PowerWattMax:      d.PowerWattMax,
+		DurationSeconds:   d.DurationSeconds,
+	}
+
+	cloned.Start = d.Start
+	cloned.End = d.End
+
+	return cloned
+}
+
+func (kms *KubeModelSet) RegisterDevice(uid, nodeUID string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for Device")
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Devices[uid]; !ok {
+		kms.Devices[uid] = &Device{
+			UID:     uid,
+			NodeUID: nodeUID,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 86 - 0
core/pkg/model/kubemodel/device_usage.go

@@ -0,0 +1,86 @@
+package kubemodel
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
// DeviceUsage records one container's consumption of one device over the
// window as cumulative device-seconds and memory byte-seconds.
//
// @bingen:generate:DeviceUsage
type DeviceUsage struct {
	ContainerUID          string      `json:"containerUid"`              // Container consuming the device
	DeviceUID             string      `json:"deviceUid"`                 // Device being consumed
	UsageSeconds          Measurement `json:"usageSeconds"`              // Cumulative device-seconds used
	UsagePercentageMax    float64     `json:"usagePercentageMax"`        // Peak utilisation, 0-100 (enforced by Validate)
	MemoryByteSecondsUsed Measurement `json:"memoryByteSecondsUsed"`     // Cumulative device memory used in byte-seconds
	DeviceType            string      `json:"deviceType,omitempty"`      // Presumably mirrors Device.Type — confirm against writer
	DurationSeconds       Measurement `json:"durationSeconds,omitempty"` // Window seconds; divisor for the *Average() methods
	Start                 time.Time   `json:"start"`                     // First observation
	End                   time.Time   `json:"end"`                       // Last observation
}
+
+func (u *DeviceUsage) Validate() error {
+	if u.ContainerUID == "" {
+		return errors.New("ContainerUID is required")
+	}
+	if u.DeviceUID == "" {
+		return errors.New("DeviceUID is required")
+	}
+	if u.UsagePercentageMax < 0 || u.UsagePercentageMax > 100 {
+		return fmt.Errorf("UsagePercentageMax must be 0-100, got %.2f", u.UsagePercentageMax)
+	}
+	return nil
+}
+
+func (u *DeviceUsage) Clone() *DeviceUsage {
+	if u == nil {
+		return nil
+	}
+
+	cloned := &DeviceUsage{
+		ContainerUID:          u.ContainerUID,
+		DeviceUID:             u.DeviceUID,
+		UsageSeconds:          u.UsageSeconds,
+		UsagePercentageMax:    u.UsagePercentageMax,
+		MemoryByteSecondsUsed: u.MemoryByteSecondsUsed,
+		DeviceType:            u.DeviceType,
+		DurationSeconds:       u.DurationSeconds,
+		Start:                 u.Start,
+		End:                   u.End,
+	}
+
+	return cloned
+}
+
+func (u *DeviceUsage) UsageAverage() Measurement {
+	if u.DurationSeconds == 0 {
+		return 0
+	}
+	return (u.UsageSeconds / u.DurationSeconds) * 100
+}
+
+func (u *DeviceUsage) MemoryByteUsageAverage() Measurement {
+	if u.DurationSeconds == 0 {
+		return 0
+	}
+	return u.MemoryByteSecondsUsed / u.DurationSeconds
+}
+
+func (kms *KubeModelSet) RegisterUsage(id, containerID, deviceId string) error {
+	if id == "" {
+		err := fmt.Errorf("UID is nil for DeviceUsage")
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.DeviceUsages[id]; !ok {
+		kms.DeviceUsages[id] = &DeviceUsage{
+			ContainerUID: containerID,
+			DeviceUID:    deviceId,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 0 - 1
core/pkg/model/kubemodel/diagnostic.go

@@ -94,7 +94,6 @@ func (kms *KubeModelSet) Warn(msg string) {
 
 
 func (kms *KubeModelSet) GetInfos() []Diagnostic {
 func (kms *KubeModelSet) GetInfos() []Diagnostic {
 	ds := []Diagnostic{}
 	ds := []Diagnostic{}
-
 	for _, d := range kms.Metadata.Diagnostics {
 	for _, d := range kms.Metadata.Diagnostics {
 		if d.Level == DiagnosticLevelInfo {
 		if d.Level == DiagnosticLevelInfo {
 			ds = append(ds, d)
 			ds = append(ds, d)

+ 75 - 29
core/pkg/model/kubemodel/kubemodel.go

@@ -8,48 +8,94 @@ import (
 
 
 // @bingen:generate[stringtable]:KubeModelSet
 // @bingen:generate[stringtable]:KubeModelSet
 type KubeModelSet struct {
 type KubeModelSet struct {
-	Metadata       *Metadata                 `json:"meta"`                     // @bingen:field[version=1]
-	Window         Window                    `json:"window"`                   // @bingen:field[version=1]
-	Cluster        *Cluster                  `json:"cluster"`                  // @bingen:field[version=1]
-	Containers     map[string]*Container     `json:"containers,omitempty"`     // @bingen:field[ignore]
-	Namespaces     map[string]*Namespace     `json:"namespaces"`               // @bingen:field[version=1]
-	Nodes          map[string]*Node          `json:"nodes,omitempty"`          // @bingen:field[ignore]
-	Owners         map[string]*Owner         `json:"owners,omitempty"`         // @bingen:field[ignore]
-	Pods           map[string]*Pod           `json:"pods,omitempty"`           // @bingen:field[ignore]
-	ResourceQuotas map[string]*ResourceQuota `json:"resourceQuotas,omitempty"` // @bingen:field[version=1]
-	Services       map[string]*Service       `json:"services,omitempty"`       // @bingen:field[ignore]
-	idx            *index                    // @bingen:field[ignore]
+	Metadata               *Metadata                         `json:"meta"`                   // @bingen:field[version=1]
+	Window                 Window                            `json:"window"`                 // @bingen:field[version=1]
+	Cluster                *Cluster                          `json:"cluster"`                // @bingen:field[version=1]
+	Namespaces             map[string]*Namespace             `json:"namespaces"`             // @bingen:field[version=1]
+	ResourceQuotas         map[string]*ResourceQuota         `json:"resourceQuotas"`         // @bingen:field[version=1]
+	Containers             map[string]*Container             `json:"containers,omitempty"`   // @bingen:field[ignore]
+	Owners                 map[string]*Owner                 `json:"owners,omitempty"`       // @bingen:field[ignore]
+	Devices                map[string]*Device                `json:"devices,omitempty"`      // @bingen:field[ignore]
+	DeviceUsages           map[string]*DeviceUsage           `json:"deviceUsages,omitempty"` // @bingen:field[ignore]
+	Nodes                  map[string]*Node                  `json:"nodes,omitempty"`        // @bingen:field[ignore]
+	Pods                   map[string]*Pod                   `json:"pods,omitempty"`         // @bingen:field[ignore]
+	PersistentVolumeClaims map[string]*PersistentVolumeClaim `json:"pvcs,omitempty"`         // @bingen:field[ignore]
+	Services               map[string]*Service               `json:"services,omitempty"`     // @bingen:field[ignore]
+	Volumes                map[string]*PersistentVolume      `json:"volumes,omitempty"`      // @bingen:field[ignore]
+	idx                    *kubeModelSetIndexes              // @bingen:field[ignore]
 }
 }
 
 
-func NewKubeModelSet(start, end time.Time) *KubeModelSet {
-	index := &index{
-		namespaceByName: map[string]*Namespace{},
-	}
-
-	return &KubeModelSet{
+func NewKubeModelSet(start time.Time, end time.Time) *KubeModelSet {
+	now := time.Now().UTC()
+	kms := &KubeModelSet{
 		Metadata: &Metadata{
 		Metadata: &Metadata{
-			CreatedAt:       time.Now().UTC(),
+			CreatedAt:       now,
+			CompletedAt:     now, // Will be updated when processing completes
 			DiagnosticLevel: DefaultDiagnosticLevel,
 			DiagnosticLevel: DefaultDiagnosticLevel,
 		},
 		},
 		Window: Window{
 		Window: Window{
 			Start: start,
 			Start: start,
 			End:   end,
 			End:   end,
 		},
 		},
-		Containers:     map[string]*Container{},
-		Namespaces:     map[string]*Namespace{},
-		Nodes:          map[string]*Node{},
-		Owners:         map[string]*Owner{},
-		Pods:           map[string]*Pod{},
-		ResourceQuotas: map[string]*ResourceQuota{},
-		Services:       map[string]*Service{},
-		idx:            index,
+		Containers:             map[string]*Container{},
+		Owners:                 map[string]*Owner{},
+		Devices:                map[string]*Device{},
+		DeviceUsages:           map[string]*DeviceUsage{},
+		Namespaces:             map[string]*Namespace{},
+		Nodes:                  map[string]*Node{},
+		Pods:                   map[string]*Pod{},
+		PersistentVolumeClaims: map[string]*PersistentVolumeClaim{},
+		ResourceQuotas:         map[string]*ResourceQuota{},
+		Services:               map[string]*Service{},
+		Volumes:                map[string]*PersistentVolume{},
+		idx:                    newKubeModelSetIndexes(),
 	}
 	}
+	return kms
 }
 }
 
 
+// GetNamespaceByName retrieves a namespace by its name using the index
+func (kms *KubeModelSet) GetNamespaceByName(name string) (*Namespace, bool) {
+	if kms.idx == nil {
+		return nil, false
+	}
+
+	uid, ok := kms.idx.namespaceNameToID[name]
+	if !ok {
+		return nil, false
+	}
+
+	ns, ok := kms.Namespaces[uid]
+	return ns, ok
+}
+
+// IsEmpty returns true if the KubeModelSet is nil, has no cluster, or contains no resources
 func (kms *KubeModelSet) IsEmpty() bool {
 func (kms *KubeModelSet) IsEmpty() bool {
-	return kms == nil || kms.Cluster == nil || kms.Metadata.ObjectCount == 0
+	if kms == nil || kms.Cluster == nil {
+		return true
+	}
+
+	// Check if all resource maps are empty
+	return len(kms.Containers) == 0 &&
+		len(kms.Owners) == 0 &&
+		len(kms.Devices) == 0 &&
+		len(kms.DeviceUsages) == 0 &&
+		len(kms.Namespaces) == 0 &&
+		len(kms.Nodes) == 0 &&
+		len(kms.Pods) == 0 &&
+		len(kms.PersistentVolumeClaims) == 0 &&
+		len(kms.ResourceQuotas) == 0 &&
+		len(kms.Services) == 0 &&
+		len(kms.Volumes) == 0
 }
 }
 
 
-type index struct {
-	namespaceByName map[string]*Namespace
+type kubeModelSetIndexes struct {
+	namespaceNameToID map[string]string
+	namespaceByName   map[string]*Namespace
+}
+
+func newKubeModelSetIndexes() *kubeModelSetIndexes {
+	return &kubeModelSetIndexes{
+		namespaceNameToID: make(map[string]string),
+		namespaceByName:   make(map[string]*Namespace),
+	}
 }
 }

+ 627 - 0
core/pkg/model/kubemodel/merge.go

@@ -0,0 +1,627 @@
+package kubemodel
+
+import (
+	"fmt"
+	"maps"
+	"math"
+	"slices"
+)
+
+func Merge(kms1, kms2 *KubeModelSet) (*KubeModelSet, error) {
+	if kms1 == nil && kms2 == nil {
+		return nil, fmt.Errorf("both KubeModelSets are nil")
+	}
+	if kms1 == nil {
+		return kms2, nil
+	}
+	if kms2 == nil {
+		return kms1, nil
+	}
+
+	if kms1.Cluster != nil && kms2.Cluster != nil && kms1.Cluster.UID != kms2.Cluster.UID {
+		return nil, fmt.Errorf(
+			"cannot merge KubeModelSets from different clusters: %s vs %s",
+			kms1.Cluster.UID, kms2.Cluster.UID)
+	}
+
+	windowStart := kms1.Window.Start
+	if kms2.Window.Start.Before(windowStart) {
+		windowStart = kms2.Window.Start
+	}
+	windowEnd := kms1.Window.End
+	if kms2.Window.End.After(windowEnd) {
+		windowEnd = kms2.Window.End
+	}
+
+	merged := NewKubeModelSet(windowStart, windowEnd)
+
+	if kms1.Metadata != nil && kms2.Metadata != nil {
+		if kms2.Metadata.CreatedAt.Before(kms1.Metadata.CreatedAt) {
+			merged.Metadata.CreatedAt = kms2.Metadata.CreatedAt
+		} else {
+			merged.Metadata.CreatedAt = kms1.Metadata.CreatedAt
+		}
+		if kms2.Metadata.CompletedAt.After(kms1.Metadata.CompletedAt) {
+			merged.Metadata.CompletedAt = kms2.Metadata.CompletedAt
+		} else {
+			merged.Metadata.CompletedAt = kms1.Metadata.CompletedAt
+		}
+		merged.Metadata.ObjectCount = kms1.Metadata.ObjectCount + kms2.Metadata.ObjectCount
+		merged.Metadata.Diagnostics = append(
+			append([]Diagnostic{}, kms1.Metadata.Diagnostics...),
+			kms2.Metadata.Diagnostics...,
+		)
+	} else if kms1.Metadata != nil {
+		merged.Metadata.CreatedAt = kms1.Metadata.CreatedAt
+		merged.Metadata.CompletedAt = kms1.Metadata.CompletedAt
+		merged.Metadata.ObjectCount = kms1.Metadata.ObjectCount
+		merged.Metadata.Diagnostics = append([]Diagnostic{}, kms1.Metadata.Diagnostics...)
+	} else if kms2.Metadata != nil {
+		merged.Metadata.CreatedAt = kms2.Metadata.CreatedAt
+		merged.Metadata.CompletedAt = kms2.Metadata.CompletedAt
+		merged.Metadata.ObjectCount = kms2.Metadata.ObjectCount
+		merged.Metadata.Diagnostics = append([]Diagnostic{}, kms2.Metadata.Diagnostics...)
+	}
+
+	merged.Cluster = kms1.Cluster
+	if merged.Cluster == nil {
+		merged.Cluster = kms2.Cluster
+	}
+
+	mergeNamespaces(merged, kms1, kms2)
+	mergeResourceQuotas(merged, kms1, kms2)
+	mergeNodes(merged, kms1, kms2)
+	mergePods(merged, kms1, kms2)
+	mergeContainers(merged, kms1, kms2)
+	mergeOwners(merged, kms1, kms2)
+	mergeServices(merged, kms1, kms2)
+	mergeVolumes(merged, kms1, kms2)
+	mergePVCs(merged, kms1, kms2)
+	mergeDevices(merged, kms1, kms2)
+	mergeDeviceUsages(merged, kms1, kms2)
+
+	return merged, nil
+}
+
+func mergeNamespaces(merged, kms1, kms2 *KubeModelSet) {
+	for uid, ns := range kms1.Namespaces {
+		merged.Namespaces[uid] = copyNamespace(ns)
+		merged.idx.namespaceNameToID[ns.Name] = ns.UID
+		merged.Metadata.ObjectCount++
+	}
+	for uid, ns2 := range kms2.Namespaces {
+		if ns1, exists := merged.Namespaces[uid]; exists {
+			// Merge Start/End timestamps for existing namespace
+			if ns2.Start.Before(ns1.Start) {
+				ns1.Start = ns2.Start
+			}
+			if ns2.End.After(ns1.End) {
+				ns1.End = ns2.End
+			}
+		} else {
+			merged.Namespaces[uid] = copyNamespace(ns2)
+			merged.idx.namespaceNameToID[ns2.Name] = ns2.UID
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeResourceQuotas(merged, kms1, kms2 *KubeModelSet) {
+	for uid, rq := range kms1.ResourceQuotas {
+		merged.ResourceQuotas[uid] = copyResourceQuota(rq)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, rq2 := range kms2.ResourceQuotas {
+		if rq1, exists := merged.ResourceQuotas[uid]; exists {
+			// Merge Start/End timestamps for existing resource quota
+			if rq2.Start.Before(rq1.Start) {
+				rq1.Start = rq2.Start
+			}
+			if rq2.End.After(rq1.End) {
+				rq1.End = rq2.End
+			}
+		} else {
+			merged.ResourceQuotas[uid] = copyResourceQuota(rq2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeNodes(merged, kms1, kms2 *KubeModelSet) {
+	for uid, node := range kms1.Nodes {
+		merged.Nodes[uid] = copyNode(node)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, node2 := range kms2.Nodes {
+		if node1, exists := merged.Nodes[uid]; exists {
+			node1.CpuMillicoreSeconds += node2.CpuMillicoreSeconds
+			node1.RAMByteSeconds += node2.RAMByteSeconds
+			node1.CpuMillicoreUsageMax = max(node1.CpuMillicoreUsageMax, node2.CpuMillicoreUsageMax)
+			node1.RAMByteUsageMax = max(node1.RAMByteUsageMax, node2.RAMByteUsageMax)
+			node1.DurationSeconds += node2.DurationSeconds
+
+			if node2.Start.Before(node1.Start) {
+				node1.Start = node2.Start
+			}
+			if node2.End.After(node1.End) {
+				node1.End = node2.End
+			}
+
+			for volumeUID, volume2 := range node2.AttachedVolumes {
+				if volume1, exists := node1.AttachedVolumes[volumeUID]; exists {
+					volume1.UsageByteSeconds += volume2.UsageByteSeconds
+					volume1.DurationSeconds += volume2.DurationSeconds
+					if volume2.CapacityBytes > volume1.CapacityBytes {
+						volume1.CapacityBytes = volume2.CapacityBytes
+					}
+				} else {
+					node1.AttachedVolumes[volumeUID] = &NodeVolumeUsage{
+						VolumeUID:        volume2.VolumeUID,
+						CapacityBytes:    volume2.CapacityBytes,
+						UsageByteSeconds: volume2.UsageByteSeconds,
+						VolumeType:       volume2.VolumeType,
+						ProviderID:       volume2.ProviderID,
+						DurationSeconds:  volume2.DurationSeconds,
+					}
+				}
+			}
+		} else {
+			merged.Nodes[uid] = copyNode(node2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergePods(merged, kms1, kms2 *KubeModelSet) {
+	for uid, pod := range kms1.Pods {
+		merged.Pods[uid] = copyPod(pod)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, pod2 := range kms2.Pods {
+		if pod1, exists := merged.Pods[uid]; exists {
+			pod1.NetworkReceiveBytes += pod2.NetworkReceiveBytes
+			pod1.NetworkTransferBytes += pod2.NetworkTransferBytes
+			pod1.DurationSeconds += pod2.DurationSeconds
+
+			if pod2.Start.Before(pod1.Start) {
+				pod1.Start = pod2.Start
+			}
+			if pod2.End.After(pod1.End) {
+				pod1.End = pod2.End
+			}
+		} else {
+			merged.Pods[uid] = copyPod(pod2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeContainers(merged, kms1, kms2 *KubeModelSet) {
+	for uid, container := range kms1.Containers {
+		merged.Containers[uid] = copyContainer(container)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, container2 := range kms2.Containers {
+		if container1, exists := merged.Containers[uid]; exists {
+			container1.CpuMillicoreSeconds += container2.CpuMillicoreSeconds
+			container1.RAMByteSeconds += container2.RAMByteSeconds
+			container1.CpuMillicoreUsageMax = max(container1.CpuMillicoreUsageMax, container2.CpuMillicoreUsageMax)
+			container1.RAMByteUsageMax = max(container1.RAMByteUsageMax, container2.RAMByteUsageMax)
+
+			for volumeUID, ByteSeconds := range container2.VolumeStorageByteSeconds {
+				container1.VolumeStorageByteSeconds[volumeUID] += ByteSeconds
+			}
+			for volumeUID, usageMax := range container2.VolumeStorageByteUsageMax {
+				if currentMax, exists := container1.VolumeStorageByteUsageMax[volumeUID]; exists {
+					container1.VolumeStorageByteUsageMax[volumeUID] = max(currentMax, usageMax)
+				} else {
+					container1.VolumeStorageByteUsageMax[volumeUID] = usageMax
+				}
+			}
+
+			container1.CpuMillicoreRequestSeconds += container2.CpuMillicoreRequestSeconds
+			container1.RAMByteSecondRequest += container2.RAMByteSecondRequest
+			container1.CpuMillicoreLimitSeconds += container2.CpuMillicoreLimitSeconds
+			container1.RAMByteSecondsLimit += container2.RAMByteSecondsLimit
+
+			container1.DurationSeconds += container2.DurationSeconds
+
+			// Merge Start/End timestamps
+			if container2.Start.Before(container1.Start) {
+				container1.Start = container2.Start
+			}
+			if container2.End.After(container1.End) {
+				container1.End = container2.End
+			}
+		} else {
+			merged.Containers[uid] = copyContainer(container2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeOwners(merged, kms1, kms2 *KubeModelSet) {
+	for uid, owner := range kms1.Owners {
+		merged.Owners[uid] = copyOwner(owner)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, owner2 := range kms2.Owners {
+		if owner1, exists := merged.Owners[uid]; exists {
+			if owner2.Start.Before(owner1.Start) {
+				owner1.Start = owner2.Start
+			}
+			if owner2.End.After(owner1.End) {
+				owner1.End = owner2.End
+			}
+		} else {
+			merged.Owners[uid] = copyOwner(owner2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeServices(merged, kms1, kms2 *KubeModelSet) {
+	for uid, svc := range kms1.Services {
+		merged.Services[uid] = copyService(svc)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, svc2 := range kms2.Services {
+		if svc1, exists := merged.Services[uid]; exists {
+			svc1.NetworkTransferBytes += svc2.NetworkTransferBytes
+			svc1.NetworkReceiveBytes += svc2.NetworkReceiveBytes
+			svc1.DurationSeconds += svc2.DurationSeconds
+
+			if svc2.Start.Before(svc1.Start) {
+				svc1.Start = svc2.Start
+			}
+			if svc2.End.After(svc1.End) {
+				svc1.End = svc2.End
+			}
+		} else {
+			merged.Services[uid] = copyService(svc2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeVolumes(merged, kms1, kms2 *KubeModelSet) {
+	for uid, vol := range kms1.Volumes {
+		merged.Volumes[uid] = copyVolume(vol)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, vol2 := range kms2.Volumes {
+		if vol1, exists := merged.Volumes[uid]; exists {
+			if vol2.Start.Before(vol1.Start) {
+				vol1.Start = vol2.Start
+			}
+			if vol2.End.After(vol1.End) {
+				vol1.End = vol2.End
+			}
+			vol1.DurationSeconds += vol2.DurationSeconds
+		} else {
+			merged.Volumes[uid] = copyVolume(vol2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergePVCs(merged, kms1, kms2 *KubeModelSet) {
+	for uid, pvc := range kms1.PersistentVolumeClaims {
+		merged.PersistentVolumeClaims[uid] = copyPVC(pvc)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, pvc2 := range kms2.PersistentVolumeClaims {
+		if pvc1, exists := merged.PersistentVolumeClaims[uid]; exists {
+			pvc1.StorageByteSeconds += pvc2.StorageByteSeconds
+			pvc1.ActualUsedByteSeconds += pvc2.ActualUsedByteSeconds
+			pvc1.DurationSeconds += pvc2.DurationSeconds
+
+			if pvc2.Start.Before(pvc1.Start) {
+				pvc1.Start = pvc2.Start
+			}
+			if pvc2.End.After(pvc1.End) {
+				pvc1.End = pvc2.End
+			}
+			if pvc2.BoundAt.After(pvc1.BoundAt) {
+				pvc1.BoundAt = pvc2.BoundAt
+			}
+		} else {
+			merged.PersistentVolumeClaims[uid] = copyPVC(pvc2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeDevices(merged, kms1, kms2 *KubeModelSet) {
+	for uid, dev := range kms1.Devices {
+		merged.Devices[uid] = copyDevice(dev)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, dev2 := range kms2.Devices {
+		if dev1, exists := merged.Devices[uid]; exists {
+			dev1.UsageSeconds += dev2.UsageSeconds
+			dev1.MemoryByteSeconds += dev2.MemoryByteSeconds
+			dev1.PowerWattSeconds += dev2.PowerWattSeconds
+			dev1.PowerWattMax = math.Max(dev1.PowerWattMax, dev2.PowerWattMax)
+			dev1.DurationSeconds += dev2.DurationSeconds
+
+			if dev2.Start.Before(dev1.Start) {
+				dev1.Start = dev2.Start
+			}
+			if dev2.End.After(dev1.End) {
+				dev1.End = dev2.End
+			}
+		} else {
+			merged.Devices[uid] = copyDevice(dev2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func mergeDeviceUsages(merged, kms1, kms2 *KubeModelSet) {
+	for uid, usage := range kms1.DeviceUsages {
+		merged.DeviceUsages[uid] = copyDeviceUsage(usage)
+		merged.Metadata.ObjectCount++
+	}
+	for uid, usage2 := range kms2.DeviceUsages {
+		if usage1, exists := merged.DeviceUsages[uid]; exists {
+			usage1.UsageSeconds += usage2.UsageSeconds
+			usage1.MemoryByteSecondsUsed += usage2.MemoryByteSecondsUsed
+			usage1.UsagePercentageMax = math.Max(usage1.UsagePercentageMax, usage2.UsagePercentageMax)
+			usage1.DurationSeconds += usage2.DurationSeconds
+
+			// Merge Start/End timestamps
+			if usage2.Start.Before(usage1.Start) {
+				usage1.Start = usage2.Start
+			}
+			if usage2.End.After(usage1.End) {
+				usage1.End = usage2.End
+			}
+		} else {
+			merged.DeviceUsages[uid] = copyDeviceUsage(usage2)
+			merged.Metadata.ObjectCount++
+		}
+	}
+}
+
+func copyNamespace(ns *Namespace) *Namespace {
+	return &Namespace{
+		ClusterUID:  ns.ClusterUID,
+		UID:         ns.UID,
+		Name:        ns.Name,
+		Labels:      maps.Clone(ns.Labels),
+		Annotations: maps.Clone(ns.Annotations),
+		Start:       ns.Start,
+		End:         ns.End,
+	}
+}
+
+func copyResourceQuota(rq *ResourceQuota) *ResourceQuota {
+	copied := &ResourceQuota{
+		UID:          rq.UID,
+		Name:         rq.Name,
+		NamespaceUID: rq.NamespaceUID,
+		Start:        rq.Start,
+		End:          rq.End,
+	}
+	if rq.Spec != nil {
+		copied.Spec = &ResourceQuotaSpec{}
+		if rq.Spec.Hard != nil {
+			copied.Spec.Hard = &ResourceQuotaSpecHard{
+				Requests: copyResourceQuantities(rq.Spec.Hard.Requests),
+				Limits:   copyResourceQuantities(rq.Spec.Hard.Limits),
+			}
+		}
+	}
+	if rq.Status != nil {
+		copied.Status = &ResourceQuotaStatus{}
+		if rq.Status.Used != nil {
+			copied.Status.Used = &ResourceQuotaStatusUsed{
+				Requests: copyResourceQuantities(rq.Status.Used.Requests),
+				Limits:   copyResourceQuantities(rq.Status.Used.Limits),
+			}
+		}
+	}
+	return copied
+}
+
+func copyResourceQuantities(rq ResourceQuantities) ResourceQuantities {
+	if rq == nil {
+		return nil
+	}
+	copied := make(ResourceQuantities, len(rq))
+	for k, v := range rq {
+		copied[k] = v
+	}
+	return copied
+}
+
+func copyNode(node *Node) *Node {
+	copied := &Node{
+		UID:                  node.UID,
+		Name:                 node.Name,
+		ProviderResourceUID:  node.ProviderResourceUID,
+		Labels:               maps.Clone(node.Labels),
+		Annotations:          maps.Clone(node.Annotations),
+		CpuMillicoreSeconds:  node.CpuMillicoreSeconds,
+		RAMByteSeconds:       node.RAMByteSeconds,
+		CpuMillicoreUsageMax: node.CpuMillicoreUsageMax,
+		RAMByteUsageMax:      node.RAMByteUsageMax,
+		DurationSeconds:      node.DurationSeconds,
+		AttachedVolumes:      make(map[string]*NodeVolumeUsage),
+		Start:                node.Start,
+		End:                  node.End,
+	}
+
+	for volumeUID, volume := range node.AttachedVolumes {
+		copied.AttachedVolumes[volumeUID] = &NodeVolumeUsage{
+			VolumeUID:        volume.VolumeUID,
+			CapacityBytes:    volume.CapacityBytes,
+			UsageByteSeconds: volume.UsageByteSeconds,
+			VolumeType:       volume.VolumeType,
+			ProviderID:       volume.ProviderID,
+			DurationSeconds:  volume.DurationSeconds,
+		}
+	}
+
+	return copied
+}
+
+func copyPod(pod *Pod) *Pod {
+	return &Pod{
+		UID:                  pod.UID,
+		Name:                 pod.Name,
+		NamespaceUID:         pod.NamespaceUID,
+		OwnerUID:             pod.OwnerUID,
+		NodeUID:              pod.NodeUID,
+		Labels:               maps.Clone(pod.Labels),
+		Annotations:          maps.Clone(pod.Annotations),
+		NetworkReceiveBytes:  pod.NetworkReceiveBytes,
+		NetworkTransferBytes: pod.NetworkTransferBytes,
+		DurationSeconds:      pod.DurationSeconds,
+		Start:                pod.Start,
+		End:                  pod.End,
+	}
+}
+
+// copyContainer returns a field-by-field copy of container. The two
+// per-volume maps are cloned via maps.Clone so the copy shares no mutable
+// state with the original; every other field listed here is copied by value.
+// NOTE(review): keep this field list in sync with the Container struct — any
+// field added to Container but not copied here is silently zeroed.
+func copyContainer(container *Container) *Container {
+	return &Container{
+		PodUID:                     container.PodUID,
+		Name:                       container.Name,
+		CpuMillicoreSeconds:        container.CpuMillicoreSeconds,
+		RAMByteSeconds:             container.RAMByteSeconds,
+		CpuMillicoreUsageMax:       container.CpuMillicoreUsageMax,
+		RAMByteUsageMax:            container.RAMByteUsageMax,
+		VolumeStorageByteSeconds:   maps.Clone(container.VolumeStorageByteSeconds),
+		VolumeStorageByteUsageMax:  maps.Clone(container.VolumeStorageByteUsageMax),
+		DurationSeconds:            container.DurationSeconds,
+		CpuMillicoreRequestSeconds: container.CpuMillicoreRequestSeconds,
+		RAMByteSecondRequest:       container.RAMByteSecondRequest,
+		CpuMillicoreLimitSeconds:   container.CpuMillicoreLimitSeconds,
+		RAMByteSecondsLimit:        container.RAMByteSecondsLimit,
+		Start:                      container.Start,
+		End:                        container.End,
+	}
+}
+
+// copyOwner returns a copy of owner with its Labels and Annotations maps
+// cloned; the remaining Owner fields are values and copy safely by
+// assignment.
+func copyOwner(owner *Owner) *Owner {
+	copied := *owner
+	copied.Labels = maps.Clone(owner.Labels)
+	copied.Annotations = maps.Clone(owner.Annotations)
+	return &copied
+}
+
+// copyService returns a copy of svc. Its three maps (Labels, Annotations,
+// Selector) and its Ports slice are cloned so the copy can be mutated
+// independently. slices.Clone is shallow; it is a full copy here assuming
+// ServicePort holds only scalar fields — TODO confirm against the
+// ServicePort definition.
+// NOTE(review): keep this field list in sync with the Service struct — any
+// field added to Service but not copied here is silently zeroed.
+func copyService(svc *Service) *Service {
+	return &Service{
+		UID:                  svc.UID,
+		NamespaceUID:         svc.NamespaceUID,
+		Name:                 svc.Name,
+		Type:                 svc.Type,
+		Hostname:             svc.Hostname,
+		Labels:               maps.Clone(svc.Labels),
+		Annotations:          maps.Clone(svc.Annotations),
+		NetworkTransferBytes: svc.NetworkTransferBytes,
+		NetworkReceiveBytes:  svc.NetworkReceiveBytes,
+		DurationSeconds:      svc.DurationSeconds,
+		Selector:             maps.Clone(svc.Selector),
+		Ports:                slices.Clone(svc.Ports),
+		Start:                svc.Start,
+		End:                  svc.End,
+	}
+}
+
+// copyVolume returns a copy of vol. Labels and Annotations are cloned maps
+// and AccessModes is a cloned slice; every other PersistentVolume field is a
+// plain value and is duplicated by the struct copy.
+func copyVolume(vol *PersistentVolume) *PersistentVolume {
+	copied := *vol
+	copied.Labels = maps.Clone(vol.Labels)
+	copied.Annotations = maps.Clone(vol.Annotations)
+	copied.AccessModes = slices.Clone(vol.AccessModes)
+	return &copied
+}
+
+// copyPVC returns a deep copy of pvc: maps and the AccessModes slice are
+// cloned, and the optional VolumeUID/PodUID pointers are re-allocated so the
+// copy never aliases the original's pointees. Nil pointers stay nil.
+func copyPVC(pvc *PersistentVolumeClaim) *PersistentVolumeClaim {
+	copied := *pvc
+	copied.Labels = maps.Clone(pvc.Labels)
+	copied.Annotations = maps.Clone(pvc.Annotations)
+	copied.AccessModes = slices.Clone(pvc.AccessModes)
+
+	// The struct copy above aliased the pointer fields; give the copy its
+	// own pointees.
+	if pvc.VolumeUID != nil {
+		volumeUID := *pvc.VolumeUID
+		copied.VolumeUID = &volumeUID
+	}
+	if pvc.PodUID != nil {
+		podUID := *pvc.PodUID
+		copied.PodUID = &podUID
+	}
+
+	return &copied
+}
+
+// copyDevice returns a field-by-field copy of dev. Every field listed here
+// is copied by value. NOTE(review): the Device struct is declared in another
+// file — keep this list in sync with it, and clone any reference fields
+// (maps/slices) if they are ever added.
+func copyDevice(dev *Device) *Device {
+	return &Device{
+		UID:               dev.UID,
+		Type:              dev.Type,
+		NodeUID:           dev.NodeUID,
+		DeviceNumber:      dev.DeviceNumber,
+		ModelName:         dev.ModelName,
+		IsShared:          dev.IsShared,
+		SharePercentage:   dev.SharePercentage,
+		UsageSeconds:      dev.UsageSeconds,
+		MemoryByteSeconds: dev.MemoryByteSeconds,
+		PowerWattSeconds:  dev.PowerWattSeconds,
+		PowerWattMax:      dev.PowerWattMax,
+		DurationSeconds:   dev.DurationSeconds,
+		Start:             dev.Start,
+		End:               dev.End,
+	}
+}
+
+// copyDeviceUsage returns a field-by-field copy of usage. Every field listed
+// here is copied by value. NOTE(review): the DeviceUsage struct is declared
+// in another file — keep this list in sync with it.
+func copyDeviceUsage(usage *DeviceUsage) *DeviceUsage {
+	return &DeviceUsage{
+		ContainerUID:          usage.ContainerUID,
+		DeviceUID:             usage.DeviceUID,
+		UsageSeconds:          usage.UsageSeconds,
+		UsagePercentageMax:    usage.UsagePercentageMax,
+		MemoryByteSecondsUsed: usage.MemoryByteSecondsUsed,
+		DeviceType:            usage.DeviceType,
+		DurationSeconds:       usage.DurationSeconds,
+		Start:                 usage.Start,
+		End:                   usage.End,
+	}
+}

+ 7 - 7
core/pkg/model/kubemodel/namespace.go

@@ -7,13 +7,13 @@ import (
 
 
 // @bingen:generate:Namespace
 // @bingen:generate:Namespace
 type Namespace struct {
 type Namespace struct {
-	UID         string            `json:"uid"`         // @bingen:field[version=1]
-	ClusterUID  string            `json:"clusterUID"`  // @bingen:field[version=1]
-	Name        string            `json:"name"`        // @bingen:field[version=1]
-	Labels      map[string]string `json:"labels"`      // @bingen:field[version=1]
-	Annotations map[string]string `json:"annotations"` // @bingen:field[version=1]
-	Start       time.Time         `json:"start"`       // @bingen:field[version=1]
-	End         time.Time         `json:"end"`         // @bingen:field[version=1]
+	UID         string            `json:"uid"`             // @bingen:field[version=1]
+	ClusterUID  string            `json:"clusterUID"`      // @bingen:field[version=1]
+	Name        string            `json:"name"`            // @bingen:field[version=1]
+	Labels      map[string]string `json:"labels"`          // @bingen:field[version=1]
+	Annotations map[string]string `json:"annotations"`     // @bingen:field[version=1]
+	Start       time.Time         `json:"start,omitempty"` // @bingen:field[version=1]
+	End         time.Time         `json:"end,omitempty"`   // @bingen:field[version=1]
 }
 }
 
 
 func (kms *KubeModelSet) RegisterNamespace(uid, name string) error {
 func (kms *KubeModelSet) RegisterNamespace(uid, name string) error {

+ 77 - 25
core/pkg/model/kubemodel/node.go

@@ -5,25 +5,81 @@ import (
 	"time"
 	"time"
 )
 )
 
 
+// @bingen:generate:Node
+// Node represents a Kubernetes node with capacity-based resource tracking.
+// All resource measures (CPU, RAM) represent node capacity, not requests or limits.
+// This aligns with the principle that cost allocation should be based on provisioned capacity.
 type Node struct {
 type Node struct {
-	UID                          string            `json:"uid"`
-	ClusterUID                   string            `json:"clusterUid"`
-	ProviderResourceUID          string            `json:"providerResourceUid"`
-	Name                         string            `json:"name"`
-	Labels                       map[string]string `json:"labels,omitempty"`
-	Annotations                  map[string]string `json:"annotations,omitempty"`
-	Start                        time.Time         `json:"start"`
-	End                          time.Time         `json:"end"`
-	CpuMillicoreSecondsAllocated uint64            `json:"cpuMillicoreSecondsAllocated"`
-	RAMByteSecondsAllocated      uint64            `json:"ramByteSecondsAllocated"`
-	// PublicIPSeconds represents the cumulative public IP allocation (count × seconds) for this node.
-	// Calculated as: number of ExternalIP addresses from Kubernetes node Status.Addresses × window duration in seconds.
-	// Used for cost attribution of public IP addresses associated with the node.
-	PublicIPSecondsAllocated uint64 `json:"publicIpSecondsAllocated"`
-	CpuMillicoreUsageAverage uint64 `json:"cpuMillicoreUsageAverage"`
-	CpuMillicoreUsageMax     uint64 `json:"cpuMillicoreUsageMax"`
-	RAMByteUsageAverage      uint64 `json:"ramByteUsageAverage"`
-	RAMByteUsageMax          uint64 `json:"ramByteUsageMax"`
+	UID                  string                      `json:"uid"`
+	ProviderResourceUID  string                      `json:"providerResourceUid"`
+	Name                 string                      `json:"name"`
+	Labels               map[string]string           `json:"labels,omitempty"`
+	Annotations          map[string]string           `json:"annotations,omitempty"`
+	DurationSeconds      Measurement                 `json:"durationSeconds"`
+	CpuMillicoreSeconds  Measurement                 `json:"cpuMillicoreSeconds"` // Node CPU capacity in millicore-seconds
+	RAMByteSeconds       Measurement                 `json:"ramByteSeconds"`      // Node RAM capacity in Byte-seconds
+	AttachedVolumes      map[string]*NodeVolumeUsage `json:"attachedVolumes,omitempty"`
+	CpuMillicoreUsageMax Measurement                 `json:"cpuMillicoreUsageMax"` // Peak CPU usage observed
+	RAMByteUsageMax      Measurement                 `json:"ramByteUsageMax"`      // Peak RAM usage observed
+	Start                time.Time                   `json:"start,omitempty"`      // Node creation/start timestamp
+	End                  time.Time                   `json:"end,omitempty"`        // Node deletion/end timestamp (nil if still running)
+}
+
+// NodeVolumeUsage tracks storage usage for a disk volume attached to a node.
+// Used for cost allocation of cloud storage resources (e.g., AWS EBS volumes).
+type NodeVolumeUsage struct {
+	VolumeUID        string      `json:"volumeUid"`        // "root" for primary disk, or actual volume UID for additional volumes
+	CapacityBytes    Measurement `json:"capacityBytes"`    // Total capacity of the volume in bytes
+	UsageByteSeconds Measurement `json:"usageByteSeconds"` // Cumulative usage (Byte × seconds) over measurement window
+	VolumeType       string      `json:"volumeType"`       // "root" for primary disk, "persistent" for additional PVs
+	ProviderID       string      `json:"providerId"`       // Cloud provider volume ID (e.g., "vol-xxxxx" for AWS EBS)
+	DurationSeconds  Measurement `json:"durationSeconds"`  // Duration the volume was attached during measurement window in seconds
+}
+
+// CpuMillicoreUsageAverage returns CpuMillicoreSeconds averaged over
+// DurationSeconds. Returns 0 if DurationSeconds is 0 to avoid division by
+// zero.
+// NOTE(review): CpuMillicoreSeconds is documented on Node as CPU *capacity*
+// in millicore-seconds, so despite the method's name this yields average
+// provisioned capacity in millicores, not observed usage — confirm intent.
+func (n *Node) CpuMillicoreUsageAverage() Measurement {
+	if n.DurationSeconds == 0 {
+		return 0
+	}
+	return n.CpuMillicoreSeconds / n.DurationSeconds
+}
+
+// RAMByteUsageAverage returns RAMByteSeconds averaged over DurationSeconds.
+// Returns 0 if DurationSeconds is 0 to avoid division by zero.
+// NOTE(review): RAMByteSeconds is documented on Node as RAM *capacity* in
+// Byte-seconds, so despite the method's name this yields average provisioned
+// capacity in bytes, not observed usage — confirm intent.
+func (n *Node) RAMByteUsageAverage() Measurement {
+	if n.DurationSeconds == 0 {
+		return 0
+	}
+	return n.RAMByteSeconds / n.DurationSeconds
+}
+
+// TotalVolumeUsageByteSeconds returns the sum of all volume usage Byte-seconds across all attached volumes.
+func (n *Node) TotalVolumeUsageByteSeconds() Measurement {
+	var total Measurement
+	for _, volume := range n.AttachedVolumes {
+		total += volume.UsageByteSeconds
+	}
+	return total
+}
+
+// TotalVolumeCapacityBytes returns the sum of all volume capacities across all attached volumes.
+func (n *Node) TotalVolumeCapacityBytes() Measurement {
+	var total Measurement
+	for _, volume := range n.AttachedVolumes {
+		total += volume.CapacityBytes
+	}
+	return total
+}
+
+// GetVolumeUsageAverage calculates the average storage usage in bytes for a specific volume over the uptime period.
+// Returns 0 if uptime is 0 or volume doesn't exist.
+// NOTE(review): this divides the volume's cumulative Byte-seconds by the
+// node's DurationSeconds, not the volume's own DurationSeconds field —
+// confirm which measurement window is intended for partially-attached
+// volumes.
+func (n *Node) GetVolumeUsageAverage(volumeUID string) Measurement {
+	volume, exists := n.AttachedVolumes[volumeUID]
+	if !exists || n.DurationSeconds == 0 {
+		return 0
+	}
+	return volume.UsageByteSeconds / n.DurationSeconds
 }
 }
 
 
 func (kms *KubeModelSet) RegisterNode(uid, name string) error {
 func (kms *KubeModelSet) RegisterNode(uid, name string) error {
@@ -34,18 +90,14 @@ func (kms *KubeModelSet) RegisterNode(uid, name string) error {
 	}
 	}
 
 
 	if _, ok := kms.Nodes[uid]; !ok {
 	if _, ok := kms.Nodes[uid]; !ok {
-		clusterUID := ""
-
 		if kms.Cluster == nil {
 		if kms.Cluster == nil {
 			kms.Warnf("RegisterNode(%s, %s): Cluster is nil", uid, name)
 			kms.Warnf("RegisterNode(%s, %s): Cluster is nil", uid, name)
-		} else {
-			clusterUID = kms.Cluster.UID
 		}
 		}
 
 
 		kms.Nodes[uid] = &Node{
 		kms.Nodes[uid] = &Node{
-			UID:        uid,
-			ClusterUID: clusterUID,
-			Name:       name,
+			UID:             uid,
+			Name:            name,
+			AttachedVolumes: make(map[string]*NodeVolumeUsage),
 		}
 		}
 
 
 		kms.Metadata.ObjectCount++
 		kms.Metadata.ObjectCount++

+ 16 - 17
core/pkg/model/kubemodel/owner.go

@@ -16,20 +16,20 @@ const (
 	OwnerKindReplicaSet  OwnerKind = "replicaset"
 	OwnerKindReplicaSet  OwnerKind = "replicaset"
 )
 )
 
 
-// Owner represents a Kubernetes resource owner
+// Owner represents a Kubernetes resource owner (workload controller)
+// @bingen:generate:Owner
 type Owner struct {
 type Owner struct {
-	UID         string            `json:"uid"`
-	OwnerUID    string            `json:"ownerUid"`
-	Name        string            `json:"name"`
-	Kind        OwnerKind         `json:"kind"`
-	Controller  bool              `json:"controller"`
-	Labels      map[string]string `json:"labels,omitempty"`
-	Annotations map[string]string `json:"annotations,omitempty"`
-	Start       time.Time         `json:"start"`
-	End         time.Time         `json:"end"`
+	UID          string            `json:"uid"`
+	NamespaceUID string            `json:"namespaceUid"`
+	Name         string            `json:"name"`
+	Kind         OwnerKind         `json:"kind"`
+	Labels       map[string]string `json:"labels,omitempty"`
+	Annotations  map[string]string `json:"annotations,omitempty"`
+	Start        time.Time         `json:"start,omitempty"`
+	End          time.Time         `json:"end,omitempty"`
 }
 }
 
 
-func (kms *KubeModelSet) RegisterOwner(uid, name, namespace, kind string, isController bool) error {
+func (kms *KubeModelSet) RegisterOwner(uid, name, namespace, kind string) error {
 	if uid == "" {
 	if uid == "" {
 		err := fmt.Errorf("UID is nil for Owner '%s'", name)
 		err := fmt.Errorf("UID is nil for Owner '%s'", name)
 		kms.Error(err)
 		kms.Error(err)
@@ -40,17 +40,16 @@ func (kms *KubeModelSet) RegisterOwner(uid, name, namespace, kind string, isCont
 		namespaceUID := ""
 		namespaceUID := ""
 
 
 		if ns, ok := kms.idx.namespaceByName[namespace]; !ok {
 		if ns, ok := kms.idx.namespaceByName[namespace]; !ok {
-			kms.Warnf("RegisterOwner(%s, %s, %s, %s, %t): missing namespace '%s'", uid, name, namespace, kind, isController, namespace)
+			kms.Warnf("RegisterOwner(%s, %s, %s, %s): missing namespace '%s'", uid, name, namespace, kind, namespace)
 		} else {
 		} else {
 			namespaceUID = ns.UID
 			namespaceUID = ns.UID
 		}
 		}
 
 
 		kms.Owners[uid] = &Owner{
 		kms.Owners[uid] = &Owner{
-			UID:        uid,
-			Name:       name,
-			OwnerUID:   namespaceUID,
-			Kind:       OwnerKind(kind),
-			Controller: isController,
+			UID:          uid,
+			Name:         name,
+			NamespaceUID: namespaceUID,
+			Kind:         OwnerKind(kind),
 		}
 		}
 
 
 		kms.Metadata.ObjectCount++
 		kms.Metadata.ObjectCount++

+ 6 - 7
core/pkg/model/kubemodel/pod.go

@@ -8,17 +8,16 @@ import (
 type Pod struct {
 type Pod struct {
 	UID                  string            `json:"uid"`
 	UID                  string            `json:"uid"`
 	NamespaceUID         string            `json:"namespaceUid"`
 	NamespaceUID         string            `json:"namespaceUid"`
-	OwnerUID             string            `json:"ownerUid"`
+	OwnerUID             string            `json:"ownerUid"` // Reference to Owner (Deployment, StatefulSet, etc.)
 	NodeUID              string            `json:"nodeUid"`
 	NodeUID              string            `json:"nodeUid"`
 	Name                 string            `json:"name"`
 	Name                 string            `json:"name"`
 	Labels               map[string]string `json:"labels,omitempty"`
 	Labels               map[string]string `json:"labels,omitempty"`
 	Annotations          map[string]string `json:"annotations,omitempty"`
 	Annotations          map[string]string `json:"annotations,omitempty"`
-	Start                time.Time         `json:"start"`
-	End                  time.Time         `json:"end"`
-	CpuMillicoreUsageMax uint64            `json:"cpuMillicoreUsageMax"`
-	RAMByteUsageMax      uint64            `json:"ramByteUsageMax"`
-	NetworkTransferBytes uint64            `json:"networkTransferBytes"`
-	NetworkReceiveBytes  uint64            `json:"networkReceiveBytes"`
+	DurationSeconds      Measurement       `json:"durationSeconds"`
+	NetworkTransferBytes Measurement       `json:"networkTransferBytes"`
+	NetworkReceiveBytes  Measurement       `json:"networkReceiveBytes"`
+	Start                time.Time         `json:"start,omitempty"` // Pod creation/start timestamp
+	End                  time.Time         `json:"end,omitempty"`   // Pod deletion/end timestamp (nil if still running)
 }
 }
 
 
 func (kms *KubeModelSet) RegisterPod(uid, name, namespace string) error {
 func (kms *KubeModelSet) RegisterPod(uid, name, namespace string) error {

+ 72 - 0
core/pkg/model/kubemodel/pv.go

@@ -0,0 +1,72 @@
+package kubemodel
+
+import (
+	"fmt"
+	"time"
+)
+
+// @bingen:generate:PersistentVolume
+type PersistentVolume struct {
+	// Version 1 fields
+	UID          string            `json:"uid"`
+	ClusterUID   string            `json:"clusterUid"`
+	Name         string            `json:"name"`
+	Namespace    string            `json:"namespace"`
+	Labels       map[string]string `json:"labels,omitempty"`
+	Annotations  map[string]string `json:"annotations,omitempty"`
+	StorageClass string            `json:"storageClass"`
+	SizeBytes    Measurement       `json:"size"`
+	// awsElasticBlockStore, azureDisk, gcePersistentDisk, csi, nfs, local, etc.
+	Type string `json:"type,omitempty"`
+	// ebs.csi.aws.com, disk.csi.azure.com, etc.
+	CSIDriver string `json:"csiDriver,omitempty"`
+	// Cloud provider's volume identifier
+	ProviderVolumeID string `json:"providerVolumeId,omitempty"`
+	// ReadWriteOnce, ReadWriteMany, ReadOnlyMany
+	AccessModes []string `json:"accessModes,omitempty"`
+	// Retain, Delete, Recycle
+	ReclaimPolicy string `json:"reclaimPolicy,omitempty"`
+	// Cloud region for cross-region cost tracking
+	Region string `json:"region,omitempty"`
+	// Availability zone for cross-AZ cost tracking
+	Zone string `json:"zone,omitempty"`
+	// Volume lifecycle timestamps
+	Start time.Time `json:"start"`         // Volume creation timestamp
+	End   time.Time `json:"end,omitempty"` // Volume deletion timestamp (nil if still active)
+	// Duration volume existed within measurement window
+	DurationSeconds Measurement `json:"durationSeconds"`
+	// JSON-encoded node affinity for local volumes
+	NodeAffinity string `json:"nodeAffinity,omitempty"`
+	// Storage performance characteristics
+	ProvisionedIOPS       Measurement `json:"provisionedIops,omitempty"`       // Provisioned IOPS (AWS io1/io2, Azure Premium)
+	ProvisionedThroughput Measurement `json:"provisionedThroughput,omitempty"` // Provisioned throughput in MB/s
+	PerformanceMode       string      `json:"performanceMode,omitempty"`       // "generalPurpose", "maxIO", "provisioned"
+}
+
+// RegisterVolume records a PersistentVolume under uid if it has not been
+// seen before. Registration is idempotent: re-registering an existing uid is
+// a no-op. An empty uid is logged and returned as an error.
+func (kms *KubeModelSet) RegisterVolume(uid, name string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for PersistentVolume '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	// Already registered: nothing to do.
+	if _, exists := kms.Volumes[uid]; exists {
+		return nil
+	}
+
+	// Resolve the owning cluster UID; a nil Cluster is unexpected but
+	// tolerated — the volume is stored with an empty ClusterUID.
+	clusterUID := ""
+	if kms.Cluster == nil {
+		kms.Warnf("RegisterVolume(%s, %s): Cluster is nil", uid, name)
+	} else {
+		clusterUID = kms.Cluster.UID
+	}
+
+	kms.Volumes[uid] = &PersistentVolume{
+		UID:        uid,
+		ClusterUID: clusterUID,
+		Name:       name,
+	}
+	kms.Metadata.ObjectCount++
+
+	return nil
+}

+ 58 - 0
core/pkg/model/kubemodel/pvc.go

@@ -0,0 +1,58 @@
+package kubemodel
+
+import (
+	"fmt"
+	"time"
+)
+
+// @bingen:generate:PersistentVolumeClaim
+type PersistentVolumeClaim struct {
+	// Version 1 fields
+	UID                string            `json:"uid"`
+	NamespaceUID       string            `json:"namespaceUid"`
+	VolumeUID          *string           `json:"volumeUid,omitempty"`
+	PodUID             *string           `json:"podUid,omitempty"`
+	Name               string            `json:"name"`
+	Labels             map[string]string `json:"labels,omitempty"`
+	Annotations        map[string]string `json:"annotations,omitempty"`
+	StorageClass       string            `json:"storageClass"`
+	StorageByteSeconds Measurement       `json:"storageByteSeconds"`
+	RequestedBytes     Measurement       `json:"requestedBytes"`
+	Size               Measurement       `json:"size"` // Size in bytes
+	VolumeName         string            `json:"volumeName"`
+	// ReadWriteOnce, ReadWriteMany, ReadOnlyMany
+	AccessModes           []string    `json:"accessModes,omitempty"`
+	ActualUsedByteSeconds Measurement `json:"actualUsedByteSeconds,omitempty"`
+	Start                 time.Time   `json:"start"`         // PVC creation timestamp
+	End                   time.Time   `json:"end,omitempty"` // PVC deletion timestamp (nil if still active)
+	BoundAt               time.Time   `json:"boundAt,omitempty"`
+	DurationSeconds       Measurement `json:"durationSeconds,omitempty"`
+}
+
+// RegisterPVC records a PersistentVolumeClaim under uid if it has not been
+// seen before, resolving its namespace name to a namespace UID. Registration
+// is idempotent; an empty uid is logged and returned as an error.
+func (kms *KubeModelSet) RegisterPVC(uid, name, namespace string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for PVC '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	// Already registered: nothing to do.
+	if _, exists := kms.PersistentVolumeClaims[uid]; exists {
+		return nil
+	}
+
+	// Resolve the namespace UID; an unknown namespace is tolerated and the
+	// PVC is stored with an empty NamespaceUID.
+	namespaceUID := ""
+	if ns, ok := kms.idx.namespaceByName[namespace]; ok {
+		namespaceUID = ns.UID
+	} else {
+		kms.Warnf("RegisterPVC(%s, %s, %s): missing namespace '%s'", uid, name, namespace, namespace)
+	}
+
+	kms.PersistentVolumeClaims[uid] = &PersistentVolumeClaim{
+		UID:          uid,
+		Name:         name,
+		NamespaceUID: namespaceUID,
+	}
+	kms.Metadata.ObjectCount++
+
+	return nil
+}

+ 28 - 3
core/pkg/model/kubemodel/service.go

@@ -19,9 +19,28 @@ type ServicePort struct {
 	Protocol   string `json:"protocol"`
 	Protocol   string `json:"protocol"`
 }
 }
 
 
+// @bingen:generate:Service
+// Service represents a Kubernetes Service with network traffic tracking for cost allocation.
+//
+// Network Cost Allocation Strategy:
+// Services expose applications and route traffic, incurring costs for:
+// 1. Load Balancers (LoadBalancer type) - Cloud provider LB hourly cost + data transfer
+// 2. Data Transfer - Egress charges based on NetworkTransferBytes
+// 3. Public IPs (for LoadBalancer/NodePort with external IPs)
+//
+// Cost Attribution Flow:
+// - LoadBalancer Services: Direct cloud resource cost (e.g., AWS ELB, GCP LB) allocated to service
+// - Data Transfer: NetworkTransferBytes × cloud provider egress rate (varies by region/destination)
+// - NetworkReceiveBytes: Typically free (ingress), tracked for visibility
+// - Use Selector to map service costs to backing pods/containers proportionally
+//
+// Example: AWS Application Load Balancer
+// - Fixed hourly cost: $0.0225/hour
+// - LCU cost: $0.008/hour per LCU (based on connections, requests, bandwidth)
+// - Data transfer: $0.09/GB for internet egress
+// Total Service Cost = (LB hours × hourly rate) + (LCU hours × LCU rate) + (NetworkTransferBytes × transfer rate)
 type Service struct {
 type Service struct {
 	UID                  string            `json:"uid"`
 	UID                  string            `json:"uid"`
-	ClusterUID           string            `json:"clusterUid"`
 	NamespaceUID         string            `json:"namespaceUid"`
 	NamespaceUID         string            `json:"namespaceUid"`
 	Name                 string            `json:"name"`
 	Name                 string            `json:"name"`
 	Type                 ServiceType       `json:"type"`
 	Type                 ServiceType       `json:"type"`
@@ -31,6 +50,12 @@ type Service struct {
 	Ports                []ServicePort     `json:"ports,omitempty"`
 	Ports                []ServicePort     `json:"ports,omitempty"`
 	Start                time.Time         `json:"start"`
 	Start                time.Time         `json:"start"`
 	End                  time.Time         `json:"end"`
 	End                  time.Time         `json:"end"`
-	NetworkTransferBytes uint64            `json:"networkTransferBytes"`
-	NetworkReceiveBytes  uint64            `json:"networkReceiveBytes"`
+	NetworkTransferBytes Measurement       `json:"networkTransferBytes"`
+	NetworkReceiveBytes  Measurement       `json:"networkReceiveBytes"`
+	// Label selector to identify pods/containers targeted by this service
+	// Maps label keys to values (e.g., {"app": "nginx", "tier": "frontend"})
+	// Pods with matching labels will receive traffic from this service
+	Selector map[string]string `json:"selector,omitempty"`
+	// Lifecycle tracking
+	DurationSeconds Measurement `json:"durationSeconds"` // Duration service existed within measurement window
 }
 }

+ 40 - 5
core/pkg/model/kubemodel/stats.go

@@ -1,5 +1,11 @@
 package kubemodel
 package kubemodel
 
 
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
 // @bingen:generate:StatType
 // @bingen:generate:StatType
 type StatType string
 type StatType string
 
 
@@ -25,7 +31,7 @@ func NewStats(capacity ...int) Stats {
 
 
 func (s Stats) Avg() (float64, bool) {
 func (s Stats) Avg() (float64, bool) {
 	if s == nil {
 	if s == nil {
-		return 0, false
+		return 0.0, false
 	}
 	}
 
 
 	val, ok := s[StatAvg]
 	val, ok := s[StatAvg]
@@ -35,7 +41,7 @@ func (s Stats) Avg() (float64, bool) {
 
 
 func (s Stats) Max() (float64, bool) {
 func (s Stats) Max() (float64, bool) {
 	if s == nil {
 	if s == nil {
-		return 0, false
+		return 0.0, false
 	}
 	}
 
 
 	val, ok := s[StatMax]
 	val, ok := s[StatMax]
@@ -45,7 +51,7 @@ func (s Stats) Max() (float64, bool) {
 
 
 func (s Stats) Min() (float64, bool) {
 func (s Stats) Min() (float64, bool) {
 	if s == nil {
 	if s == nil {
-		return 0, false
+		return 0.0, false
 	}
 	}
 
 
 	val, ok := s[StatMin]
 	val, ok := s[StatMin]
@@ -55,7 +61,7 @@ func (s Stats) Min() (float64, bool) {
 
 
 func (s Stats) P95() (float64, bool) {
 func (s Stats) P95() (float64, bool) {
 	if s == nil {
 	if s == nil {
-		return 0, false
+		return 0.0, false
 	}
 	}
 
 
 	val, ok := s[StatP95]
 	val, ok := s[StatP95]
@@ -65,10 +71,39 @@ func (s Stats) P95() (float64, bool) {
 
 
 func (s Stats) P85() (float64, bool) {
 func (s Stats) P85() (float64, bool) {
 	if s == nil {
 	if s == nil {
-		return 0, false
+		return 0.0, false
 	}
 	}
 
 
 	val, ok := s[StatP85]
 	val, ok := s[StatP85]
 
 
 	return val, ok
 	return val, ok
 }
 }
+
+// Sanitize removes every NaN or Inf value from the stats map in place and
+// returns an error naming each removed entry, or nil when all values are
+// finite (or when the map is nil). The error message is deterministic:
+// offending entries are sorted before formatting, because Go randomizes map
+// iteration order and the previous implementation produced multi-error
+// messages in a random order.
+func (s Stats) Sanitize() error {
+	if s == nil {
+		return nil
+	}
+
+	var msgs []string
+	for t, v := range s {
+		// A value cannot be both NaN and Inf, so the checks are mutually
+		// exclusive; deleting while ranging over a map is safe in Go.
+		switch {
+		case math.IsNaN(v):
+			delete(s, t)
+			msgs = append(msgs, fmt.Sprintf("%v is NaN", t))
+		case math.IsInf(v, 0):
+			delete(s, t)
+			msgs = append(msgs, fmt.Sprintf("%v is Inf", t))
+		}
+	}
+
+	if len(msgs) == 0 {
+		return nil
+	}
+
+	// Tiny insertion sort keeps the message stable without importing "sort";
+	// a Stats map has at most a handful of entries.
+	for i := 1; i < len(msgs); i++ {
+		for j := i; j > 0 && msgs[j] < msgs[j-1]; j-- {
+			msgs[j], msgs[j-1] = msgs[j-1], msgs[j]
+		}
+	}
+
+	errStr := fmt.Sprintf("%d errors:", len(msgs))
+	for _, m := range msgs {
+		errStr += fmt.Sprintf(" [%s]", m)
+	}
+	return errors.New(errStr)
+}

+ 59 - 0
core/pkg/model/kubemodel/stats_test.go

@@ -0,0 +1,59 @@
+package kubemodel
+
+import (
+	"math"
+	"strings"
+	"testing"
+)
+
+// TestStats_Sanitize verifies that Sanitize strips NaN/Inf entries from the
+// map and reports each one. Error fragments are matched with
+// strings.Contains rather than a full string compare because Stats is a map
+// and Go randomizes map iteration order, so a multi-error message may list
+// entries in any order. (The previous version compared only the first byte
+// of the two error strings — err.Error()[0] — which silently accepted almost
+// any message.)
+func TestStats_Sanitize(t *testing.T) {
+	type testCase struct {
+		stats Stats
+		exp   []string // required substrings of the error; empty => expect nil error
+	}
+
+	testCases := []testCase{
+		{nil, nil},
+		{Stats{}, nil},
+		{
+			Stats{StatAvg: 0.1, StatMax: 1.0},
+			nil,
+		},
+		{
+			Stats{StatAvg: math.Inf(0), StatMax: 1.0},
+			[]string{"1 errors:", "[avg is Inf]"},
+		},
+		{
+			Stats{StatAvg: math.Inf(0), StatMax: math.NaN()},
+			[]string{"2 errors:", "[avg is Inf]", "[max is NaN]"},
+		},
+	}
+
+	for i, tc := range testCases {
+		err := tc.stats.Sanitize()
+
+		if len(tc.exp) == 0 {
+			if err != nil {
+				t.Errorf("case %d: unexpected error: %s", i, err)
+			}
+			continue
+		}
+		if err == nil {
+			t.Errorf("case %d: expected error containing %v, got nil", i, tc.exp)
+			continue
+		}
+		for _, want := range tc.exp {
+			if !strings.Contains(err.Error(), want) {
+				t.Errorf("case %d: error %q missing fragment %q", i, err, want)
+			}
+		}
+		// Sanitize must also have removed the offending entries in place.
+		for k, v := range tc.stats {
+			if math.IsNaN(v) || math.IsInf(v, 0) {
+				t.Errorf("case %d: stat %v was not removed", i, k)
+			}
+		}
+	}
+}

+ 2 - 0
core/pkg/model/kubemodel/unit.go

@@ -3,6 +3,8 @@ package kubemodel
 // @bingen:generate:Unit
 // @bingen:generate:Unit
 type Unit string
 type Unit string
 
 
+type Measurement = float64
+
 const (
 const (
 	UnitMillicore       = "m"
 	UnitMillicore       = "m"
 	UnitByte            = "B"
 	UnitByte            = "B"