
ResourceQuotas support: KubeModel types, source, exporter (#3435)

Signed-off-by: Bianca Burtoiu <bianca.burtoiu@ibm.com>
Signed-off-by: Saurav Teli <telisaurav44@gmail.com>
Signed-off-by: thomasvn <thomasvn.dev@gmail.com>
Signed-off-by: Nik Willwerth <nwillwerth@kubecost.com>
Signed-off-by: Sean Holcomb <seanholcomb@gmail.com>
Co-authored-by: Bianca Burtoiu <bianca.burtoiu@ibm.com>
Co-authored-by: Saurav Teli <telisaurav44@gmail.com>
Co-authored-by: Alex Meijer <ameijer@users.noreply.github.com>
Co-authored-by: Thomas Nguyen <thomasvn.dev@gmail.com>
Co-authored-by: nik-kc <127428785+nik-kc@users.noreply.github.com>
Co-authored-by: Sean Holcomb <seanholcomb@gmail.com>
Niko Kovacevic 4 months ago
parent
commit
d3bd2fd27f
66 changed files with 5677 additions and 736 deletions
  1. core/go.sum (+0 -1)
  2. core/pkg/exporter/pathing/bingenpath.go (+18 -15)
  3. core/pkg/exporter/pathing/eventpath.go (+0 -5)
  4. core/pkg/exporter/pathing/kubemodelpath.go (+70 -0)
  5. core/pkg/exporter/pathing/path_test.go (+81 -0)
  6. core/pkg/exporter/pathing/pathing.go (+1 -4)
  7. core/pkg/filter/allocation/fields.go (+1 -0)
  8. core/pkg/filter/allocation/parser.go (+1 -0)
  9. core/pkg/filter/allocation/parser_test.go (+4 -0)
  10. core/pkg/filter/fieldstrings/fieldstrings.go (+5 -0)
  11. core/pkg/filter/resourcequota/fields.go (+17 -0)
  12. core/pkg/filter/resourcequota/parser.go (+38 -0)
  13. core/pkg/filter/resourcequota/parser_test.go (+43 -0)
  14. core/pkg/kubeconfig/loader.go (+14 -0)
  15. core/pkg/model/kubemodel/cluster.go (+22 -1)
  16. core/pkg/model/kubemodel/container.go (+39 -18)
  17. core/pkg/model/kubemodel/diagnostic.go (+181 -12)
  18. core/pkg/model/kubemodel/kubemodel.go (+30 -174)
  19. core/pkg/model/kubemodel/kubemodel_codecs.go (+3086 -0)
  20. core/pkg/model/kubemodel/kubemodel_codecs_test.go (+474 -0)
  21. core/pkg/model/kubemodel/kubemodel_test.go (+219 -169)
  22. core/pkg/model/kubemodel/metadata.go (+5 -4)
  23. core/pkg/model/kubemodel/namespace.go (+34 -1)
  24. core/pkg/model/kubemodel/node.go (+32 -1)
  25. core/pkg/model/kubemodel/owner.go (+34 -1)
  26. core/pkg/model/kubemodel/pod.go (+32 -1)
  27. core/pkg/model/kubemodel/provider.go (+8 -8)
  28. core/pkg/model/kubemodel/resource.go (+33 -0)
  29. core/pkg/model/kubemodel/resourcequota.go (+101 -0)
  30. core/pkg/model/kubemodel/service.go (+0 -2)
  31. core/pkg/model/kubemodel/stats.go (+74 -0)
  32. core/pkg/model/kubemodel/unit.go (+12 -0)
  33. core/pkg/nodestats/nodes_test.go (+2 -1)
  34. core/pkg/opencost/allocationprops.go (+1 -0)
  35. core/pkg/opencost/exporter/controllers.go (+40 -11)
  36. core/pkg/opencost/exporter/exporter_test.go (+65 -17)
  37. core/pkg/opencost/exporter/kubemodel/source.go (+43 -0)
  38. core/pkg/opencost/mock.go (+22 -0)
  39. core/pkg/pipelines/name.go (+5 -0)
  40. core/pkg/source/datasource.go (+5 -0)
  41. core/pkg/source/decoders.go (+52 -0)
  42. go.mod (+1 -1)
  43. modules/collector-source/pkg/collector/collector.go (+63 -16)
  44. modules/collector-source/pkg/collector/config.go (+9 -7)
  45. modules/collector-source/pkg/collector/datasource.go (+5 -2)
  46. modules/collector-source/pkg/collector/metricsquerier.go (+12 -0)
  47. modules/collector-source/pkg/metric/aggregator/uptime.go (+8 -8)
  48. modules/collector-source/pkg/metric/aggregator/uptime_test.go (+1 -1)
  49. modules/collector-source/pkg/metric/collector.go (+3 -0)
  50. modules/collector-source/pkg/metric/diagnostics.go (+31 -22)
  51. modules/collector-source/pkg/metric/metrics.go (+3 -0)
  52. modules/collector-source/pkg/metric/walinator_test.go (+1 -1)
  53. modules/collector-source/pkg/scrape/clustercache.go (+14 -0)
  54. modules/collector-source/pkg/scrape/clustercache_test.go (+26 -0)
  55. modules/collector-source/pkg/scrape/clusterinfo.go (+56 -0)
  56. modules/collector-source/pkg/scrape/opencost.go (+1 -0)
  57. modules/collector-source/pkg/scrape/scrapecontroller.go (+6 -0)
  58. modules/prometheus-source/pkg/prom/contextnames.go (+3 -0)
  59. modules/prometheus-source/pkg/prom/metricsquerier.go (+80 -18)
  60. pkg/cloud/provider/cloud_test.go (+4 -4)
  61. pkg/clustercache/clustercache.go (+1 -2)
  62. pkg/clustercache/clusterimporter.go (+0 -206)
  63. pkg/cmd/agent/agent.go (+6 -1)
  64. pkg/costmodel/costmodel.go (+23 -0)
  65. pkg/costmodel/router.go (+7 -1)
  66. pkg/kubemodel/kubemodel.go (+369 -0)

+ 0 - 1
core/go.sum

@@ -253,7 +253,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=

+ 18 - 15
core/pkg/exporter/pathing/bingenpath.go

@@ -7,12 +7,14 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/exporter/pathing/pathutils"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/pipelines"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 )
 
 const (
-	DefaultRootDir string = "federated"
-	BaseStorageDir string = "etl/bingen"
+	DefaultRootDir   string = "federated"
+	BaseStorageDir   string = "etl/bingen"
+	FinOpsAgentAppID string = "finops-agent"
 )
 
 // BingenStoragePathFormatter is an implementation of the StoragePathFormatter interface for
@@ -27,18 +29,24 @@ type BingenStoragePathFormatter struct {
 }
 
 func NewDefaultStoragePathFormatter(clusterId, pipeline string, resolution *time.Duration) (StoragePathFormatter[opencost.Window], error) {
-	return NewBingenStoragePathFormatter(DefaultRootDir, clusterId, pipeline, resolution)
-}
-
-// NewBingenStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
-// with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
-// structure, provide a `nil` resolution.
-func NewBingenStoragePathFormatter(rootDir, clusterId, pipeline string, resolution *time.Duration) (StoragePathFormatter[opencost.Window], error) {
 	res := "."
 	if resolution != nil {
 		res = timeutil.FormatStoreResolution(*resolution)
 	}
 
+	// KubeModel uses a distinct pathing pattern which breaks with the original
+	// Allocations and Assets bingen pathing.
+	if pipeline == pipelines.KubeModelPipelineName {
+		return NewKubeModelStoragePathFormatter(FinOpsAgentAppID, clusterId, res)
+	}
+
+	return NewBingenStoragePathFormatter(DefaultRootDir, clusterId, pipeline, res)
+}
+
+// NewBingenStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
+// with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
+// structure, provide a `nil` resolution.
+func NewBingenStoragePathFormatter(rootDir, clusterId, pipeline, resolution string) (StoragePathFormatter[opencost.Window], error) {
 	if clusterId == "" {
 		return nil, fmt.Errorf("cluster id cannot be empty")
 	}
@@ -51,15 +59,10 @@ func NewBingenStoragePathFormatter(rootDir, clusterId, pipeline string, resoluti
 		rootDir:    rootDir,
 		clusterId:  clusterId,
 		pipeline:   pipeline,
-		resolution: res,
+		resolution: resolution,
 	}, nil
 }
 
-// RootDir returns the root directory of the storage path formatter.
-func (bsf *BingenStoragePathFormatter) RootDir() string {
-	return bsf.rootDir
-}
-
 // Dir returns the directory that files will be placed in
 func (bsf *BingenStoragePathFormatter) Dir() string {
 	return path.Join(

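For orientation, a minimal sketch of the new dispatch (not part of the diff; it assumes only the import paths shown above and a hypothetical cluster ID):

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/exporter/pathing"
	"github.com/opencost/opencost/core/pkg/pipelines"
)

func main() {
	res := 10 * time.Minute

	// Non-kubemodel pipelines keep the original bingen layout rooted at
	// "federated", e.g. federated/<clusterid>/etl/bingen/<pipeline>/<resolution>/...
	bingen, _ := pathing.NewDefaultStoragePathFormatter("cluster-1", "allocations", &res)
	fmt.Println(bingen.Dir())

	// The kubemodel pipeline is routed to the new formatter rooted at
	// "finops-agent": finops-agent/<clusterid>/kubemodel/<resolution>/...
	km, _ := pathing.NewDefaultStoragePathFormatter("cluster-1", pipelines.KubeModelPipelineName, &res)
	fmt.Println(km.Dir())
}
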
+ 0 - 5
core/pkg/exporter/pathing/eventpath.go

@@ -48,11 +48,6 @@ func NewEventStoragePathFormatter(rootDir, clusterId, event string, subPaths ...
 	}, nil
 }
 
-// RootDir returns the root directory of the storage path formatter.
-func (espf *EventStoragePathFormatter) RootDir() string {
-	return espf.rootDir
-}
-
 // Dir returns the directory that files will be placed in
 func (espf *EventStoragePathFormatter) Dir() string {
 	return path.Join(

+ 70 - 0
core/pkg/exporter/pathing/kubemodelpath.go

@@ -0,0 +1,70 @@
+package pathing
+
+import (
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/pipelines"
+)
+
+const (
+	KubeModelDateDirTimeFormat = "2006/01/02"
+	KubeModelStorageTimeFormat = "20060102150405"
+)
+
+// KubeModelStoragePathFormatter is an implementation of the StoragePathFormatter interface for
+// a cluster separated storage path of the format:
+//
+//	<root>/<clusterid>/kubemodel/<resolution>/<YYYY>/<MM>/<DD>/<YYYYMMDDHHiiSS>
+//
+// where <root> is, e.g., s3://<bucket>/<appid>
+type KubeModelStoragePathFormatter struct {
+	dir string
+}
+
+func NewKubeModelStoragePathFormatter(rootDir, clusterId, resolution string) (StoragePathFormatter[opencost.Window], error) {
+	if clusterId == "" {
+		return nil, fmt.Errorf("cluster id cannot be empty")
+	}
+
+	return &KubeModelStoragePathFormatter{
+		dir: path.Join(
+			rootDir,
+			clusterId,
+			pipelines.KubeModelPipelineName,
+			resolution,
+		),
+	}, nil
+}
+
+// Dir returns the directory that files will be placed in
+func (kmspf *KubeModelStoragePathFormatter) Dir() string {
+	return kmspf.dir
+}
+
+// ToFullPath returns the full path to a file name within the storage directory using the format:
+//
+//	<root>/<clusterid>/kubemodel/<resolution>/<YYYY>/<MM>/<DD>/<prefix>.<YYYYMMDDHHiiSS>.<fileExt>
+func (kmspf *KubeModelStoragePathFormatter) ToFullPath(prefix string, window opencost.Window, fileExt string) string {
+	return path.Join(
+		kmspf.dir,
+		window.Start().Format(KubeModelDateDirTimeFormat),
+		toKubeModelFileName(prefix, window.Start(), fileExt),
+	)
+}
+
+func toKubeModelFileName(prefix string, start *time.Time, fileExt string) string {
+	filename := derefTimeOrZero(start).Format(KubeModelStorageTimeFormat)
+
+	if fileExt != "" {
+		filename = fmt.Sprintf("%s.%s", filename, fileExt)
+	}
+
+	if prefix == "" {
+		return filename
+	}
+
+	return fmt.Sprintf("%s.%s", prefix, filename)
+}

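A quick illustration of the resulting layout (a sketch, not in the commit; the expected path mirrors the test cases below):

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/exporter/pathing"
	"github.com/opencost/opencost/core/pkg/opencost"
)

func main() {
	f, err := pathing.NewKubeModelStoragePathFormatter("finops-agent", "c1", "1h")
	if err != nil {
		panic(err)
	}

	start := time.Date(2025, time.December, 15, 12, 0, 0, 0, time.UTC)
	w := opencost.NewClosedWindow(start, start.Add(time.Hour))

	// The window start drives both the date directory and the timestamp:
	// finops-agent/c1/kubemodel/1h/2025/12/15/kubemodel.20251215120000.bgz
	fmt.Println(f.ToFullPath("kubemodel", w, "bgz"))
}
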
+ 81 - 0
core/pkg/exporter/pathing/path_test.go

@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/stretchr/testify/require"
 )
 
 func TestBingenPathFormatter(t *testing.T) {
@@ -201,3 +202,83 @@ func TestEventPathFormatter(t *testing.T) {
 		})
 	}
 }
+
+func TestKubeModelPathFormatter(t *testing.T) {
+	type testCase struct {
+		name       string
+		start      time.Time
+		rootDir    string
+		clusterID  string
+		resolution string
+		prefix     string
+		exp        string
+	}
+
+	rootDir := "/path/to/root"
+
+	testCases := []testCase{
+		{
+			name:       "10m no prefix",
+			start:      time.Date(2025, time.December, 15, 12, 0, 0, 0, time.UTC),
+			rootDir:    rootDir,
+			clusterID:  "96d1c1d0-2183-416c-b8f7-754f42fd461a",
+			resolution: "10m",
+			prefix:     "",
+			exp:        fmt.Sprintf("%s/96d1c1d0-2183-416c-b8f7-754f42fd461a/kubemodel/%s/%s/%s", rootDir, "10m", "2025/12/15", "20251215120000"),
+		},
+		{
+			name:       "1h no prefix",
+			start:      time.Date(2025, time.December, 15, 12, 0, 0, 0, time.UTC),
+			rootDir:    rootDir,
+			clusterID:  "96d1c1d0-2183-416c-b8f7-754f42fd461a",
+			resolution: "1h",
+			prefix:     "",
+			exp:        fmt.Sprintf("%s/96d1c1d0-2183-416c-b8f7-754f42fd461a/kubemodel/%s/%s/%s", rootDir, "1h", "2025/12/15", "20251215120000"),
+		},
+		{
+			name:       "1d no prefix",
+			start:      time.Date(2025, time.December, 15, 12, 0, 0, 0, time.UTC),
+			rootDir:    rootDir,
+			clusterID:  "96d1c1d0-2183-416c-b8f7-754f42fd461a",
+			resolution: "1d",
+			prefix:     "",
+			exp:        fmt.Sprintf("%s/96d1c1d0-2183-416c-b8f7-754f42fd461a/kubemodel/%s/%s/%s", rootDir, "1d", "2025/12/15", "20251215120000"),
+		},
+		{
+			name:       "1d prefix",
+			start:      time.Date(2025, time.December, 15, 12, 0, 0, 0, time.UTC),
+			rootDir:    rootDir,
+			clusterID:  "96d1c1d0-2183-416c-b8f7-754f42fd461a",
+			resolution: "1d",
+			prefix:     "pre",
+			exp:        fmt.Sprintf("%s/96d1c1d0-2183-416c-b8f7-754f42fd461a/kubemodel/%s/%s/%s", rootDir, "1d", "2025/12/15", "pre.20251215120000"),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			pathing, err := NewKubeModelStoragePathFormatter(tc.rootDir, tc.clusterID, tc.resolution)
+			if err != nil {
+				t.Fatalf("Unexpected error: %v", err)
+			}
+
+			var dur time.Duration
+			switch tc.resolution {
+			case "10m":
+				dur = 10 * time.Minute
+			case "1h":
+				dur = time.Hour
+			case "1d":
+				dur = 24 * time.Hour
+			default:
+				t.Errorf("unexpected resolution: %s", tc.resolution)
+			}
+			end := tc.start.Add(dur)
+
+			act := pathing.ToFullPath(tc.prefix, opencost.NewClosedWindow(tc.start, end), "")
+			require.Equal(t, tc.exp, act)
+		})
+	}
+}

+ 1 - 4
core/pkg/exporter/pathing/pathing.go

@@ -2,10 +2,7 @@ package pathing
 
 // StoragePathFormatter is an interface used to format storage paths for exporting data types.
 type StoragePathFormatter[T any] interface {
-	// RootDir returns the root directory for the storage path.
-	RootDir() string
-
-	// Dir returns the director where files are placed
+	// Dir returns the directory where files are placed
 	Dir() string
 
 	// ToFullPath returns the full path to a file name within the storage

+ 1 - 0
core/pkg/filter/allocation/fields.go

@@ -15,6 +15,7 @@ const (
 	FieldClusterID      AllocationField = AllocationField(fieldstrings.FieldClusterID)
 	FieldNode           AllocationField = AllocationField(fieldstrings.FieldNode)
 	FieldNamespace      AllocationField = AllocationField(fieldstrings.FieldNamespace)
+	FieldNamespaceLabel AllocationField = AllocationField(fieldstrings.FieldNamespaceLabel)
 	FieldControllerKind AllocationField = AllocationField(fieldstrings.FieldControllerKind)
 	FieldControllerName AllocationField = AllocationField(fieldstrings.FieldControllerName)
 	FieldPod            AllocationField = AllocationField(fieldstrings.FieldPod)

+ 1 - 0
core/pkg/filter/allocation/parser.go

@@ -22,6 +22,7 @@ var allocationFilterFields []*ast.Field = []*ast.Field{
 	ast.NewMapField(FieldLabel),
 	ast.NewMapField(FieldAnnotation),
 	ast.NewMapField(FieldNodeLabel),
+	ast.NewMapField(FieldNamespaceLabel),
 }
 
 // fieldMap is a lazily loaded mapping from AllocationField to ast.Field

+ 4 - 0
core/pkg/filter/allocation/parser_test.go

@@ -26,6 +26,10 @@ func TestParse(t *testing.T) {
 			name:  "Single",
 			input: `namespace: "kubecost"`,
 		},
+		{
+			name:  "Single: namespace label",
+			input: `namespaceLabel[app]:"kubecost"`,
+		},
 		{
 			name:  "Single Group",
 			input: `(namespace: "kubecost")`,

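For context, a hedged sketch of the new map field in use (it assumes the allocation package's NewAllocationFilterParser constructor, mirroring NewResourceQuotaFilterParser below, and that ast.FilterParser exposes Parse(string)):

package main

import (
	"fmt"

	allocfilter "github.com/opencost/opencost/core/pkg/filter/allocation"
)

func main() {
	parser := allocfilter.NewAllocationFilterParser()

	// namespaceLabel is registered with ast.NewMapField, so it is keyed
	// the same way as label[...] and annotation[...].
	node, err := parser.Parse(`namespaceLabel[app]:"kubecost"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed: %+v\n", node)
}
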
+ 5 - 0
core/pkg/filter/fieldstrings/fieldstrings.go

@@ -4,6 +4,8 @@ package fieldstrings
 // filters. Many filter types share fields; defining common consts means that
 // there should be no drift between types.
 const (
+	FieldUID string = "uid"
+
 	FieldClusterID      string = "cluster"
 	FieldNode           string = "node"
 	FieldNamespace      string = "namespace"
@@ -16,6 +18,9 @@ const (
 	FieldLabel          string = "label"
 	FieldAnnotation     string = "annotation"
 	FieldNodeLabel      string = "nodeLabel"
+	FieldNamespaceLabel string = "namespaceLabel"
+
+	FieldResourceQuota string = "resourcequota"
 
 	FieldName       string = "name"
 	FieldType       string = "assetType"

+ 17 - 0
core/pkg/filter/resourcequota/fields.go

@@ -0,0 +1,17 @@
+package resourcequota
+
+import (
+	"github.com/opencost/opencost/core/pkg/filter/fieldstrings"
+)
+
+type ResourceQuotaField string
+
+// If you add a ResourceQuotaField, make sure to update field maps to return the
+// correct value; Go does not enforce exhaustive pattern matching on "enum" types.
+const (
+	FieldClusterID      ResourceQuotaField = ResourceQuotaField(fieldstrings.FieldClusterID)
+	FieldResourceQuota  ResourceQuotaField = ResourceQuotaField(fieldstrings.FieldResourceQuota)
+	FieldNamespace      ResourceQuotaField = ResourceQuotaField(fieldstrings.FieldNamespace)
+	FieldNamespaceLabel ResourceQuotaField = ResourceQuotaField(fieldstrings.FieldNamespaceLabel)
+	FieldUID            ResourceQuotaField = ResourceQuotaField(fieldstrings.FieldUID)
+)

+ 38 - 0
core/pkg/filter/resourcequota/parser.go

@@ -0,0 +1,38 @@
+package resourcequota
+
+import "github.com/opencost/opencost/core/pkg/filter/ast"
+
+var resourceQuotaFilterFields []*ast.Field = []*ast.Field{
+	ast.NewField(FieldClusterID),
+	ast.NewField(FieldResourceQuota),
+	ast.NewField(FieldNamespace),
+	ast.NewMapField(FieldNamespaceLabel),
+	ast.NewField(FieldUID),
+}
+
+// fieldMap is a lazily loaded mapping from ResourceQuotaField to ast.Field
+var fieldMap map[ResourceQuotaField]*ast.Field
+
+func init() {
+	fieldMap = make(map[ResourceQuotaField]*ast.Field, len(resourceQuotaFilterFields))
+	for _, f := range resourceQuotaFilterFields {
+		ff := *f
+		fieldMap[ResourceQuotaField(ff.Name)] = &ff
+	}
+}
+
+// DefaultFieldByName returns only default resource quota filter fields by name.
+func DefaultFieldByName(field ResourceQuotaField) *ast.Field {
+	if af, ok := fieldMap[field]; ok {
+		afcopy := *af
+		return &afcopy
+	}
+
+	return nil
+}
+
+// NewResourceQuotaFilterParser creates a new `ast.FilterParser` implementation
+// which uses resource quota specific fields
+func NewResourceQuotaFilterParser() ast.FilterParser {
+	return ast.NewFilterParser(resourceQuotaFilterFields)
+}

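A minimal usage sketch for the new parser (not in the commit; the `+` conjunction follows the filter language used elsewhere in opencost):

package main

import (
	"fmt"

	"github.com/opencost/opencost/core/pkg/filter/resourcequota"
)

func main() {
	parser := resourcequota.NewResourceQuotaFilterParser()

	// Match quotas in the "kubecost" namespace whose namespace carries a
	// team=infra label; both fields come from resourceQuotaFilterFields.
	node, err := parser.Parse(`namespace:"kubecost"+namespaceLabel[team]:"infra"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed: %+v\n", node)
}
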
+ 43 - 0
core/pkg/filter/resourcequota/parser_test.go

@@ -0,0 +1,43 @@
+package resourcequota
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/core/pkg/filter/ast"
+)
+
+func TestDefaultFieldByName(t *testing.T) {
+	var rqField ResourceQuotaField
+	var astf *ast.Field
+
+	rqField = FieldResourceQuota
+	astf = DefaultFieldByName(rqField)
+	if astf.Name != "resourcequota" {
+		t.Errorf("expected %s; received %s", "resourcequota", astf.Name)
+	}
+
+	rqField = FieldClusterID
+	astf = DefaultFieldByName(rqField)
+	if astf.Name != "cluster" {
+		t.Errorf("expected %s; received %s", "cluster", astf.Name)
+	}
+
+	rqField = FieldNamespace
+	astf = DefaultFieldByName(rqField)
+	if astf.Name != "namespace" {
+		t.Errorf("expected %s; received %s", "namespace", astf.Name)
+	}
+
+	rqField = FieldNamespaceLabel
+	astf = DefaultFieldByName(rqField)
+	if astf.Name != "namespaceLabel" {
+		t.Errorf("expected %s; received %s", "namespaceLabel", astf.Name)
+	}
+
+	rqField = FieldUID
+	astf = DefaultFieldByName(rqField)
+	if astf.Name != "uid" {
+		t.Errorf("expected %s; received %s", "uid", astf.Name)
+	}
+
+}

+ 14 - 0
core/pkg/kubeconfig/loader.go

@@ -1,8 +1,10 @@
 package kubeconfig
 
 import (
+	"context"
 	"fmt"
 
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/client-go/rest"
@@ -39,3 +41,15 @@ func LoadKubeClient(path string) (*kubernetes.Clientset, error) {
 	}
 	return kubernetes.NewForConfig(config)
 }
+
+func GetClusterUID(client kubernetes.Interface) (string, error) {
+	ns, err := client.CoreV1().Namespaces().Get(context.Background(), "kube-system", v1.GetOptions{})
+	if err != nil {
+		return "", fmt.Errorf("error getting 'kube-system' namespace: %w", err)
+	}
+	uid := string(ns.ObjectMeta.UID)
+	if uid == "" {
+		return "", fmt.Errorf("uid field in 'kube-system' namespace is empty")
+	}
+	return uid, nil
+}

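A short sketch of the helper in use (the kubeconfig path is hypothetical):

package main

import (
	"fmt"

	"github.com/opencost/opencost/core/pkg/kubeconfig"
)

func main() {
	client, err := kubeconfig.LoadKubeClient("/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// The UID of the kube-system namespace serves as a stable cluster ID.
	uid, err := kubeconfig.GetClusterUID(client)
	if err != nil {
		panic(err)
	}
	fmt.Println("cluster UID:", uid)
}
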
+ 22 - 1
core/pkg/model/kubemodel/cluster.go

@@ -1,6 +1,9 @@
 package kubemodel
 
-import "time"
+import (
+	"errors"
+	"time"
+)
 
 // @bingen:generate:Cluster
 type Cluster struct {
@@ -11,3 +14,21 @@ type Cluster struct {
 	Start    time.Time `json:"start"`    // @bingen:field[version=1]
 	End      time.Time `json:"end"`      // @bingen:field[version=1]
 }
+
+func (kms *KubeModelSet) RegisterCluster(uid string) error {
+	if uid == "" {
+		err := errors.New("RegisterCluster: uid is empty")
+		kms.Error(err)
+		return err
+	}
+
+	if kms.Cluster == nil {
+		kms.Cluster = &Cluster{UID: uid}
+	} else if uid != kms.Cluster.UID {
+		kms.Warnf("RegisterCluster(%s): attempting to change cluster UID from %s to %s", uid, kms.Cluster.UID, uid)
+	} else {
+		kms.Debugf("RegisterCluster(%s): cluster already registered", uid)
+	}
+
+	return nil
+}

+ 39 - 18
core/pkg/model/kubemodel/container.go

@@ -1,23 +1,44 @@
 package kubemodel
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
-// @bingen:generate:Container
 type Container struct {
-	PodUID                              string    `json:"podUid"`                              // @bingen:field[version=1]
-	Name                                string    `json:"name"`                                // @bingen:field[version=1]
-	Start                               time.Time `json:"start"`                               // @bingen:field[version=1]
-	End                                 time.Time `json:"end"`                                 // @bingen:field[version=1]
-	CpuMillicoreSecondsAllocated        uint64    `json:"cpuMillicoreSecondsAllocated"`        // @bingen:field[version=1]
-	CpuMillicoreRequestAverageAllocated uint64    `json:"cpuMillicoreRequestAverageAllocated"` // @bingen:field[version=1]
-	CpuMillicoreUsageAverage            uint64    `json:"cpuMillicoreUsageAverage"`            // @bingen:field[version=1]
-	CpuMillicoreUsageMax                uint64    `json:"cpuMillicoreUsageMax"`                // @bingen:field[version=1]
-	RAMByteSecondsAllocated             uint64    `json:"ramByteSecondsAllocated"`             // @bingen:field[version=1]
-	RAMByteRequestAverageAllocated      uint64    `json:"ramByteRequestAverageAllocated"`      // @bingen:field[version=1]
-	RAMByteUsageAverage                 uint64    `json:"ramByteUsageAverage"`                 // @bingen:field[version=1]
-	RAMByteUsageMax                     uint64    `json:"ramByteUsageMax"`                     // @bingen:field[version=1]
-	StorageByteSecondsAllocated         uint64    `json:"storageByteSecondsAllocated"`         // @bingen:field[version=1]
-	StorageByteRequestAverageAllocated  uint64    `json:"storageByteRequestAverageAllocated"`  // @bingen:field[version=1]
-	StorageByteUsageAverage             uint64    `json:"storageByteUsageAverage"`             // @bingen:field[version=1]
-	StorageByteUsageMax                 uint64    `json:"storageByteUsageMax"`                 // @bingen:field[version=1]
+	PodUID                              string    `json:"podUid"`
+	Name                                string    `json:"name"`
+	Start                               time.Time `json:"start"`
+	End                                 time.Time `json:"end"`
+	CpuMillicoreSecondsAllocated        uint64    `json:"cpuMillicoreSecondsAllocated"`
+	CpuMillicoreRequestAverageAllocated uint64    `json:"cpuMillicoreRequestAverageAllocated"`
+	CpuMillicoreUsageAverage            uint64    `json:"cpuMillicoreUsageAverage"`
+	CpuMillicoreUsageMax                uint64    `json:"cpuMillicoreUsageMax"`
+	RAMByteSecondsAllocated             uint64    `json:"ramByteSecondsAllocated"`
+	RAMByteRequestAverageAllocated      uint64    `json:"ramByteRequestAverageAllocated"`
+	RAMByteUsageAverage                 uint64    `json:"ramByteUsageAverage"`
+	RAMByteUsageMax                     uint64    `json:"ramByteUsageMax"`
+	StorageByteSecondsAllocated         uint64    `json:"storageByteSecondsAllocated"`
+	StorageByteRequestAverageAllocated  uint64    `json:"storageByteRequestAverageAllocated"`
+	StorageByteUsageAverage             uint64    `json:"storageByteUsageAverage"`
+	StorageByteUsageMax                 uint64    `json:"storageByteUsageMax"`
+}
+
+func (kms *KubeModelSet) RegisterContainer(uid, name, podUID string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is empty for Container '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Containers[uid]; !ok {
+		kms.Containers[uid] = &Container{
+			PodUID: podUID,
+			Name:   name,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
 }

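Taken together with RegisterCluster above, registration looks roughly like this (a sketch with hypothetical UIDs):

package main

import (
	"time"

	"github.com/opencost/opencost/core/pkg/model/kubemodel"
)

func main() {
	start := time.Now().UTC().Truncate(time.Hour)
	kms := kubemodel.NewKubeModelSet(start, start.Add(time.Hour))

	// Register the cluster first; subsequent Register* calls are idempotent
	// per UID and increment Metadata.ObjectCount on first insertion.
	_ = kms.RegisterCluster("cluster-uid-123")
	_ = kms.RegisterContainer("container-uid-1", "app", "pod-uid-1")
}
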
+ 181 - 12
core/pkg/model/kubemodel/diagnostic.go

@@ -1,14 +1,183 @@
 package kubemodel
 
-import "time"
-
-// @bingen:generate:DiagnosticResult
-type DiagnosticResult struct {
-	UID         string            `json:"uid"`               // @bingen:field[version=1]
-	Name        string            `json:"name"`              // @bingen:field[version=1]
-	Description string            `json:"description"`       // @bingen:field[version=1]
-	Category    string            `json:"category"`          // @bingen:field[version=1]
-	Timestamp   time.Time         `json:"timestamp"`         // @bingen:field[version=1]
-	Error       string            `json:"error,omitempty"`   // @bingen:field[version=1]
-	Details     map[string]string `json:"details,omitempty"` // @bingen:field[version=1]
-}
+import (
+	"fmt"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/log"
+)
+
+// @bingen:generate:DiagnosticLevel
+type DiagnosticLevel int
+
+const (
+	DiagnosticLevelTrace DiagnosticLevel = iota
+	DiagnosticLevelDebug
+	DiagnosticLevelInfo
+	DiagnosticLevelWarning
+	DiagnosticLevelError
+)
+
+const DefaultDiagnosticLevel = DiagnosticLevelInfo
+
+// @bingen:generate:Diagnostic
+type Diagnostic struct {
+	Timestamp time.Time         `json:"timestamp"`         // @bingen:field[version=1]
+	Level     DiagnosticLevel   `json:"level"`             // @bingen:field[version=1]
+	Message   string            `json:"message"`           // @bingen:field[version=1]
+	Details   map[string]string `json:"details,omitempty"` // @bingen:field[version=1]
+}
+
+func (kms *KubeModelSet) RegisterDiagnostic(d Diagnostic) {
+	kms.Metadata.Diagnostics = append(kms.Metadata.Diagnostics, d)
+}
+
+func (kms *KubeModelSet) GetErrors() []Diagnostic {
+	ds := []Diagnostic{}
+
+	for _, d := range kms.Metadata.Diagnostics {
+		if d.Level == DiagnosticLevelError {
+			ds = append(ds, d)
+		}
+	}
+
+	return ds
+}
+
+func (kms *KubeModelSet) Errorf(msg string, a ...any) {
+	kms.Error(fmt.Errorf(msg, a...))
+}
+
+func (kms *KubeModelSet) Error(err error) {
+	if err == nil {
+		return
+	}
+
+	log.Error(fmt.Sprintf("KubeModel: %s", err))
+
+	kms.RegisterDiagnostic(Diagnostic{
+		Timestamp: time.Now().UTC(),
+		Level:     DiagnosticLevelError,
+		Message:   err.Error(),
+	})
+}
+
+func (kms *KubeModelSet) GetWarnings() []Diagnostic {
+	ds := []Diagnostic{}
+
+	for _, d := range kms.Metadata.Diagnostics {
+		if d.Level == DiagnosticLevelWarning {
+			ds = append(ds, d)
+		}
+	}
+
+	return ds
+}
+
+func (kms *KubeModelSet) Warnf(msg string, a ...any) {
+	kms.Warn(fmt.Sprintf(msg, a...))
+}
+
+func (kms *KubeModelSet) Warn(msg string) {
+	if kms.Metadata.DiagnosticLevel > DiagnosticLevelWarning {
+		return
+	}
+
+	log.Warn(fmt.Sprintf("KubeModel: %s", msg))
+
+	kms.RegisterDiagnostic(Diagnostic{
+		Timestamp: time.Now().UTC(),
+		Level:     DiagnosticLevelWarning,
+		Message:   msg,
+	})
+}
+
+func (kms *KubeModelSet) GetInfos() []Diagnostic {
+	ds := []Diagnostic{}
+
+	for _, d := range kms.Metadata.Diagnostics {
+		if d.Level == DiagnosticLevelInfo {
+			ds = append(ds, d)
+		}
+	}
+
+	return ds
+}
+
+func (kms *KubeModelSet) Infof(msg string, a ...any) {
+	kms.Info(fmt.Sprintf(msg, a...))
+}
+
+func (kms *KubeModelSet) Info(msg string) {
+	if kms.Metadata.DiagnosticLevel > DiagnosticLevelInfo {
+		return
+	}
+
+	log.Info(fmt.Sprintf("KubeModel: %s", msg))
+
+	kms.RegisterDiagnostic(Diagnostic{
+		Timestamp: time.Now().UTC(),
+		Level:     DiagnosticLevelInfo,
+		Message:   msg,
+	})
+}
+
+func (kms *KubeModelSet) GetDebugs() []Diagnostic {
+	ds := []Diagnostic{}
+
+	for _, d := range kms.Metadata.Diagnostics {
+		if d.Level == DiagnosticLevelDebug {
+			ds = append(ds, d)
+		}
+	}
+
+	return ds
+}
+
+func (kms *KubeModelSet) Debugf(msg string, a ...any) {
+	kms.Debug(fmt.Sprintf(msg, a...))
+}
+
+func (kms *KubeModelSet) Debug(msg string) {
+	if kms.Metadata.DiagnosticLevel > DiagnosticLevelDebug {
+		return
+	}
+
+	log.Debug(fmt.Sprintf("KubeModel: %s", msg))
+
+	kms.RegisterDiagnostic(Diagnostic{
+		Timestamp: time.Now().UTC(),
+		Level:     DiagnosticLevelDebug,
+		Message:   msg,
+	})
+}
+
+func (kms *KubeModelSet) GetTraces() []Diagnostic {
+	ds := []Diagnostic{}
+
+	for _, d := range kms.Metadata.Diagnostics {
+		if d.Level == DiagnosticLevelTrace {
+			ds = append(ds, d)
+		}
+	}
+
+	return ds
+}
+
+func (kms *KubeModelSet) Tracef(msg string, a ...any) {
+	kms.Trace(fmt.Sprintf(msg, a...))
+}
+
+func (kms *KubeModelSet) Trace(msg string) {
+	if kms.Metadata.DiagnosticLevel > DiagnosticLevelTrace {
+		return
+	}
+
+	log.Trace(fmt.Sprintf("KubeModel: %s", msg))
+
+	kms.RegisterDiagnostic(Diagnostic{
+		Timestamp: time.Now().UTC(),
+		Level:     DiagnosticLevelTrace,
+		Message:   msg,
+	})
+}

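The level gate means only diagnostics at or above Metadata.DiagnosticLevel are logged and recorded, while errors are always recorded. A brief sketch:

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/model/kubemodel"
)

func main() {
	now := time.Now().UTC()
	kms := kubemodel.NewKubeModelSet(now, now.Add(time.Hour))

	kms.Debugf("dropped at the default Info level: %d", 42) // not recorded
	kms.Warnf("missing namespace %q", "kube-system")        // recorded
	kms.Errorf("lookup failed: %s", "timeout")              // always recorded

	fmt.Println(len(kms.GetWarnings()), len(kms.GetErrors())) // 1 1
}
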
+ 30 - 174
core/pkg/model/kubemodel/kubemodel.go

@@ -1,199 +1,55 @@
 package kubemodel
 
 import (
-	"errors"
-	"fmt"
 	"time"
 )
 
+// TODO: should we add a lock so that we can safely modify KubeModelSet in parallel?
+
 // @bingen:generate[stringtable]:KubeModelSet
 type KubeModelSet struct {
-	Metadata   *Metadata             `json:"meta"`                 // @bingen:field[version=1]
-	Window     Window                `json:"window"`               // @bingen:field[version=1]
-	Cluster    *Cluster              `json:"cluster"`              // @bingen:field[version=1]
-	Namespaces map[string]*Namespace `json:"namespaces"`           // @bingen:field[version=1]
-	Containers map[string]*Container `json:"containers,omitempty"` // @bingen:field[ignore]
-	Owners     map[string]*Owner     `json:"owners,omitempty"`     // @bingen:field[ignore]
-	Nodes      map[string]*Node      `json:"nodes,omitempty"`      // @bingen:field[ignore]
-	Pods       map[string]*Pod       `json:"pods,omitempty"`       // @bingen:field[ignore]
-	Services   map[string]*Service   `json:"services,omitempty"`   // @bingen:field[ignore]
-	idx        *kubeModelSetIndexes  // @bingen:field[ignore]
+	Metadata       *Metadata                 `json:"meta"`                     // @bingen:field[version=1]
+	Window         Window                    `json:"window"`                   // @bingen:field[version=1]
+	Cluster        *Cluster                  `json:"cluster"`                  // @bingen:field[version=1]
+	Containers     map[string]*Container     `json:"containers,omitempty"`     // @bingen:field[ignore]
+	Namespaces     map[string]*Namespace     `json:"namespaces"`               // @bingen:field[version=1]
+	Nodes          map[string]*Node          `json:"nodes,omitempty"`          // @bingen:field[ignore]
+	Owners         map[string]*Owner         `json:"owners,omitempty"`         // @bingen:field[ignore]
+	Pods           map[string]*Pod           `json:"pods,omitempty"`           // @bingen:field[ignore]
+	ResourceQuotas map[string]*ResourceQuota `json:"resourceQuotas,omitempty"` // @bingen:field[version=1]
+	Services       map[string]*Service       `json:"services,omitempty"`       // @bingen:field[ignore]
+	idx            *index                    // @bingen:field[ignore]
 }
 
-func (kms *KubeModelSet) MarshalBinary() (data []byte, err error) {
-	//TODO implement me
-	panic("implement me")
-}
+func NewKubeModelSet(start, end time.Time) *KubeModelSet {
+	index := &index{
+		namespaceByName: map[string]*Namespace{},
+	}
 
-func NewKubeModelSet(start time.Time, end time.Time) *KubeModelSet {
 	return &KubeModelSet{
 		Metadata: &Metadata{
-			CreatedAt: time.Now().UTC(),
+			CreatedAt:       time.Now().UTC(),
+			DiagnosticLevel: DefaultDiagnosticLevel,
 		},
 		Window: Window{
 			Start: start,
 			End:   end,
 		},
-		Containers: map[string]*Container{},
-		Owners:     map[string]*Owner{},
-		Namespaces: map[string]*Namespace{},
-		Nodes:      map[string]*Node{},
-		Pods:       map[string]*Pod{},
-		Services:   map[string]*Service{},
-		idx: &kubeModelSetIndexes{
-			namespaceNameToID: map[string]string{},
-		},
+		Containers:     map[string]*Container{},
+		Namespaces:     map[string]*Namespace{},
+		Nodes:          map[string]*Node{},
+		Owners:         map[string]*Owner{},
+		Pods:           map[string]*Pod{},
+		ResourceQuotas: map[string]*ResourceQuota{},
+		Services:       map[string]*Service{},
+		idx:            index,
 	}
 }
 
-func (kms *KubeModelSet) RegisterNamespace(id string, name string) error {
-	if _, ok := kms.Namespaces[id]; !ok {
-		if kms.Cluster == nil {
-			return errors.New("KubeModelSet missing Cluster")
-		}
-
-		kms.Namespaces[id] = &Namespace{
-			UID:        id,
-			ClusterUID: kms.Cluster.UID,
-			Name:       name,
-		}
-
-		// Index namespace name-to-ID for fast lookup
-		if name != "" {
-			kms.idx.namespaceNameToID[name] = id
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
-}
-
-// GetNamespaceByName retrieves a namespace by its name using the index
-func (kms *KubeModelSet) GetNamespaceByName(name string) (*Namespace, bool) {
-	if kms.idx == nil {
-		return nil, false
-	}
-
-	id, ok := kms.idx.namespaceNameToID[name]
-	if !ok {
-		return nil, false
-	}
-
-	ns, ok := kms.Namespaces[id]
-	return ns, ok
-}
-
-// IsEmpty returns true if the KubeModelSet is nil, has no cluster, or contains no resources
 func (kms *KubeModelSet) IsEmpty() bool {
-	if kms == nil || kms.Cluster == nil {
-		return true
-	}
-
-	// Check if all resource maps are empty
-	return len(kms.Containers) == 0 &&
-		len(kms.Owners) == 0 &&
-		len(kms.Namespaces) == 0 &&
-		len(kms.Nodes) == 0 &&
-		len(kms.Pods) == 0 &&
-		len(kms.Services) == 0
-}
-
-func (kms *KubeModelSet) RegisterPod(id, name, namespace string) error {
-	if _, ok := kms.Pods[id]; !ok {
-		nsID, ok := kms.idx.namespaceNameToID[namespace]
-		if !ok {
-			return fmt.Errorf("KubeModelSet missing namespace '%s'", namespace)
-		}
-
-		kms.Pods[id] = &Pod{
-			UID:          id,
-			Name:         name,
-			NamespaceUID: nsID,
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
-}
-
-func (kms *KubeModelSet) RegisterNode(id, name string) error {
-	if _, ok := kms.Nodes[id]; !ok {
-		if kms.Cluster == nil {
-			return errors.New("KubeModelSet missing Cluster")
-		}
-
-		kms.Nodes[id] = &Node{
-			UID:        id,
-			ClusterUID: kms.Cluster.UID,
-			Name:       name,
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
-}
-
-func (kms *KubeModelSet) RegisterOwner(id, name, namespace, kind string, isController bool) error {
-	if _, ok := kms.Owners[id]; !ok {
-		nsID, ok := kms.idx.namespaceNameToID[namespace]
-		if !ok {
-			return fmt.Errorf("KubeModelSet missing namespace '%s'", namespace)
-		}
-
-		kms.Owners[id] = &Owner{
-			UID:        id,
-			Name:       name,
-			OwnerUID:   nsID,
-			Kind:       OwnerKind(kind),
-			Controller: isController,
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
-}
-
-func (kms *KubeModelSet) RegisterService(id, name, namespace string) error {
-	if _, ok := kms.Services[id]; !ok {
-		if kms.Cluster == nil {
-			return errors.New("KubeModelSet missing Cluster")
-		}
-
-		nsID, ok := kms.idx.namespaceNameToID[namespace]
-		if !ok {
-			return fmt.Errorf("KubeModelSet missing namespace '%s'", namespace)
-		}
-
-		kms.Services[id] = &Service{
-			UID:          id,
-			ClusterUID:   kms.Cluster.UID,
-			NamespaceUID: nsID,
-			Name:         name,
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
-}
-
-func (kms *KubeModelSet) RegisterContainer(id, name, podID string) error {
-	if _, ok := kms.Containers[id]; !ok {
-		kms.Containers[id] = &Container{
-			PodUID: podID,
-			Name:   name,
-		}
-
-		kms.Metadata.ObjectCount++
-	}
-
-	return nil
+	return kms == nil || kms.Cluster == nil || kms.Metadata.ObjectCount == 0
 }
 
-type kubeModelSetIndexes struct {
-	namespaceNameToID map[string]string
+type index struct {
+	namespaceByName map[string]*Namespace
 }

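Before the generated codecs below, a round-trip sketch (hypothetical UID; KubeModelSet is generated with the [stringtable] option, so the encoding is prefixed with a "BGST" string table):

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/model/kubemodel"
)

func main() {
	start := time.Now().UTC()
	kms := kubemodel.NewKubeModelSet(start, start.Add(time.Hour))
	_ = kms.RegisterCluster("cluster-uid-123")

	data, err := kms.MarshalBinary()
	if err != nil {
		panic(err)
	}

	decoded := &kubemodel.KubeModelSet{}
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Cluster.UID) // cluster-uid-123
}
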
+ 3086 - 0
core/pkg/model/kubemodel/kubemodel_codecs.go

@@ -0,0 +1,3086 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+//                             DO NOT MODIFY
+//
+//                          ┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻
+//
+//
+//            This source file was automatically generated by bingen.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+package kubemodel
+
+import (
+	"fmt"
+	util "github.com/opencost/opencost/core/pkg/util"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// GeneratorPackageName is the package the generator is targeting
+	GeneratorPackageName string = "kubemodel"
+)
+
+// BinaryTags represent the formatting tag used for specific optimization features
+const (
+	// BinaryTagStringTable is written and/or read prior to the existence of a string
+	// table (where each index is encoded as a string entry in the resource)
+	BinaryTagStringTable string = "BGST"
+)
+
+const (
+	// DefaultCodecVersion is used for any resources listed in the Default version set
+	DefaultCodecVersion uint8 = 1
+)
+
+//--------------------------------------------------------------------------
+//  Type Map
+//--------------------------------------------------------------------------
+
+// Generated type map for resolving interface implementations
+// to concrete types
+var typeMap map[string]reflect.Type = map[string]reflect.Type{
+	"Cluster":                 reflect.TypeOf((*Cluster)(nil)).Elem(),
+	"Diagnostic":              reflect.TypeOf((*Diagnostic)(nil)).Elem(),
+	"KubeModelSet":            reflect.TypeOf((*KubeModelSet)(nil)).Elem(),
+	"Metadata":                reflect.TypeOf((*Metadata)(nil)).Elem(),
+	"Namespace":               reflect.TypeOf((*Namespace)(nil)).Elem(),
+	"ResourceQuantity":        reflect.TypeOf((*ResourceQuantity)(nil)).Elem(),
+	"ResourceQuota":           reflect.TypeOf((*ResourceQuota)(nil)).Elem(),
+	"ResourceQuotaSpec":       reflect.TypeOf((*ResourceQuotaSpec)(nil)).Elem(),
+	"ResourceQuotaSpecHard":   reflect.TypeOf((*ResourceQuotaSpecHard)(nil)).Elem(),
+	"ResourceQuotaStatus":     reflect.TypeOf((*ResourceQuotaStatus)(nil)).Elem(),
+	"ResourceQuotaStatusUsed": reflect.TypeOf((*ResourceQuotaStatusUsed)(nil)).Elem(),
+	"Window":                  reflect.TypeOf((*Window)(nil)).Elem(),
+}
+
+//--------------------------------------------------------------------------
+//  Type Helpers
+//--------------------------------------------------------------------------
+
+// isBinaryTag returns true when the first bytes in the provided binary matches the tag
+func isBinaryTag(data []byte, tag string) bool {
+	return string(data[:len(tag)]) == tag
+}
+
+// appendBytes combines a and b into a new byte array
+func appendBytes(a []byte, b []byte) []byte {
+	al := len(a)
+	bl := len(b)
+	tl := al + bl
+
+	// allocate a new byte array for the combined
+	// use native copy for speedy byte copying
+	result := make([]byte, tl, tl)
+	copy(result, a)
+	copy(result[al:], b)
+
+	return result
+}
+
+// typeToString determines the basic properties of the type, the qualifier, package path, and
+// type name, and returns the qualified type
+func typeToString(f interface{}) string {
+	qual := ""
+	t := reflect.TypeOf(f)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+		qual = "*"
+	}
+
+	return fmt.Sprintf("%s%s.%s", qual, t.PkgPath(), t.Name())
+}
+
+// resolveType uses the name of a type and returns the package, base type name, and whether
+// or not it's a pointer.
+func resolveType(t string) (pkg string, name string, isPtr bool) {
+	isPtr = t[:1] == "*"
+	if isPtr {
+		t = t[1:]
+	}
+
+	slashIndex := strings.LastIndex(t, "/")
+	if slashIndex >= 0 {
+		t = t[slashIndex+1:]
+	}
+	parts := strings.Split(t, ".")
+	if parts[0] == GeneratorPackageName {
+		parts[0] = ""
+	}
+
+	pkg = parts[0]
+	name = parts[1]
+	return
+}
+
+//--------------------------------------------------------------------------
+//  StringTable
+//--------------------------------------------------------------------------
+
+// StringTable maps strings to specific indices for encoding
+type StringTable struct {
+	l       *sync.Mutex
+	indices map[string]int
+	next    int
+}
+
+// NewStringTable Creates a new StringTable instance with provided contents
+func NewStringTable(contents ...string) *StringTable {
+	st := &StringTable{
+		l:       new(sync.Mutex),
+		indices: make(map[string]int),
+		next:    len(contents),
+	}
+
+	for i, entry := range contents {
+		st.indices[entry] = i
+	}
+
+	return st
+}
+
+// AddOrGet atomically retrieves a string entry's index if it exists. Otherwise, it will
+// add the entry and return the index.
+func (st *StringTable) AddOrGet(s string) int {
+	st.l.Lock()
+	defer st.l.Unlock()
+
+	if ind, ok := st.indices[s]; ok {
+		return ind
+	}
+
+	current := st.next
+	st.next++
+
+	st.indices[s] = current
+	return current
+}
+
+// ToSlice Converts the contents to a string array for encoding.
+func (st *StringTable) ToSlice() []string {
+	st.l.Lock()
+	defer st.l.Unlock()
+
+	if st.next == 0 {
+		return []string{}
+	}
+
+	sl := make([]string, st.next, st.next)
+	for s, i := range st.indices {
+		sl[i] = s
+	}
+	return sl
+}
+
+// ToBytes Converts the contents to a binary encoded representation
+func (st *StringTable) ToBytes() []byte {
+	buff := util.NewBuffer()
+	buff.WriteBytes([]byte(BinaryTagStringTable)) // bingen table header
+
+	strs := st.ToSlice()
+
+	buff.WriteInt(len(strs)) // table length
+	for _, s := range strs {
+		buff.WriteString(s)
+	}
+
+	return buff.Bytes()
+}
+
+//--------------------------------------------------------------------------
+//  Codec Context
+//--------------------------------------------------------------------------
+
+// EncodingContext is a context object passed to the encoders to ensure reuse of buffer
+// and table data
+type EncodingContext struct {
+	Buffer *util.Buffer
+	Table  *StringTable
+}
+
+// IsStringTable returns true if the table is available
+func (ec *EncodingContext) IsStringTable() bool {
+	return ec.Table != nil
+}
+
+// DecodingContext is a context object passed to the decoders to ensure parent objects
+// reuse as much data as possible
+type DecodingContext struct {
+	Buffer *util.Buffer
+	Table  []string
+}
+
+// IsStringTable returns true if the table is available
+func (dc *DecodingContext) IsStringTable() bool {
+	return len(dc.Table) > 0
+}
+
+//--------------------------------------------------------------------------
+//  Binary Codec
+//--------------------------------------------------------------------------
+
+// BinEncoder is an encoding interface which defines a context based marshal contract.
+type BinEncoder interface {
+	MarshalBinaryWithContext(*EncodingContext) error
+}
+
+// BinDecoder is a decoding interface which defines a context based unmarshal contract.
+type BinDecoder interface {
+	UnmarshalBinaryWithContext(*DecodingContext) error
+}
+
+//--------------------------------------------------------------------------
+//  Cluster
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Cluster instance
+// into a byte array
+func (target *Cluster) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this Cluster instance
+// into a byte array leveraging a predefined context.
+func (target *Cluster) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if ctx.IsStringTable() {
+		a := ctx.Table.AddOrGet(target.UID)
+		buff.WriteInt(a) // write table index
+	} else {
+		buff.WriteString(target.UID) // write string
+	}
+	// --- [begin][write][alias](Provider) ---
+	if ctx.IsStringTable() {
+		b := ctx.Table.AddOrGet(string(target.Provider))
+		buff.WriteInt(b) // write table index
+	} else {
+		buff.WriteString(string(target.Provider)) // write string
+	}
+	// --- [end][write][alias](Provider) ---
+
+	if ctx.IsStringTable() {
+		c := ctx.Table.AddOrGet(target.Account)
+		buff.WriteInt(c) // write table index
+	} else {
+		buff.WriteString(target.Account) // write string
+	}
+	if ctx.IsStringTable() {
+		d := ctx.Table.AddOrGet(target.Name)
+		buff.WriteInt(d) // write table index
+	} else {
+		buff.WriteString(target.Name) // write string
+	}
+	// --- [begin][write][reference](time.Time) ---
+	e, errA := target.Start.MarshalBinary()
+	if errA != nil {
+		return errA
+	}
+	buff.WriteInt(len(e))
+	buff.WriteBytes(e)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	f, errB := target.End.MarshalBinary()
+	if errB != nil {
+		return errB
+	}
+	buff.WriteInt(len(f))
+	buff.WriteBytes(f)
+	// --- [end][write][reference](time.Time) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Cluster type
+func (target *Cluster) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the Cluster type
+func (target *Cluster) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Cluster. Expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var b string
+		if ctx.IsStringTable() {
+			c := buff.ReadInt() // read string index
+			b = ctx.Table[c]
+		} else {
+			b = buff.ReadString() // read string
+		}
+		a := b
+		target.UID = a
+
+	} else {
+		target.UID = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](Provider) ---
+		var d string
+		var f string
+		if ctx.IsStringTable() {
+			g := buff.ReadInt() // read string index
+			f = ctx.Table[g]
+		} else {
+			f = buff.ReadString() // read string
+		}
+		e := f
+		d = e
+
+		target.Provider = Provider(d)
+		// --- [end][read][alias](Provider) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var k string
+		if ctx.IsStringTable() {
+			l := buff.ReadInt() // read string index
+			k = ctx.Table[l]
+		} else {
+			k = buff.ReadString() // read string
+		}
+		h := k
+		target.Account = h
+
+	} else {
+		target.Account = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var n string
+		if ctx.IsStringTable() {
+			o := buff.ReadInt() // read string index
+			n = ctx.Table[o]
+		} else {
+			n = buff.ReadString() // read string
+		}
+		m := n
+		target.Name = m
+
+	} else {
+		target.Name = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		p := &time.Time{}
+		q := buff.ReadInt()    // byte array length
+		r := buff.ReadBytes(q) // byte array
+		errA := p.UnmarshalBinary(r)
+		if errA != nil {
+			return errA
+		}
+		target.Start = *p
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		s := &time.Time{}
+		t := buff.ReadInt()    // byte array length
+		u := buff.ReadBytes(t) // byte array
+		errB := s.UnmarshalBinary(u)
+		if errB != nil {
+			return errB
+		}
+		target.End = *s
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Diagnostic
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Diagnostic instance
+// into a byte array
+func (target *Diagnostic) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this Diagnostic instance
+// into a byte array leveraging a predefined context.
+func (target *Diagnostic) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][reference](time.Time) ---
+	a, errA := target.Timestamp.MarshalBinary()
+	if errA != nil {
+		return errA
+	}
+	buff.WriteInt(len(a))
+	buff.WriteBytes(a)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][alias](DiagnosticLevel) ---
+	buff.WriteInt(int(target.Level)) // write int
+	// --- [end][write][alias](DiagnosticLevel) ---
+
+	if ctx.IsStringTable() {
+		b := ctx.Table.AddOrGet(target.Message)
+		buff.WriteInt(b) // write table index
+	} else {
+		buff.WriteString(target.Message) // write string
+	}
+	if target.Details == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(target.Details)) // map length
+		for v, z := range target.Details {
+			if ctx.IsStringTable() {
+				c := ctx.Table.AddOrGet(v)
+				buff.WriteInt(c) // write table index
+			} else {
+				buff.WriteString(v) // write string
+			}
+			if ctx.IsStringTable() {
+				d := ctx.Table.AddOrGet(z)
+				buff.WriteInt(d) // write table index
+			} else {
+				buff.WriteString(z) // write string
+			}
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	return nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the Diagnostic type
+func (target *Diagnostic) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the Diagnostic type
+func (target *Diagnostic) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Diagnostic. Expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		a := &time.Time{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.Timestamp = *a
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](DiagnosticLevel) ---
+		var d int
+		e := buff.ReadInt() // read int
+		d = e
+
+		target.Level = DiagnosticLevel(d)
+		// --- [end][read][alias](DiagnosticLevel) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var g string
+		if ctx.IsStringTable() {
+			h := buff.ReadInt() // read string index
+			g = ctx.Table[h]
+		} else {
+			g = buff.ReadString() // read string
+		}
+		f := g
+		target.Message = f
+
+	} else {
+		target.Message = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Details = nil
+		} else {
+			// --- [begin][read][map](map[string]string) ---
+			l := buff.ReadInt() // map len
+			k := make(map[string]string, l)
+			for i := 0; i < l; i++ {
+				var v string
+				var n string
+				if ctx.IsStringTable() {
+					o := buff.ReadInt() // read string index
+					n = ctx.Table[o]
+				} else {
+					n = buff.ReadString() // read string
+				}
+				m := n
+				v = m
+
+				var z string
+				var q string
+				if ctx.IsStringTable() {
+					r := buff.ReadInt() // read string index
+					q = ctx.Table[r]
+				} else {
+					q = buff.ReadString() // read string
+				}
+				p := q
+				z = p
+
+				k[v] = z
+			}
+			target.Details = k
+			// --- [end][read][map](map[string]string) ---
+
+		}
+	} else {
+		target.Details = nil
+
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  KubeModelSet
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this KubeModelSet instance
+// into a byte array
+func (target *KubeModelSet) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  NewStringTable(),
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	sTableBytes := ctx.Table.ToBytes()
+	merged := appendBytes(sTableBytes, encBytes)
+	return merged, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this KubeModelSet instance
+// into a byte array leveraging a predefined context.
+func (target *KubeModelSet) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if target.Metadata == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](Metadata) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errA := target.Metadata.MarshalBinaryWithContext(ctx)
+		if errA != nil {
+			return errA
+		}
+		// --- [end][write][struct](Metadata) ---
+
+	}
+	// --- [begin][write][struct](Window) ---
+	buff.WriteInt(0) // [compatibility, unused]
+	errB := target.Window.MarshalBinaryWithContext(ctx)
+	if errB != nil {
+		return errB
+	}
+	// --- [end][write][struct](Window) ---
+
+	if target.Cluster == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](Cluster) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errC := target.Cluster.MarshalBinaryWithContext(ctx)
+		if errC != nil {
+			return errC
+		}
+		// --- [end][write][struct](Cluster) ---
+
+	}
+	if target.Namespaces == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]*Namespace) ---
+		buff.WriteInt(len(target.Namespaces)) // map length
+		for v, z := range target.Namespaces {
+			if ctx.IsStringTable() {
+				a := ctx.Table.AddOrGet(v)
+				buff.WriteInt(a) // write table index
+			} else {
+				buff.WriteString(v) // write string
+			}
+			if z == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](Namespace) ---
+				buff.WriteInt(0) // [compatibility, unused]
+				errD := z.MarshalBinaryWithContext(ctx)
+				if errD != nil {
+					return errD
+				}
+				// --- [end][write][struct](Namespace) ---
+
+			}
+		}
+		// --- [end][write][map](map[string]*Namespace) ---
+
+	}
+	if target.ResourceQuotas == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]*ResourceQuota) ---
+		buff.WriteInt(len(target.ResourceQuotas)) // map length
+		for vv, zz := range target.ResourceQuotas {
+			if ctx.IsStringTable() {
+				b := ctx.Table.AddOrGet(vv)
+				buff.WriteInt(b) // write table index
+			} else {
+				buff.WriteString(vv) // write string
+			}
+			if zz == nil {
+				buff.WriteUInt8(uint8(0)) // write nil byte
+			} else {
+				buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+				// --- [begin][write][struct](ResourceQuota) ---
+				buff.WriteInt(0) // [compatibility, unused]
+				errE := zz.MarshalBinaryWithContext(ctx)
+				if errE != nil {
+					return errE
+				}
+				// --- [end][write][struct](ResourceQuota) ---
+
+			}
+		}
+		// --- [end][write][map](map[string]*ResourceQuota) ---
+
+	}
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the KubeModelSet type
+func (target *KubeModelSet) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the KubeModelSet type
+func (target *KubeModelSet) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling KubeModelSet: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Metadata = nil
+		} else {
+			// --- [begin][read][struct](Metadata) ---
+			a := &Metadata{}
+			buff.ReadInt() // [compatibility, unused]
+			errA := a.UnmarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			target.Metadata = a
+			// --- [end][read][struct](Metadata) ---
+
+		}
+	} else {
+		target.Metadata = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][struct](Window) ---
+		b := &Window{}
+		buff.ReadInt() // [compatibility, unused]
+		errB := b.UnmarshalBinaryWithContext(ctx)
+		if errB != nil {
+			return errB
+		}
+		target.Window = *b
+		// --- [end][read][struct](Window) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Cluster = nil
+		} else {
+			// --- [begin][read][struct](Cluster) ---
+			c := &Cluster{}
+			buff.ReadInt() // [compatibility, unused]
+			errC := c.UnmarshalBinaryWithContext(ctx)
+			if errC != nil {
+				return errC
+			}
+			target.Cluster = c
+			// --- [end][read][struct](Cluster) ---
+
+		}
+	} else {
+		target.Cluster = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Namespaces = nil
+		} else {
+			// --- [begin][read][map](map[string]*Namespace) ---
+			e := buff.ReadInt() // map len
+			d := make(map[string]*Namespace, e)
+			for i := 0; i < e; i++ {
+				var v string
+				var g string
+				if ctx.IsStringTable() {
+					h := buff.ReadInt() // read string index
+					g = ctx.Table[h]
+				} else {
+					g = buff.ReadString() // read string
+				}
+				f := g
+				v = f
+
+				var z *Namespace
+				if buff.ReadUInt8() == uint8(0) {
+					z = nil
+				} else {
+					// --- [begin][read][struct](Namespace) ---
+					k := &Namespace{}
+					buff.ReadInt() // [compatibility, unused]
+					errD := k.UnmarshalBinaryWithContext(ctx)
+					if errD != nil {
+						return errD
+					}
+					z = k
+					// --- [end][read][struct](Namespace) ---
+
+				}
+				d[v] = z
+			}
+			target.Namespaces = d
+			// --- [end][read][map](map[string]*Namespace) ---
+
+		}
+	} else {
+		target.Namespaces = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.ResourceQuotas = nil
+		} else {
+			// --- [begin][read][map](map[string]*ResourceQuota) ---
+			m := buff.ReadInt() // map len
+			l := make(map[string]*ResourceQuota, m)
+			for j := 0; j < m; j++ {
+				var vv string
+				var o string
+				if ctx.IsStringTable() {
+					p := buff.ReadInt() // read string index
+					o = ctx.Table[p]
+				} else {
+					o = buff.ReadString() // read string
+				}
+				n := o
+				vv = n
+
+				var zz *ResourceQuota
+				if buff.ReadUInt8() == uint8(0) {
+					zz = nil
+				} else {
+					// --- [begin][read][struct](ResourceQuota) ---
+					q := &ResourceQuota{}
+					buff.ReadInt() // [compatibility, unused]
+					errE := q.UnmarshalBinaryWithContext(ctx)
+					if errE != nil {
+						return errE
+					}
+					zz = q
+					// --- [end][read][struct](ResourceQuota) ---
+
+				}
+				l[vv] = zz
+			}
+			target.ResourceQuotas = l
+			// --- [end][read][map](map[string]*ResourceQuota) ---
+
+		}
+	} else {
+		target.ResourceQuotas = nil
+
+	}
+
+	return nil
+}
+
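+// A minimal round-trip sketch for the set-level codec (the field values are
+// hypothetical; only the entry points defined above are assumed):
+//
+//	set := &KubeModelSet{
+//		Window: Window{Start: time.Now().Add(-time.Hour), End: time.Now()},
+//		Namespaces: map[string]*Namespace{
+//			"ns-uid": {UID: "ns-uid", Name: "default"},
+//		},
+//	}
+//	data, err := set.MarshalBinary() // string-table header + encoded payload
+//	if err != nil { /* handle */ }
+//
+//	decoded := &KubeModelSet{}
+//	err = decoded.UnmarshalBinary(data) // detects the table tag, then decodes
+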
+//--------------------------------------------------------------------------
+//  Metadata
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Metadata instance
+// into a byte array
+func (target *Metadata) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
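+// Unlike the KubeModelSet entry point above, the per-type MarshalBinary
+// methods build their EncodingContext with Table: nil, so IsStringTable()
+// reports false, strings are written inline, and no string-table header is
+// prepended; the matching UnmarshalBinary then finds no BinaryTagStringTable
+// tag and decodes the payload directly.
+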
+// MarshalBinaryWithContext serializes the internal properties of this Metadata instance
+// into a byte array leveraging a predefined context.
+func (target *Metadata) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][reference](time.Time) ---
+	a, errA := target.CreatedAt.MarshalBinary()
+	if errA != nil {
+		return errA
+	}
+	buff.WriteInt(len(a))
+	buff.WriteBytes(a)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.CompletedAt.MarshalBinary()
+	if errB != nil {
+		return errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	buff.WriteInt(target.ObjectCount) // write int
+	if target.Diagnostics == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][slice]([]Diagnostic) ---
+		buff.WriteInt(len(target.Diagnostics)) // array length
+		for i := 0; i < len(target.Diagnostics); i++ {
+			// --- [begin][write][struct](Diagnostic) ---
+			buff.WriteInt(0) // [compatibility, unused]
+			errC := target.Diagnostics[i].MarshalBinaryWithContext(ctx)
+			if errC != nil {
+				return errC
+			}
+			// --- [end][write][struct](Diagnostic) ---
+
+		}
+		// --- [end][write][slice]([]Diagnostic) ---
+
+	}
+	// --- [begin][write][alias](DiagnosticLevel) ---
+	buff.WriteInt(int(target.DiagnosticLevel)) // write int
+	// --- [end][write][alias](DiagnosticLevel) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the Metadata type
+func (target *Metadata) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the Metadata type
+func (target *Metadata) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling Metadata: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		a := &time.Time{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.CreatedAt = *a
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		d := &time.Time{}
+		e := buff.ReadInt()    // byte array length
+		f := buff.ReadBytes(e) // byte array
+		errB := d.UnmarshalBinary(f)
+		if errB != nil {
+			return errB
+		}
+		target.CompletedAt = *d
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		g := buff.ReadInt() // read int
+		target.ObjectCount = g
+
+	} else {
+		target.ObjectCount = int(0) // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Diagnostics = nil
+		} else {
+			// --- [begin][read][slice]([]Diagnostic) ---
+			k := buff.ReadInt() // array len
+			h := make([]Diagnostic, k)
+			for i := 0; i < k; i++ {
+				// --- [begin][read][struct](Diagnostic) ---
+				m := &Diagnostic{}
+				buff.ReadInt() // [compatibility, unused]
+				errC := m.UnmarshalBinaryWithContext(ctx)
+				if errC != nil {
+					return errC
+				}
+				l := *m
+				// --- [end][read][struct](Diagnostic) ---
+
+				h[i] = l
+			}
+			target.Diagnostics = h
+			// --- [end][read][slice]([]Diagnostic) ---
+
+		}
+	} else {
+		target.Diagnostics = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](DiagnosticLevel) ---
+		var n int
+		o := buff.ReadInt() // read int
+		n = o
+
+		target.DiagnosticLevel = DiagnosticLevel(n)
+		// --- [end][read][alias](DiagnosticLevel) ---
+
+	} else {
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  Namespace
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Namespace instance
+// into a byte array
+func (target *Namespace) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this Namespace instance
+// into a byte array leveraging a predefined context.
+func (target *Namespace) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if ctx.IsStringTable() {
+		a := ctx.Table.AddOrGet(target.UID)
+		buff.WriteInt(a) // write table index
+	} else {
+		buff.WriteString(target.UID) // write string
+	}
+	if ctx.IsStringTable() {
+		b := ctx.Table.AddOrGet(target.ClusterUID)
+		buff.WriteInt(b) // write table index
+	} else {
+		buff.WriteString(target.ClusterUID) // write string
+	}
+	if ctx.IsStringTable() {
+		c := ctx.Table.AddOrGet(target.Name)
+		buff.WriteInt(c) // write table index
+	} else {
+		buff.WriteString(target.Name) // write string
+	}
+	if target.Labels == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(target.Labels)) // map length
+		for v, z := range target.Labels {
+			if ctx.IsStringTable() {
+				d := ctx.Table.AddOrGet(v)
+				buff.WriteInt(d) // write table index
+			} else {
+				buff.WriteString(v) // write string
+			}
+			if ctx.IsStringTable() {
+				e := ctx.Table.AddOrGet(z)
+				buff.WriteInt(e) // write table index
+			} else {
+				buff.WriteString(z) // write string
+			}
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	if target.Annotations == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[string]string) ---
+		buff.WriteInt(len(target.Annotations)) // map length
+		for vv, zz := range target.Annotations {
+			if ctx.IsStringTable() {
+				f := ctx.Table.AddOrGet(vv)
+				buff.WriteInt(f) // write table index
+			} else {
+				buff.WriteString(vv) // write string
+			}
+			if ctx.IsStringTable() {
+				g := ctx.Table.AddOrGet(zz)
+				buff.WriteInt(g) // write table index
+			} else {
+				buff.WriteString(zz) // write string
+			}
+		}
+		// --- [end][write][map](map[string]string) ---
+
+	}
+	// --- [begin][write][reference](time.Time) ---
+	h, errA := target.Start.MarshalBinary()
+	if errA != nil {
+		return errA
+	}
+	buff.WriteInt(len(h))
+	buff.WriteBytes(h)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	k, errB := target.End.MarshalBinary()
+	if errB != nil {
+		return errB
+	}
+	buff.WriteInt(len(k))
+	buff.WriteBytes(k)
+	// --- [end][write][reference](time.Time) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the Namespace type
+func (target *Namespace) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the Namespace type
+func (target *Namespace) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling Namespace: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var b string
+		if ctx.IsStringTable() {
+			c := buff.ReadInt() // read string index
+			b = ctx.Table[c]
+		} else {
+			b = buff.ReadString() // read string
+		}
+		a := b
+		target.UID = a
+
+	} else {
+		target.UID = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var e string
+		if ctx.IsStringTable() {
+			f := buff.ReadInt() // read string index
+			e = ctx.Table[f]
+		} else {
+			e = buff.ReadString() // read string
+		}
+		d := e
+		target.ClusterUID = d
+
+	} else {
+		target.ClusterUID = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var h string
+		if ctx.IsStringTable() {
+			k := buff.ReadInt() // read string index
+			h = ctx.Table[k]
+		} else {
+			h = buff.ReadString() // read string
+		}
+		g := h
+		target.Name = g
+
+	} else {
+		target.Name = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Labels = nil
+		} else {
+			// --- [begin][read][map](map[string]string) ---
+			m := buff.ReadInt() // map len
+			l := make(map[string]string, m)
+			for i := 0; i < m; i++ {
+				var v string
+				var o string
+				if ctx.IsStringTable() {
+					p := buff.ReadInt() // read string index
+					o = ctx.Table[p]
+				} else {
+					o = buff.ReadString() // read string
+				}
+				n := o
+				v = n
+
+				var z string
+				var r string
+				if ctx.IsStringTable() {
+					s := buff.ReadInt() // read string index
+					r = ctx.Table[s]
+				} else {
+					r = buff.ReadString() // read string
+				}
+				q := r
+				z = q
+
+				l[v] = z
+			}
+			target.Labels = l
+			// --- [end][read][map](map[string]string) ---
+
+		}
+	} else {
+		target.Labels = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Annotations = nil
+		} else {
+			// --- [begin][read][map](map[string]string) ---
+			u := buff.ReadInt() // map len
+			t := make(map[string]string, u)
+			for j := 0; j < u; j++ {
+				var vv string
+				var x string
+				if ctx.IsStringTable() {
+					y := buff.ReadInt() // read string index
+					x = ctx.Table[y]
+				} else {
+					x = buff.ReadString() // read string
+				}
+				w := x
+				vv = w
+
+				var zz string
+				var bb string
+				if ctx.IsStringTable() {
+					cc := buff.ReadInt() // read string index
+					bb = ctx.Table[cc]
+				} else {
+					bb = buff.ReadString() // read string
+				}
+				aa := bb
+				zz = aa
+
+				t[vv] = zz
+			}
+			target.Annotations = t
+			// --- [end][read][map](map[string]string) ---
+
+		}
+	} else {
+		target.Annotations = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		dd := &time.Time{}
+		ee := buff.ReadInt()     // byte array length
+		ff := buff.ReadBytes(ee) // byte array
+		errA := dd.UnmarshalBinary(ff)
+		if errA != nil {
+			return errA
+		}
+		target.Start = *dd
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		gg := &time.Time{}
+		hh := buff.ReadInt()     // byte array length
+		kk := buff.ReadBytes(hh) // byte array
+		errB := gg.UnmarshalBinary(kk)
+		if errB != nil {
+			return errB
+		}
+		target.End = *gg
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  ResourceQuantity
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuantity instance
+// into a byte array
+func (target *ResourceQuantity) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuantity instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuantity) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][alias](Resource) ---
+	if ctx.IsStringTable() {
+		a := ctx.Table.AddOrGet(string(target.Resource))
+		buff.WriteInt(a) // write table index
+	} else {
+		buff.WriteString(string(target.Resource)) // write string
+	}
+	// --- [end][write][alias](Resource) ---
+
+	// --- [begin][write][alias](Unit) ---
+	if ctx.IsStringTable() {
+		b := ctx.Table.AddOrGet(string(target.Unit))
+		buff.WriteInt(b) // write table index
+	} else {
+		buff.WriteString(string(target.Unit)) // write string
+	}
+	// --- [end][write][alias](Unit) ---
+
+	// --- [begin][write][alias](Stats) ---
+	if map[StatType]float64(target.Values) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[StatType]float64) ---
+		buff.WriteInt(len(map[StatType]float64(target.Values))) // map length
+		for v, z := range map[StatType]float64(target.Values) {
+			// --- [begin][write][alias](StatType) ---
+			if ctx.IsStringTable() {
+				c := ctx.Table.AddOrGet(string(v))
+				buff.WriteInt(c) // write table index
+			} else {
+				buff.WriteString(string(v)) // write string
+			}
+			// --- [end][write][alias](StatType) ---
+
+			buff.WriteFloat64(z) // write float64
+		}
+		// --- [end][write][map](map[StatType]float64) ---
+
+	}
+	// --- [end][write][alias](Stats) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuantity type
+func (target *ResourceQuantity) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuantity type
+func (target *ResourceQuantity) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuantity: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](Resource) ---
+		var a string
+		var c string
+		if ctx.IsStringTable() {
+			d := buff.ReadInt() // read string index
+			c = ctx.Table[d]
+		} else {
+			c = buff.ReadString() // read string
+		}
+		b := c
+		a = b
+
+		target.Resource = Resource(a)
+		// --- [end][read][alias](Resource) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](Unit) ---
+		var e string
+		var g string
+		if ctx.IsStringTable() {
+			h := buff.ReadInt() // read string index
+			g = ctx.Table[h]
+		} else {
+			g = buff.ReadString() // read string
+		}
+		f := g
+		e = f
+
+		target.Unit = Unit(e)
+		// --- [end][read][alias](Unit) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](Stats) ---
+		var k map[StatType]float64
+		if buff.ReadUInt8() == uint8(0) {
+			k = nil
+		} else {
+			// --- [begin][read][map](map[StatType]float64) ---
+			m := buff.ReadInt() // map len
+			l := make(map[StatType]float64, m)
+			for i := 0; i < m; i++ {
+				// --- [begin][read][alias](StatType) ---
+				var n string
+				var p string
+				if ctx.IsStringTable() {
+					q := buff.ReadInt() // read string index
+					p = ctx.Table[q]
+				} else {
+					p = buff.ReadString() // read string
+				}
+				o := p
+				n = o
+
+				v := StatType(n)
+				// --- [end][read][alias](StatType) ---
+
+				var z float64
+				r := buff.ReadFloat64() // read float64
+				z = r
+
+				l[v] = z
+			}
+			k = l
+			// --- [end][read][map](map[StatType]float64) ---
+
+		}
+		target.Values = Stats(k)
+		// --- [end][read][alias](Stats) ---
+
+	} else {
+	}
+
+	return nil
+}
+
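+// For illustration, a quantity carrying aggregate CPU stats might be built
+// as follows (the Resource/Unit/StatType string values are hypothetical,
+// not asserted constants):
+//
+//	q := ResourceQuantity{
+//		Resource: Resource("cpu"),
+//		Unit:     Unit("cores"),
+//		Values:   Stats{StatType("max"): 4, StatType("avg"): 2.5},
+//	}
+//	data, _ := q.MarshalBinary() // version byte + inline strings + stats map
+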
+//--------------------------------------------------------------------------
+//  ResourceQuota
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuota instance
+// into a byte array
+func (target *ResourceQuota) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuota instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuota) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if ctx.IsStringTable() {
+		a := ctx.Table.AddOrGet(target.UID)
+		buff.WriteInt(a) // write table index
+	} else {
+		buff.WriteString(target.UID) // write string
+	}
+	if ctx.IsStringTable() {
+		b := ctx.Table.AddOrGet(target.NamespaceUID)
+		buff.WriteInt(b) // write table index
+	} else {
+		buff.WriteString(target.NamespaceUID) // write string
+	}
+	if ctx.IsStringTable() {
+		c := ctx.Table.AddOrGet(target.Name)
+		buff.WriteInt(c) // write table index
+	} else {
+		buff.WriteString(target.Name) // write string
+	}
+	if target.Spec == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](ResourceQuotaSpec) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errA := target.Spec.MarshalBinaryWithContext(ctx)
+		if errA != nil {
+			return errA
+		}
+		// --- [end][write][struct](ResourceQuotaSpec) ---
+
+	}
+	if target.Status == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](ResourceQuotaStatus) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errB := target.Status.MarshalBinaryWithContext(ctx)
+		if errB != nil {
+			return errB
+		}
+		// --- [end][write][struct](ResourceQuotaStatus) ---
+
+	}
+	// --- [begin][write][reference](time.Time) ---
+	d, errC := target.Start.MarshalBinary()
+	if errC != nil {
+		return errC
+	}
+	buff.WriteInt(len(d))
+	buff.WriteBytes(d)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	e, errD := target.End.MarshalBinary()
+	if errD != nil {
+		return errD
+	}
+	buff.WriteInt(len(e))
+	buff.WriteBytes(e)
+	// --- [end][write][reference](time.Time) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuota type
+func (target *ResourceQuota) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuota type
+func (target *ResourceQuota) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuota: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var b string
+		if ctx.IsStringTable() {
+			c := buff.ReadInt() // read string index
+			b = ctx.Table[c]
+		} else {
+			b = buff.ReadString() // read string
+		}
+		a := b
+		target.UID = a
+
+	} else {
+		target.UID = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var e string
+		if ctx.IsStringTable() {
+			f := buff.ReadInt() // read string index
+			e = ctx.Table[f]
+		} else {
+			e = buff.ReadString() // read string
+		}
+		d := e
+		target.NamespaceUID = d
+
+	} else {
+		target.NamespaceUID = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		var h string
+		if ctx.IsStringTable() {
+			k := buff.ReadInt() // read string index
+			h = ctx.Table[k]
+		} else {
+			h = buff.ReadString() // read string
+		}
+		g := h
+		target.Name = g
+
+	} else {
+		target.Name = "" // default
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Spec = nil
+		} else {
+			// --- [begin][read][struct](ResourceQuotaSpec) ---
+			l := &ResourceQuotaSpec{}
+			buff.ReadInt() // [compatibility, unused]
+			errA := l.UnmarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			target.Spec = l
+			// --- [end][read][struct](ResourceQuotaSpec) ---
+
+		}
+	} else {
+		target.Spec = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Status = nil
+		} else {
+			// --- [begin][read][struct](ResourceQuotaStatus) ---
+			m := &ResourceQuotaStatus{}
+			buff.ReadInt() // [compatibility, unused]
+			errB := m.UnmarshalBinaryWithContext(ctx)
+			if errB != nil {
+				return errB
+			}
+			target.Status = m
+			// --- [end][read][struct](ResourceQuotaStatus) ---
+
+		}
+	} else {
+		target.Status = nil
+
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		n := &time.Time{}
+		o := buff.ReadInt()    // byte array length
+		p := buff.ReadBytes(o) // byte array
+		errC := n.UnmarshalBinary(p)
+		if errC != nil {
+			return errC
+		}
+		target.Start = *n
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		q := &time.Time{}
+		r := buff.ReadInt()    // byte array length
+		s := buff.ReadBytes(r) // byte array
+		errD := q.UnmarshalBinary(s)
+		if errD != nil {
+			return errD
+		}
+		target.End = *q
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	return nil
+}
+
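+// Note that optional sub-structs cost a single byte when absent: a
+// ResourceQuota with Spec == nil and Status == nil encodes each as one
+// 0x00 presence marker, so sparse quotas stay compact on the wire.
+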
+//--------------------------------------------------------------------------
+//  ResourceQuotaSpec
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuotaSpec instance
+// into a byte array
+func (target *ResourceQuotaSpec) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuotaSpec instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuotaSpec) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if target.Hard == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](ResourceQuotaSpecHard) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errA := target.Hard.MarshalBinaryWithContext(ctx)
+		if errA != nil {
+			return errA
+		}
+		// --- [end][write][struct](ResourceQuotaSpecHard) ---
+
+	}
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuotaSpec type
+func (target *ResourceQuotaSpec) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuotaSpec type
+func (target *ResourceQuotaSpec) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuotaSpec: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Hard = nil
+		} else {
+			// --- [begin][read][struct](ResourceQuotaSpecHard) ---
+			a := &ResourceQuotaSpecHard{}
+			buff.ReadInt() // [compatibility, unused]
+			errA := a.UnmarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			target.Hard = a
+			// --- [end][read][struct](ResourceQuotaSpecHard) ---
+
+		}
+	} else {
+		target.Hard = nil
+
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  ResourceQuotaSpecHard
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuotaSpecHard instance
+// into a byte array
+func (target *ResourceQuotaSpecHard) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuotaSpecHard instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuotaSpecHard) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][alias](ResourceQuantities) ---
+	if map[Resource]ResourceQuantity(target.Requests) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[Resource]ResourceQuantity) ---
+		buff.WriteInt(len(map[Resource]ResourceQuantity(target.Requests))) // map length
+		for v, z := range map[Resource]ResourceQuantity(target.Requests) {
+			// --- [begin][write][alias](Resource) ---
+			if ctx.IsStringTable() {
+				a := ctx.Table.AddOrGet(string(v))
+				buff.WriteInt(a) // write table index
+			} else {
+				buff.WriteString(string(v)) // write string
+			}
+			// --- [end][write][alias](Resource) ---
+
+			// --- [begin][write][struct](ResourceQuantity) ---
+			buff.WriteInt(0) // [compatibility, unused]
+			errA := z.MarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			// --- [end][write][struct](ResourceQuantity) ---
+
+		}
+		// --- [end][write][map](map[Resource]ResourceQuantity) ---
+
+	}
+	// --- [end][write][alias](ResourceQuantities) ---
+
+	// --- [begin][write][alias](ResourceQuantities) ---
+	if map[Resource]ResourceQuantity(target.Limits) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[Resource]ResourceQuantity) ---
+		buff.WriteInt(len(map[Resource]ResourceQuantity(target.Limits))) // map length
+		for vv, zz := range map[Resource]ResourceQuantity(target.Limits) {
+			// --- [begin][write][alias](Resource) ---
+			if ctx.IsStringTable() {
+				b := ctx.Table.AddOrGet(string(vv))
+				buff.WriteInt(b) // write table index
+			} else {
+				buff.WriteString(string(vv)) // write string
+			}
+			// --- [end][write][alias](Resource) ---
+
+			// --- [begin][write][struct](ResourceQuantity) ---
+			buff.WriteInt(0) // [compatibility, unused]
+			errB := zz.MarshalBinaryWithContext(ctx)
+			if errB != nil {
+				return errB
+			}
+			// --- [end][write][struct](ResourceQuantity) ---
+
+		}
+		// --- [end][write][map](map[Resource]ResourceQuantity) ---
+
+	}
+	// --- [end][write][alias](ResourceQuantities) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuotaSpecHard type
+func (target *ResourceQuotaSpecHard) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuotaSpecHard type
+func (target *ResourceQuotaSpecHard) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuotaSpecHard: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](ResourceQuantities) ---
+		var a map[Resource]ResourceQuantity
+		if buff.ReadUInt8() == uint8(0) {
+			a = nil
+		} else {
+			// --- [begin][read][map](map[Resource]ResourceQuantity) ---
+			c := buff.ReadInt() // map len
+			b := make(map[Resource]ResourceQuantity, c)
+			for i := 0; i < c; i++ {
+				// --- [begin][read][alias](Resource) ---
+				var d string
+				var f string
+				if ctx.IsStringTable() {
+					g := buff.ReadInt() // read string index
+					f = ctx.Table[g]
+				} else {
+					f = buff.ReadString() // read string
+				}
+				e := f
+				d = e
+
+				v := Resource(d)
+				// --- [end][read][alias](Resource) ---
+
+				// --- [begin][read][struct](ResourceQuantity) ---
+				h := &ResourceQuantity{}
+				buff.ReadInt() // [compatibility, unused]
+				errA := h.UnmarshalBinaryWithContext(ctx)
+				if errA != nil {
+					return errA
+				}
+				z := *h
+				// --- [end][read][struct](ResourceQuantity) ---
+
+				b[v] = z
+			}
+			a = b
+			// --- [end][read][map](map[Resource]ResourceQuantity) ---
+
+		}
+		target.Requests = ResourceQuantities(a)
+		// --- [end][read][alias](ResourceQuantities) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](ResourceQuantities) ---
+		var k map[Resource]ResourceQuantity
+		if buff.ReadUInt8() == uint8(0) {
+			k = nil
+		} else {
+			// --- [begin][read][map](map[Resource]ResourceQuantity) ---
+			m := buff.ReadInt() // map len
+			l := make(map[Resource]ResourceQuantity, m)
+			for j := 0; j < m; j++ {
+				// --- [begin][read][alias](Resource) ---
+				var n string
+				var p string
+				if ctx.IsStringTable() {
+					q := buff.ReadInt() // read string index
+					p = ctx.Table[q]
+				} else {
+					p = buff.ReadString() // read string
+				}
+				o := p
+				n = o
+
+				vv := Resource(n)
+				// --- [end][read][alias](Resource) ---
+
+				// --- [begin][read][struct](ResourceQuantity) ---
+				r := &ResourceQuantity{}
+				buff.ReadInt() // [compatibility, unused]
+				errB := r.UnmarshalBinaryWithContext(ctx)
+				if errB != nil {
+					return errB
+				}
+				zz := *r
+				// --- [end][read][struct](ResourceQuantity) ---
+
+				l[vv] = zz
+			}
+			k = l
+			// --- [end][read][map](map[Resource]ResourceQuantity) ---
+
+		}
+		target.Limits = ResourceQuantities(k)
+		// --- [end][read][alias](ResourceQuantities) ---
+
+	} else {
+	}
+
+	return nil
+}
+
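+// A sketch of the value encoded here, assuming illustrative resource and
+// stat names (none of these strings are asserted constants):
+//
+//	hard := &ResourceQuotaSpecHard{
+//		Requests: ResourceQuantities{
+//			Resource("cpu"): {
+//				Resource: Resource("cpu"),
+//				Unit:     Unit("cores"),
+//				Values:   Stats{StatType("hard"): 8},
+//			},
+//		},
+//		Limits: nil, // encodes as a single 0x00 presence byte
+//	}
+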
+//--------------------------------------------------------------------------
+//  ResourceQuotaStatus
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuotaStatus instance
+// into a byte array
+func (target *ResourceQuotaStatus) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuotaStatus instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuotaStatus) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	if target.Used == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](ResourceQuotaStatusUsed) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errA := target.Used.MarshalBinaryWithContext(ctx)
+		if errA != nil {
+			return errA
+		}
+		// --- [end][write][struct](ResourceQuotaStatusUsed) ---
+
+	}
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuotaStatus type
+func (target *ResourceQuotaStatus) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuotaStatus type
+func (target *ResourceQuotaStatus) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuotaStatus: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Used = nil
+		} else {
+			// --- [begin][read][struct](ResourceQuotaStatusUsed) ---
+			a := &ResourceQuotaStatusUsed{}
+			buff.ReadInt() // [compatibility, unused]
+			errA := a.UnmarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			target.Used = a
+			// --- [end][read][struct](ResourceQuotaStatusUsed) ---
+
+		}
+	} else {
+		target.Used = nil
+
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  ResourceQuotaStatusUsed
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this ResourceQuotaStatusUsed instance
+// into a byte array
+func (target *ResourceQuotaStatusUsed) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this ResourceQuotaStatusUsed instance
+// into a byte array leveraging a predefined context.
+func (target *ResourceQuotaStatusUsed) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][alias](ResourceQuantities) ---
+	if map[Resource]ResourceQuantity(target.Requests) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[Resource]ResourceQuantity) ---
+		buff.WriteInt(len(map[Resource]ResourceQuantity(target.Requests))) // map length
+		for v, z := range map[Resource]ResourceQuantity(target.Requests) {
+			// --- [begin][write][alias](Resource) ---
+			if ctx.IsStringTable() {
+				a := ctx.Table.AddOrGet(string(v))
+				buff.WriteInt(a) // write table index
+			} else {
+				buff.WriteString(string(v)) // write string
+			}
+			// --- [end][write][alias](Resource) ---
+
+			// --- [begin][write][struct](ResourceQuantity) ---
+			buff.WriteInt(0) // [compatibility, unused]
+			errA := z.MarshalBinaryWithContext(ctx)
+			if errA != nil {
+				return errA
+			}
+			// --- [end][write][struct](ResourceQuantity) ---
+
+		}
+		// --- [end][write][map](map[Resource]ResourceQuantity) ---
+
+	}
+	// --- [end][write][alias](ResourceQuantities) ---
+
+	// --- [begin][write][alias](ResourceQuantities) ---
+	if map[Resource]ResourceQuantity(target.Limits) == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][map](map[Resource]ResourceQuantity) ---
+		buff.WriteInt(len(map[Resource]ResourceQuantity(target.Limits))) // map length
+		for vv, zz := range map[Resource]ResourceQuantity(target.Limits) {
+			// --- [begin][write][alias](Resource) ---
+			if ctx.IsStringTable() {
+				b := ctx.Table.AddOrGet(string(vv))
+				buff.WriteInt(b) // write table index
+			} else {
+				buff.WriteString(string(vv)) // write string
+			}
+			// --- [end][write][alias](Resource) ---
+
+			// --- [begin][write][struct](ResourceQuantity) ---
+			buff.WriteInt(0) // [compatibility, unused]
+			errB := zz.MarshalBinaryWithContext(ctx)
+			if errB != nil {
+				return errB
+			}
+			// --- [end][write][struct](ResourceQuantity) ---
+
+		}
+		// --- [end][write][map](map[Resource]ResourceQuantity) ---
+
+	}
+	// --- [end][write][alias](ResourceQuantities) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the ResourceQuotaStatusUsed type
+func (target *ResourceQuotaStatusUsed) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the ResourceQuotaStatusUsed type
+func (target *ResourceQuotaStatusUsed) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("invalid version unmarshaling ResourceQuotaStatusUsed: expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](ResourceQuantities) ---
+		var a map[Resource]ResourceQuantity
+		if buff.ReadUInt8() == uint8(0) {
+			a = nil
+		} else {
+			// --- [begin][read][map](map[Resource]ResourceQuantity) ---
+			c := buff.ReadInt() // map len
+			b := make(map[Resource]ResourceQuantity, c)
+			for i := 0; i < c; i++ {
+				// --- [begin][read][alias](Resource) ---
+				var d string
+				var f string
+				if ctx.IsStringTable() {
+					g := buff.ReadInt() // read string index
+					f = ctx.Table[g]
+				} else {
+					f = buff.ReadString() // read string
+				}
+				e := f
+				d = e
+
+				v := Resource(d)
+				// --- [end][read][alias](Resource) ---
+
+				// --- [begin][read][struct](ResourceQuantity) ---
+				h := &ResourceQuantity{}
+				buff.ReadInt() // [compatibility, unused]
+				errA := h.UnmarshalBinaryWithContext(ctx)
+				if errA != nil {
+					return errA
+				}
+				z := *h
+				// --- [end][read][struct](ResourceQuantity) ---
+
+				b[v] = z
+			}
+			a = b
+			// --- [end][read][map](map[Resource]ResourceQuantity) ---
+
+		}
+		target.Requests = ResourceQuantities(a)
+		// --- [end][read][alias](ResourceQuantities) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][alias](ResourceQuantities) ---
+		var k map[Resource]ResourceQuantity
+		if buff.ReadUInt8() == uint8(0) {
+			k = nil
+		} else {
+			// --- [begin][read][map](map[Resource]ResourceQuantity) ---
+			m := buff.ReadInt() // map len
+			l := make(map[Resource]ResourceQuantity, m)
+			for j := 0; j < m; j++ {
+				// --- [begin][read][alias](Resource) ---
+				var n string
+				var p string
+				if ctx.IsStringTable() {
+					q := buff.ReadInt() // read string index
+					p = ctx.Table[q]
+				} else {
+					p = buff.ReadString() // read string
+				}
+				o := p
+				n = o
+
+				vv := Resource(n)
+				// --- [end][read][alias](Resource) ---
+
+				// --- [begin][read][struct](ResourceQuantity) ---
+				r := &ResourceQuantity{}
+				buff.ReadInt() // [compatibility, unused]
+				errB := r.UnmarshalBinaryWithContext(ctx)
+				if errB != nil {
+					return errB
+				}
+				zz := *r
+				// --- [end][read][struct](ResourceQuantity) ---
+
+				l[vv] = zz
+			}
+			k = l
+			// --- [end][read][map](map[Resource]ResourceQuantity) ---
+
+		}
+		target.Limits = ResourceQuantities(k)
+		// --- [end][read][alias](ResourceQuantities) ---
+
+	} else {
+	}
+
+	return nil
+}
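+
+// The two version-gated blocks above decode Requests and then Limits with
+// identical map layouts. A round-trip sketch, assuming the generated
+// MarshalBinary counterpart that appears earlier in this file:
+//
+//	used := &ResourceQuotaStatusUsed{
+//		Requests: ResourceQuantities{
+//			ResourceCPU: ResourceQuantity{Resource: ResourceCPU, Unit: UnitMillicore, Values: Stats{StatAvg: 250}},
+//		},
+//	}
+//
+//	data, err := used.MarshalBinary() // assumed generated counterpart
+//	if err != nil {
+//		// handle error
+//	}
+//
+//	// Wire layout per field: nil flag, map length, then per entry a key
+//	// (string or string-table index), a compatibility int, and the
+//	// ResourceQuantity payload; Requests first, Limits second.
+//	decoded := &ResourceQuotaStatusUsed{}
+//	err = decoded.UnmarshalBinary(data)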
+
+//--------------------------------------------------------------------------
+//  Window
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this Window instance
+// into a byte array
+func (target *Window) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this Window instance
+// into a byte array leveraging a predefined context.
+func (target *Window) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(DefaultCodecVersion) // version
+
+	// --- [begin][write][reference](time.Time) ---
+	a, errA := target.Start.MarshalBinary()
+	if errA != nil {
+		return errA
+	}
+	buff.WriteInt(len(a))
+	buff.WriteBytes(a)
+	// --- [end][write][reference](time.Time) ---
+
+	// --- [begin][write][reference](time.Time) ---
+	b, errB := target.End.MarshalBinary()
+	if errB != nil {
+		return errB
+	}
+	buff.WriteInt(len(b))
+	buff.WriteBytes(b)
+	// --- [end][write][reference](time.Time) ---
+
+	return nil
+}
+
+// UnmarshalBinary uses the passed byte array to set all the internal properties of
+// the Window type
+func (target *Window) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the Window type
+func (target *Window) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > DefaultCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling Window. Expected %d or less, got %d", DefaultCodecVersion, version)
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		a := &time.Time{}
+		b := buff.ReadInt()    // byte array length
+		c := buff.ReadBytes(b) // byte array
+		errA := a.UnmarshalBinary(c)
+		if errA != nil {
+			return errA
+		}
+		target.Start = *a
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	// field version check
+	if uint8(1) <= version {
+		// --- [begin][read][reference](time.Time) ---
+		d := &time.Time{}
+		e := buff.ReadInt()    // byte array length
+		f := buff.ReadBytes(e) // byte array
+		errB := d.UnmarshalBinary(f)
+		if errB != nil {
+			return errB
+		}
+		target.End = *d
+		// --- [end][read][reference](time.Time) ---
+
+	} else {
+	}
+
+	return nil
+}
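+
+// For reference, the two context-free entry points above round-trip cleanly;
+// a minimal sketch using only the Window type shown here:
+//
+//	func windowRoundTrip() (*Window, error) {
+//		start := time.Now().UTC().Truncate(time.Hour)
+//		w := &Window{Start: start, End: start.Add(time.Hour)}
+//
+//		// MarshalBinary writes a version byte followed by two
+//		// length-prefixed time.Time payloads (Start, then End).
+//		data, err := w.MarshalBinary()
+//		if err != nil {
+//			return nil, err
+//		}
+//
+//		// UnmarshalBinary tolerates an optional string-table header
+//		// before the version byte, then restores Start and End in order.
+//		decoded := &Window{}
+//		if err := decoded.UnmarshalBinary(data); err != nil {
+//			return nil, err
+//		}
+//		return decoded, nil
+//	}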

+ 474 - 0
core/pkg/model/kubemodel/kubemodel_codecs_test.go

@@ -0,0 +1,474 @@
+package kubemodel
+
+import (
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestKubeModelMarshalBinary(t *testing.T) {
+	s := time.Now().UTC().Truncate(time.Hour)
+	e := s.Add(time.Hour)
+
+	// Test empty KubeModelSet
+
+	kms := NewKubeModelSet(s, e)
+
+	b, err := kms.MarshalBinary()
+	require.NoError(t, err)
+
+	var act = new(KubeModelSet)
+	err = act.UnmarshalBinary(b)
+	require.NoError(t, err)
+
+	require.Equal(t, kms.Metadata, act.Metadata)
+	require.Equal(t, kms.Window, act.Window)
+	require.Equal(t, kms.Cluster, act.Cluster)
+	require.Equal(t, kms.Namespaces, act.Namespaces)
+	require.Equal(t, kms.ResourceQuotas, act.ResourceQuotas)
+
+	// Test non-empty KubeModelSet
+
+	kms = NewKubeModelSet(s, e)
+
+	kms.Metadata.CreatedAt = time.Now().UTC()
+
+	kms.RegisterCluster("cluster")
+	kms.Cluster.Start = s
+	kms.Cluster.End = e
+
+	kms.RegisterNamespace("ns1", "ns1")
+	kms.Namespaces["ns1"].Start = s
+	kms.Namespaces["ns1"].End = e
+	kms.Namespaces["ns1"].Labels = map[string]string{"label1": "label1"}
+	kms.Namespaces["ns1"].Annotations = map[string]string{"anno1": "anno1"}
+
+	kms.RegisterNamespace("ns2", "ns2")
+	kms.Namespaces["ns2"].Start = s
+	kms.Namespaces["ns2"].End = e
+	kms.Namespaces["ns2"].Labels = map[string]string{"label2": "label2"}
+	kms.Namespaces["ns2"].Annotations = map[string]string{"anno2": "anno2"}
+
+	kms.RegisterResourceQuota("rq1", "rq1", "ns1")
+	kms.ResourceQuotas["rq1"].Start = s
+	kms.ResourceQuotas["rq1"].End = e
+	kms.ResourceQuotas["rq1"].Spec = &ResourceQuotaSpec{
+		Hard: &ResourceQuotaSpecHard{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+	kms.ResourceQuotas["rq1"].Status = &ResourceQuotaStatus{
+		Used: &ResourceQuotaStatusUsed{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+
+	kms.RegisterResourceQuota("rq2", "rq2", "ns1")
+	kms.ResourceQuotas["rq2"].Start = s
+	kms.ResourceQuotas["rq2"].End = e
+	kms.ResourceQuotas["rq2"].Spec = &ResourceQuotaSpec{
+		Hard: &ResourceQuotaSpecHard{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+	kms.ResourceQuotas["rq2"].Status = &ResourceQuotaStatus{
+		Used: &ResourceQuotaStatusUsed{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+
+	kms.RegisterResourceQuota("rq3", "rq3", "ns2")
+	kms.ResourceQuotas["rq3"].Start = s
+	kms.ResourceQuotas["rq3"].End = e
+	kms.ResourceQuotas["rq3"].Spec = &ResourceQuotaSpec{
+		Hard: &ResourceQuotaSpecHard{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+	kms.ResourceQuotas["rq3"].Status = &ResourceQuotaStatus{
+		Used: &ResourceQuotaStatusUsed{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+
+	kms.RegisterResourceQuota("rq4", "rq4", "ns2")
+	kms.ResourceQuotas["rq4"].Start = s
+	kms.ResourceQuotas["rq4"].End = e
+	kms.ResourceQuotas["rq4"].Spec = &ResourceQuotaSpec{
+		Hard: &ResourceQuotaSpecHard{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+	kms.ResourceQuotas["rq4"].Status = &ResourceQuotaStatus{
+		Used: &ResourceQuotaStatusUsed{
+			Requests: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+			Limits: ResourceQuantities{
+				ResourceCPU: ResourceQuantity{
+					Resource: ResourceCPU,
+					Unit:     UnitMillicore,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+				ResourceMemory: ResourceQuantity{
+					Resource: ResourceMemory,
+					Unit:     UnitByte,
+					Values: Stats{
+						StatAvg: 1,
+						StatMax: 1,
+						StatP85: 1,
+						StatP95: 1,
+					},
+				},
+			},
+		},
+	}
+
+	kms.Error(errors.New("test error"))
+	kms.Warn("test warning")
+	kms.Info("test info")
+	kms.Debug("test debug")
+	kms.Trace("test trace")
+
+	kms.Metadata.CompletedAt = time.Now().UTC()
+
+	b, err = kms.MarshalBinary()
+	require.NoError(t, err)
+
+	act = new(KubeModelSet)
+	err = act.UnmarshalBinary(b)
+	require.NoError(t, err)
+
+	require.Equal(t, kms.Metadata, act.Metadata)
+	require.Equal(t, kms.Window, act.Window)
+	require.Equal(t, kms.Cluster, act.Cluster)
+	require.Equal(t, kms.Namespaces, act.Namespaces)
+	require.Equal(t, kms.ResourceQuotas, act.ResourceQuotas)
+}

+ 219 - 169
core/pkg/model/kubemodel/kubemodel_test.go

@@ -1,6 +1,7 @@
 package kubemodel
 
 import (
+	"errors"
 	"testing"
 	"time"
 
@@ -8,230 +9,279 @@ import (
 )
 
 func TestKubeModel(t *testing.T) {
-	start := time.Now().Add(-1 * time.Hour)
-	end := time.Now()
+	start := time.Now().UTC().Truncate(time.Hour)
+	end := start.Add(time.Hour)
+
+	t.Run("RegisterError", func(t *testing.T) {
+		kms := NewKubeModelSet(start, end)
+
+		require.NotNil(t, kms.Metadata)
+		require.Len(t, kms.GetErrors(), 0)
+
+		kms.Error(errors.New("test error"))
+		require.Len(t, kms.GetErrors(), 1)
+		require.Equal(t, "test error", kms.GetErrors()[0].Message)
+
+		kms.Error(errors.New("test error 2"))
+		require.Len(t, kms.GetErrors(), 2)
+		require.Equal(t, "test error 2", kms.GetErrors()[1].Message)
+	})
+
+	t.Run("RegisterCluster", func(t *testing.T) {
+		t.Run("empty cluster UID", func(t *testing.T) {
+			var err error
 
-	t.Run("RegisterNamespace", func(t *testing.T) {
-		t.Run("register new namespace", func(t *testing.T) {
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
 
-			err := kms.RegisterNamespace("ns-1", "default")
-			require.NoError(t, err)
+			err = kms.RegisterCluster("")
+			require.NotNil(t, err)
 
-			require.Len(t, kms.Namespaces, 1)
-			ns, ok := kms.Namespaces["ns-1"]
-			require.True(t, ok)
-			require.NotNil(t, ns)
-			require.Equal(t, "ns-1", ns.UID)
-			require.Equal(t, "default", ns.Name)
-			require.Equal(t, 1, kms.Metadata.ObjectCount)
+			require.Len(t, kms.GetErrors(), 1)
+			require.Equal(t, "RegisterCluster: uid is nil", kms.GetErrors()[0].Message)
+			require.Nil(t, kms.Cluster)
 		})
 
-		t.Run("register duplicate namespace", func(t *testing.T) {
+		t.Run("new cluster UID", func(t *testing.T) {
+			var err error
+			var clusterUID = "cluster-uid"
+
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
 
-			err := kms.RegisterNamespace("ns-1", "default")
-			require.NoError(t, err)
-			require.Equal(t, 1, kms.Metadata.ObjectCount)
+			err = kms.RegisterCluster(clusterUID)
+			require.Nil(t, err)
 
-			err = kms.RegisterNamespace("ns-1", "default")
-			require.NoError(t, err)
-			require.Len(t, kms.Namespaces, 1)
-			require.Equal(t, 1, kms.Metadata.ObjectCount, "ObjectCount should not increment for duplicate")
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.Cluster)
+			require.Equal(t, clusterUID, kms.Cluster.UID)
 		})
 
-		t.Run("register multiple namespaces", func(t *testing.T) {
+		t.Run("multiple Register calls", func(t *testing.T) {
+			var err error
+			var clusterUID = "cluster-uid"
+
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
 
-			err := kms.RegisterNamespace("ns-1", "default")
-			require.NoError(t, err)
+			err = kms.RegisterCluster(clusterUID)
+			require.Nil(t, err)
 
-			err = kms.RegisterNamespace("ns-2", "kube-system")
-			require.NoError(t, err)
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.Cluster)
+			require.Equal(t, clusterUID, kms.Cluster.UID)
 
-			require.Len(t, kms.Namespaces, 2)
-			require.Equal(t, 2, kms.Metadata.ObjectCount)
+			// Register cluster with same UID, expect no-op on second try
+			err = kms.RegisterCluster(clusterUID)
+			require.Nil(t, err)
+
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.Cluster)
+			require.Equal(t, clusterUID, kms.Cluster.UID)
+
+			// Register cluster with another UID (should not happen), expect no-op
+			err = kms.RegisterCluster("another-uid")
+			require.Nil(t, err)
+
+			require.Len(t, kms.GetWarnings(), 1)
+			require.Equal(t, "RegisterCluster(another-uid): attempting to change cluster UID from cluster-uid to another-uid", kms.GetWarnings()[0].Message)
+			require.NotNil(t, kms.Cluster)
+			require.Equal(t, clusterUID, kms.Cluster.UID) // original kms.Cluster is not modified
 		})
 	})
 
-	t.Run("RegisterPod", func(t *testing.T) {
-		t.Run("register new pod", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-
-			err := kms.RegisterPod("pod-1", "nginx", "default")
-			require.NoError(t, err)
-
-			require.Len(t, kms.Pods, 1)
-			pod, ok := kms.Pods["pod-1"]
-			require.True(t, ok)
-			require.NotNil(t, pod)
-			require.Equal(t, "pod-1", pod.UID)
-			require.Equal(t, "nginx", pod.Name)
-			require.Equal(t, "ns-1", pod.NamespaceUID)
-		})
+	t.Run("RegisterNamespace", func(t *testing.T) {
+		t.Run("empty namespace UID", func(t *testing.T) {
+			var err error
 
-		t.Run("register duplicate pod", func(t *testing.T) {
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
 
-			err := kms.RegisterPod("pod-1", "nginx", "default")
-			require.NoError(t, err)
+			err = kms.RegisterNamespace("", "")
+			require.NotNil(t, err)
 
-			err = kms.RegisterPod("pod-1", "nginx", "default")
-			require.NoError(t, err)
-			require.Len(t, kms.Pods, 1)
+			require.Len(t, kms.GetErrors(), 1)
+			require.Equal(t, "UID is nil for Namespace ''", kms.GetErrors()[0].Message)
+			require.Len(t, kms.Namespaces, 0)
 		})
-	})
 
-	t.Run("RegisterNode", func(t *testing.T) {
-		t.Run("register new node", func(t *testing.T) {
+		t.Run("register namespace on KMS w/o cluster", func(t *testing.T) {
+			var err error
+
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
 
-			err := kms.RegisterNode("node-1", "worker-1")
-			require.NoError(t, err)
+			testUID := "uid"
+			testName := "name"
 
-			require.Len(t, kms.Nodes, 1)
-			node, ok := kms.Nodes["node-1"]
-			require.True(t, ok)
-			require.NotNil(t, node)
-			require.Equal(t, "node-1", node.UID)
-			require.Equal(t, "worker-1", node.Name)
-		})
+			err = kms.RegisterNamespace(testUID, testName)
+			require.Nil(t, err)
 
-		t.Run("register duplicate node", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
+			require.Len(t, kms.GetWarnings(), 1)
+			require.Equal(t, "RegisterNamespace(uid, name): Cluster is nil", kms.GetWarnings()[0].Message)
 
-			err := kms.RegisterNode("node-1", "worker-1")
-			require.NoError(t, err)
+			testNamespace := &Namespace{UID: testUID, ClusterUID: "", Name: testName}
 
-			err = kms.RegisterNode("node-1", "worker-1")
-			require.NoError(t, err)
-			require.Len(t, kms.Nodes, 1)
+			require.NotNil(t, kms.Namespaces[testUID])
+			require.Equal(t, testNamespace, kms.Namespaces[testUID])
+			require.NotNil(t, kms.idx.namespaceByName[testName])
+			require.Equal(t, testNamespace, kms.idx.namespaceByName[testName])
+			require.Equal(t, 1, kms.Metadata.ObjectCount)
 		})
-	})
 
-	t.Run("RegisterOwner", func(t *testing.T) {
-		t.Run("register new owner", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-
-			err := kms.RegisterOwner("ctrl-1", "nginx-deployment", "default", "Deployment", true)
-			require.NoError(t, err)
-
-			require.Len(t, kms.Owners, 1)
-			owner, ok := kms.Owners["ctrl-1"]
-			require.True(t, ok)
-			require.NotNil(t, owner)
-			require.Equal(t, "ctrl-1", owner.UID)
-			require.Equal(t, "nginx-deployment", owner.Name)
-			require.Equal(t, OwnerKind("Deployment"), owner.Kind)
-			require.True(t, owner.Controller)
-		})
+		t.Run("register namespace on KMS w/ cluster", func(t *testing.T) {
+			var err error
 
-		t.Run("register duplicate owner", func(t *testing.T) {
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
+			err = kms.RegisterCluster("cluster-uid")
+			require.Nil(t, err)
 
-			err := kms.RegisterOwner("ctrl-1", "nginx-deployment", "default", "Deployment", true)
-			require.NoError(t, err)
+			// At this point we have a KMS with a cluster registered
 
-			err = kms.RegisterOwner("ctrl-1", "nginx-deployment", "default", "Deployment", true)
-			require.NoError(t, err)
-			require.Len(t, kms.Owners, 1)
-		})
-	})
+			testUID := "uid"
+			testName := "name"
 
-	t.Run("RegisterService", func(t *testing.T) {
-		t.Run("register new service", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-
-			err := kms.RegisterService("svc-1", "nginx-service", "default")
-			require.NoError(t, err)
-
-			require.Len(t, kms.Services, 1)
-			svc, ok := kms.Services["svc-1"]
-			require.True(t, ok)
-			require.NotNil(t, svc)
-			require.Equal(t, "svc-1", svc.UID)
-			require.Equal(t, "nginx-service", svc.Name)
-		})
+			err = kms.RegisterNamespace(testUID, testName)
+			require.Nil(t, err)
 
-		t.Run("register duplicate service", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.Namespaces[testUID])
+
+			testNamespace := &Namespace{UID: testUID, ClusterUID: "cluster-uid", Name: testName}
+
+			require.Equal(t, testNamespace, kms.Namespaces[testUID])
+			require.Equal(t, testNamespace, kms.idx.namespaceByName[testName])
+			require.Equal(t, 1, kms.Metadata.ObjectCount)
 
-			err := kms.RegisterService("svc-1", "nginx-service", "default")
-			require.NoError(t, err)
+			// Register same namespace again, expect no-op on second try
+			err = kms.RegisterNamespace(testUID, testName)
+			require.Nil(t, err)
 
-			err = kms.RegisterService("svc-1", "nginx-service", "default")
-			require.NoError(t, err)
-			require.Len(t, kms.Services, 1)
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.Namespaces[testUID])
+			require.Equal(t, testNamespace, kms.Namespaces[testUID])
+			require.Equal(t, testNamespace, kms.idx.namespaceByName[testName])
+			require.Equal(t, 1, kms.Metadata.ObjectCount) // remains 1
 		})
 	})
 
-	t.Run("RegisterContainer", func(t *testing.T) {
-		t.Run("register new container", func(t *testing.T) {
+	t.Run("RegisterResourceQuota", func(t *testing.T) {
+		t.Run("empty resourceQuota UID", func(t *testing.T) {
+			var err error
+
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-			kms.RegisterPod("pod-1", "nginx", "default")
-
-			err := kms.RegisterContainer("container-1", "nginx-container", "pod-1")
-			require.NoError(t, err)
-
-			require.Len(t, kms.Containers, 1)
-			container, ok := kms.Containers["container-1"]
-			require.True(t, ok)
-			require.NotNil(t, container)
-			require.Equal(t, "nginx-container", container.Name)
-			require.Equal(t, "pod-1", container.PodUID)
+
+			err = kms.RegisterResourceQuota("", "test", "")
+			require.NotNil(t, err)
+			require.Len(t, kms.GetErrors(), 1)
+			require.Equal(t, "UID is nil for ResourceQuota 'test'", kms.GetErrors()[0].Message)
+			require.Len(t, kms.ResourceQuotas, 0)
 		})
 
-		t.Run("register duplicate container", func(t *testing.T) {
+		t.Run("register resource quota on KMS w/o namespace", func(t *testing.T) {
+			var err error
+
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-			kms.RegisterPod("pod-1", "nginx", "default")
 
-			err := kms.RegisterContainer("container-1", "nginx-container", "pod-1")
-			require.NoError(t, err)
+			testUID := "uid"
+			testName := "name"
 
-			err = kms.RegisterContainer("container-1", "nginx-container", "pod-1")
-			require.NoError(t, err)
-			require.Len(t, kms.Containers, 1)
-		})
-	})
+			err = kms.RegisterResourceQuota(testUID, testName, "unregistered-namespace")
+			require.Nil(t, err)
+			require.Len(t, kms.GetWarnings(), 1)
+			require.Equal(t, "RegisterResourceQuota(uid, name, unregistered-namespace): missing namespace", kms.GetWarnings()[0].Message)
 
-	t.Run("IsEmpty", func(t *testing.T) {
-		t.Run("empty KubeModelSet", func(t *testing.T) {
-			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
+			testRQ := &ResourceQuota{
+				UID:          "uid",
+				NamespaceUID: "",
+				Name:         "name",
+				Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+				Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+			}
 
-			isEmpty := kms.IsEmpty()
-			require.True(t, isEmpty)
+			require.NotNil(t, kms.ResourceQuotas[testUID])
+			require.Equal(t, testRQ, kms.ResourceQuotas[testUID])
+			require.Equal(t, 1, kms.Metadata.ObjectCount)
 		})
 
-		t.Run("KubeModelSet with namespace", func(t *testing.T) {
+		t.Run("register resource quota on KMS w/ namespace", func(t *testing.T) {
 			kms := NewKubeModelSet(start, end)
-			kms.Cluster = &Cluster{UID: "cluster-1"}
-			kms.RegisterNamespace("ns-1", "default")
-
-			isEmpty := kms.IsEmpty()
-			require.False(t, isEmpty)
+			kms.RegisterCluster("cluster-uid")
+			kms.RegisterNamespace("namespace-uid", "namespace")
+			// At this point we have a KMS with a cluster and namespace registered
+
+			testUID := "uid"
+			testName := "name"
+			testNamespace := "namespace" // Register RQ in namespace that was already registered
+
+			kms.RegisterResourceQuota(testUID, testName, testNamespace)
+
+			testRQ := &ResourceQuota{
+				UID:          "uid",
+				NamespaceUID: "namespace-uid",
+				Name:         "name",
+				Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+				Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+			}
+
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.ResourceQuotas[testUID])
+			require.Equal(t, testRQ, kms.ResourceQuotas[testUID])
+			require.Equal(t, 2, kms.Metadata.ObjectCount) // 1 namespace and 1 RQ
+
+			// Register same RQ again, expect no-op on second try
+			kms.RegisterResourceQuota(testUID, testName, testNamespace)
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.ResourceQuotas[testUID])
+			require.Equal(t, testRQ, kms.ResourceQuotas[testUID])
+			require.Equal(t, 2, kms.Metadata.ObjectCount) // 1 namespace and 1 RQ
 		})
 
+		t.Run("register multiple RQs in multiple namespaces", func(t *testing.T) {
+			kms := NewKubeModelSet(start, end)
+			kms.RegisterCluster("cluster-uid")
+			kms.RegisterNamespace("namespace-1-uid", "namespace-1")
+			kms.RegisterNamespace("namespace-2-uid", "namespace-2")
+
+			kms.RegisterResourceQuota("uid-1", "name-1", "namespace-1")
+			kms.RegisterResourceQuota("uid-2", "name-2", "namespace-2")
+
+			require.Len(t, kms.GetErrors(), 0)
+			require.NotNil(t, kms.ResourceQuotas)
+			require.Len(t, kms.ResourceQuotas, 2)
+
+			testRQ1 := &ResourceQuota{
+				UID:          "uid-1",
+				NamespaceUID: "namespace-1-uid",
+				Name:         "name-1",
+				Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+				Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+			}
+			testRQ2 := &ResourceQuota{
+				UID:          "uid-2",
+				NamespaceUID: "namespace-2-uid",
+				Name:         "name-2",
+				Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+				Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+			}
+
+			require.Equal(t, testRQ1, kms.ResourceQuotas["uid-1"])
+			require.Equal(t, testRQ2, kms.ResourceQuotas["uid-2"])
+			require.Equal(t, 4, kms.Metadata.ObjectCount) // 2 namespaces and 2 RQs
+
+			// Register a third RQ with an invalid namespace
+			kms.RegisterResourceQuota("uid-3", "name-3", "namespace-3")
+
+			require.Len(t, kms.GetWarnings(), 1)
+			require.Equal(t, "RegisterResourceQuota(uid-3, name-3, namespace-3): missing namespace", kms.GetWarnings()[0].Message)
+
+			testRQ3 := &ResourceQuota{
+				UID:          "uid-3",
+				NamespaceUID: "",
+				Name:         "name-3",
+				Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+				Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+			}
+
+			require.Len(t, kms.ResourceQuotas, 3)
+			require.NotNil(t, kms.ResourceQuotas["uid-3"])
+			require.Equal(t, testRQ3, kms.ResourceQuotas["uid-3"])
+			require.Equal(t, 5, kms.Metadata.ObjectCount) // 2 namespaces and 3 RQs
+		})
 	})
 }

+ 5 - 4
core/pkg/model/kubemodel/metadata.go

@@ -6,8 +6,9 @@ import (
 
 // @bingen:generate:Metadata
 type Metadata struct {
-	CreatedAt   time.Time           `json:"createdAt"`             // @bingen:field[version=1]
-	CompletedAt time.Time           `json:"completedAt"`           // @bingen:field[version=1]
-	ObjectCount int                 `json:"objectCount"`           // @bingen:field[version=1]
-	Diagnostics []*DiagnosticResult `json:"diagnostics,omitempty"` // @bingen:field[version=1]
+	CreatedAt       time.Time       `json:"createdAt"`             // @bingen:field[version=1]
+	CompletedAt     time.Time       `json:"completedAt"`           // @bingen:field[version=1]
+	ObjectCount     int             `json:"objectCount"`           // @bingen:field[version=1]
+	Diagnostics     []Diagnostic    `json:"diagnostics,omitempty"` // @bingen:field[version=1]
+	DiagnosticLevel DiagnosticLevel `json:"diagnosticLevel"`       // @bingen:field[version=1]
 }

+ 34 - 1
core/pkg/model/kubemodel/namespace.go

@@ -1,6 +1,9 @@
 package kubemodel
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 // @bingen:generate:Namespace
 type Namespace struct {
@@ -12,3 +15,33 @@ type Namespace struct {
 	Start       time.Time         `json:"start"`       // @bingen:field[version=1]
 	End         time.Time         `json:"end"`         // @bingen:field[version=1]
 }
+
+func (kms *KubeModelSet) RegisterNamespace(uid, name string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for Namespace '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Namespaces[uid]; !ok {
+		clusterUID := ""
+
+		if kms.Cluster == nil {
+			kms.Warnf("RegisterNamespace(%s, %s): Cluster is nil", uid, name)
+		} else {
+			clusterUID = kms.Cluster.UID
+		}
+
+		kms.Namespaces[uid] = &Namespace{
+			UID:        uid,
+			ClusterUID: clusterUID,
+			Name:       name,
+		}
+
+		kms.idx.namespaceByName[name] = kms.Namespaces[uid]
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 32 - 1
core/pkg/model/kubemodel/node.go

@@ -1,6 +1,9 @@
 package kubemodel
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 type Node struct {
 	UID                          string            `json:"uid"`
@@ -22,3 +25,31 @@ type Node struct {
 	RAMByteUsageAverage      uint64 `json:"ramByteUsageAverage"`
 	RAMByteUsageMax          uint64 `json:"ramByteUsageMax"`
 }
+
+func (kms *KubeModelSet) RegisterNode(uid, name string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for Node '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Nodes[uid]; !ok {
+		clusterUID := ""
+
+		if kms.Cluster == nil {
+			kms.Warnf("RegisterNode(%s, %s): Cluster is nil", uid, name)
+		} else {
+			clusterUID = kms.Cluster.UID
+		}
+
+		kms.Nodes[uid] = &Node{
+			UID:        uid,
+			ClusterUID: clusterUID,
+			Name:       name,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 34 - 1
core/pkg/model/kubemodel/owner.go

@@ -1,6 +1,9 @@
 package kubemodel
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 type OwnerKind string
 
@@ -25,3 +28,33 @@ type Owner struct {
 	Start       time.Time         `json:"start"`
 	End         time.Time         `json:"end"`
 }
+
+func (kms *KubeModelSet) RegisterOwner(uid, name, namespace, kind string, isController bool) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for Owner '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Owners[uid]; !ok {
+		namespaceUID := ""
+
+		if ns, ok := kms.idx.namespaceByName[namespace]; !ok {
+			kms.Warnf("RegisterOwner(%s, %s, %s, %s, %t): missing namespace '%s'", uid, name, namespace, kind, isController, namespace)
+		} else {
+			namespaceUID = ns.UID
+		}
+
+		kms.Owners[uid] = &Owner{
+			UID:          uid,
+			Name:         name,
+			NamespaceUID: namespaceUID,
+			Kind:         OwnerKind(kind),
+			Controller:   isController,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 32 - 1
core/pkg/model/kubemodel/pod.go

@@ -1,6 +1,9 @@
 package kubemodel
 
-import "time"
+import (
+	"fmt"
+	"time"
+)
 
 type Pod struct {
 	UID                  string            `json:"uid"`
@@ -17,3 +20,31 @@ type Pod struct {
 	NetworkTransferBytes uint64            `json:"networkTransferBytes"`
 	NetworkReceiveBytes  uint64            `json:"networkReceiveBytes"`
 }
+
+func (kms *KubeModelSet) RegisterPod(uid, name, namespace string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for Pod '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.Pods[uid]; !ok {
+		namespaceUID := ""
+
+		if ns, ok := kms.idx.namespaceByName[namespace]; !ok {
+			kms.Warnf("RegisterPod(%s, %s, %s): missing namespace '%s'", uid, name, namespace, namespace)
+		} else {
+			namespaceUID = ns.UID
+		}
+
+		kms.Pods[uid] = &Pod{
+			UID:          uid,
+			Name:         name,
+			NamespaceUID: namespaceUID,
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}

+ 8 - 8
core/pkg/model/kubemodel/provider.go

@@ -4,11 +4,11 @@ package kubemodel
 type Provider string
 
 const (
-	ProviderAWS          Provider = "aws"
-	ProviderGCP          Provider = "gcp"
-	ProviderAzure        Provider = "azure"
-	ProviderOnPremises   Provider = "on_premises"
-	ProviderAlibaba      Provider = "alibaba"
-	ProviderDigitalOcean Provider = "digitalocean"
-	ProviderOracle       Provider = "oracle"
-)
+	ProviderEmpty        Provider = ""
+	ProviderAWS          Provider = "AWS"
+	ProviderGCP          Provider = "GCP"
+	ProviderAzure        Provider = "Azure"
+	ProviderAlibaba      Provider = "Alibaba"
+	ProviderDigitalOcean Provider = "DigitalOcean"
+	ProviderOracle       Provider = "Oracle"
+)

+ 33 - 0
core/pkg/model/kubemodel/resource.go

@@ -0,0 +1,33 @@
+package kubemodel
+
+// @bingen:generate:Resource
+type Resource string
+
+const (
+	ResourceCPU     Resource = "cpu"
+	ResourceMemory  Resource = "memory"
+	ResourceGPU     Resource = "gpu"
+	ResourceStorage Resource = "storage"
+)
+
+// @bingen:generate:ResourceQuantity
+type ResourceQuantity struct {
+	Resource Resource `json:"resource"` // @bingen:field[version=1]
+	Unit     Unit     `json:"unit"`     // @bingen:field[version=1]
+	Values   Stats    `json:"values"`   // @bingen:field[version=1]
+}
+
+// @bingen:generate:ResourceQuantities
+type ResourceQuantities map[Resource]ResourceQuantity
+
+func (rqs ResourceQuantities) Set(resource Resource, unit Unit, statType StatType, value float64) {
+	if _, ok := rqs[resource]; !ok {
+		rqs[resource] = ResourceQuantity{
+			Resource: resource,
+			Unit:     unit,
+			Values:   NewStats(),
+		}
+	}
+
+	rqs[resource].Values[statType] = value
+}
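+
+// Set lazily creates the quantity entry on first use, so callers can record
+// stats incrementally; a short sketch:
+//
+//	rqs := ResourceQuantities{}
+//
+//	// First call creates the cpu entry with a fresh Stats map.
+//	rqs.Set(ResourceCPU, UnitMillicore, StatAvg, 500)
+//
+//	// Subsequent calls reuse the entry and write into the same Stats map.
+//	rqs.Set(ResourceCPU, UnitMillicore, StatMax, 750)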

+ 101 - 0
core/pkg/model/kubemodel/resourcequota.go

@@ -0,0 +1,101 @@
+package kubemodel
+
+import (
+	"fmt"
+	"time"
+)
+
+// @bingen:generate:ResourceQuota
+type ResourceQuota struct {
+	UID          string               `json:"uid"`          // @bingen:field[version=1]
+	NamespaceUID string               `json:"namespaceUID"` // @bingen:field[version=1]
+	Name         string               `json:"name"`         // @bingen:field[version=1]
+	Spec         *ResourceQuotaSpec   `json:"spec"`         // @bingen:field[version=1]
+	Status       *ResourceQuotaStatus `json:"status"`       // @bingen:field[version=1]
+	Start        time.Time            `json:"start"`        // @bingen:field[version=1]
+	End          time.Time            `json:"end"`          // @bingen:field[version=1]
+}
+
+// @bingen:generate:ResourceQuotaSpec
+type ResourceQuotaSpec struct {
+	Hard *ResourceQuotaSpecHard `json:"hard"` // @bingen:field[version=1]
+}
+
+// @bingen:generate:ResourceQuotaSpecHard
+type ResourceQuotaSpecHard struct {
+	Requests ResourceQuantities `json:"requests,omitempty"` // @bingen:field[version=1]
+	Limits   ResourceQuantities `json:"limits,omitempty"`   // @bingen:field[version=1]
+}
+
+func (spec *ResourceQuotaSpecHard) SetRequest(resource Resource, unit Unit, statType StatType, value float64) {
+	if spec.Requests == nil {
+		spec.Requests = ResourceQuantities{}
+	}
+
+	spec.Requests.Set(resource, unit, statType, value)
+}
+
+func (spec *ResourceQuotaSpecHard) SetLimit(resource Resource, unit Unit, statType StatType, value float64) {
+	if spec.Limits == nil {
+		spec.Limits = ResourceQuantities{}
+	}
+
+	spec.Limits.Set(resource, unit, statType, value)
+}
+
+// @bingen:generate:ResourceQuotaStatus
+type ResourceQuotaStatus struct {
+	Used *ResourceQuotaStatusUsed `json:"used"` // @bingen:field[version=1]
+}
+
+// @bingen:generate:ResourceQuotaStatusUsed
+type ResourceQuotaStatusUsed struct {
+	Requests ResourceQuantities `json:"requests,omitempty"` // @bingen:field[version=1]
+	Limits   ResourceQuantities `json:"limits,omitempty"`   // @bingen:field[version=1]
+}
+
+func (stat *ResourceQuotaStatusUsed) SetRequest(resource Resource, unit Unit, statType StatType, value float64) {
+	if stat.Requests == nil {
+		stat.Requests = ResourceQuantities{}
+	}
+
+	stat.Requests.Set(resource, unit, statType, value)
+}
+
+func (stat *ResourceQuotaStatusUsed) SetLimit(resource Resource, unit Unit, statType StatType, value float64) {
+	if stat.Limits == nil {
+		stat.Limits = ResourceQuantities{}
+	}
+
+	stat.Limits.Set(resource, unit, statType, value)
+}
+
+func (kms *KubeModelSet) RegisterResourceQuota(uid, name, namespace string) error {
+	if uid == "" {
+		err := fmt.Errorf("UID is nil for ResourceQuota '%s'", name)
+		kms.Error(err)
+		return err
+	}
+
+	if _, ok := kms.ResourceQuotas[uid]; !ok {
+		namespaceUID := ""
+
+		if _, ok := kms.idx.namespaceByName[namespace]; !ok {
+			kms.Warnf("RegisterResourceQuota(%s, %s, %s): missing namespace", uid, name, namespace)
+		} else {
+			namespaceUID = kms.idx.namespaceByName[namespace].UID
+		}
+
+		kms.ResourceQuotas[uid] = &ResourceQuota{
+			UID:          uid,
+			Name:         name,
+			NamespaceUID: namespaceUID,
+			Spec:         &ResourceQuotaSpec{Hard: &ResourceQuotaSpecHard{}},
+			Status:       &ResourceQuotaStatus{Used: &ResourceQuotaStatusUsed{}},
+		}
+
+		kms.Metadata.ObjectCount++
+	}
+
+	return nil
+}
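+
+// Putting the registration helpers together, the expected order is cluster,
+// then namespace, then quota; skipping the namespace only produces a warning
+// and an empty NamespaceUID. A sketch (start, end as in the tests):
+//
+//	kms := NewKubeModelSet(start, end)
+//
+//	_ = kms.RegisterCluster("cluster-uid")
+//	_ = kms.RegisterNamespace("ns-uid", "default")
+//	_ = kms.RegisterResourceQuota("rq-uid", "compute-quota", "default")
+//
+//	// The registered quota is pre-populated with empty Spec/Status,
+//	// ready for the Set* helpers above.
+//	rq := kms.ResourceQuotas["rq-uid"]
+//	rq.Spec.Hard.SetRequest(ResourceCPU, UnitMillicore, StatAvg, 4000)
+//	rq.Status.Used.SetRequest(ResourceCPU, UnitMillicore, StatAvg, 1250)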

+ 0 - 2
core/pkg/model/kubemodel/service.go

@@ -11,7 +11,6 @@ const (
 	ServiceTypeExternalName ServiceType = "ExternalName"
 )
 
-// @bingen:generate:ServicePort
 type ServicePort struct {
 	Name       string `json:"name"`
 	Port       uint16 `json:"port"`
@@ -20,7 +19,6 @@ type ServicePort struct {
 	Protocol   string `json:"protocol"`
 }
 
-// @bingen:generate:Service
 type Service struct {
 	UID                  string            `json:"uid"`
 	ClusterUID           string            `json:"clusterUid"`

+ 74 - 0
core/pkg/model/kubemodel/stats.go

@@ -0,0 +1,74 @@
+package kubemodel
+
+// @bingen:generate:StatType
+type StatType string
+
+const (
+	StatAvg StatType = "avg"
+	StatMax StatType = "max"
+	StatMin StatType = "min"
+	StatP95 StatType = "p95"
+	StatP85 StatType = "p85"
+)
+
+// @bingen:generate:Stats
+type Stats map[StatType]float64
+
+func NewStats(capacity ...int) Stats {
+	if len(capacity) == 1 {
+		s := make(map[StatType]float64, capacity[0])
+		return s
+	}
+
+	return map[StatType]float64{}
+}
+
+func (s Stats) Avg() (float64, bool) {
+	if s == nil {
+		return 0, false
+	}
+
+	val, ok := s[StatAvg]
+
+	return val, ok
+}
+
+func (s Stats) Max() (float64, bool) {
+	if s == nil {
+		return 0, false
+	}
+
+	val, ok := s[StatMax]
+
+	return val, ok
+}
+
+func (s Stats) Min() (float64, bool) {
+	if s == nil {
+		return 0, false
+	}
+
+	val, ok := s[StatMin]
+
+	return val, ok
+}
+
+func (s Stats) P95() (float64, bool) {
+	if s == nil {
+		return 0, false
+	}
+
+	val, ok := s[StatP95]
+
+	return val, ok
+}
+
+func (s Stats) P85() (float64, bool) {
+	if s == nil {
+		return 0, false
+	}
+
+	val, ok := s[StatP85]
+
+	return val, ok
+}
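+
+// The accessors return a presence flag so a recorded zero is distinguishable
+// from a missing stat; a sketch:
+//
+//	s := NewStats()
+//	s[StatAvg] = 0 // recorded, but zero
+//
+//	if v, ok := s.Avg(); ok {
+//		_ = v // 0, with ok == true because StatAvg was recorded
+//	}
+//	if _, ok := s.Max(); !ok {
+//		// StatMax was never recorded; ok == false rather than a misleading 0
+//	}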

+ 12 - 0
core/pkg/model/kubemodel/unit.go

@@ -0,0 +1,12 @@
+package kubemodel
+
+// @bingen:generate:Unit
+type Unit string
+
+const (
+	UnitMillicore       Unit = "m"
+	UnitByte            Unit = "B"
+	UnitSecond          Unit = "s"
+	UnitMillicoreSecond Unit = "m-s"
+	UnitByteSecond      Unit = "B-s"
+)

+ 2 - 1
core/pkg/nodestats/nodes_test.go

@@ -58,7 +58,8 @@ func TestNodeSummaryLive(t *testing.T) {
 }
 
 type NodesOnlyClusterCache struct {
-	k8sClient kubernetes.Interface
+	clusterUID string
+	k8sClient  kubernetes.Interface
 }
 
 func NewTestClusterCache(k8sClient kubernetes.Interface) *NodesOnlyClusterCache {

+ 1 - 0
core/pkg/opencost/allocationprops.go

@@ -85,6 +85,7 @@ const (
 	AllocationServiceProp                           = "service"
 	AllocationLabelProp                             = "label"
 	AllocationAnnotationProp                        = "annotation"
+	AllocationNamespaceLabelProp                    = "namespaceLabel"
 	AllocationDeploymentProp                        = "deployment"
 	AllocationStatefulSetProp                       = "statefulset"
 	AllocationDaemonSetProp                         = "daemonset"

+ 40 - 11
core/pkg/opencost/exporter/controllers.go

@@ -5,9 +5,11 @@ import (
 
 	export "github.com/opencost/opencost/core/pkg/exporter"
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/opencost/exporter/allocation"
 	"github.com/opencost/opencost/core/pkg/opencost/exporter/asset"
+	km "github.com/opencost/opencost/core/pkg/opencost/exporter/kubemodel"
 	"github.com/opencost/opencost/core/pkg/opencost/exporter/networkinsight"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/storage"
@@ -21,6 +23,7 @@ type ComputePipelineSource interface {
 	allocation.AllocationSource
 	asset.AssetSource
 	networkinsight.NetworkInsightSource
+	km.KubeModelSource
 
 	GetDataSource() source.OpenCostDataSource
 }
@@ -28,9 +31,12 @@ type ComputePipelineSource interface {
 // PipelinesExportConfig is a configuration struct that contains the export resolutions for
 // allocation, assets, and network insights pipelines.
 type PipelinesExportConfig struct {
+	ClusterUID                        string
+	ClusterName                       string
 	AllocationPiplineResolutions      []time.Duration
 	AssetPipelineResolutons           []time.Duration
 	NetworkInsightPipelineResolutions []time.Duration
+	KubeModelPipelineResolutions      []time.Duration
 }
 
 // defaultPipelineExportResolutions returns the default export configuration for the pipeline
@@ -42,13 +48,16 @@ func defaultPipelineExportResolutions() []time.Duration {
 	}
 }
 
-// DefaultPipelinesExportConfig returns the default export configuration for all pipelines
+// NewPipelinesExportConfig returns the default export configuration for all pipelines
-// which is set to export hourly and daily for allocations, assets, and network insights.
+// which is set to export hourly and daily for allocations, assets, network insights, and kube models.
-func DefaultPipelinesExportConfig() *PipelinesExportConfig {
-	return &PipelinesExportConfig{
+func NewPipelinesExportConfig(clusterUID, clusterName string) PipelinesExportConfig {
+	return PipelinesExportConfig{
+		ClusterUID:                        clusterUID,
+		ClusterName:                       clusterName,
 		AllocationPiplineResolutions:      defaultPipelineExportResolutions(),
 		AssetPipelineResolutons:           defaultPipelineExportResolutions(),
 		NetworkInsightPipelineResolutions: defaultPipelineExportResolutions(),
+		KubeModelPipelineResolutions:      defaultPipelineExportResolutions(),
 	}
 }
 
@@ -57,15 +66,12 @@ type PipelineExportControllers struct {
 	AllocationExportController     *export.ComputeExportControllerGroup[opencost.AllocationSet]
 	AssetExportController          *export.ComputeExportControllerGroup[opencost.AssetSet]
 	NetworkInsightExportController *export.ComputeExportControllerGroup[opencost.NetworkInsightSet]
+	KubeModelExportController      *export.ComputeExportControllerGroup[kubemodel.KubeModelSet]
 }
 
-// NewPipelineExportControllers creates a new PipelineExportControllers instance with the given cluster ID, storage implementation, cost model, and configuration.
-// Setting the config to nil will use the default hourly and daily export resolutions for each pipeline.
+// NewPipelineExportControllers creates a new PipelineExportControllers instance with the given storage implementation, compute source, and configuration.
+// Cluster identity is carried by the config; use NewPipelinesExportConfig for the default hourly and daily export resolutions for each pipeline.
-func NewPipelineExportControllers(clusterId string, store storage.Storage, cm ComputePipelineSource, config *PipelinesExportConfig) *PipelineExportControllers {
-	if config == nil {
-		config = DefaultPipelinesExportConfig()
-	}
-
+func NewPipelineExportControllers(store storage.Storage, cm ComputePipelineSource, config PipelinesExportConfig) *PipelineExportControllers {
 	mins := int(cm.GetDataSource().Resolution().Minutes())
 	if mins <= 0 {
 		mins = 1
@@ -84,7 +90,8 @@ func NewPipelineExportControllers(clusterId string, store storage.Storage, cm Co
 			continue
 		}
 
-		allocController, err := NewComputePipelineExportController(clusterId, store, allocSource, res)
+		// Use ClusterName for "clusterId" here to maintain legacy pattern
+		allocController, err := NewComputePipelineExportController(config.ClusterName, store, allocSource, res)
 		if err != nil {
 			log.Errorf("Failed to create allocation export controller for resolution: %s - %v", timeutil.DurationString(res), err)
 			continue
@@ -103,7 +110,8 @@ func NewPipelineExportControllers(clusterId string, store storage.Storage, cm Co
 			continue
 		}
 
-		assetController, err := NewComputePipelineExportController(clusterId, store, assetSource, res)
+		// Use ClusterName for "clusterId" here to maintain legacy pattern
+		assetController, err := NewComputePipelineExportController(config.ClusterName, store, assetSource, res)
 		if err != nil {
 			log.Errorf("Failed to create asset export controller for resolution: %s - %v", timeutil.DurationString(res), err)
 			continue
@@ -122,7 +130,8 @@ func NewPipelineExportControllers(clusterId string, store storage.Storage, cm Co
 			continue
 		}
 
-		networkInsightController, err := NewComputePipelineExportController(clusterId, store, networkInsightSource, res)
+		// Use ClusterName for "clusterId" here to maintain legacy pattern
+		networkInsightController, err := NewComputePipelineExportController(config.ClusterName, store, networkInsightSource, res)
 		if err != nil {
 			log.Errorf("Failed to create network insight export controller for resolution: %s - %v", timeutil.DurationString(res), err)
 			continue
@@ -131,10 +140,30 @@ func NewPipelineExportControllers(clusterId string, store storage.Storage, cm Co
 		networkInsightExportControllers = append(networkInsightExportControllers, networkInsightController)
 	}
 
+	// KubeModel sources and exporters
+	kubeModelSource := km.NewKubeModelComputeSource(cm)
+	kubeModelExportControllers := []*export.ComputeExportController[kubemodel.KubeModelSet]{}
+
+	for _, res := range config.KubeModelPipelineResolutions {
+		if res < sourceResolution {
+			log.Warnf("Configured KubeModel pipeline resolution %dm is less than source resolution %dm. Not configuring the exporter for this resolution.", int64(res.Minutes()), int64(sourceResolution.Minutes()))
+			continue
+		}
+
+		kubeModelController, err := NewComputePipelineExportController(config.ClusterUID, store, kubeModelSource, res)
+		if err != nil {
+			log.Errorf("Failed to create KubeModel export controller for resolution: %s - %v", timeutil.DurationString(res), err)
+			continue
+		}
+
+		kubeModelExportControllers = append(kubeModelExportControllers, kubeModelController)
+	}
+
 	return &PipelineExportControllers{
 		AllocationExportController:     export.NewComputeExportControllerGroup(allocExportControllers...),
 		AssetExportController:          export.NewComputeExportControllerGroup(assetExportControllers...),
 		NetworkInsightExportController: export.NewComputeExportControllerGroup(networkInsightExportControllers...),
+		KubeModelExportController:      export.NewComputeExportControllerGroup(kubeModelExportControllers...),
 	}
 }
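 
 // Call sites now pass cluster identity through the config value instead of a
 // separate parameter; a sketch of the new wiring, where store and cm stand in
 // for an existing storage.Storage and ComputePipelineSource:
 //
 //	cfg := NewPipelinesExportConfig("cluster-uid", "cluster-name")
 //	// Optionally narrow a single pipeline's resolutions before construction.
 //	cfg.KubeModelPipelineResolutions = []time.Duration{time.Hour}
 //
 //	controllers := NewPipelineExportControllers(store, cm, cfg)
 //
 //	// KubeModel exports are keyed by ClusterUID; the legacy pipelines keep
 //	// using ClusterName as their "clusterId".
 //	_ = controllers.KubeModelExportController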
 

+ 65 - 17
core/pkg/opencost/exporter/exporter_test.go

@@ -9,6 +9,7 @@ import (
 	"github.com/opencost/opencost/core/pkg/diagnostics"
 	"github.com/opencost/opencost/core/pkg/exporter"
 	"github.com/opencost/opencost/core/pkg/exporter/pathing"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/pipelines"
 	"github.com/opencost/opencost/core/pkg/source"
@@ -58,6 +59,14 @@ func NewMockNetworkInsightSource() exporter.ComputeSource[opencost.NetworkInsigh
 	}
 }
 
+func NewMockKubeModelSource() exporter.ComputeSource[kubemodel.KubeModelSet] {
+	return &MockSource[kubemodel.KubeModelSet]{
+		generate: func(start, end time.Time) *kubemodel.KubeModelSet {
+			return opencost.GenerateMockKubeModelSet(start, end)
+		},
+	}
+}
+
 type MockDataSource struct {
 	resolution time.Duration
 }
@@ -81,27 +90,30 @@ func (mds *MockDataSource) BatchDuration() time.Duration
 func (mds *MockDataSource) Resolution() time.Duration                                     { return mds.resolution }
 
 type MockPipelineComputeSource struct {
-	allocSource exporter.ComputeSource[opencost.AllocationSet]
-	assetSource exporter.ComputeSource[opencost.AssetSet]
-	netSource   exporter.ComputeSource[opencost.NetworkInsightSet]
-	ds          *MockDataSource
+	allocSource     exporter.ComputeSource[opencost.AllocationSet]
+	assetSource     exporter.ComputeSource[opencost.AssetSet]
+	netSource       exporter.ComputeSource[opencost.NetworkInsightSet]
+	kubeModelSource exporter.ComputeSource[kubemodel.KubeModelSet]
+	ds              *MockDataSource
 }
 
 func NewMockPipelineComputeSource() *MockPipelineComputeSource {
 	return &MockPipelineComputeSource{
-		allocSource: NewMockAllocationSource(),
-		assetSource: NewMockAssetSource(),
-		netSource:   NewMockNetworkInsightSource(),
-		ds:          NewMockDataSource(),
+		allocSource:     NewMockAllocationSource(),
+		assetSource:     NewMockAssetSource(),
+		netSource:       NewMockNetworkInsightSource(),
+		kubeModelSource: NewMockKubeModelSource(),
+		ds:              NewMockDataSource(),
 	}
 }
 
 func NewMockPipelineComputeSourceWith(srcResolution time.Duration) *MockPipelineComputeSource {
 	return &MockPipelineComputeSource{
-		allocSource: NewMockAllocationSource(),
-		assetSource: NewMockAssetSource(),
-		netSource:   NewMockNetworkInsightSource(),
-		ds:          NewMockDataSourceWith(srcResolution),
+		allocSource:     NewMockAllocationSource(),
+		assetSource:     NewMockAssetSource(),
+		netSource:       NewMockNetworkInsightSource(),
+		kubeModelSource: NewMockKubeModelSource(),
+		ds:              NewMockDataSourceWith(srcResolution),
 	}
 }
 
@@ -114,6 +126,9 @@ func (mpcs *MockPipelineComputeSource) ComputeAssets(start, end time.Time) (*ope
 func (mpcs *MockPipelineComputeSource) ComputeNetworkInsights(start, end time.Time) (*opencost.NetworkInsightSet, error) {
 	return mpcs.netSource.Compute(start, end)
 }
+func (mpcs *MockPipelineComputeSource) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
+	return mpcs.kubeModelSource.Compute(start, end)
+}
 func (mpcs *MockPipelineComputeSource) GetDataSource() source.OpenCostDataSource {
 	return mpcs.ds
 }
@@ -228,6 +243,35 @@ func TestExporters(t *testing.T) {
 		validateFileCreation[opencost.NetworkInsightSet](t, memStore, p, start, end)
 	})
 
+	t.Run("KubeModel exporter", func(t *testing.T) {
+		kubeModelSource := NewMockKubeModelSource()
+		memStore := storage.NewMemoryStorage()
+		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.KubeModelPipelineName, ptr(TestResolution))
+		if err != nil {
+			t.Fatalf("failed to create path formatter: %v", err)
+		}
+
+		kubeModelExporter, err := NewComputePipelineExporter[kubemodel.KubeModelSet](TestClusterId, TestResolution, memStore)
+		if err != nil {
+			t.Fatalf("failed to create KubeModel exporter: %v", err)
+		}
+
+		end := time.Now().UTC().Truncate(TestResolution)
+		start := end.Add(-TestResolution)
+
+		data, err := kubeModelSource.Compute(start, end)
+		if err != nil {
+			t.Fatalf("failed to compute KubeModel data: %v", err)
+		}
+
+		err = kubeModelExporter.Export(opencost.NewClosedWindow(start, end), data)
+		if err != nil {
+			t.Fatalf("failed to export KubeModel data: %v", err)
+		}
+
+		validateFileCreation[kubemodel.KubeModelSet](t, memStore, p, start, end)
+	})
+
 	t.Run("unknown exporter", func(t *testing.T) {
 		memStore := storage.NewMemoryStorage()
 
@@ -250,7 +294,9 @@ func TestPipelineExportControllers(t *testing.T) {
 		pipelineComputeSource := NewMockPipelineComputeSource()
 		memStore := storage.NewMemoryStorage()
 
-		exportControllers := NewPipelineExportControllers(TestClusterId, memStore, pipelineComputeSource, &PipelinesExportConfig{
+		exportControllers := NewPipelineExportControllers(memStore, pipelineComputeSource, PipelinesExportConfig{
+			ClusterUID:                        TestClusterId,
+			ClusterName:                       TestClusterId,
 			AllocationPiplineResolutions:      []time.Duration{TestResolution},
 			AssetPipelineResolutons:           []time.Duration{TestResolution},
 			NetworkInsightPipelineResolutions: []time.Duration{TestResolution},
@@ -286,7 +332,9 @@ func TestPipelineExportControllers(t *testing.T) {
 		pipelineComputeSource := NewMockPipelineComputeSourceWith(30 * time.Second)
 		memStore := storage.NewMemoryStorage()
 
-		exportControllers := NewPipelineExportControllers(TestClusterId, memStore, pipelineComputeSource, &PipelinesExportConfig{
+		exportControllers := NewPipelineExportControllers(memStore, pipelineComputeSource, PipelinesExportConfig{
+			ClusterUID:                        TestClusterId,
+			ClusterName:                       TestClusterId,
 			AllocationPiplineResolutions:      []time.Duration{TestResolution},
 			AssetPipelineResolutons:           []time.Duration{TestResolution},
 			NetworkInsightPipelineResolutions: []time.Duration{TestResolution},
@@ -322,7 +370,7 @@ func TestPipelineExportControllers(t *testing.T) {
 		pipelineComputeSource := NewMockPipelineComputeSource()
 		memStore := storage.NewMemoryStorage()
 
-		exportControllers := NewPipelineExportControllers(TestClusterId, memStore, pipelineComputeSource, nil)
+		exportControllers := NewPipelineExportControllers(memStore, pipelineComputeSource, NewPipelinesExportConfig(TestClusterId, TestClusterId))
 
 		if len(exportControllers.AllocationExportController.Resolutions()) != 2 {
 			t.Fatalf("expected 2 allocation resolutions, got %d", len(exportControllers.AllocationExportController.Resolutions()))
@@ -340,7 +388,7 @@ func TestPipelineExportControllers(t *testing.T) {
 		pipelineComputeSource := NewMockPipelineComputeSourceWith(48 * time.Hour)
 		memStore := storage.NewMemoryStorage()
 
-		exportControllers := NewPipelineExportControllers(TestClusterId, memStore, pipelineComputeSource, nil)
+		exportControllers := NewPipelineExportControllers(memStore, pipelineComputeSource, NewPipelinesExportConfig(TestClusterId, TestClusterId))
 
 		if len(exportControllers.AllocationExportController.Resolutions()) != 0 {
 			t.Fatalf("expected 0 allocation resolutions, got %d", len(exportControllers.AllocationExportController.Resolutions()))
@@ -357,7 +405,7 @@ func TestPipelineExportControllers(t *testing.T) {
 		pipelineComputeSource := NewMockPipelineComputeSource()
 		memStore := storage.NewMemoryStorage()
 
-		exportControllers := NewPipelineExportControllers("", memStore, pipelineComputeSource, nil)
+		exportControllers := NewPipelineExportControllers(memStore, pipelineComputeSource, NewPipelinesExportConfig("", ""))
 
 		if len(exportControllers.AllocationExportController.Resolutions()) != 0 {
 			t.Fatalf("expected 0 allocation resolutions, got %d", len(exportControllers.AllocationExportController.Resolutions()))

+ 43 - 0
core/pkg/opencost/exporter/kubemodel/source.go

@@ -0,0 +1,43 @@
+package kubemodel
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/exporter"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
+	"github.com/opencost/opencost/core/pkg/pipelines"
+)
+
+type KubeModelSource interface {
+	ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error)
+}
+
+type KubeModelComputeSource struct {
+	src KubeModelSource
+}
+
+// NewKubeModelComputeSource creates an `exporter.ComputeSource[kubemodel.KubeModelSet]` implementation
+func NewKubeModelComputeSource(src KubeModelSource) exporter.ComputeSource[kubemodel.KubeModelSet] {
+	return &KubeModelComputeSource{
+		src: src,
+	}
+}
+
+// CanCompute should return true iff the ComputeSource can effectively act as
+// a source of T data for the given time range. For example, a ComputeSource
+// with two-day coverage cannot fulfill a range from three days ago, and should
+// not be left to return an error in Compute. Instead, it should report that it
+// cannot compute and allow another Source to handle the computation.
+func (kcs *KubeModelComputeSource) CanCompute(start, end time.Time) bool {
+	return true
+}
+
+// Compute should compute a single T for the given time range.
+func (kcs *KubeModelComputeSource) Compute(start, end time.Time) (*kubemodel.KubeModelSet, error) {
+	return kcs.src.ComputeKubeModelSet(start, end)
+}
+
+// Name returns the name of the ComputeSource
+func (kcs *KubeModelComputeSource) Name() string {
+	return pipelines.KubeModelPipelineName
+}
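
For orientation, a minimal sketch (not part of this commit) of how the adapter above can be driven end to end. The stub source type is invented for illustration; the constructor, interface, and kubemodel.NewKubeModelSet follow the code in this changeset.

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/model/kubemodel"
	kmexp "github.com/opencost/opencost/core/pkg/opencost/exporter/kubemodel"
)

// stubSource is a hypothetical KubeModelSource used only for illustration.
type stubSource struct{}

func (s *stubSource) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
	return kubemodel.NewKubeModelSet(start, end), nil
}

func main() {
	cs := kmexp.NewKubeModelComputeSource(&stubSource{})

	end := time.Now().UTC().Truncate(time.Hour)
	start := end.Add(-time.Hour)

	// CanCompute gates Compute: a coverage-limited source should decline
	// here rather than be left to return an error from Compute.
	if cs.CanCompute(start, end) {
		set, err := cs.Compute(start, end)
		fmt.Println(set, err)
	}
}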

+ 22 - 0
core/pkg/opencost/mock.go

@@ -3,6 +3,8 @@ package opencost
 import (
 	"fmt"
 	"time"
+
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 )
 
 const gb = 1024 * 1024 * 1024
@@ -1013,3 +1015,23 @@ func GenerateMockCloudCostSet(start, end time.Time, provider, integration string
 
 	return ccs
 }
+
+// GenerateMockKubeModelSet creates a generic KubeModelSet for testing
+func GenerateMockKubeModelSet(start, end time.Time) *kubemodel.KubeModelSet {
+	kms := kubemodel.NewKubeModelSet(start, end)
+
+	kms.Cluster = &kubemodel.Cluster{
+		UID:  "clusterUID",
+		Name: "cluster",
+	}
+
+	kms.RegisterNamespace("namespace-1", "namespace-1")
+	kms.RegisterNamespace("namespace-2", "namespace-2")
+
+	kms.RegisterResourceQuota("resourcequota-1", "resourcequota-1", "namespace-1")
+	kms.RegisterResourceQuota("resourcequota-2", "resourcequota-2", "namespace-1")
+	kms.RegisterResourceQuota("resourcequota-3", "resourcequota-3", "namespace-2")
+	kms.RegisterResourceQuota("resourcequota-4", "resourcequota-4", "namespace-2")
+
+	return kms
+}
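
A short usage sketch (not in the commit) exercising the mock generator above; it assumes only the standard testing imports and the fixture shape defined in the diff.

package opencost_test

import (
	"testing"
	"time"

	"github.com/opencost/opencost/core/pkg/opencost"
)

func TestGenerateMockKubeModelSet(t *testing.T) {
	end := time.Now().UTC().Truncate(time.Hour)
	start := end.Add(-time.Hour)

	kms := opencost.GenerateMockKubeModelSet(start, end)

	// The fixture is deterministic: one cluster, two namespaces, and four
	// resource quotas split across them.
	if kms.Cluster == nil || kms.Cluster.UID != "clusterUID" {
		t.Fatalf("unexpected mock cluster: %+v", kms.Cluster)
	}
}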

+ 5 - 0
core/pkg/pipelines/name.go

@@ -3,6 +3,7 @@ package pipelines
 import (
 	"github.com/opencost/opencost/core/pkg/diagnostics"
 	"github.com/opencost/opencost/core/pkg/heartbeat"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/typeutil"
 )
@@ -16,6 +17,7 @@ const (
 	TurbonomicActionsPipelineName string = "turbonomicactions"
 	HeartbeatPipelineName         string = "heartbeat"
 	DiagnosticsPipelineName       string = "diagnostics"
+	KubeModelPipelineName         string = "kubemodel"
 )
 
 var nameByType map[string]string
@@ -37,6 +39,8 @@ func init() {
 	heartbeatKey := typeutil.TypeOf[heartbeat.Heartbeat]()
 	diagnosticsKey := typeutil.TypeOf[diagnostics.DiagnosticsRunReport]()
 
+	kubeModelSetKey := typeutil.TypeOf[kubemodel.KubeModelSet]()
+
 	nameByType = map[string]string{
 		allocSetKey:          AllocationPipelineName,
 		allocKey:             AllocationPipelineName,
@@ -48,6 +52,7 @@ func init() {
 		networkInsightKey:    NetworkInsightPipelineName,
 		heartbeatKey:         HeartbeatPipelineName,
 		diagnosticsKey:       DiagnosticsPipelineName,
+		kubeModelSetKey:      KubeModelPipelineName,
 	}
 }
 

+ 5 - 0
core/pkg/source/datasource.go

@@ -39,6 +39,7 @@ type MetricsQuerier interface {
 	QueryLBPricePerHr(start, end time.Time) *Future[LBPricePerHrResult]
 
 	// Cluster Management
+	QueryClusterUptime(start, end time.Time) *Future[UptimeResult]
 	QueryClusterManagementDuration(start, end time.Time) *Future[ClusterManagementDurationResult]
 	QueryClusterManagementPricePerHr(start, end time.Time) *Future[ClusterManagementPricePerHrResult]
 
@@ -81,6 +82,9 @@ type MetricsQuerier interface {
 	QueryPVPricePerGiBHour(start, end time.Time) *Future[PVPricePerGiBHourResult]
 	QueryPVInfo(start, end time.Time) *Future[PVInfoResult]
 
+	// Namespace
+	QueryNamespaceUptime(start, end time.Time) *Future[UptimeResult]
+
 	// Network Egress
 	QueryNetZoneGiB(start, end time.Time) *Future[NetZoneGiBResult]
 	QueryNetZonePricePerGiB(start, end time.Time) *Future[NetZonePricePerGiBResult]
@@ -118,6 +122,7 @@ type MetricsQuerier interface {
 	QueryReplicaSetsWithRollout(start, end time.Time) *Future[ReplicaSetsWithRolloutResult]
 
 	// ResourceQuotas
+	QueryResourceQuotaUptime(start, end time.Time) *Future[UptimeResult]
 	QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *Future[ResourceQuotaSpecCPURequestAvgResult]
 	QueryResourceQuotaSpecCPURequestMax(start, end time.Time) *Future[ResourceQuotaSpecCPURequestMaxResult]
 	QueryResourceQuotaSpecRAMRequestAverage(start, end time.Time) *Future[ResourceQuotaSpecRAMRequestAvgResult]

+ 52 - 0
core/pkg/source/decoders.go

@@ -1,10 +1,16 @@
 package source
 
 import (
+	"time"
+
 	"github.com/opencost/opencost/core/pkg/util"
 )
 
 const (
+	ProviderLabel        = "provider"
+	AccountIDLabel       = "account_id"
+	ClusterNameLabel     = "cluster_name"
+	RegionLabel          = "region"
 	ClusterIDLabel       = "cluster_id"
 	NamespaceLabel       = "namespace"
 	NodeLabel            = "node"
@@ -45,6 +51,52 @@ const (
 	NoneLabelValue = "<none>"
 )
 
+// UptimeResult represents the first and last recorded sample timestamps within the query window
+type UptimeResult struct {
+	UID   string
+	First time.Time
+	Last  time.Time
+}
+
+func (res *UptimeResult) GetStartEnd(windowStart, windowEnd time.Time, resolution time.Duration) (time.Time, time.Time) {
+	first := res.First
+	last := res.Last
+	// The only corner case here is what to do when there is only one
+	// timestamp. Resolving it still requires the resolution, and the result
+	// can still be clamped to the window. In this case, we honor the
+	// existence of the object by giving it "one resolution" worth of
+	// duration, half on each side of the given timestamp.
+	if first.Equal(last) {
+		first = first.Add(-resolution / 2)
+		last = last.Add(resolution / 2)
+	}
+	if first.Before(windowStart) {
+		first = windowStart
+	}
+	if last.After(windowEnd) {
+		last = windowEnd
+	}
+	// prevent end times in the future
+	now := time.Now().UTC()
+	if last.After(now) {
+		last = now
+	}
+
+	return first, last
+}
+
+// DecodeUptimeResult decodes a QueryResult into an UptimeResult. It assumes
+// the result contains at least one sample value.
+func DecodeUptimeResult(result *QueryResult) *UptimeResult {
+	uid, _ := result.GetString(UIDLabel)
+	first := time.Unix(int64(result.Values[0].Timestamp), 0).UTC()
+	last := time.Unix(int64(result.Values[len(result.Values)-1].Timestamp), 0).UTC()
+
+	return &UptimeResult{
+		UID:   uid,
+		First: first,
+		Last:  last,
+	}
+}
+
 type PVResult struct {
 	UID              string
 	Cluster          string
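
A worked sketch (not in the commit) of the single-sample corner case described above: with one timestamp and a 5-minute resolution, GetStartEnd pads by half a resolution on each side, then clamps to the window. All values here are hypothetical.

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/core/pkg/source"
)

func main() {
	sample := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	res := &source.UptimeResult{UID: "ns-uid", First: sample, Last: sample}

	windowStart := time.Date(2024, 1, 1, 11, 58, 0, 0, time.UTC)
	windowEnd := time.Date(2024, 1, 1, 13, 0, 0, 0, time.UTC)

	// Padded range is 11:57:30 .. 12:02:30; the start is then clamped to
	// the window start at 11:58:00.
	start, end := res.GetStartEnd(windowStart, windowEnd, 5*time.Minute)
	fmt.Println(start, end) // 11:58:00 .. 12:02:30 UTC
}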

+ 1 - 1
go.mod

@@ -155,7 +155,7 @@ require (
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/flatbuffers v23.5.26+incompatible // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
-	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0
 	github.com/google/s2a-go v0.1.9 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
 	github.com/googleapis/gax-go/v2 v2.15.0 // indirect

+ 63 - 16
modules/collector-source/pkg/collector/collector.go

@@ -34,6 +34,7 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewNodeRAMUserUsageAverageMetricCollector())
 	memStore.Register(NewLBPricePerHourMetricCollector())
 	memStore.Register(NewLBActiveMinutesMetricCollector())
+	memStore.Register(NewClusterUptimeMetricCollector())
 	memStore.Register(NewClusterManagementDurationMetricCollector())
 	memStore.Register(NewClusterManagementPricePerHourMetricCollector())
 	memStore.Register(NewPodActiveMinutesMetricCollector())
@@ -74,6 +75,7 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewNetInternetIngressGiBMetricCollector())
 	memStore.Register(NewNetInternetServiceIngressGiBMetricCollector())
 	memStore.Register(NewNetTransferBytesMetricCollector())
+	memStore.Register(NewNamespaceUptimeMetricCollector())
 	memStore.Register(NewNamespaceLabelsMetricCollector())
 	memStore.Register(NewNamespaceAnnotationsMetricCollector())
 	memStore.Register(NewPodLabelsMetricCollector())
@@ -86,6 +88,7 @@ func NewOpenCostMetricStore() metric.MetricStore {
 	memStore.Register(NewPodsWithReplicaSetOwnerMetricCollector())
 	memStore.Register(NewReplicaSetsWithoutOwnersMetricCollector())
 	memStore.Register(NewReplicaSetsWithRolloutMetricCollector())
+	memStore.Register(NewResourceQuotaUptimeMetricCollector())
 	memStore.Register(NewResourceQuotaSpecCPURequestAverageMetricCollector())
 	memStore.Register(NewResourceQuotaSpecCPURequestMaxMetricCollector())
 	memStore.Register(NewResourceQuotaSpecRAMRequestAverageMetricCollector())
@@ -191,7 +194,7 @@ func NewPVCInfoMetricCollector() *metric.MetricCollector {
 			source.StorageClassLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		func(labels map[string]string) bool {
 			return labels[source.VolumeNameLabel] != ""
 		},
@@ -212,7 +215,7 @@ func NewPVActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.PVLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -239,7 +242,7 @@ func NewLocalStorageUsedActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.DeviceLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil, // filter not required here because only container root file system is being scraped
 	)
 }
@@ -338,7 +341,7 @@ func NewLocalStorageActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.ProviderIDLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -484,7 +487,7 @@ func NewNodeActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.ProviderIDLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -602,7 +605,25 @@ func NewLBActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.IngressIPLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
+		nil,
+	)
+}
+
+//	avg(
+//		cluster_info{
+//			<some_custom_filter>
+//		}
+//	) by (uid)[%s:%dm]
+
+func NewClusterUptimeMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ClusterUptimeID,
+		metric.ClusterInfo,
+		[]string{
+			source.UIDLabel,
+		},
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -621,7 +642,7 @@ func NewClusterManagementDurationMetricCollector() *metric.MetricCollector {
 			source.ProvisionerNameLabel,
 			source.UIDLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -662,7 +683,7 @@ func NewPodActiveMinutesMetricCollector() *metric.MetricCollector {
 			source.NamespaceLabel,
 			source.PodLabel,
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	)
 }
@@ -1640,6 +1661,24 @@ func NewNetTransferBytesMetricCollector() *metric.MetricCollector {
 	)
 }
 
+//	avg(
+//		namespace_info{
+//			<some_custom_filter>
+//		}
+//	) by (uid)[%s:%dm]
+
+func NewNamespaceUptimeMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.NamespaceUptimeID,
+		metric.NamespaceInfo,
+		[]string{
+			source.UIDLabel,
+		},
+		aggregator.Uptime,
+		nil,
+	)
+}
+
 //	avg_over_time(
 //		kube_namespace_labels{
 //			<some_custom_filter>
@@ -1909,15 +1948,23 @@ func NewReplicaSetsWithRolloutMetricCollector() *metric.MetricCollector {
 	)
 }
 
-// avg(
-//	avg_over_time(
-//		resourcequota_spec_resource_requests{
-//			resource="cpu",
-//			unit="core",
+//	avg(
+//		resourcequota_info{
 //			<some_custom_filter>
-//		}[1h]
-//	)
-//) by (resourcequota, namespace, uid, cluster_id)
+//		}
+//	) by (uid)[%s:%dm]
+
+func NewResourceQuotaUptimeMetricCollector() *metric.MetricCollector {
+	return metric.NewMetricCollector(
+		metric.ResourceQuotaUptimeID,
+		metric.ResourceQuotaInfo,
+		[]string{
+			source.UIDLabel,
+		},
+		aggregator.Uptime,
+		nil,
+	)
+}
 
 func NewResourceQuotaSpecCPURequestAverageMetricCollector() *metric.MetricCollector {
 	return metric.NewMetricCollector(

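The new uptime collectors above all follow one pattern: group an *_info series by object UID and hand the samples to the Uptime aggregator. A minimal sketch (not in the commit) of that pattern for a hypothetical "foo_info" series; the ID and metric name do not exist in the codebase.

// newFooUptimeMetricCollector mirrors the uptime collectors above for a
// hypothetical "foo_info" series.
func newFooUptimeMetricCollector() *metric.MetricCollector {
	return metric.NewMetricCollector(
		metric.MetricCollectorID("FooUptime"), // hypothetical collector ID
		"foo_info",                            // hypothetical info-style series
		[]string{source.UIDLabel},             // group by object UID only
		aggregator.Uptime,                     // record first/last sample times
		nil,                                   // no label filter needed
	)
}
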
+ 9 - 7
modules/collector-source/pkg/collector/config.go

@@ -7,14 +7,15 @@ import (
 )
 
 type CollectorConfig struct {
-	Resolutions     []util.ResolutionConfiguration `json:"resolutions"`
-	ScrapeInterval  string                         `json:"scrape_interval"`
-	ClusterID       string                         `json:"cluster_id"`
-	ApplicationName string                         `json:"application_name"`
-	NetworkPort     int                            `json:"network_port"`
+	Resolutions     []util.ResolutionConfiguration
+	ScrapeInterval  string
+	ClusterUID      string
+	ClusterName     string
+	ApplicationName string
+	NetworkPort     int
 }
 
-func NewOpenCostCollectorConfigFromEnv() CollectorConfig {
+func NewOpenCostCollectorConfigFromEnv(clusterUID string) CollectorConfig {
 	return CollectorConfig{
 		Resolutions: []util.ResolutionConfiguration{
 			{
@@ -31,7 +32,8 @@ func NewOpenCostCollectorConfigFromEnv() CollectorConfig {
 			},
 		},
 		ScrapeInterval:  env.GetCollectorScrapeIntervalSeconds(),
-		ClusterID:       coreenv.GetClusterID(),
+		ClusterUID:      clusterUID,
+		ClusterName:     coreenv.GetClusterID(),
 		ApplicationName: coreenv.GetAppName(),
 		NetworkPort:     env.GetNetworkPort(),
 	}

+ 5 - 2
modules/collector-source/pkg/collector/datasource.go

@@ -26,12 +26,13 @@ type collectorDataSource struct {
 }
 
 func NewDefaultCollectorDataSource(
+	clusterUID string,
 	store storage.Storage,
 	clusterInfoProvider clusters.ClusterInfoProvider,
 	clusterCache clustercache.ClusterCache,
 	statSummaryClient nodestats.StatSummaryClient,
 ) source.OpenCostDataSource {
-	config := NewOpenCostCollectorConfigFromEnv()
+	config := NewOpenCostCollectorConfigFromEnv(clusterUID)
 	return NewCollectorDataSource(
 		config,
 		store,
@@ -66,7 +67,7 @@ func NewCollectorDataSource(
 	updater = repo
 	if store != nil {
 		wal, err := metric.NewWalinator(
-			config.ClusterID,
+			config.ClusterName,
 			config.ApplicationName,
 			store,
 			resolutions,
@@ -82,9 +83,11 @@ func NewCollectorDataSource(
 
 	diagnosticsModule := metric.NewDiagnosticsModule()
 	scrapeController := scrape.NewScrapeController(
+		config.ClusterUID,
 		config.ScrapeInterval,
 		config.NetworkPort,
 		updater,
+		clusterInfoProvider,
 		clusterCache,
 		statSummaryClient,
 	)

+ 12 - 0
modules/collector-source/pkg/collector/metricsquerier.go

@@ -299,6 +299,10 @@ func (c *collectorMetricsQuerier) QueryLBPricePerHr(start, end time.Time) *sourc
 	return queryCollector(c, start, end, metric.LBPricePerHourID, source.DecodeLBPricePerHrResult)
 }
 
+func (c *collectorMetricsQuerier) QueryClusterUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	return queryCollector(c, start, end, metric.ClusterUptimeID, source.DecodeUptimeResult)
+}
+
 func (c *collectorMetricsQuerier) QueryClusterManagementDuration(start, end time.Time) *source.Future[source.ClusterManagementDurationResult] {
 	return queryCollector(c, start, end, metric.ClusterManagementDurationID, source.DecodeClusterManagementDurationResult)
 }
@@ -416,6 +420,10 @@ func (c *collectorMetricsQuerier) QueryPVInfo(start, end time.Time) *source.Futu
 	return queryCollector(c, start, end, metric.PVInfoID, source.DecodePVInfoResult)
 }
 
+func (c *collectorMetricsQuerier) QueryNamespaceUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	return queryCollector(c, start, end, metric.NamespaceUptimeID, source.DecodeUptimeResult)
+}
+
 func (c *collectorMetricsQuerier) QueryNetZoneGiB(start, end time.Time) *source.Future[source.NetZoneGiBResult] {
 	return queryCollectorGiB(c, start, end, metric.NetZoneGiBID, source.DecodeNetZoneGiBResult)
 }
@@ -520,6 +528,10 @@ func (c *collectorMetricsQuerier) QueryReplicaSetsWithRollout(start, end time.Ti
 	return queryCollector(c, start, end, metric.ReplicaSetsWithRolloutID, source.DecodeReplicaSetsWithRolloutResult)
 }
 
+func (c *collectorMetricsQuerier) QueryResourceQuotaUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	return queryCollector(c, start, end, metric.ResourceQuotaUptimeID, source.DecodeUptimeResult)
+}
+
 func (c *collectorMetricsQuerier) QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestAvgResult] {
 	return queryCollector(c, start, end, metric.ResourceQuotaSpecCPURequestAverageID, source.DecodeResourceQuotaSpecCPURequestAvgResult)
 }

+ 8 - 8
modules/collector-source/pkg/metric/aggregator/activeminutes.go → modules/collector-source/pkg/metric/aggregator/uptime.go

@@ -5,29 +5,29 @@ import (
 	"time"
 )
 
-// activateMinutesAggregator is a MetricAggregator which records the first and last timestamp of updates called on it
-type activeMinutesAggregator struct {
+// uptimeAggregator is a MetricAggregator which records the first and last timestamp of updates called on it
+type uptimeAggregator struct {
 	lock        sync.Mutex
 	labelValues []string
 	start       *time.Time
 	end         *time.Time
 }
 
-func ActiveMinutes(labelValues []string) MetricAggregator {
-	return &activeMinutesAggregator{
+func Uptime(labelValues []string) MetricAggregator {
+	return &uptimeAggregator{
 		labelValues: labelValues,
 	}
 }
 
-func (a *activeMinutesAggregator) AdditionInfo() map[string]string {
+func (a *uptimeAggregator) AdditionInfo() map[string]string {
 	return nil
 }
 
-func (a *activeMinutesAggregator) LabelValues() []string {
+func (a *uptimeAggregator) LabelValues() []string {
 	return a.labelValues
 }
 
-func (a *activeMinutesAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
+func (a *uptimeAggregator) Update(value float64, timestamp time.Time, additionalInfo map[string]string) {
 	a.lock.Lock()
 	defer a.lock.Unlock()
 	if a.start == nil {
@@ -38,7 +38,7 @@ func (a *activeMinutesAggregator) Update(value float64, timestamp time.Time, add
 	}
 }
 
-func (a *activeMinutesAggregator) Value() []MetricValue {
+func (a *uptimeAggregator) Value() []MetricValue {
 	a.lock.Lock()
 	defer a.lock.Unlock()
 	metricValues := make([]MetricValue, 0)

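To make the rename concrete, a minimal sketch (not in the commit) of the aggregator's behavior; the import path is assumed from the module layout and the UID value is hypothetical.

package main

import (
	"fmt"
	"time"

	"github.com/opencost/opencost/modules/collector-source/pkg/metric/aggregator"
)

func main() {
	agg := aggregator.Uptime([]string{"some-uid"})

	t0 := time.Now().UTC().Add(-10 * time.Minute)
	agg.Update(1, t0, nil)                     // records the start
	agg.Update(1, t0.Add(5*time.Minute), nil)  // extends the end
	agg.Update(1, t0.Add(10*time.Minute), nil) // extends the end again

	// Only the first and last timestamps are retained; the sample values
	// themselves are ignored, so Value() describes the t0 .. t0+10m span.
	for _, mv := range agg.Value() {
		fmt.Printf("%+v\n", mv)
	}
}
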
+ 1 - 1
modules/collector-source/pkg/metric/aggregator/activeminutes_test.go → modules/collector-source/pkg/metric/aggregator/uptime_test.go

@@ -106,7 +106,7 @@ func TestActiveMinutesAggregator_Value(t *testing.T) {
 	}
 	for name, tt := range tests {
 		t.Run(name, func(t *testing.T) {
-			agg := activeMinutesAggregator{}
+			agg := uptimeAggregator{}
 			for _, u := range tt.updates {
 				agg.Update(u.value, u.timestamp, u.additionalInformation)
 			}

+ 3 - 0
modules/collector-source/pkg/metric/collector.go

@@ -37,6 +37,7 @@ const (
 	NodeRAMUserUsageAverageID                  MetricCollectorID = "NodeRAMUserUsageAverage"
 	LBPricePerHourID                           MetricCollectorID = "LBPricePerHour"
 	LBActiveMinutesID                          MetricCollectorID = "LBActiveMinutes"
+	ClusterUptimeID                            MetricCollectorID = "ClusterUptime"
 	ClusterManagementDurationID                MetricCollectorID = "ClusterManagementDuration"
 	ClusterManagementPricePerHourID            MetricCollectorID = "ClusterManagementPricePerHour"
 	PodActiveMinutesID                         MetricCollectorID = "PodActiveMinutes"
@@ -77,6 +78,7 @@ const (
 	NetInternetIngressGiBID                    MetricCollectorID = "NetInternetIngressGiB"
 	NetInternetServiceIngressGiBID             MetricCollectorID = "NetInternetServiceIngressGiB"
 	NetReceiveBytesID                          MetricCollectorID = "NetReceiveBytes"
+	NamespaceUptimeID                          MetricCollectorID = "NamespaceUptime"
 	NamespaceLabelsID                          MetricCollectorID = "NamespaceLabels"
 	NamespaceAnnotationsID                     MetricCollectorID = "NamespaceAnnotations"
 	PodLabelsID                                MetricCollectorID = "PodLabels"
@@ -89,6 +91,7 @@ const (
 	PodsWithReplicaSetOwnerID                  MetricCollectorID = "PodsWithReplicaSetOwner"
 	ReplicaSetsWithoutOwnersID                 MetricCollectorID = "ReplicaSetsWithoutOwners"
 	ReplicaSetsWithRolloutID                   MetricCollectorID = "ReplicaSetsWithRollout"
+	ResourceQuotaUptimeID                      MetricCollectorID = "ResourceQuotaUptime"
 	ResourceQuotaSpecCPURequestAverageID       MetricCollectorID = "ResourceQuotaSpecCPURequestAverage"
 	ResourceQuotaSpecCPURequestMaxID           MetricCollectorID = "ResourceQuotaSpecCPURequestMax"
 	ResourceQuotaSpecRAMRequestAverageID       MetricCollectorID = "ResourceQuotaSpecRAMRequestAverage"

+ 31 - 22
modules/collector-source/pkg/metric/diagnostics.go

@@ -26,30 +26,32 @@ const (
 	NetworkCostsScraperDiagnosticID = event.NetworkCostsScraperName
 
 	// Kubernetes scrapers contains the identifiers for all the specific KubernetesCluster scrapers.
-	KubernetesNodesScraperDiagnosticID        = event.KubernetesClusterScraperName + "-" + event.NodeScraperType
-	KubernetesNamespacesScraperDiagnosticID   = event.KubernetesClusterScraperName + "-" + event.NamespaceScraperType
-	KubernetesReplicaSetsScraperDiagnosticID  = event.KubernetesClusterScraperName + "-" + event.ReplicaSetScraperType
-	KubernetesDeploymentsScraperDiagnosticID  = event.KubernetesClusterScraperName + "-" + event.DeploymentScraperType
-	KubernetesStatefulSetsScraperDiagnosticID = event.KubernetesClusterScraperName + "-" + event.StatefulSetScraperType
-	KubernetesServicesScraperDiagnosticID     = event.KubernetesClusterScraperName + "-" + event.ServiceScraperType
-	KubernetesPodsScraperDiagnosticID         = event.KubernetesClusterScraperName + "-" + event.PodScraperType
-	KubernetesPvsScraperDiagnosticID          = event.KubernetesClusterScraperName + "-" + event.PvScraperType
-	KubernetesPvcsScraperDiagnosticID         = event.KubernetesClusterScraperName + "-" + event.PvcScraperType
+	KubernetesNodesScraperDiagnosticID          = event.KubernetesClusterScraperName + "-" + event.NodeScraperType
+	KubernetesNamespacesScraperDiagnosticID     = event.KubernetesClusterScraperName + "-" + event.NamespaceScraperType
+	KubernetesReplicaSetsScraperDiagnosticID    = event.KubernetesClusterScraperName + "-" + event.ReplicaSetScraperType
+	KubernetesDeploymentsScraperDiagnosticID    = event.KubernetesClusterScraperName + "-" + event.DeploymentScraperType
+	KubernetesStatefulSetsScraperDiagnosticID   = event.KubernetesClusterScraperName + "-" + event.StatefulSetScraperType
+	KubernetesServicesScraperDiagnosticID       = event.KubernetesClusterScraperName + "-" + event.ServiceScraperType
+	KubernetesPodsScraperDiagnosticID           = event.KubernetesClusterScraperName + "-" + event.PodScraperType
+	KubernetesPvsScraperDiagnosticID            = event.KubernetesClusterScraperName + "-" + event.PvScraperType
+	KubernetesPvcsScraperDiagnosticID           = event.KubernetesClusterScraperName + "-" + event.PvcScraperType
+	KubernetesResourceQuotasScraperDiagnosticID = event.KubernetesClusterScraperName + "-" + event.ResourceQuotaScraperType
 
 	// Metric Names for the diagnostics (used in the UI)
-	DGGMScraperDiagnosticMetricName                   = "DCGM Metrics"
-	OpenCostScraperDiagnosticMetricName               = "Opencost Metrics"
-	NodeStatsScraperDiagnosticMetricName              = "Node Stats Metrics"
-	NetworkCostsScraperDiagnosticMetricName           = "Network Costs Metrics"
-	KubernetesNodesScraperDiagnosticMetricName        = "Kubernetes Nodes Metrics"
-	KubernetesNamespacesScraperDiagnosticMetricName   = "Kubernetes Namespaces Metrics"
-	KubernetesReplicaSetsScraperDiagnosticMetricName  = "Kubernetes Replica Sets Metrics"
-	KubernetesDeploymentsScraperDiagnosticMetricName  = "Kubernetes Deployments Metrics"
-	KubernetesStatefulSetsScraperDiagnosticMetricName = "Kubernetes Stateful Sets Metrics"
-	KubernetesServicesScraperDiagnosticMetricName     = "Kubernetes Services Metrics"
-	KubernetesPodsScraperDiagnosticMetricName         = "Kubernetes Pods Metrics"
-	KubernetesPvsScraperDiagnosticMetricName          = "Kubernetes PVs Metrics"
-	KubernetesPvcsScraperDiagnosticMetricName         = "Kubernetes PVCs Metrics"
+	DGGMScraperDiagnosticMetricName                     = "DCGM Metrics"
+	OpenCostScraperDiagnosticMetricName                 = "Opencost Metrics"
+	NodeStatsScraperDiagnosticMetricName                = "Node Stats Metrics"
+	NetworkCostsScraperDiagnosticMetricName             = "Network Costs Metrics"
+	KubernetesNodesScraperDiagnosticMetricName          = "Kubernetes Nodes Metrics"
+	KubernetesNamespacesScraperDiagnosticMetricName     = "Kubernetes Namespaces Metrics"
+	KubernetesReplicaSetsScraperDiagnosticMetricName    = "Kubernetes Replica Sets Metrics"
+	KubernetesDeploymentsScraperDiagnosticMetricName    = "Kubernetes Deployments Metrics"
+	KubernetesStatefulSetsScraperDiagnosticMetricName   = "Kubernetes Stateful Sets Metrics"
+	KubernetesServicesScraperDiagnosticMetricName       = "Kubernetes Services Metrics"
+	KubernetesPodsScraperDiagnosticMetricName           = "Kubernetes Pods Metrics"
+	KubernetesPvsScraperDiagnosticMetricName            = "Kubernetes PVs Metrics"
+	KubernetesPvcsScraperDiagnosticMetricName           = "Kubernetes PVCs Metrics"
+	KubernetesResourceQuotasScraperDiagnosticMetricName = "Kubernetes Resource Quotas Metrics"
 )
 
 // diagnostic definition is the type used to define a deterministic list of specific diagnostics we _expect_ to collect
@@ -153,6 +155,13 @@ var diagnosticDefinitions map[string]*diagnosticDefinition = map[string]*diagnos
 		Label:       fmt.Sprintf("Kubernetes cluster resources: %s are available and being scraped", event.PvcScraperType),
 		Description: scraperDiagnosticDescriptionFor(event.KubernetesClusterScraperName, event.PvcScraperType),
 	},
+
+	KubernetesResourceQuotasScraperDiagnosticID: {
+		ID:          KubernetesResourceQuotasScraperDiagnosticID,
+		MetricName:  KubernetesResourceQuotasScraperDiagnosticMetricName,
+		Label:       fmt.Sprintf("Kubernetes cluster resources: %s are available and being scraped", event.ResourceQuotaScraperType),
+		Description: scraperDiagnosticDescriptionFor(event.KubernetesClusterScraperName, event.ResourceQuotaScraperType),
+	},
 }
 
 // scraper identifier for diagnostic mapping _must_ match diagnostic ids defined above

+ 3 - 0
modules/collector-source/pkg/metric/metrics.go

@@ -2,6 +2,7 @@ package metric
 
 const (
 	// Cluster Cache Metrics
+	ClusterInfo                                           = "cluster_info"
 	KubeNodeStatusCapacityCPUCores                        = "kube_node_status_capacity_cpu_cores"
 	KubeNodeStatusCapacityMemoryBytes                     = "kube_node_status_capacity_memory_bytes"
 	KubeNodeStatusAllocatableCPUCores                     = "kube_node_status_allocatable_cpu_cores"
@@ -18,11 +19,13 @@ const (
 	KubecostPVInfo                                        = "kubecost_pv_info"
 	KubePersistentVolumeCapacityBytes                     = "kube_persistentvolume_capacity_bytes"
 	DeploymentMatchLabels                                 = "deployment_match_labels"
+	NamespaceInfo                                         = "namespace_info"
 	KubeNamespaceLabels                                   = "kube_namespace_labels"
 	KubeNamespaceAnnotations                              = "kube_namespace_annotations"
 	ServiceSelectorLabels                                 = "service_selector_labels"
 	StatefulSetMatchLabels                                = "statefulSet_match_labels"
 	KubeReplicasetOwner                                   = "kube_replicaset_owner"
+	ResourceQuotaInfo                                     = "resourcequota_info"
 	KubeResourceQuotaSpecResourceRequests                 = "resourcequota_spec_resource_requests"
 	KubeResourceQuotaSpecResourceLimits                   = "resourcequota_spec_resource_limits"
 	KubeResourceQuotaStatusUsedResourceRequests           = "resourcequota_status_used_resource_requests"

+ 1 - 1
modules/collector-source/pkg/metric/walinator_test.go

@@ -25,7 +25,7 @@ func testMetricCollector() MetricStore {
 		[]string{
 			"test",
 		},
-		aggregator.ActiveMinutes,
+		aggregator.Uptime,
 		nil,
 	))
 

+ 14 - 0
modules/collector-source/pkg/scrape/clustercache.go

@@ -174,6 +174,13 @@ func (ccs *ClusterCacheScraper) scrapeNamespaces(namespaces []*clustercache.Name
 			source.UIDLabel:       string(namespace.UID),
 		}
 
+		scrapeResults = append(scrapeResults, metric.Update{
+			Name:           metric.NamespaceInfo,
+			Labels:         namespaceInfo,
+			AdditionalInfo: namespaceInfo,
+			Value:          0,
+		})
+
 		// namespace labels
 		labelNames, labelValues := promutil.KubeLabelsToLabels(namespace.Labels)
 		namespaceLabels := util.ToMap(labelNames, labelValues)
@@ -568,6 +575,13 @@ func (ccs *ClusterCacheScraper) scrapeResourceQuotas(resourceQuotas []*clusterca
 			source.UIDLabel:           string(resourceQuota.UID),
 		}
 
+		scrapeResults = append(scrapeResults, metric.Update{
+			Name:           metric.ResourceQuotaInfo,
+			Labels:         resourceQuotaInfo,
+			AdditionalInfo: resourceQuotaInfo,
+			Value:          0,
+		})
+
 		if resourceQuota.Spec.Hard != nil {
 			// CPU/memory requests can also be aliased as "cpu" and "memory". For now, however, only scrape the complete names
 			// https://kubernetes.io/docs/concepts/policy/resource-quotas/#compute-resource-quota

+ 26 - 0
modules/collector-source/pkg/scrape/clustercache_test.go

@@ -244,6 +244,18 @@ func Test_kubernetesScraper_scrapeNamespaces(t *testing.T) {
 				},
 			},
 			expected: []metric.Update{
+				{
+					Name: metric.NamespaceInfo,
+					Labels: map[string]string{
+						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
+					},
+					Value: 0,
+					AdditionalInfo: map[string]string{
+						source.NamespaceLabel: "namespace1",
+						source.UIDLabel:       "uuid1",
+					},
+				},
 				{
 					Name: metric.KubeNamespaceLabels,
 					Labels: map[string]string{
@@ -956,6 +968,20 @@ func Test_kubernetesScraper_scrapeResourceQuotas(t *testing.T) {
 				},
 			},
 			expected: []metric.Update{
+				{
+					Name: metric.ResourceQuotaInfo,
+					Labels: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+					},
+					Value: 0,
+					AdditionalInfo: map[string]string{
+						source.ResourceQuotaLabel: "resourceQuota1",
+						source.NamespaceLabel:     "namespace1",
+						source.UIDLabel:           "uuid1",
+					},
+				},
 				{
 					Name: metric.KubeResourceQuotaSpecResourceRequests,
 					Labels: map[string]string{

+ 56 - 0
modules/collector-source/pkg/scrape/clusterinfo.go

@@ -0,0 +1,56 @@
+package scrape
+
+import (
+	"github.com/opencost/opencost/core/pkg/clusters"
+	"github.com/opencost/opencost/core/pkg/source"
+	"github.com/opencost/opencost/modules/collector-source/pkg/metric"
+)
+
+type ClusterInfoScraper struct {
+	clusterUID          string
+	clusterInfoProvider clusters.ClusterInfoProvider
+}
+
+func newClusterInfoScraper(clusterUID string, clusterInfoProvider clusters.ClusterInfoProvider) Scraper {
+	return &ClusterInfoScraper{
+		clusterUID:          clusterUID,
+		clusterInfoProvider: clusterInfoProvider,
+	}
+}
+
+func (cis *ClusterInfoScraper) Scrape() []metric.Update {
+	var scrapeResults []metric.Update
+
+	// extract label values from cluster info provider
+	clusterInfoMap := cis.clusterInfoProvider.GetClusterInfo()
+	clusterName := clusterInfoMap[clusters.ClusterInfoIdKey]
+	provider := clusterInfoMap[clusters.ClusterInfoProviderKey]
+
+	accountID := clusterInfoMap[clusters.ClusterInfoAccountKey]
+	// GCP special case
+	if accountID == "" {
+		accountID = clusterInfoMap[clusters.ClusterInfoProjectKey]
+	}
+
+	provisioner := clusterInfoMap[clusters.ClusterInfoProvisionerKey]
+
+	region := clusterInfoMap[clusters.ClusterInfoRegionKey]
+
+	clusterInfo := map[string]string{
+		source.UIDLabel:             cis.clusterUID,
+		source.ClusterNameLabel:     clusterName,
+		source.ProviderLabel:        provider,
+		source.AccountIDLabel:       accountID,
+		source.ProvisionerNameLabel: provisioner,
+		source.RegionLabel:          region,
+	}
+
+	scrapeResults = append(scrapeResults, metric.Update{
+		Name:           metric.ClusterInfo,
+		Labels:         clusterInfo,
+		AdditionalInfo: clusterInfo,
+		Value:          0,
+	})
+	return scrapeResults
+}

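A hedged sketch (not in the commit) of the scraper above driven by a stub provider. It assumes ClusterInfoProvider is satisfied by GetClusterInfo alone, and all info values are hypothetical.

package scrape

import (
	"fmt"

	"github.com/opencost/opencost/core/pkg/clusters"
)

// stubInfoProvider is a hypothetical ClusterInfoProvider for illustration.
type stubInfoProvider struct{ info map[string]string }

func (s *stubInfoProvider) GetClusterInfo() map[string]string { return s.info }

func ExampleClusterInfoScraper() {
	scraper := newClusterInfoScraper("uid-123", &stubInfoProvider{info: map[string]string{
		clusters.ClusterInfoIdKey:       "prod-cluster",
		clusters.ClusterInfoProviderKey: "GCP",
		clusters.ClusterInfoProjectKey:  "my-project", // on GCP, project stands in for account
		clusters.ClusterInfoRegionKey:   "us-central1",
	}})

	updates := scraper.Scrape()
	fmt.Println(updates[0].Name)
	// Output: cluster_info
}
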
+ 1 - 0
modules/collector-source/pkg/scrape/opencost.go

@@ -20,6 +20,7 @@ func newOpencostTargetScraper(provider target.TargetProvider) *TargetScraper {
 		event.OpenCostScraperName,
 		provider,
 		[]string{
+			metric.ClusterInfo,
 			metric.KubecostClusterManagementCost,
 			metric.KubecostNetworkZoneEgressCost,
 			metric.KubecostNetworkRegionEgressCost,

+ 6 - 0
modules/collector-source/pkg/scrape/scrapecontroller.go

@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
+	"github.com/opencost/opencost/core/pkg/clusters"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/nodestats"
 	"github.com/opencost/opencost/core/pkg/util/atomic"
@@ -21,14 +22,19 @@ type ScrapeController struct {
 }
 
 func NewScrapeController(
+	clusterUID string,
 	scrapeInterval string,
 	networkPort int,
 	updater metric.Updater,
+	clusterInfoProvider clusters.ClusterInfoProvider,
 	clusterCache clustercache.ClusterCache,
 	statSummaryClient nodestats.StatSummaryClient,
 ) *ScrapeController {
 
 	var scrapers []Scraper
+	clusterInfoScraper := newClusterInfoScraper(clusterUID, clusterInfoProvider)
+	scrapers = append(scrapers, clusterInfoScraper)
+
 	clusterCacheScraper := newClusterCacheScraper(clusterCache)
 	scrapers = append(scrapers, clusterCacheScraper)
 

+ 3 - 0
modules/prometheus-source/pkg/prom/contextnames.go

@@ -31,4 +31,7 @@ const (
 
 	// NetworkInsightsContextName is the name we assign the network insights query context [metadata]
 	NetworkInsightsContextName = "networkinsight"
+
+	// KubeModelContextName is the name we assign the kubemodel query context [metadata]
+	KubeModelContextName = "kube-model"
 )

+ 80 - 18
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -474,6 +474,27 @@ func (pds *PrometheusMetricsQuerier) QueryLBActiveMinutes(start, end time.Time)
 	return source.NewFuture(source.DecodeLBActiveMinutesResult, ctx.QueryAtTime(queryLBActiveMins, end))
 }
 
+// Note: cluster_info is not currently emitted
+func (pds *PrometheusMetricsQuerier) QueryClusterUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	const queryName = "QueryClusterUptime"
+	const queryFmtClusterUptime = `avg(cluster_info{%s}) by (%s, uid)[%s:%dm]`
+
+	cfg := pds.promConfig
+
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := pds.durationStringFor(start, end, minsPerResolution, false)
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryClusterUptime := fmt.Sprintf(queryFmtClusterUptime, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryClusterUptime)
+
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
+	return source.NewFuture(source.DecodeUptimeResult, ctx.QueryAtTime(queryClusterUptime, end))
+}
+
 func (pds *PrometheusMetricsQuerier) QueryClusterManagementDuration(start, end time.Time) *source.Future[source.ClusterManagementDurationResult] {
 	const queryName = "QueryClusterManagementDuration"
 	const clusterManagementDurationQuery = `avg(kubecost_cluster_management_cost{%s}) by (%s, provisioner_name)[%s:%dm]`
@@ -1268,6 +1289,27 @@ func (pds *PrometheusMetricsQuerier) QueryNetReceiveBytes(start, end time.Time)
 	return source.NewFuture(source.DecodeNetReceiveBytesResult, ctx.QueryAtTime(queryNetReceiveBytes, end))
 }
 
+// Note: namespace_info is not currently emitted
+func (pds *PrometheusMetricsQuerier) QueryNamespaceUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	const queryName = "QueryNamespaceUptime"
+	const queryFmtNamespaceUptime = `avg(namespace_info{%s}) by (%s, uid)[%s:%dm]`
+
+	cfg := pds.promConfig
+
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := pds.durationStringFor(start, end, minsPerResolution, false)
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryNamespaceUptime := fmt.Sprintf(queryFmtNamespaceUptime, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryNamespaceUptime)
+
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
+	return source.NewFuture(source.DecodeUptimeResult, ctx.QueryAtTime(queryNamespaceUptime, end))
+}
+
 func (pds *PrometheusMetricsQuerier) QueryNamespaceLabels(start, end time.Time) *source.Future[source.NamespaceLabelsResult] {
 	const queryName = "QueryNamespaceLabels"
 	const queryFmtNamespaceLabels = `avg_over_time(kube_namespace_labels{%s}[%s])`
@@ -1486,6 +1528,26 @@ func (pds *PrometheusMetricsQuerier) QueryReplicaSetsWithRollout(start, end time
 
 // Note: The ResourceQuota metrics are _not_ emitted at the moment. Leaving the query implementations here in case we add metric emission later on.
 
+func (pds *PrometheusMetricsQuerier) QueryResourceQuotaUptime(start, end time.Time) *source.Future[source.UptimeResult] {
+	const queryName = "QueryResourceQuotaUptime"
+	const queryFmtResourceQuotaUptime = `avg(resourcequota_info{%s}) by (%s, uid)[%s:%dm]`
+
+	cfg := pds.promConfig
+
+	minsPerResolution := cfg.DataResolutionMinutes
+
+	durStr := pds.durationStringFor(start, end, minsPerResolution, false)
+	if durStr == "" {
+		panic(fmt.Sprintf("failed to parse duration string passed to %s", queryName))
+	}
+
+	queryResourceQuotaUptime := fmt.Sprintf(queryFmtResourceQuotaUptime, cfg.ClusterFilter, cfg.ClusterLabel, durStr, minsPerResolution)
+	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryFmtResourceQuotaUptime)
+
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
+	return source.NewFuture(source.DecodeUptimeResult, ctx.QueryAtTime(queryResourceQuotaUptime, end))
+}
+
 func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPURequestAverage(start, end time.Time) *source.Future[source.ResourceQuotaSpecCPURequestAvgResult] {
 	const queryName = "QueryResourceQuotaSpecCPURequestAverage"
 	const queryFmtResourceQuotaSpecCPURequests = `avg(avg_over_time(resourcequota_spec_resource_requests{resource="cpu",unit="core", %s}[%s])) by (resourcequota, namespace, uid, %s)`
@@ -1500,7 +1562,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPURequestAverage(sta
 	queryResourceQuotaSpecCPURequests := fmt.Sprintf(queryFmtResourceQuotaSpecCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPURequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecCPURequestAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecCPURequests, end))
 }
 
@@ -1518,7 +1580,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPURequestMax(start,
 	queryResourceQuotaSpecCPURequests := fmt.Sprintf(queryFmtResourceQuotaSpecCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPURequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecCPURequestMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecCPURequests, end))
 }
 
@@ -1536,7 +1598,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMRequestAverage(sta
 	queryResourceQuotaSpecRAMRequests := fmt.Sprintf(queryFmtResourceQuotaSpecRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMRequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecRAMRequestAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMRequests, end))
 }
 
@@ -1554,7 +1616,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMRequestMax(start,
 	queryResourceQuotaSpecRAMRequests := fmt.Sprintf(queryFmtResourceQuotaSpecRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMRequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecRAMRequestMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMRequests, end))
 }
 
@@ -1572,7 +1634,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPULimitAverage(start
 	queryResourceQuotaSpecCPULimits := fmt.Sprintf(queryFmtResourceQuotaSpecCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPULimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecCPULimitAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecCPULimits, end))
 }
 
@@ -1590,7 +1652,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecCPULimitMax(start, en
 	queryResourceQuotaSpecCPULimits := fmt.Sprintf(queryFmtResourceQuotaSpecCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecCPULimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecCPULimitMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecCPULimits, end))
 }
 
@@ -1608,7 +1670,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMLimitAverage(start
 	queryResourceQuotaSpecRAMLimits := fmt.Sprintf(queryFmtResourceQuotaSpecRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMLimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecRAMLimitAvgResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMLimits, end))
 }
 
@@ -1626,7 +1688,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaSpecRAMLimitMax(start, en
 	queryResourceQuotaSpecRAMLimits := fmt.Sprintf(queryFmtResourceQuotaSpecRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaSpecRAMLimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaSpecRAMLimitMaxResult, ctx.QueryAtTime(queryResourceQuotaSpecRAMLimits, end))
 }
 
@@ -1644,7 +1706,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestAvera
 	queryResourceQuotaStatusUsedCPURequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPURequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPURequestAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPURequests, end))
 }
 
@@ -1662,7 +1724,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPURequestMax(s
 	queryResourceQuotaStatusUsedCPURequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPURequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPURequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPURequestMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPURequests, end))
 }
 
@@ -1680,7 +1742,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestAvera
 	queryResourceQuotaStatusUsedRAMRequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMRequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMRequestAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMRequests, end))
 }
 
@@ -1698,7 +1760,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMRequestMax(s
 	queryResourceQuotaStatusUsedRAMRequests := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMRequests, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMRequests)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMRequestMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMRequests, end))
 }
 
@@ -1716,7 +1778,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitAverage
 	queryResourceQuotaStatusUsedCPULimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPULimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPULimitAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPULimits, end))
 }
 
@@ -1734,7 +1796,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedCPULimitMax(sta
 	queryResourceQuotaStatusUsedCPULimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedCPULimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedCPULimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedCPULimitMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedCPULimits, end))
 }
 
@@ -1752,7 +1814,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitAverage
 	queryResourceQuotaStatusUsedRAMLimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMLimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMLimitAvgResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMLimits, end))
 }
 
@@ -1770,7 +1832,7 @@ func (pds *PrometheusMetricsQuerier) QueryResourceQuotaStatusUsedRAMLimitMax(sta
 	queryResourceQuotaStatusUsedRAMLimits := fmt.Sprintf(queryFmtResourceQuotaStatusUsedRAMLimits, cfg.ClusterFilter, durStr, cfg.ClusterLabel)
 	log.Debugf(PrometheusMetricsQueryLogFormat, queryName, end.Unix(), queryResourceQuotaStatusUsedRAMLimits)
 
-	ctx := pds.promContexts.NewNamedContext(AllocationContextName)
+	ctx := pds.promContexts.NewNamedContext(KubeModelContextName)
 	return source.NewFuture(source.DecodeResourceQuotaStatusUsedRAMLimitMaxResult, ctx.QueryAtTime(queryResourceQuotaStatusUsedRAMLimits, end))
 }
 
@@ -1803,11 +1865,11 @@ func (pds *PrometheusMetricsQuerier) QueryDataCoverage(limitDays int) (time.Time
 		// If node_cpu_hourly_cost metric is not available, fallback to a reasonable time range
 		// This prevents CSV export from failing when the metric doesn't exist yet
 		log.Warnf("QueryDataCoverage: node_cpu_hourly_cost metric not available, using fallback time range")
-		
+
 		// Use a reasonable fallback: start from 1 day ago to account for metric collection delay
 		fallbackEnd := time.Now().UTC().Truncate(timeutil.Day)
 		fallbackStart := fallbackEnd.AddDate(0, 0, -1) // 1 day ago
-		
+
 		return fallbackStart, fallbackEnd, nil
 	}
 
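To make the query shape above concrete, a sketch (not in the commit) of the string the format produces. The format is copied from QueryResourceQuotaUptime; the filter, label, duration, and resolution values are hypothetical.

package main

import "fmt"

func main() {
	const queryFmt = `avg(resourcequota_info{%s}) by (%s, uid)[%s:%dm]`

	// Hypothetical ClusterFilter, ClusterLabel, window, and resolution.
	q := fmt.Sprintf(queryFmt, `cluster_id="c1"`, "cluster_id", "24h", 5)
	fmt.Println(q)
	// avg(resourcequota_info{cluster_id="c1"}) by (cluster_id, uid)[24h:5m]
}
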

+ 4 - 4
pkg/cloud/provider/cloud_test.go

@@ -283,7 +283,7 @@ func TestNodePriceFromCSVWithGPULabels(t *testing.T) {
 	fm := FakeClusterMap{}
 	d, _ := time.ParseDuration("1m")
 
-	model := costmodel.NewCostModel(nil, c, fc, fm, d)
+	model := costmodel.NewCostModel("cluster-uid", nil, c, fc, fm, d)
 
 	nodeMap, err := model.GetNodeCost()
 	if err != nil {
@@ -351,7 +351,7 @@ func TestRKE2NodePriceFromCSVWithGPULabels(t *testing.T) {
 	fm := FakeClusterMap{}
 	d, _ := time.ParseDuration("1m")
 
-	model := costmodel.NewCostModel(nil, c, fc, fm, d)
+	model := costmodel.NewCostModel("cluster-uid", nil, c, fc, fm, d)
 
 	nodeMap, err := model.GetNodeCost()
 	if err != nil {
@@ -668,7 +668,7 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 	fm := FakeClusterMap{}
 	d, _ := time.ParseDuration("1m")
 
-	model := costmodel.NewCostModel(nil, c, fc, fm, d)
+	model := costmodel.NewCostModel("cluster-uid", nil, c, fc, fm, d)
 
 	_, err = model.GetNodeCost()
 	if err != nil {
@@ -725,7 +725,7 @@ func TestSourceMatchesFromCSV(t *testing.T) {
 	fm := FakeClusterMap{}
 	d, _ := time.ParseDuration("1m")
 
-	model := costmodel.NewCostModel(nil, c, fc, fm, d)
+	model := costmodel.NewCostModel("cluster-uid", nil, c, fc, fm, d)
 
 	_, err = model.GetNodeCost()
 	if err != nil {

+ 1 - 2
pkg/clustercache/clustercache.go

@@ -18,8 +18,7 @@ import (
 
 // KubernetesClusterCache is the implementation of ClusterCache
 type KubernetesClusterCache struct {
-	client kubernetes.Interface
-
+	client                     kubernetes.Interface
 	namespaceWatch             WatchController
 	nodeWatch                  WatchController
 	podWatch                   WatchController

+ 0 - 206
pkg/clustercache/clusterimporter.go

@@ -1,206 +0,0 @@
-package clustercache
-
-import (
-	"sync"
-
-	cc "github.com/opencost/opencost/core/pkg/clustercache"
-	"github.com/opencost/opencost/core/pkg/log"
-	"github.com/opencost/opencost/core/pkg/util/json"
-	"github.com/opencost/opencost/pkg/config"
-	"golang.org/x/exp/slices"
-)
-
-// ClusterImporter is an implementation of ClusterCache which leverages a backing configuration file
-// as it's source of the cluster data.
-type ClusterImporter struct {
-	source          *config.ConfigFile
-	sourceHandlerID config.HandlerID
-	dataLock        *sync.Mutex
-	data            *clusterEncoding
-}
-
-// Creates a new ClusterCache implementation which uses an import process to provide cluster data
-func NewClusterImporter(source *config.ConfigFile) cc.ClusterCache {
-	return &ClusterImporter{
-		source:   source,
-		dataLock: new(sync.Mutex),
-		data:     new(clusterEncoding),
-	}
-}
-
-// onImportSourceChanged handles the source data updating
-func (ci *ClusterImporter) onImportSourceChanged(changeType config.ChangeType, data []byte) {
-	if changeType == config.ChangeTypeDeleted {
-		ci.dataLock.Lock()
-		ci.data = new(clusterEncoding)
-		ci.dataLock.Unlock()
-		return
-	}
-
-	ci.update(data)
-}
-
-// update replaces the underlying cluster data with the provided new data if it decodes
-func (ci *ClusterImporter) update(data []byte) {
-	ce := new(clusterEncoding)
-	err := json.Unmarshal(data, ce)
-	if err != nil {
-		log.Warnf("Failed to unmarshal cluster during import: %s", err)
-		return
-	}
-
-	ci.dataLock.Lock()
-	ci.data = ce
-	ci.dataLock.Unlock()
-}
-
-// Run starts the watcher processes
-func (ci *ClusterImporter) Run() {
-	if ci.source == nil {
-		log.Errorf("ClusterImporter source does not exist, not running")
-		return
-	}
-
-	exists, err := ci.source.Exists()
-	if err != nil {
-		log.Errorf("Failed to import source for cluster: %s", err)
-		return
-	}
-
-	if exists {
-		data, err := ci.source.Read()
-		if err != nil {
-			log.Warnf("Failed to import cluster: %s", err)
-		} else {
-			ci.update(data)
-		}
-	}
-
-	ci.sourceHandlerID = ci.source.AddChangeHandler(ci.onImportSourceChanged)
-}
-
-// Stops the watcher processes
-func (ci *ClusterImporter) Stop() {
-	if ci.sourceHandlerID != "" {
-		ci.source.RemoveChangeHandler(ci.sourceHandlerID)
-		ci.sourceHandlerID = ""
-	}
-}
-
-// GetAllNamespaces returns all the cached namespaces
-func (ci *ClusterImporter) GetAllNamespaces() []*cc.Namespace {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Namespaces)
-}
-
-// GetAllNodes returns all the cached nodes
-func (ci *ClusterImporter) GetAllNodes() []*cc.Node {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Nodes)
-}
-
-// GetAllPods returns all the cached pods
-func (ci *ClusterImporter) GetAllPods() []*cc.Pod {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Pods)
-}
-
-// GetAllServices returns all the cached services
-func (ci *ClusterImporter) GetAllServices() []*cc.Service {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Services)
-}
-
-// GetAllDaemonSets returns all the cached DaemonSets
-func (ci *ClusterImporter) GetAllDaemonSets() []*cc.DaemonSet {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.DaemonSets)
-}
-
-// GetAllDeployments returns all the cached deployments
-func (ci *ClusterImporter) GetAllDeployments() []*cc.Deployment {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Deployments)
-}
-
-// GetAllStatefulSets returns all the cached StatefulSets
-func (ci *ClusterImporter) GetAllStatefulSets() []*cc.StatefulSet {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.StatefulSets)
-}
-
-// GetAllReplicaSets returns all the cached ReplicaSets
-func (ci *ClusterImporter) GetAllReplicaSets() []*cc.ReplicaSet {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.ReplicaSets)
-}
-
-// GetAllPersistentVolumes returns all the cached persistent volumes
-func (ci *ClusterImporter) GetAllPersistentVolumes() []*cc.PersistentVolume {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.PersistentVolumes)
-}
-
-// GetAllPersistentVolumeClaims returns all the cached persistent volume claims
-func (ci *ClusterImporter) GetAllPersistentVolumeClaims() []*cc.PersistentVolumeClaim {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.PersistentVolumeClaims)
-}
-
-// GetAllStorageClasses returns all the cached storage classes
-func (ci *ClusterImporter) GetAllStorageClasses() []*cc.StorageClass {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.StorageClasses)
-}
-
-// GetAllJobs returns all the cached jobs
-func (ci *ClusterImporter) GetAllJobs() []*cc.Job {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.Jobs)
-}
-
-// GetAllPodDisruptionBudgets returns all cached pod disruption budgets
-func (ci *ClusterImporter) GetAllPodDisruptionBudgets() []*cc.PodDisruptionBudget {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.PodDisruptionBudgets)
-}
-
-func (ci *ClusterImporter) GetAllReplicationControllers() []*cc.ReplicationController {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.ReplicationControllers)
-}
-
-func (ci *ClusterImporter) GetAllResourceQuotas() []*cc.ResourceQuota {
-	ci.dataLock.Lock()
-	defer ci.dataLock.Unlock()
-
-	return slices.Clone(ci.data.ResourceQuotas)
-}

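For reviewers tracing the deletion: every accessor in the removed ClusterImporter followed the same clone-under-lock pattern. A decoded JSON snapshot is swapped in atomically, and each getter returns slices.Clone so callers cannot mutate the cache. A minimal, self-contained sketch of that pattern (all names here are illustrative, not from the repository):

package sketch

import (
	"slices" // stdlib since Go 1.21; the removed file used golang.org/x/exp/slices
	"sync"
)

// snapshot stands in for the decoded clusterEncoding; names are illustrative.
type snapshot struct {
	Namespaces []string
}

type importer struct {
	mu   sync.Mutex
	data *snapshot
}

// replace swaps in a freshly decoded snapshot under the lock, so readers
// never observe a half-updated cache.
func (i *importer) replace(s *snapshot) {
	i.mu.Lock()
	i.data = s
	i.mu.Unlock()
}

// getAllNamespaces returns a clone, so callers cannot mutate cached state.
func (i *importer) getAllNamespaces() []string {
	i.mu.Lock()
	defer i.mu.Unlock()
	return slices.Clone(i.data.Namespaces)
}
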
+ 6 - 1
pkg/cmd/agent/agent.go

@@ -73,6 +73,11 @@ func Execute(opts *AgentOpts) error {
 		panic(err.Error())
 	}
 
+	clusterUID, err := kubeconfig.GetClusterUID(k8sClient)
+	if err != nil {
+		return fmt.Errorf("error getting cluster UID: %w", err)
+	}
+
 	// Create ConfigFileManager for synchronization of shared configuration
 	confManager := config.NewConfigFileManager(nil)
 
@@ -138,7 +143,7 @@ func Execute(opts *AgentOpts) error {
 	// Initialize ClusterMap for maintaining ClusterInfo by ClusterID
 	clusterMap := dataSource.ClusterMap()
 
-	costModel := costmodel.NewCostModel(dataSource, cloudProvider, clusterCache, clusterMap, dataSource.BatchDuration())
+	costModel := costmodel.NewCostModel(clusterUID, dataSource, cloudProvider, clusterCache, clusterMap, dataSource.BatchDuration())
 
 	// initialize Kubernetes Metrics Emitter
 	metricsEmitter := costmodel.NewCostModelMetricsEmitter(clusterCache, cloudProvider, clusterInfoProvider, costModel)

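The new clusterUID plumbing starts here. A common way to derive a stable cluster UID is to read the UID of the kube-system namespace, which persists for the cluster's lifetime; whether kubeconfig.GetClusterUID does exactly this is an assumption, so treat the sketch below as an illustration rather than the repository's implementation:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getClusterUID derives a cluster-stable identifier from the kube-system
// namespace UID. Hypothetical; shown only to explain what the clusterUID
// value threaded through Execute above is carrying.
func getClusterUID(client kubernetes.Interface) (string, error) {
	ns, err := client.CoreV1().Namespaces().Get(context.Background(), "kube-system", metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("error getting kube-system namespace: %w", err)
	}
	return string(ns.UID), nil
}
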
+ 23 - 0
pkg/costmodel/costmodel.go

@@ -15,11 +15,13 @@ import (
 	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/filter/allocation"
 	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
+	km "github.com/opencost/opencost/pkg/kubemodel"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -47,10 +49,12 @@ type CostModel struct {
 	RequestGroup    *singleflight.Group
 	DataSource      source.OpenCostDataSource
 	Provider        costAnalyzerCloud.Provider
+	KubeModel       *km.KubeModel
 	pricingMetadata *costAnalyzerCloud.PricingMatchMetadata
 }
 
 func NewCostModel(
+	clusterUID string,
 	dataSource source.OpenCostDataSource,
 	provider costAnalyzerCloud.Provider,
 	cache clustercache.ClusterCache,
@@ -60,6 +64,16 @@ func NewCostModel(
 	// request grouping to prevent over-requesting the same data prior to caching
 	requestGroup := new(singleflight.Group)
 
+	var kubeModel *km.KubeModel
+	var err error
+	if dataSource != nil {
+		kubeModel, err = km.NewKubeModel(clusterUID, dataSource)
+		if err != nil {
+			// KubeModel is required. Log a fatal error if we fail to init.
+			log.Fatalf("error initializing KubeModel: %s", err)
+		}
+	}
+
 	return &CostModel{
 		Cache:         cache,
 		ClusterMap:    clusterMap,
@@ -67,9 +81,18 @@ func NewCostModel(
 		DataSource:    dataSource,
 		Provider:      provider,
 		RequestGroup:  requestGroup,
+		KubeModel:     kubeModel,
 	}
 }
 
+func (cm *CostModel) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
+	if cm.KubeModel == nil {
+		return nil, fmt.Errorf("KubeModel not initialized")
+	}
+
+	return cm.KubeModel.ComputeKubeModelSet(start, end)
+}
+
 type CostData struct {
 	Name            string                       `json:"name,omitempty"`
 	PodName         string                       `json:"podName,omitempty"`

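A hypothetical call site for the new method, assuming a *CostModel constructed with a non-nil data source (so KubeModel is set); computeLastDay and the 24-hour window are illustrative:

package sketch

import (
	"time"

	"github.com/opencost/opencost/core/pkg/model/kubemodel"
	"github.com/opencost/opencost/pkg/costmodel"
)

// computeLastDay shows the intended usage of the new method over a
// trailing 24-hour window.
func computeLastDay(cm *costmodel.CostModel) (*kubemodel.KubeModelSet, error) {
	end := time.Now().UTC()
	start := end.Add(-24 * time.Hour)
	// ComputeKubeModelSet returns an error rather than panicking when the
	// CostModel was constructed without a data source (KubeModel == nil).
	return cm.ComputeKubeModelSet(start, end)
}
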
+ 7 - 1
pkg/costmodel/router.go

@@ -406,6 +406,11 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 		log.Fatalf("Failed to build Kubernetes client: %s", err.Error())
 	}
 
+	clusterUID, err := kubeconfig.GetClusterUID(kubeClientset)
+	if err != nil {
+		log.Fatalf("Failed to determine cluster UID: %s", err)
+	}
+
 	// Create Kubernetes Cluster Cache + Watchers
 	k8sCache := clusterc.NewKubernetesClusterCache(kubeClientset)
 	k8sCache.Run()
@@ -459,6 +464,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 			}
 			nodeStatClient := nodestats.NewNodeStatsSummaryClient(k8sCache, nodeStatConf, clusterConfig)
 			ds := collector.NewDefaultCollectorDataSource(
+				clusterUID,
 				store,
 				clusterInfoProvider,
 				k8sCache,
@@ -491,7 +497,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	clusterMap := dataSource.ClusterMap()
 	settingsCache := cache.New(cache.NoExpiration, cache.NoExpiration)
 
-	costModel := NewCostModel(dataSource, cloudProvider, k8sCache, clusterMap, dataSource.BatchDuration())
+	costModel := NewCostModel(clusterUID, dataSource, cloudProvider, k8sCache, clusterMap, dataSource.BatchDuration())
 	metricsEmitter := NewCostModelMetricsEmitter(k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{

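Nothing in this commit exposes ComputeKubeModelSet over HTTP, but for reviewers wondering how it could surface through this router, a hypothetical handler might look like the sketch below. The route shape, query parameter, and wiring are assumptions, not part of the change:

package sketch

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/julienschmidt/httprouter"

	"github.com/opencost/opencost/pkg/costmodel"
)

// kubeModelSetHandler is hypothetical; it assumes access to the CostModel
// built in Initialize above and a "window" query parameter such as "24h".
func kubeModelSetHandler(cm *costmodel.CostModel) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		dur, err := time.ParseDuration(r.URL.Query().Get("window"))
		if err != nil {
			http.Error(w, fmt.Sprintf("invalid window: %s", err), http.StatusBadRequest)
			return
		}
		end := time.Now().UTC()
		kms, err := cm.ComputeKubeModelSet(end.Add(-dur), end)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(kms)
	}
}
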
+ 369 - 0
pkg/kubemodel/kubemodel.go

@@ -0,0 +1,369 @@
+package kubemodel
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/model/kubemodel"
+	"github.com/opencost/opencost/core/pkg/source"
+)
+
+const logTimeFmt string = "2006-01-02T15:04:05"
+
+type KubeModel struct {
+	ds         source.OpenCostDataSource
+	clusterUID string
+}
+
+func NewKubeModel(clusterUID string, dataSource source.OpenCostDataSource) (*KubeModel, error) {
+	if dataSource == nil {
+		return nil, errors.New("OpenCostDataSource cannot be nil")
+	}
+
+	km := &KubeModel{
+		ds:         dataSource,
+		clusterUID: clusterUID,
+	}
+
+	log.Debugf("NewKubeModel(%s)", km.clusterUID)
+
+	return km, nil
+}
+
+// ComputeKubeModelSet uses the KubeModel's data source to compute a KubeModelSet
+// for the window defined by the given start and end times. The KubeModels
+// returned are unaggregated (i.e. down to the container level).
+func (km *KubeModel) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
+	// 1. Initialize new KubeModelSet for requested Window
+	kms := kubemodel.NewKubeModelSet(start, end)
+
+	// 2. Query CostModel for each set of objects
+	var err error
+
+	// 2.1 Compute Cluster
+	err = km.computeCluster(kms, start, end)
+	if err != nil {
+		kms.Error(err)
+		return kms, fmt.Errorf("error computing kubemodel.Cluster for (%s, %s): %w", start.Format(logTimeFmt), end.Format(logTimeFmt), err)
+	}
+
+	// 2.2 Compute Namespaces
+	err = km.computeNamespaces(kms, start, end)
+	if err != nil {
+		kms.Error(err)
+	}
+
+	// 2.3 Compute ResourceQuotas
+	err = km.computeResourceQuotas(kms, start, end)
+	if err != nil {
+		kms.Error(err)
+	}
+
+	// 3. Mark KubeModelSet as completed
+	kms.Metadata.CompletedAt = time.Now().UTC()
+
+	return kms, nil
+}
+
+func (km *KubeModel) computeCluster(kms *kubemodel.KubeModelSet, start, end time.Time) error {
+	kms.Cluster = &kubemodel.Cluster{
+		UID:  km.clusterUID,
+		Name: env.GetClusterID(),
+	}
+
+	grp := source.NewQueryGroup()
+	metrics := km.ds.Metrics()
+	clusterUptimeResultFuture := source.WithGroup(grp, metrics.QueryClusterUptime(start, end))
+
+	clusterUptimeResult, _ := clusterUptimeResultFuture.Await()
+
+	if len(clusterUptimeResult) != 1 {
+		kms.Errorf("%d clusters returned from the cluster uptime query", len(clusterUptimeResult))
+	}
+
+	for _, res := range clusterUptimeResult {
+		if res.UID == km.clusterUID {
+			s, e := res.GetStartEnd(start, end, km.ds.Resolution())
+			kms.Cluster.Start = s
+			kms.Cluster.End = e
+		}
+	}
+
+	return nil
+}
+
+func (km *KubeModel) computeNamespaces(kms *kubemodel.KubeModelSet, start, end time.Time) error {
+	grp := source.NewQueryGroup()
+	metrics := km.ds.Metrics()
+
+	nsUptimeResultFuture := source.WithGroup(grp, metrics.QueryNamespaceUptime(start, end))
+	nsLabelsResultFuture := source.WithGroup(grp, metrics.QueryNamespaceLabels(start, end))
+	nsAnnosResultFuture := source.WithGroup(grp, metrics.QueryNamespaceAnnotations(start, end))
+
+	nsUptimeResult, _ := nsUptimeResultFuture.Await()
+	nsLabelsResult, _ := nsLabelsResultFuture.Await()
+	nsAnnosResult, _ := nsAnnosResultFuture.Await()
+
+	for _, res := range nsLabelsResult {
+		err := kms.RegisterNamespace(res.UID, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering namespace (%s, %s): %s", res.UID, res.Namespace, err)
+			continue
+		}
+		kms.Namespaces[res.UID].Labels = res.Labels
+	}
+
+	for _, res := range nsAnnosResult {
+		err := kms.RegisterNamespace(res.UID, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering namespace (%s, %s): %s", res.UID, res.Namespace, err)
+			continue
+		}
+		kms.Namespaces[res.UID].Annotations = res.Annotations
+	}
+
+	for _, res := range nsUptimeResult {
+		if _, ok := kms.Namespaces[res.UID]; !ok {
+			log.Warnf("could not find namespace with uid '%s'", res.UID)
+			continue
+		}
+		s, e := res.GetStartEnd(start, end, km.ds.Resolution())
+		kms.Namespaces[res.UID].Start = s
+		kms.Namespaces[res.UID].End = e
+	}
+
+	return nil
+}
+
+func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, end time.Time) error {
+	grp := source.NewQueryGroup()
+	metrics := km.ds.Metrics()
+
+	rqUptimeResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaUptime(start, end))
+
+	// spec.hard.requests
+	rqSpecCPURequestAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecCPURequestAverage(start, end))
+	rqSpecCPURequestMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecCPURequestMax(start, end))
+	rqSpecRAMRequestAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecRAMRequestAverage(start, end))
+	rqSpecRAMRequestMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecRAMRequestMax(start, end))
+
+	// spec.hard.limits
+	rqSpecCPULimitAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecCPULimitAverage(start, end))
+	rqSpecCPULimitMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecCPULimitMax(start, end))
+	rqSpecRAMLimitAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecRAMLimitAverage(start, end))
+	rqSpecRAMLimitMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaSpecRAMLimitMax(start, end))
+
+	// status.used.requests
+	rqStatusUsedCPURequestAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedCPURequestAverage(start, end))
+	rqStatusUsedCPURequestMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedCPURequestMax(start, end))
+	rqStatusUsedRAMRequestAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedRAMRequestAverage(start, end))
+	rqStatusUsedRAMRequestMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedRAMRequestMax(start, end))
+
+	// status.used.limits
+	rqStatusUsedCPULimitAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedCPULimitAverage(start, end))
+	rqStatusUsedCPULimitMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedCPULimitMax(start, end))
+	rqStatusUsedRAMLimitAverageResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedRAMLimitAverage(start, end))
+	rqStatusUsedRAMLimitMaxResultFuture := source.WithGroup(grp, metrics.QueryResourceQuotaStatusUsedRAMLimitMax(start, end))
+
+	rqSpecCPURequestAverageResult, _ := rqSpecCPURequestAverageResultFuture.Await()
+	for _, res := range rqSpecCPURequestAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetRequest(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+	}
+
+	rqSpecCPURequestMaxResult, _ := rqSpecCPURequestMaxResultFuture.Await()
+	for _, res := range rqSpecCPURequestMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetRequest(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+	}
+
+	rqSpecRAMRequestAverageResult, _ := rqSpecRAMRequestAverageResultFuture.Await()
+	for _, res := range rqSpecRAMRequestAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetRequest(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, res.Data[0].Value)
+	}
+
+	rqSpecRAMRequestMaxResult, _ := rqSpecRAMRequestMaxResultFuture.Await()
+	for _, res := range rqSpecRAMRequestMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetRequest(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, res.Data[0].Value)
+	}
+
+	rqSpecCPULimitAverageResult, _ := rqSpecCPULimitAverageResultFuture.Await()
+	for _, res := range rqSpecCPULimitAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetLimit(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+	}
+
+	rqSpecCPULimitMaxResult, _ := rqSpecCPULimitMaxResultFuture.Await()
+	for _, res := range rqSpecCPULimitMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetLimit(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+	}
+
+	rqSpecRAMLimitAverageResult, _ := rqSpecRAMLimitAverageResultFuture.Await()
+	for _, res := range rqSpecRAMLimitAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetLimit(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, res.Data[0].Value)
+	}
+
+	rqSpecRAMLimitMaxResult, _ := rqSpecRAMLimitMaxResultFuture.Await()
+	for _, res := range rqSpecRAMLimitMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Spec.Hard.SetLimit(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, res.Data[0].Value)
+	}
+
+	rqStatusUsedCPURequestAverageResult, _ := rqStatusUsedCPURequestAverageResultFuture.Await()
+	for _, res := range rqStatusUsedCPURequestAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Status.Used.SetRequest(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+	}
+
+	rqStatusUsedCPURequestMaxResult, _ := rqStatusUsedCPURequestMaxResultFuture.Await()
+	for _, res := range rqStatusUsedCPURequestMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Status.Used.SetRequest(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+	}
+
+	rqStatusUsedRAMRequestAverageResult, _ := rqStatusUsedRAMRequestAverageResultFuture.Await()
+	for _, res := range rqStatusUsedRAMRequestAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Status.Used.SetRequest(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, res.Data[0].Value)
+	}
+
+	rqStatusUsedRAMRequestMaxResult, _ := rqStatusUsedRAMRequestMaxResultFuture.Await()
+	for _, res := range rqStatusUsedRAMRequestMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Status.Used.SetRequest(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, res.Data[0].Value)
+	}
+
+	rqStatusUsedCPULimitAverageResult, _ := rqStatusUsedCPULimitAverageResultFuture.Await()
+	for _, res := range rqStatusUsedCPULimitAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Status.Used.SetLimit(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+	}
+
+	rqStatusUsedCPULimitMaxResult, _ := rqStatusUsedCPULimitMaxResultFuture.Await()
+	for _, res := range rqStatusUsedCPULimitMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		mcpu := res.Data[0].Value * 1000
+		kms.ResourceQuotas[res.UID].Status.Used.SetLimit(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+	}
+
+	rqStatusUsedRAMLimitAverageResult, _ := rqStatusUsedRAMLimitAverageResultFuture.Await()
+	for _, res := range rqStatusUsedRAMLimitAverageResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Status.Used.SetLimit(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, res.Data[0].Value)
+	}
+
+	rqStatusUsedRAMLimitMaxResult, _ := rqStatusUsedRAMLimitMaxResultFuture.Await()
+	for _, res := range rqStatusUsedRAMLimitMaxResult {
+		err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
+		if err != nil {
+			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
+			continue
+		}
+
+		kms.ResourceQuotas[res.UID].Status.Used.SetLimit(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, res.Data[0].Value)
+	}
+
+	rqUptimeResult, _ := rqUptimeResultFuture.Await()
+	for _, res := range rqUptimeResult {
+		if _, ok := kms.ResourceQuotas[res.UID]; !ok {
+			log.Warnf("could not find resource quota with uid '%s'", res.UID)
+			continue
+		}
+		s, e := res.GetStartEnd(start, end, km.ds.Resolution())
+		kms.ResourceQuotas[res.UID].Start = s
+		kms.ResourceQuotas[res.UID].End = e
+	}
+
+	return nil
+}
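
computeResourceQuotas repeats the same register-then-set loop sixteen times, varying only the awaited future, the destination (Spec.Hard vs. Status.Used), the setter (SetRequest vs. SetLimit), the resource/unit pair, the stat, and the millicore scaling. It also indexes res.Data[0] without a length check, which will panic on an empty series. A hedged sketch of a table-driven alternative (QuotaResult is a stand-in for the actual result element type; the apply helper is hypothetical):

// QuotaResult mirrors the fields the loops above read from each result.
type QuotaResult struct {
	UID, ResourceQuota, Namespace string
	Data                          []struct{ Value float64 }
}

// apply collapses the sixteen near-identical loops into one helper; set
// captures the destination list (Spec.Hard vs. Status.Used), the setter
// (SetRequest vs. SetLimit), and the resource/unit/stat arguments.
func apply(kms *kubemodel.KubeModelSet, results []QuotaResult, scale float64,
	set func(rq *kubemodel.ResourceQuota, value float64)) {
	for _, res := range results {
		if err := kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace); err != nil {
			log.Warnf("error registering resource quota (%s, %s, %s): %s", res.UID, res.ResourceQuota, res.Namespace, err)
			continue
		}
		if len(res.Data) == 0 { // guard the Data[0] access taken on faith above
			continue
		}
		set(kms.ResourceQuotas[res.UID], res.Data[0].Value*scale)
	}
}

// Usage for one of the sixteen cases, assuming the same future as above:
//   result, _ := rqSpecCPURequestAverageResultFuture.Await()
//   apply(kms, result, 1000, func(rq *kubemodel.ResourceQuota, v float64) {
//       rq.Spec.Hard.SetRequest(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, v)
//   })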