Niko Kovacevic 5 месяцев назад
Родитель
Commit
a0917b38ee

+ 0 - 1
core/go.sum

@@ -253,7 +253,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=

+ 11 - 0
core/pkg/exporter/pathing/kubemodelpath.go

@@ -5,6 +5,7 @@ import (
 	"path"
 	"time"
 
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/pipelines"
 )
@@ -25,10 +26,16 @@ type KubeModelStoragePathFormatter struct {
 }
 
 func NewKubeModelStoragePathFormatter(rootDir, clusterId, resolution string) (StoragePathFormatter[opencost.Window], error) {
+	// TODO remove
+	log.Infof("[KM] NewKubeModelStoragePathFormatter(%s, %s, %s)", rootDir, clusterId, resolution)
+
 	if clusterId == "" {
 		return nil, fmt.Errorf("cluster id cannot be empty")
 	}
 
+	// TODO remove
+	log.Infof("[KM] pathing: dir: %s", path.Join(rootDir, clusterId, pipelines.KubeModelPipelineName, resolution))
+
 	return &KubeModelStoragePathFormatter{
 		dir: path.Join(
 			rootDir,
@@ -48,6 +55,10 @@ func (kmspf *KubeModelStoragePathFormatter) Dir() string {
 //
 //	<root>/<clusterid>/kubemodel/<resolution>/<YYYY>/<MM>/<DD>/<prefix>.<YYYYMMDDHHiiSS>.<fileExt>
 func (kmspf *KubeModelStoragePathFormatter) ToFullPath(prefix string, window opencost.Window, fileExt string) string {
+
+	// TODO remove
+	log.Infof("[KM] pathing: full path (%s): %s", window.String(), path.Join(kmspf.dir, window.Start().Format(KubeModelDateDirTimeFormat), toKubeModelFileName(prefix, window.Start(), fileExt)))
+
 	return path.Join(
 		kmspf.dir,
 		window.Start().Format(KubeModelDateDirTimeFormat),

+ 6 - 0
core/pkg/opencost/exporter/controllers.go

@@ -141,11 +141,17 @@ func NewPipelineExportControllers(clusterId string, store storage.Storage, cm Co
 	kubeModelExportControllers := []*export.ComputeExportController[kubemodel.KubeModelSet]{}
 
 	for _, res := range config.KubeModelPipelineResolutions {
+		// TODO remove
+		log.Infof("[KM] pipeline: res: %s", res)
+
 		if res < sourceResolution {
 			log.Warnf("Configured KubeModel pipeline resolution %dm is less than source resolution %dm. Not configuring the exporter for this resolution.", int64(res.Minutes()), int64(sourceResolution.Minutes()))
 			continue
 		}
 
+		// TODO remove
+		log.Infof("[KM] NewComputePipelineExportController(%s, _, _ %s)", clusterId, res)
+
 		kubeModelController, err := NewComputePipelineExportController(clusterId, store, kubeModelSource, res)
 		if err != nil {
 			log.Errorf("Failed to create KubeModel export controller for resolution: %s - %v", timeutil.DurationString(res), err)

+ 2 - 1
go.mod

@@ -83,6 +83,7 @@ require (
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/google/jsonschema-go v0.3.0 // indirect
+	github.com/google/martian/v3 v3.3.3 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/minio/crc64nvme v1.0.1 // indirect
 	github.com/minio/minio-go/v7 v7.0.88 // indirect
@@ -155,7 +156,7 @@ require (
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/flatbuffers v23.5.26+incompatible // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
-	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0
 	github.com/google/s2a-go v0.1.9 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
 	github.com/googleapis/gax-go/v2 v2.15.0 // indirect

+ 52 - 0
pkg/kubemodel/kubemodel.go

@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/model/kubemodel"
 	"github.com/opencost/opencost/core/pkg/source"
 )
@@ -38,6 +39,9 @@ func NewKubeModel(dataSource source.OpenCostDataSource) (*KubeModel, error) {
 // for the window defined by the given start and end times. The KubeModels
 // returned are unaggregated (i.e. down to the container level).
 func (km *KubeModel) ComputeKubeModelSet(start, end time.Time) (*kubemodel.KubeModelSet, error) {
+	// TODO remove
+	log.Infof("[KM] ComputeKubeModelSet(%s, %s)", start, end)
+
 	// 1. Initialize new KubeModelSet for requested Window
 	kms := kubemodel.NewKubeModelSet(start, end)
 
@@ -154,6 +158,9 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Requests.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecCPURequestAvg: %d", start, end, res.UID, mcpu)
 	}
 
 	rqSpecCPURequestMaxResult, _ := rqSpecCPURequestMaxResultFuture.Await()
@@ -161,18 +168,27 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Requests.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecCPURequestMax: %d", start, end, res.UID, mcpu)
 	}
 
 	rqSpecRAMRequestAverageResult, _ := rqSpecRAMRequestAverageResultFuture.Await()
 	for _, res := range rqSpecRAMRequestAverageResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Requests.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecRAMRequestAvg: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqSpecRAMRequestMaxResult, _ := rqSpecRAMRequestMaxResultFuture.Await()
 	for _, res := range rqSpecRAMRequestMaxResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Requests.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecRAMRequestMax: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqSpecCPULimitAverageResult, _ := rqSpecCPULimitAverageResultFuture.Await()
@@ -180,6 +196,9 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Limits.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecCPULimitAvg: %d", start, end, res.UID, mcpu)
 	}
 
 	rqSpecCPULimitMaxResult, _ := rqSpecCPULimitMaxResultFuture.Await()
@@ -187,18 +206,27 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Limits.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecCPULimitMax: %d", start, end, res.UID, mcpu)
 	}
 
 	rqSpecRAMLimitAverageResult, _ := rqSpecRAMLimitAverageResultFuture.Await()
 	for _, res := range rqSpecRAMLimitAverageResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Limits.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecRAMLimitAvg: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqSpecRAMLimitMaxResult, _ := rqSpecRAMLimitMaxResultFuture.Await()
 	for _, res := range rqSpecRAMLimitMaxResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Spec.Hard.Limits.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].SpecRAMLimitMax: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqStatusUsedCPURequestAverageResult, _ := rqStatusUsedCPURequestAverageResultFuture.Await()
@@ -206,6 +234,9 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Status.Used.Requests.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedCPURequestAvg: %d", start, end, res.UID, mcpu)
 	}
 
 	rqStatusUsedCPURequestMaxResult, _ := rqStatusUsedCPURequestMaxResultFuture.Await()
@@ -213,18 +244,27 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Status.Used.Requests.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedCPURequestMax: %d", start, end, res.UID, mcpu)
 	}
 
 	rqStatusUsedRAMRequestAverageResult, _ := rqStatusUsedRAMRequestAverageResultFuture.Await()
 	for _, res := range rqStatusUsedRAMRequestAverageResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Status.Used.Requests.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedRAMRequestAvg: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqStatusUsedRAMRequestMaxResult, _ := rqStatusUsedRAMRequestMaxResultFuture.Await()
 	for _, res := range rqStatusUsedRAMRequestMaxResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Status.Used.Requests.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedRAMRequestMax: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqStatusUsedCPULimitAverageResult, _ := rqStatusUsedCPULimitAverageResultFuture.Await()
@@ -232,6 +272,9 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Status.Used.Limits.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatAvg, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedCPULimitAvg: %d", start, end, res.UID, mcpu)
 	}
 
 	rqStatusUsedCPULimitMaxResult, _ := rqStatusUsedCPULimitMaxResultFuture.Await()
@@ -239,18 +282,27 @@ func (km *KubeModel) computeResourceQuotas(kms *kubemodel.KubeModelSet, start, e
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		mcpu := uint64(res.Data[0].Value * 1000)
 		kms.ResourceQuotas[res.UID].Status.Used.Limits.Set(kubemodel.ResourceCPU, kubemodel.UnitMillicore, kubemodel.StatMax, mcpu)
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedCPULimitMax: %d", start, end, res.UID, mcpu)
 	}
 
 	rqStatusUsedRAMLimitAverageResult, _ := rqStatusUsedRAMLimitAverageResultFuture.Await()
 	for _, res := range rqStatusUsedRAMLimitAverageResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Status.Used.Limits.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatAvg, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedRAMLimitAvg: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	rqStatusUsedRAMLimitMaxResult, _ := rqStatusUsedRAMLimitMaxResultFuture.Await()
 	for _, res := range rqStatusUsedRAMLimitMaxResult {
 		kms.RegisterResourceQuota(res.UID, res.ResourceQuota, res.Namespace)
 		kms.ResourceQuotas[res.UID].Status.Used.Limits.Set(kubemodel.ResourceMemory, kubemodel.UnitByte, kubemodel.StatMax, uint64(res.Data[0].Value))
+
+		// TODO remove
+		log.Infof("[KM] ComputeKubeModelSet(%s, %s): ResourceQuotas[%s].StatusUsedRAMLimitMax: %d", start, end, res.UID, uint64(res.Data[0].Value))
 	}
 
 	// TODO: query for (Start, End)