Explorar o código

Revert "Code Clean Up" (#3267)

Alex Meijer hai 9 meses
pai
achega
3e8777889c
Modificáronse 69 ficheiros con 1653 adicións e 875 borrados
  1. 1 1
      core/pkg/diagnostics/exporter/exporter.go
  2. 0 53
      core/pkg/env/core.go
  3. 2 2
      core/pkg/exporter/exporter_test.go
  4. 2 7
      core/pkg/exporter/pathing/bingenpath.go
  5. 3 3
      core/pkg/exporter/pathing/eventpath.go
  6. 30 24
      core/pkg/exporter/pathing/path_test.go
  7. 1 1
      core/pkg/heartbeat/exporter/exporter.go
  8. 2 2
      core/pkg/heartbeat/exporter/heartbeat_test.go
  9. 1 1
      core/pkg/opencost/allocation.go
  10. 24 24
      core/pkg/opencost/allocation_test.go
  11. 288 3
      core/pkg/opencost/asset.go
  12. 203 0
      core/pkg/opencost/asset_test.go
  13. 9 9
      core/pkg/opencost/exporter/exporter_test.go
  14. 1 1
      core/pkg/opencost/exporter/exporters.go
  15. 4 4
      core/pkg/opencost/opencost_codecs_test.go
  16. 66 0
      core/pkg/opencost/status.go
  17. 43 2
      core/pkg/opencost/totals.go
  18. 5 15
      core/pkg/pipelines/name.go
  19. 0 31
      core/pkg/storage/storefactory.go
  20. 0 32
      core/pkg/util/apiutil/apiutil.go
  21. 0 51
      core/pkg/util/apiutil/loglevel.go
  22. 1 1
      core/pkg/util/timeutil/timeutil.go
  23. 1 2
      modules/collector-source/pkg/collector/config.go
  24. 5 0
      modules/collector-source/pkg/env/collectorenv.go
  25. 41 10
      modules/prometheus-source/pkg/env/promenv.go
  26. 3 4
      modules/prometheus-source/pkg/prom/config.go
  27. 1 1
      modules/prometheus-source/pkg/prom/metricsquerier.go
  28. 16 16
      pkg/cloud/alibaba/provider.go
  29. 20 20
      pkg/cloud/aws/provider.go
  30. 2 3
      pkg/cloud/azure/provider.go
  31. 1 1
      pkg/cloud/azure/storagebillingparser.go
  32. 4 1
      pkg/cloud/config/controller.go
  33. 0 2
      pkg/cloud/config/controller_test.go
  34. 7 2
      pkg/cloud/config/watcher.go
  35. 8 5
      pkg/cloud/gcp/provider.go
  36. 2 3
      pkg/cloud/oracle/provider.go
  37. 1 2
      pkg/cloud/otc/provider.go
  38. 47 22
      pkg/cloud/provider/cloud_test.go
  39. 2 2
      pkg/cloud/provider/customprovider.go
  40. 88 3
      pkg/cloud/provider/provider.go
  41. 8 2
      pkg/cloud/provider/providerconfig.go
  42. 2 3
      pkg/cloud/scaleway/provider.go
  43. 1 1
      pkg/cloudcost/pipelineservice.go
  44. 2 2
      pkg/clustercache/clustercache.go
  45. 4 2
      pkg/clustercache/clustercache2.go
  46. 11 5
      pkg/cmd/agent/agent.go
  47. 3 3
      pkg/cmd/commands.go
  48. 0 31
      pkg/cmd/costmodel/config.go
  49. 77 12
      pkg/cmd/costmodel/costmodel.go
  50. 53 3
      pkg/config/configmanager.go
  51. 9 9
      pkg/costmodel/allocation_helpers.go
  52. 14 15
      pkg/costmodel/cluster.go
  53. 17 17
      pkg/costmodel/cluster_helpers.go
  54. 6 6
      pkg/costmodel/costmodel.go
  55. 8 8
      pkg/costmodel/key.go
  56. 2 2
      pkg/costmodel/networkinsight.go
  57. 1 2
      pkg/costmodel/nodeclientconfig.go
  58. 23 8
      pkg/costmodel/router.go
  59. 0 79
      pkg/env/cloudcost.go
  60. 0 38
      pkg/env/cloudcost_test.go
  61. 0 81
      pkg/env/costmodel_test.go
  62. 239 46
      pkg/env/costmodelenv.go
  63. 236 0
      pkg/env/costmodelenv_test.go
  64. 0 0
      pkg/env/kubemetricsenv.go
  65. 0 41
      pkg/env/nodestats.go
  66. 0 1
      pkg/env/nodestats_test.go
  67. 0 44
      pkg/env/opencost.go
  68. 0 47
      pkg/env/opencost_test.go
  69. 2 1
      pkg/metrics/metricsconfig.go

+ 1 - 1
core/pkg/diagnostics/exporter/exporter.go

@@ -10,7 +10,7 @@ import (
 
 // NewDiagnosticExporter creates a new `StorageExporter[DiagnosticsRunReport]` instance for exporting diagnostic run events.
 func NewDiagnosticExporter(clusterId string, applicationName string, storage storage.Storage) exporter.EventExporter[diagnostics.DiagnosticsRunReport] {
-	pathing, err := pathing.NewEventStoragePathFormatter(applicationName, clusterId, diagnostics.DiagnosticsEventName)
+	pathing, err := pathing.NewEventStoragePathFormatter("federated", clusterId, diagnostics.DiagnosticsEventName, applicationName)
 	if err != nil {
 		log.Errorf("failed to create pathing formatter: %v", err)
 		return nil

+ 0 - 53
core/pkg/env/core.go

@@ -1,53 +0,0 @@
-package env
-
-import (
-	"path"
-)
-
-const DefaultConfigPath = "/var/configs"
-const DefaultStorageFile = "federated-store.yaml"
-
-const (
-	APIPortEnvVar    = "API_PORT"
-	ClusterIDEnvVar  = "CLUSTER_ID"
-	ConfigPathEnvVar = "CONFIG_PATH"
-
-	PProfEnabledEnvVar = "PPROF_ENABLED"
-
-	InstallNamespaceEnvVar = "INSTALL_NAMESPACE"
-)
-
-// GetAPIPort returns the environment variable value for APIPortEnvVar which
-// is the port number the API is available on.
-func GetAPIPortWithDefault(def int) int {
-	return GetInt(APIPortEnvVar, def)
-}
-
-// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
-// configurable identifier used for multi-cluster metric emission.
-func GetClusterID() string {
-	return Get(ClusterIDEnvVar, "")
-}
-
-// GetConfigPath returns the environment variable value for ConfigPathEnvVar which represents the cost
-// model configuration path
-func GetConfigPath() string {
-	return Get(ConfigPathEnvVar, DefaultConfigPath)
-}
-
-func GetPathFromConfig(subPaths ...string) string {
-	subPath := path.Join(subPaths...)
-	return path.Join(GetConfigPath(), subPath)
-}
-
-func GetDefaultStorageConfigFilePath() string {
-	return path.Join(GetConfigPath(), DefaultStorageFile)
-}
-
-func IsPProfEnabled() bool {
-	return GetBool(PProfEnabledEnvVar, false)
-}
-
-func GetInstallNamespace(def string) string {
-	return Get(InstallNamespaceEnvVar, def)
-}

+ 2 - 2
core/pkg/exporter/exporter_test.go

@@ -24,7 +24,7 @@ type TestData struct {
 func TestStorageExporters(t *testing.T) {
 	t.Run("test event storage exporter", func(t *testing.T) {
 		store := storage.NewMemoryStorage()
-		p, err := pathing.NewEventStoragePathFormatter("root", TestClusterId, TestEventName)
+		p, err := pathing.NewEventStoragePathFormatter("federated", TestClusterId, TestEventName)
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -65,7 +65,7 @@ func TestStorageExporters(t *testing.T) {
 	t.Run("test compute storage exporter", func(t *testing.T) {
 		res := 24 * time.Hour
 		store := storage.NewMemoryStorage()
-		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, &res)
+		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, &res)
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}

+ 2 - 7
core/pkg/exporter/pathing/bingenpath.go

@@ -11,14 +11,13 @@ import (
 )
 
 const (
-	defaultRootDir string = "federated"
 	baseStorageDir string = "etl/bingen"
 )
 
 // BingenStoragePathFormatter is an implementation of the StoragePathFormatter interface for
 // a cluster separated storage path of the format:
 //
-//	<root>/<cluster>/etl/bingen/<pipeline>/<resolution>/<epoch-start>-<epoch-end>
+//	<root>/federated/<cluster>/etl/bingen/<pipeline>/<resolution>/<epoch-start>-<epoch-end>
 type BingenStoragePathFormatter struct {
 	rootDir    string
 	clusterId  string
@@ -26,10 +25,6 @@ type BingenStoragePathFormatter struct {
 	resolution string
 }
 
-func NewDefaultStoragePathFormatter(clusterId, pipeline string, resolution *time.Duration) (StoragePathFormatter[opencost.Window], error) {
-	return NewBingenStoragePathFormatter(defaultRootDir, clusterId, pipeline, resolution)
-}
-
 // NewBingenStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
 // with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
 // structure, provide a `nil` resolution.
@@ -73,7 +68,7 @@ func (bsf *BingenStoragePathFormatter) Dir() string {
 
 // ToFullPath returns the full path to a file name within the storage directory using the format:
 //
-//	<root>/<cluster>/etl/bingen/<pipeline>/<resolution>/<prefix>.<start-epoch>-<end-epoch>
+//	<root>/federated/<cluster>/etl/bingen/<pipeline>/<resolution>/<prefix>.<start-epoch>-<end-epoch>
 func (bsf *BingenStoragePathFormatter) ToFullPath(prefix string, window opencost.Window, fileExt string) string {
 	fileName := toBingenFileName(prefix, window, fileExt)
 

+ 3 - 3
core/pkg/exporter/pathing/eventpath.go

@@ -14,7 +14,7 @@ const EventStorageTimeFormat = "20060102150405"
 // EventStoragePathFormatter is an implementation of the StoragePathFormatter interface for
 // a cluster separated storage path of the format:
 //
-//	<root>/<cluster>/<event>/<sub-paths...>/YYYYMMDDHHmmss
+//	<root>/federated/<cluster>/<event>/<sub-paths...>/YYYYMMDDHHmmss
 type EventStoragePathFormatter struct {
 	rootDir   string
 	clusterId string
@@ -22,7 +22,7 @@ type EventStoragePathFormatter struct {
 	subPaths  []string
 }
 
-// NewEventStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
+// NewEventStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
 // with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
 // structure, provide a `nil` resolution.
 func NewEventStoragePathFormatter(rootDir, clusterId, event string, subPaths ...string) (StoragePathFormatter[time.Time], error) {
@@ -65,7 +65,7 @@ func (espf *EventStoragePathFormatter) Dir() string {
 
 // ToFullPath returns the full path to a file name within the storage directory using the format:
 //
-//	<root>/<cluster>/<event>/YYYYMMDDHHmm.json
+//	<root>/federated/<cluster>/<event>/YYYYMMDDHHmm.json
 func (espf *EventStoragePathFormatter) ToFullPath(prefix string, timestamp time.Time, fileExt string) string {
 	fileName := toEventFileName(prefix, timestamp, fileExt)
 

+ 30 - 24
core/pkg/exporter/pathing/path_test.go

@@ -1,7 +1,6 @@
 package pathing
 
 import (
-	"fmt"
 	"testing"
 	"time"
 
@@ -11,6 +10,7 @@ import (
 func TestBingenPathFormatter(t *testing.T) {
 	type testCase struct {
 		name       string
+		rootPath   string
 		clusterID  string
 		pipeline   string
 		resolution *time.Duration
@@ -21,57 +21,63 @@ func TestBingenPathFormatter(t *testing.T) {
 	testCases := []testCase{
 		{
 			name:       "no resolution",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: nil,
 			prefix:     "",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1704110400-1704114000", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/1704110400-1704114000",
 		},
 		{
 			name:       "with resolution",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{1 * time.Hour}[0],
 			prefix:     "",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1h/1704110400-1704114000", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/1h/1704110400-1704114000",
 		},
 		{
 			name:       "no resolution with prefix",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: nil,
 			prefix:     "test",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/test.1704110400-1704114000", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/test.1704110400-1704114000",
 		},
 		{
 			name:       "with resolution with prefix",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{1 * time.Hour}[0],
 			prefix:     "test",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1h/test.1704110400-1704114000", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/1h/test.1704110400-1704114000",
 		},
 		{
 			name:       "daily resolution",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{24 * time.Hour}[0],
 			prefix:     "",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1d/1704110400-1704196800", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/1d/1704110400-1704196800",
 		},
 		{
 			name:       "weekly resolution",
+			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{7 * 24 * time.Hour}[0],
 			prefix:     "",
-			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1w/1704110400-1704715200", defaultRootDir, baseStorageDir),
+			expected:   "federated/cluster-a/etl/bingen/allocation/1w/1704110400-1704715200",
 		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			pathing, err := NewDefaultStoragePathFormatter(tc.clusterID, tc.pipeline, tc.resolution)
+			pathing, err := NewBingenStoragePathFormatter(tc.rootPath, tc.clusterID, tc.pipeline, tc.resolution)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -105,83 +111,83 @@ func TestEventPathFormatter(t *testing.T) {
 	testCases := []testCase{
 		{
 			name:      "with root path with file extension",
-			rootPath:  "/tmp/root",
+			rootPath:  "/tmp/federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "/tmp/root/cluster-a/heartbeat/20240101124000.json",
+			expected:  "/tmp/federated/cluster-a/heartbeat/20240101124000.json",
 		},
 		{
 			name:      "with file extension",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "root/cluster-a/heartbeat/20240101124000.json",
+			expected:  "federated/cluster-a/heartbeat/20240101124000.json",
 		},
 		{
 			name:      "with root path with file extension with sub-paths",
-			rootPath:  "/tmp/root",
+			rootPath:  "/tmp/federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo", "bar"},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "/tmp/root/cluster-a/heartbeat/foo/bar/20240101124000.json",
+			expected:  "/tmp/federated/cluster-a/heartbeat/foo/bar/20240101124000.json",
 		},
 		{
 			name:      "without file extension",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "",
-			expected:  "root/cluster-a/heartbeat/20240101124000",
+			expected:  "federated/cluster-a/heartbeat/20240101124000",
 		},
 		{
 			name:      "with prefix with file extension",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "test",
 			fileExt:   "json",
-			expected:  "root/cluster-a/heartbeat/test.20240101124000.json",
+			expected:  "federated/cluster-a/heartbeat/test.20240101124000.json",
 		},
 		{
 			name:      "with prefix with file extension with sub-paths",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo", "bar", "baz"},
 			prefix:    "test",
 			fileExt:   "json",
-			expected:  "root/cluster-a/heartbeat/foo/bar/baz/test.20240101124000.json",
+			expected:  "federated/cluster-a/heartbeat/foo/bar/baz/test.20240101124000.json",
 		},
 		{
 			name:      "with prefix without file extension",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "test",
 			fileExt:   "",
-			expected:  "root/cluster-a/heartbeat/test.20240101124000",
+			expected:  "federated/cluster-a/heartbeat/test.20240101124000",
 		},
 		{
 			name:      "with prefix without file extension with sub-paths",
-			rootPath:  "root",
+			rootPath:  "federated",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo"},
 			prefix:    "test",
 			fileExt:   "",
-			expected:  "root/cluster-a/heartbeat/foo/test.20240101124000",
+			expected:  "federated/cluster-a/heartbeat/foo/test.20240101124000",
 		},
 	}
 

+ 1 - 1
core/pkg/heartbeat/exporter/exporter.go

@@ -10,7 +10,7 @@ import (
 
 // NewHeartbeatExporter creates a new `StorageExporter[Heartbeat]` instance for exporting Heartbeat events.
 func NewHeartbeatExporter(clusterId string, applicationName string, storage storage.Storage) exporter.EventExporter[heartbeat.Heartbeat] {
-	pathing, err := pathing.NewEventStoragePathFormatter(applicationName, clusterId, heartbeat.HeartbeatEventName)
+	pathing, err := pathing.NewEventStoragePathFormatter("federated", clusterId, heartbeat.HeartbeatEventName, applicationName)
 	if err != nil {
 		log.Errorf("failed to create pathing formatter: %v", err)
 		return nil

+ 2 - 2
core/pkg/heartbeat/exporter/heartbeat_test.go

@@ -47,7 +47,7 @@ func TestHeartbeatExporter(t *testing.T) {
 	time.Sleep(10 * time.Second)
 	controller.Stop()
 
-	files, _ := store.List(path.Join(MockApplicationName, MockClusterId, heartbeat.HeartbeatEventName))
+	files, _ := store.List(path.Join("federated", MockClusterId, heartbeat.HeartbeatEventName, MockApplicationName))
 	if len(files) == 0 {
 		t.Fatal("No files found in storage")
 	}
@@ -61,7 +61,7 @@ func TestHeartbeatExporter(t *testing.T) {
 	lastCheck := time.Time{}
 
 	for _, f := range fileNames {
-		fpath := filepath.Join(MockApplicationName, MockClusterId, heartbeat.HeartbeatEventName, f)
+		fpath := filepath.Join("federated", MockClusterId, "heartbeat", MockApplicationName, f)
 		data, err := store.Read(fpath)
 		if err != nil {
 			t.Fatalf("Failed to read file %s: %v", fpath, err)

+ 1 - 1
core/pkg/opencost/allocation.go

@@ -2249,7 +2249,7 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		if coeffs[a] > 0 && total > 0 {
 			coeffs[a] /= total
 		} else {
-			log.Warnf("Allocation: invalid values for shared coefficients: %v, %v", coeffs[a], total)
+			log.Warnf("ETL: invalid values for shared coefficients: %v, %v", coeffs[a], total)
 			coeffs[a] = 0.0
 		}
 	}

+ 24 - 24
core/pkg/opencost/allocation_test.go

@@ -527,7 +527,7 @@ func TestAllocationSet_generateKey(t *testing.T) {
 }
 
 func TestNewAllocationSet(t *testing.T) {
-	// TODO niko
+	// TODO niko/etl
 }
 
 func assertAllocationSetTotals(t *testing.T, as *AllocationSet, msg string, err error, length int, totalCost float64) {
@@ -662,7 +662,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 
 	// 3  Share idle
 	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
-	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko)
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko/etl)
 
 	// 4  Share resources
 	// 4a Share namespace ShareEven
@@ -2032,19 +2032,19 @@ func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
 	}
 }
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Clone(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Delete(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_End(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_IdleAllocations(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Insert(t *testing.T) {}
 
 // Asserts that all Allocations within an AllocationSet have a Window that
@@ -2297,34 +2297,34 @@ func TestParcInsert(t *testing.T) {
 	}
 }
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_IsEmpty(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Length(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Map(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_MarshalJSON(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Resolution(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Seconds(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Set(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_Start(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestAllocationSet_TotalCost(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 //func TestNewAllocationSetRange(t *testing.T) {}
 
 func TestAllocationSetRange_AccumulateRepeat(t *testing.T) {
@@ -2779,16 +2779,16 @@ func TestAllocationSetRange_AccumulateBy_Month(t *testing.T) {
 	}
 }
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_AggregateBy(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Append(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Each(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Get(t *testing.T) {}
 
 func TestAllocationSetRange_InsertRange(t *testing.T) {
@@ -2958,7 +2958,7 @@ func TestAllocationSetRange_InsertRange(t *testing.T) {
 	}
 }
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Length(t *testing.T) {}
 
 func TestAllocationSetRange_MarshalJSON(t *testing.T) {
@@ -3016,10 +3016,10 @@ func TestAllocationSetRange_MarshalJSON(t *testing.T) {
 	}
 }
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Slice(t *testing.T) {}
 
-// TODO niko
+// TODO niko/etl
 // func TestAllocationSetRange_Window(t *testing.T) {}
 
 func TestAllocationSetRange_Start(t *testing.T) {

+ 288 - 3
core/pkg/opencost/asset.go

@@ -69,6 +69,190 @@ type Asset interface {
 	fmt.Stringer
 }
 
+// AssetToExternalAllocation converts the given asset to an Allocation, given
+// the Properties to use to aggregate, and the mapping from Allocation property
+// to Asset label. For example, consider this asset:
+//
+// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TODO.
+//
+//	  Cloud {
+//		   TotalCost: 10.00,
+//		   Labels{
+//	      "kubernetes_namespace":"monitoring",
+//		     "env":"prod"
+//		   }
+//	  }
+//
+// Given the following parameters, we expect to return:
+//
+//  1. single-prop full match
+//     aggregateBy = ["namespace"]
+//     => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  2. multi-prop full match
+//     aggregateBy = ["namespace", "label:env"]
+//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+//     => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  3. multi-prop partial match
+//     aggregateBy = ["namespace", "label:foo"]
+//     => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  4. no match
+//     aggregateBy = ["cluster"]
+//     => nil, err
+//
+// TODO:
+//
+//	  Cloud {
+//		   TotalCost: 10.00,
+//		   Labels{
+//	      "kubernetes_namespace":"monitoring",
+//		     "env":"prod"
+//		   }
+//	  }
+//
+// Given the following parameters, we expect to return:
+//
+//  1. single-prop full match
+//     aggregateBy = ["namespace"]
+//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+//     => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  2. multi-prop full match
+//     aggregateBy = ["namespace", "label:env"]
+//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+//     => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  3. multi-prop partial match
+//     aggregateBy = ["namespace", "label:foo"]
+//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+//     => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
+//
+//  4. no match
+//     aggregateBy = ["cluster"]
+//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+//     => nil, err
+//
+// (See asset_test.go for assertions of these examples and more.)
+func AssetToExternalAllocation(asset Asset, aggregateBy []string, labelConfig *LabelConfig) (*Allocation, error) {
+	if asset == nil {
+		return nil, fmt.Errorf("asset is nil")
+	}
+
+	// Use default label config if one is not provided.
+	if labelConfig == nil {
+		labelConfig = NewLabelConfig()
+	}
+
+	// names will collect the slash-separated names accrued by iterating over
+	// aggregateBy and checking the relevant labels.
+	names := []string{}
+
+	// match records whether or not a match was found in the Asset labels,
+	// such that it can genuinely be turned into an external Allocation.
+	match := false
+
+	// props records the relevant Properties to set on the resultant Allocation
+	props := AllocationProperties{}
+
+	// For each aggregation parameter, try to find a match in the asset's
+	// labels, using the labelConfig to translate. For an aggregation parameter
+	// defined by a label (e.g. "label:app") this is simple: look for the label
+	// and use it (e.g. if "app" is a defined label on the asset, then use its
+	// value). For an aggregation parameter defined by a non-label property
+	// (e.g. "namespace") this requires using the labelConfig to look up the
+	// label name associated with that property and to use the value under that
+	// label, if set (e.g. if the aggregation property is "namespace" and the
+	// labelConfig is configured with "namespace_external_label" => "kubens"
+	// and the asset has label "kubens":"kubecost", then file the asset as an
+	// external cost under "kubecost").
+	for _, aggBy := range aggregateBy {
+		name := labelConfig.GetExternalAllocationName(asset.GetLabels(), aggBy)
+
+		if name == "" {
+			// No matching label has been defined in the cost-analyzer label config
+			// relating to the given aggregateBy property.
+			names = append(names, UnallocatedSuffix)
+			continue
+		} else {
+			names = append(names, name)
+			match = true
+
+			// Default labels to an empty map, if necessary
+			if props.Labels == nil {
+				props.Labels = map[string]string{}
+			}
+
+			// Set the corresponding property on props
+			switch aggBy {
+			case AllocationClusterProp:
+				props.Cluster = name
+			case AllocationNodeProp:
+				props.Node = name
+			case AllocationNamespaceProp:
+				props.Namespace = name
+			case AllocationControllerKindProp:
+				props.ControllerKind = name
+			case AllocationControllerProp:
+				props.Controller = name
+			case AllocationPodProp:
+				props.Pod = name
+			case AllocationContainerProp:
+				props.Container = name
+			case AllocationServiceProp:
+				props.Services = []string{name}
+			case AllocationDeploymentProp:
+				props.Controller = name
+				props.ControllerKind = "deployment"
+			case AllocationStatefulSetProp:
+				props.Controller = name
+				props.ControllerKind = "statefulset"
+			case AllocationDaemonSetProp:
+				props.Controller = name
+				props.ControllerKind = "daemonset"
+			case AllocationDepartmentProp:
+				props.Labels[labelConfig.DepartmentLabel] = name
+			case AllocationEnvironmentProp:
+				props.Labels[labelConfig.EnvironmentLabel] = name
+			case AllocationOwnerProp:
+				props.Labels[labelConfig.OwnerLabel] = name
+			case AllocationProductProp:
+				props.Labels[labelConfig.ProductLabel] = name
+			case AllocationTeamProp:
+				props.Labels[labelConfig.TeamLabel] = name
+			default:
+				if strings.HasPrefix(aggBy, "label:") {
+					// Set the corresponding label in props
+					labelName := strings.TrimPrefix(aggBy, "label:")
+					labelValue := strings.TrimPrefix(name, labelName+"=")
+					props.Labels[labelName] = labelValue
+				}
+			}
+		}
+	}
+
+	// If not a single aggregation property generated a matching label property,
+	// then consider the asset ineligible to be treated as an external allocation.
+	if !match {
+		return nil, fmt.Errorf("asset does not qualify as an external allocation")
+	}
+
+	// Use naming to label as an external allocation. See IsExternal() for more.
+	names = append(names, ExternalSuffix)
+
+	// TODO: external allocation: efficiency?
+	// TODO: external allocation: resource totals?
+	return &Allocation{
+		Name:         strings.Join(names, "/"),
+		Properties:   &props,
+		Window:       asset.GetWindow().Clone(),
+		Start:        asset.GetStart(),
+		End:          asset.GetEnd(),
+		ExternalCost: asset.TotalCost(),
+	}, nil
+}
+
 // key is used to determine uniqueness of an Asset, for instance during Insert
 // to determine if two Assets should be combined. Passing `nil` `aggregateBy` indicates
 // that all available `AssetProperty` keys should be used. Passing empty `aggregateBy` indicates that
@@ -1055,7 +1239,7 @@ func (d *Disk) Minutes() float64 {
 	windowMins := d.Window.Minutes()
 
 	if diskMins > windowMins {
-		log.Warnf("Asset: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
+		log.Warnf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
 		diskMins = windowMins
 	}
 
@@ -1515,7 +1699,7 @@ func (n *Network) Minutes() float64 {
 	windowMins := n.Window.Minutes()
 
 	if netMins > windowMins {
-		log.Warnf("Asset: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
+		log.Warnf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
 		netMins = windowMins
 	}
 
@@ -1835,7 +2019,7 @@ func (n *Node) Minutes() float64 {
 	windowMins := n.Window.Minutes()
 
 	if nodeMins > windowMins {
-		log.Warnf("Asset: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
+		log.Warnf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
 		nodeMins = windowMins
 	}
 
@@ -3002,6 +3186,107 @@ func (as *AssetSet) FindMatch(query Asset, aggregateBy []string, labelConfig *La
 	return nil, fmt.Errorf("Asset not found to match %s on %v", query, aggregateBy)
 }
 
+// ReconciliationMatch attempts to find an exact match in the AssetSet on
+// (Category, ProviderID). If a match is found, it returns the Asset with the
+// intent to adjust it. If no match exists, it attempts to find one on only
+// (ProviderID). If that match is found, it returns the Asset with the intent
+// to insert the associated Cloud cost.
+func (as *AssetSet) ReconciliationMatch(query Asset) (Asset, bool, error) {
+	// Full match means matching on (Category, ProviderID)
+	fullMatchProps := []string{string(AssetCategoryProp), string(AssetProviderIDProp)}
+	fullMatchKey, err := key(query, fullMatchProps, nil)
+
+	// This should never happen because we are using enumerated Properties,
+	// but the check is here in case that changes
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Partial match means matching only on (ProviderID)
+	providerIDMatchProps := []string{string(AssetProviderIDProp)}
+	providerIDMatchKey, err := key(query, providerIDMatchProps, nil)
+
+	// This should never happen because we are using enumerated Properties,
+	// but the check is here in case that changes
+	if err != nil {
+		return nil, false, err
+	}
+
+	var providerIDMatch Asset
+	for _, asset := range as.Assets {
+		// Ignore cloud assets when looking for reconciliation matches
+		if asset.Type() == CloudAssetType {
+			continue
+		}
+		if k, err := key(asset, fullMatchProps, nil); err != nil {
+			return nil, false, err
+		} else if k == fullMatchKey {
+			log.DedupedInfof(10, "Asset ETL: Reconciliation[rcnw]: ReconcileRange Match: %s", fullMatchKey)
+			return asset, true, nil
+		}
+		if k, err := key(asset, providerIDMatchProps, nil); err != nil {
+			return nil, false, err
+		} else if k == providerIDMatchKey {
+			// Found a partial match. Save it until after all other options
+			// have been checked for full matches.
+			providerIDMatch = asset
+		}
+	}
+
+	// No full match was found, so return partial match, if found.
+	if providerIDMatch != nil {
+		return providerIDMatch, false, nil
+	}
+
+	return nil, false, fmt.Errorf("Asset not found to match %s", query)
+}
+
+// ReconciliationMatchMap returns a map of the calling AssetSet's Assets, by provider id and category. This data structure
+// allows for reconciliation matching to be done in constant time and prevents duplicate reconciliation.
+func (as *AssetSet) ReconciliationMatchMap() map[string]map[string]Asset {
+	matchMap := make(map[string]map[string]Asset)
+
+	if as == nil {
+		return matchMap
+	}
+
+	for _, asset := range as.Assets {
+		if asset == nil {
+			continue
+		}
+		props := asset.GetProperties()
+		// Ignore assets that cannot be matched when looking for reconciliation matches
+		if props == nil || props.ProviderID == "" {
+			continue
+		}
+
+		// we can't guarantee case in providerID for Azure provider to have map working for all providers,
+		// lower casing providerID  while creating reconciliation map
+		providerID := strings.ToLower(props.ProviderID)
+		if _, ok := matchMap[providerID]; !ok {
+			matchMap[providerID] = make(map[string]Asset)
+		}
+
+		// Check if a match is already in the map
+		if duplicateAsset, ok := matchMap[providerID][props.Category]; ok {
+			log.DedupedWarningf(5, "duplicate asset found when reconciling for %s", props.ProviderID)
+			// if one asset already has adjustment use that one
+			if duplicateAsset.GetAdjustment() == 0 && asset.GetAdjustment() != 0 {
+				matchMap[providerID][props.Category] = asset
+			} else if duplicateAsset.GetAdjustment() != 0 && asset.GetAdjustment() == 0 {
+				matchMap[providerID][props.Category] = duplicateAsset
+				// otherwise use the one with the higher cost
+			} else if duplicateAsset.TotalCost() < asset.TotalCost() {
+				matchMap[providerID][props.Category] = asset
+			}
+		} else {
+			matchMap[providerID][props.Category] = asset
+		}
+
+	}
+	return matchMap
+}
+
 // Get returns the Asset in the AssetSet at the given key, or nil and false
 // if no Asset exists for the given key
 func (as *AssetSet) Get(key string) (Asset, bool) {

+ 203 - 0
core/pkg/opencost/asset_test.go

@@ -913,6 +913,32 @@ func TestAssetSet_InsertMatchingWindow(t *testing.T) {
 	}
 }
 
+func TestAssetSet_ReconciliationMatchMap(t *testing.T) {
+	endYesterday := time.Now().UTC().Truncate(day)
+	startYesterday := endYesterday.Add(-day)
+
+	as := GenerateMockAssetSet(startYesterday, day)
+	matchMap := as.ReconciliationMatchMap()
+
+	// Determine the number of assets by provider ID
+	assetCountByProviderId := make(map[string]int, len(matchMap))
+	for _, a := range as.Assets {
+		if a == nil || a.GetProperties() == nil || a.GetProperties().ProviderID == "" {
+			return
+		}
+		if _, ok := assetCountByProviderId[a.GetProperties().ProviderID]; !ok {
+			assetCountByProviderId[a.GetProperties().ProviderID] = 0
+		}
+		assetCountByProviderId[a.GetProperties().ProviderID] += 1
+	}
+
+	for k, count := range assetCountByProviderId {
+		if len(matchMap[k]) != count {
+			t.Errorf("AssetSet.ReconciliationMatchMap: incorrect asset count for provider id: %s", k)
+		}
+	}
+}
+
 func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 	endYesterday := time.Now().UTC().Truncate(day)
 	startYesterday := endYesterday.Add(-day)
@@ -1032,6 +1058,183 @@ func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 	}, nil)
 }
 
+func TestAssetToExternalAllocation(t *testing.T) {
+	var asset Asset
+	var alloc *Allocation
+	var err error
+
+	labelConfig := NewLabelConfig()
+
+	_, err = AssetToExternalAllocation(asset, []string{"namespace"}, labelConfig)
+	if err == nil {
+		t.Fatalf("expected error due to nil asset; no error returned")
+	}
+
+	// Consider this Asset:
+	//   Cloud {
+	// 	   TotalCost: 10.00,
+	// 	   Labels{
+	//       "kubernetes_namespace":"monitoring",
+	// 	     "env":"prod"
+	// 	   }
+	//   }
+	cloud := NewCloud(ComputeCategory, "abc123", start1, start2, windows[0])
+	cloud.SetLabels(map[string]string{
+		"kubernetes_namespace":        "monitoring",
+		"env":                         "prod",
+		"app":                         "cost-analyzer",
+		"kubernetes_label_app":        "app",
+		"kubernetes_label_department": "department",
+		"kubernetes_label_env":        "env",
+		"kubernetes_label_owner":      "owner",
+		"kubernetes_label_team":       "team",
+	})
+	cloud.Cost = 10.00
+	asset = cloud
+
+	_, err = AssetToExternalAllocation(asset, []string{"namespace"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	_, err = AssetToExternalAllocation(asset, nil, nil)
+	if err == nil {
+		t.Fatalf("expected error due to nil aggregateBy; no error returned")
+	}
+
+	// Given the following parameters, we expect to return:
+	//
+	//   1) single-prop full match
+	//   aggregateBy = ["namespace"]
+	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+	//   => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
+	//
+	//   2) multi-prop full match
+	//   aggregateBy = ["namespace", "label:env"]
+	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+	//   => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
+	//
+	//   3) multi-prop partial match
+	//   aggregateBy = ["namespace", "label:foo"]
+	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+	//   => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
+	//
+	//	 4) label alias match(es)
+	//	 aggregateBy = ["product", "deployment", "environment", "owner", "team"]
+	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+	//   => Allocation{Name: "app/department/env/owner/team", ExternalCost: 10.00, TotalCost: 10.00}, nil
+	//
+	//   5) no match
+	//   aggregateBy = ["cluster"]
+	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
+	//   => nil, err
+
+	// 1) single-prop full match
+	alloc, err = AssetToExternalAllocation(asset, []string{"namespace"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if alloc.Name != "monitoring/__external__" {
+		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/__external__", alloc.Name)
+	}
+	if ns := alloc.Properties.Namespace; ns != "monitoring" {
+		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s'", "monitoring", ns)
+	}
+	if alloc.ExternalCost != 10.00 {
+		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
+	}
+	if alloc.TotalCost() != 10.00 {
+		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
+	}
+
+	// 2) multi-prop full match
+	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:env"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if alloc.Name != "monitoring/env=prod/__external__" {
+		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/env=prod/__external__", alloc.Name)
+	}
+	if ns := alloc.Properties.Namespace; ns != "monitoring" {
+		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s' (%s)", "monitoring", ns, err)
+	}
+	if ls := alloc.Properties.Labels; len(ls) == 0 || ls["env"] != "prod" {
+		t.Fatalf("expected external allocation with AllocationProperties.Labels[\"env\"] '%s'; got '%s' (%s)", "prod", ls["env"], err)
+	}
+	if alloc.ExternalCost != 10.00 {
+		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
+	}
+	if alloc.TotalCost() != 10.00 {
+		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
+	}
+
+	// 3) multi-prop partial match
+	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:foo"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if alloc.Name != "monitoring/__unallocated__/__external__" {
+		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/__unallocated__/__external__", alloc.Name)
+	}
+	if ns := alloc.Properties.Namespace; ns != "monitoring" {
+		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s' (%s)", "monitoring", ns, err)
+	}
+	if alloc.ExternalCost != 10.00 {
+		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
+	}
+	if alloc.TotalCost() != 10.00 {
+		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
+	}
+
+	// 4) label alias match(es)
+	alloc, err = AssetToExternalAllocation(asset, []string{"product", "department", "environment", "owner", "team"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if alloc.Name != "app/department/env/owner/team/__external__" {
+		t.Fatalf("expected external allocation with name '%s'; got '%s'", "app/department/env/owner/team/__external__", alloc.Name)
+	}
+	if alloc.Properties.Labels[labelConfig.ProductLabel] != "app" {
+		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.ProductLabel, "app", alloc.Properties.Labels[labelConfig.ProductLabel])
+	}
+	if alloc.Properties.Labels[labelConfig.DepartmentLabel] != "department" {
+		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.DepartmentLabel, "department", alloc.Properties.Labels[labelConfig.DepartmentLabel])
+	}
+	if alloc.Properties.Labels[labelConfig.EnvironmentLabel] != "env" {
+		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.EnvironmentLabel, "env", alloc.Properties.Labels[labelConfig.EnvironmentLabel])
+	}
+	if alloc.Properties.Labels[labelConfig.OwnerLabel] != "owner" {
+		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.OwnerLabel, "owner", alloc.Properties.Labels[labelConfig.OwnerLabel])
+	}
+	if alloc.Properties.Labels[labelConfig.TeamLabel] != "team" {
+		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.TeamLabel, "team", alloc.Properties.Labels[labelConfig.TeamLabel])
+	}
+	if alloc.ExternalCost != 10.00 {
+		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
+	}
+	if alloc.TotalCost() != 10.00 {
+		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
+	}
+
+	// 5) no match
+	_, err = AssetToExternalAllocation(asset, []string{"cluster"}, nil)
+	if err == nil {
+		t.Fatalf("expected 'no match' error")
+	}
+
+	// other cases
+
+	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:app"}, nil)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if alloc.ExternalCost != 10.00 {
+		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
+	}
+	if alloc.TotalCost() != 10.00 {
+		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
+	}
+}
+
 func TestAssetSetRange_Start(t *testing.T) {
 	tests := []struct {
 		name string

+ 9 - 9
core/pkg/opencost/exporter/exporter_test.go

@@ -144,7 +144,7 @@ func TestExporters(t *testing.T) {
 	t.Run("allocation exporter", func(t *testing.T) {
 		allocSource := NewMockAllocationSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -173,7 +173,7 @@ func TestExporters(t *testing.T) {
 	t.Run("asset exporter", func(t *testing.T) {
 		assetSource := NewMockAssetSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -202,7 +202,7 @@ func TestExporters(t *testing.T) {
 	t.Run("network insight exporter", func(t *testing.T) {
 		netInsightSource := NewMockNetworkInsightSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -264,15 +264,15 @@ func TestPipelineExportControllers(t *testing.T) {
 		time.Sleep(time.Second + (750 * time.Millisecond))
 		exportControllers.Stop()
 
-		allocPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		allocPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create allocations path formatter: %v", err)
 		}
-		assetPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		assetPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create assets path formatter: %v", err)
 		}
-		netPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		netPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create net insights path formatter: %v", err)
 		}
@@ -300,15 +300,15 @@ func TestPipelineExportControllers(t *testing.T) {
 		time.Sleep(time.Second + (750 * time.Millisecond))
 		exportControllers.Stop()
 
-		allocPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		allocPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create allocations path formatter: %v", err)
 		}
-		assetPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		assetPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create assets path formatter: %v", err)
 		}
-		netPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		netPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create net insights path formatter: %v", err)
 		}

+ 1 - 1
core/pkg/opencost/exporter/exporters.go

@@ -24,7 +24,7 @@ func NewComputePipelineExporter[T any, U export.BinaryMarshalerPtr[T], S validat
 		return nil, fmt.Errorf("failed to extract pipeline name for type: %s", typeutil.TypeOf[T]())
 	}
 
-	pathing, err := pathing.NewDefaultStoragePathFormatter(clusterId, pipelineName, &resolution)
+	pathing, err := pathing.NewBingenStoragePathFormatter("federated", clusterId, pipelineName, &resolution)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create path formatter: %w", err)
 	}

+ 4 - 4
core/pkg/opencost/opencost_codecs_test.go

@@ -6,11 +6,11 @@ import (
 )
 
 func TestAllocation_BinaryEncoding(t *testing.T) {
-	// TODO niko
+	// TODO niko/etl
 }
 
 func TestAllocationSet_BinaryEncoding(t *testing.T) {
-	// TODO niko
+	// TODO niko/etl
 }
 
 func BenchmarkAllocationSetRange_BinaryEncoding(b *testing.B) {
@@ -193,11 +193,11 @@ func TestAny_BinaryEncoding(t *testing.T) {
 }
 
 func TestAsset_BinaryEncoding(t *testing.T) {
-	// TODO niko
+	// TODO niko/etl
 }
 
 func TestAssetSet_BinaryEncoding(t *testing.T) {
-	// TODO niko
+	// TODO niko/etl
 }
 
 func TestAssetSetRange_BinaryEncoding(t *testing.T) {

+ 66 - 0
core/pkg/opencost/status.go

@@ -0,0 +1,66 @@
+package opencost
+
+import "time"
+
+// ETLStatus describes ETL metadata
+type ETLStatus struct {
+	Coverage                   Window           `json:"coverage"`
+	LastRun                    time.Time        `json:"lastRun"`
+	Progress                   float64          `json:"progress"`
+	RefreshRate                string           `json:"refreshRate"`
+	Resolution                 string           `json:"resolution"`
+	MaxPrometheusQueryDuration string           `json:"maxPrometheusQueryDuration"`
+	StartTime                  time.Time        `json:"startTime"`
+	UTCOffset                  string           `json:"utcOffset"`
+	Backup                     *DirectoryStatus `json:"backup,omitempty"`
+}
+
+// DirectoryStatus describes metadata of a directory of files
+type DirectoryStatus struct {
+	Path         string       `json:"path"`
+	Size         string       `json:"size"`
+	LastModified time.Time    `json:"lastModified"`
+	FileCount    int          `json:"fileCount"`
+	Files        []FileStatus `json:"files"`
+}
+
+// FileStatus describes the metadata of a single file
+type FileStatus struct {
+	Name         string            `json:"name"`
+	Size         string            `json:"size"`
+	LastModified time.Time         `json:"lastModified"`
+	IsRepairing  bool              `json:"isRepairing"`
+	Details      map[string]string `json:"details,omitempty"`
+	Errors       []string          `json:"errors,omitempty"`
+	Warnings     []string          `json:"warnings,omitempty"`
+}
+
+// CloudStatus describes CloudStore metadata
+type CloudStatus struct {
+	ConnectionStatus string                `json:"cloudConnectionStatus"`
+	ProviderType     string                `json:"providerType"`
+	CloudUsage       *CloudAssetStatus     `json:"cloudUsage,omitempty"`
+	Reconciliation   *ReconciliationStatus `json:"reconciliation,omitempty"`
+}
+
+// CloudAssetStatus describes CloudAsset metadata of a CloudStore
+type CloudAssetStatus struct {
+	Coverage    Window    `json:"coverage"`
+	LastRun     time.Time `json:"lastRun"`
+	NextRun     time.Time `json:"nextRun"`
+	Progress    float64   `json:"progress"`
+	RefreshRate string    `json:"refreshRate"`
+	Resolution  string    `json:"resolution"`
+	StartTime   time.Time `json:"startTime"`
+}
+
+// ReconciliationStatus describes Reconciliation metadata of a CloudStore
+type ReconciliationStatus struct {
+	Coverage    Window    `json:"coverage"`
+	LastRun     time.Time `json:"lastRun"`
+	NextRun     time.Time `json:"nextRun"`
+	Progress    float64   `json:"progress"`
+	RefreshRate string    `json:"refreshRate"`
+	Resolution  string    `json:"resolution"`
+	StartTime   time.Time `json:"startTime"`
+}

+ 43 - 2
core/pkg/opencost/totals.go

@@ -488,7 +488,7 @@ func ComputeAssetTotals(as *AssetSet, byAsset bool) map[string]*AssetTotals {
 		if isAttached {
 			// Record attached volume data at the cluster and node level, using
 			// name matching to distinguish from PersistentVolumes.
-			// TODO can we make a stronger match at the underlying costmodel layer?
+			// TODO can we make a stronger match at the underlying ETL layer?
 			arts[key].Count++
 			arts[key].AttachedVolumeCost += disk.Cost
 			arts[key].AttachedVolumeCostAdjustment += disk.Adjustment
@@ -573,6 +573,47 @@ type AllocationTotalsStore interface {
 	SetAllocationTotalsByNode(start, end time.Time, rts map[string]*AllocationTotals)
 }
 
+// UpdateAllocationTotalsStore updates an AllocationTotalsStore
+// by totaling the given AllocationSet and saving the totals.
+func UpdateAllocationTotalsStore(arts AllocationTotalsStore, as *AllocationSet) (*AllocationTotalsSet, error) {
+	if arts == nil {
+		return nil, errors.New("cannot update nil AllocationTotalsStore")
+	}
+
+	if as == nil {
+		return nil, errors.New("cannot update AllocationTotalsStore from nil AllocationSet")
+	}
+
+	if as.Window.IsOpen() {
+		return nil, errors.New("cannot update AllocationTotalsStore from AllocationSet with open window")
+	}
+
+	start := *as.Window.Start()
+	end := *as.Window.End()
+
+	artsByCluster := ComputeAllocationTotals(as, AllocationClusterProp)
+	arts.SetAllocationTotalsByCluster(start, end, artsByCluster)
+
+	artsByNode := ComputeAllocationTotals(as, AllocationNodeProp)
+	arts.SetAllocationTotalsByNode(start, end, artsByNode)
+
+	log.Debugf("ETL: Allocation: updated resource totals for %s", as.Window)
+
+	win := NewClosedWindow(start, end)
+
+	abc := map[string]*AllocationTotals{}
+	for key, val := range artsByCluster {
+		abc[key] = val.Clone()
+	}
+
+	abn := map[string]*AllocationTotals{}
+	for key, val := range artsByNode {
+		abn[key] = val.Clone()
+	}
+
+	return NewAllocationTotalsSet(win, abc, abn), nil
+}
+
 // AssetTotalsStore allows for storing (i.e. setting and getting)
 // AssetTotals by cluster and by node.
 type AssetTotalsStore interface {
@@ -606,7 +647,7 @@ func UpdateAssetTotalsStore(arts AssetTotalsStore, as *AssetSet) (*AssetTotalsSe
 	artsByNode := ComputeAssetTotals(as, true)
 	arts.SetAssetTotalsByNode(start, end, artsByNode)
 
-	log.Debugf("Asset: updated resource totals for %s", as.Window)
+	log.Debugf("ETL: Asset: updated resource totals for %s", as.Window)
 
 	win := NewClosedWindow(start, end)
 

+ 5 - 15
core/pkg/pipelines/name.go

@@ -1,21 +1,16 @@
 package pipelines
 
 import (
-	"github.com/opencost/opencost/core/pkg/diagnostics"
-	"github.com/opencost/opencost/core/pkg/heartbeat"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/typeutil"
 )
 
 const (
-	AllocationPipelineName        string = "allocations"
-	AssetsPipelineName            string = "assets"
-	CloudCostsPipelineName        string = "cloudcosts"
-	NetworkInsightPipelineName    string = "networkinsights"
-	CustomCostsPipelineName       string = "customcosts"
-	TurbonomicActionsPipelineName string = "turbonomicactions"
-	HeartbeatPipelineName         string = "heartbeat"
-	DiagnosticsPipelineName       string = "diagnostics"
+	AllocationPipelineName     string = "allocations"
+	AssetsPipelineName         string = "assets"
+	CloudCostsPipelineName     string = "cloudcosts"
+	NetworkInsightPipelineName string = "networkinsights"
+	CustomCostsPipelineName    string = "customcosts"
 )
 
 var nameByType map[string]string
@@ -34,9 +29,6 @@ func init() {
 	networkInsightSetKey := typeutil.TypeOf[opencost.NetworkInsightSet]()
 	networkInsightKey := typeutil.TypeOf[opencost.NetworkInsight]()
 
-	heartbeatKey := typeutil.TypeOf[heartbeat.Heartbeat]()
-	diagnosticsKey := typeutil.TypeOf[diagnostics.DiagnosticsRunReport]()
-
 	nameByType = map[string]string{
 		allocSetKey:          AllocationPipelineName,
 		allocKey:             AllocationPipelineName,
@@ -46,8 +38,6 @@ func init() {
 		cloudCostKey:         CloudCostsPipelineName,
 		networkInsightSetKey: NetworkInsightPipelineName,
 		networkInsightKey:    NetworkInsightPipelineName,
-		heartbeatKey:         HeartbeatPipelineName,
-		diagnosticsKey:       DiagnosticsPipelineName,
 	}
 }
 

+ 0 - 31
core/pkg/storage/storefactory.go

@@ -1,31 +0,0 @@
-package storage
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-// GetDefaultStorage initializes the default shared storage which is required for kubecost
-func GetDefaultStorage() Storage {
-	store, err := InitializeStorage(env.GetDefaultStorageConfigFilePath())
-	if err != nil {
-		panic(fmt.Sprintf("failed to initialize default storage: %s", err.Error()))
-	}
-	return store
-}
-
-// InitializeStorage creates a storage from the config file at the given path
-func InitializeStorage(configPath string) (Storage, error) {
-	storageConfig, err := os.ReadFile(configPath)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read file '%s': %w", configPath, err)
-	}
-	store, err := NewBucketStorage(storageConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create storage from config '%s': %w", configPath, err)
-	}
-
-	return store, nil
-}

+ 0 - 32
core/pkg/util/apiutil/apiutil.go

@@ -1,32 +0,0 @@
-package apiutil
-
-import (
-	"net/http"
-	"net/http/pprof"
-
-	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-func ApplyContainerDiagnosticEndpoints(router *httprouter.Router) {
-	router.HandlerFunc("GET", "/healthz", healthz)
-
-	router.GET("/logs/level", GetLogLevel)
-	router.POST("/logs/level", SetLogLevel)
-
-	if env.IsPProfEnabled() {
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
-		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
-		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
-	}
-}
-
-func healthz(w http.ResponseWriter, _ *http.Request) {
-	w.WriteHeader(200)
-	w.Header().Set("Content-Length", "0")
-	w.Header().Set("Content-Type", "text/plain")
-}

+ 0 - 51
core/pkg/util/apiutil/loglevel.go

@@ -1,51 +0,0 @@
-package apiutil
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-
-	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/core/pkg/log"
-)
-
-type LogLevelRequestResponse struct {
-	Level string `json:"level"`
-}
-
-func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	level := log.GetLogLevel()
-	llrr := LogLevelRequestResponse{
-		Level: level,
-	}
-
-	body, err := json.Marshal(llrr)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to retrive log level"), http.StatusInternalServerError)
-		return
-	}
-	_, err = w.Write(body)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
-		return
-	}
-}
-
-func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
-	params := LogLevelRequestResponse{}
-	err := json.NewDecoder(r.Body).Decode(&params)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
-		return
-	}
-
-	err = log.SetLogLevel(params.Level)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
-		return
-	}
-	w.WriteHeader(http.StatusOK)
-}

+ 1 - 1
core/pkg/util/timeutil/timeutil.go

@@ -117,7 +117,7 @@ func ParseUTCOffset(offsetStr string) (time.Duration, error) {
 	return offset, nil
 }
 
-// FormatStoreResolution provides a clean notation for store resolutions.
+// FormatStoreResolution provides a clean notation for ETL store resolutions.
 // e.g. daily => 1d; hourly => 1h
 func FormatStoreResolution(dur time.Duration) string {
 	if dur >= (7 * 24 * time.Hour) {

+ 1 - 2
modules/collector-source/pkg/collector/config.go

@@ -1,7 +1,6 @@
 package collector
 
 import (
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/modules/collector-source/pkg/env"
 	"github.com/opencost/opencost/modules/collector-source/pkg/util"
 )
@@ -30,7 +29,7 @@ func NewOpenCostCollectorConfigFromEnv() CollectorConfig {
 			},
 		},
 		ScrapeInterval: env.GetCollectorScrapeIntervalSeconds(),
-		ClusterID:      coreenv.GetClusterID(),
+		ClusterID:      env.GetClusterID(),
 		NetworkPort:    env.GetNetworkPort(),
 	}
 }

+ 5 - 0
modules/collector-source/pkg/env/collectorenv.go

@@ -5,6 +5,7 @@ import (
 )
 
 const (
+	ClusterIDEnvVar                 = "CLUSTER_ID"
 	NetworkPortEnvVar               = "NETWORK_PORT"
 	Collector10mResolutionRetention = "COLLECTOR_10M_RESOLUTION_RETENTION"
 	Collector1hResolutionRetention  = "COLLECTOR_1H_RESOLUTION_RETENTION"
@@ -12,6 +13,10 @@ const (
 	CollectorScrapeInterval         = "COLLECTOR_SCRAPE_INTERVAL"
 )
 
+func GetClusterID() string {
+	return env.Get(ClusterIDEnvVar, "")
+}
+
 func GetNetworkPort() int {
 	return env.GetInt(NetworkPortEnvVar, 3001)
 }

+ 41 - 10
modules/prometheus-source/pkg/env/promenv.go

@@ -20,10 +20,10 @@ const (
 	PrometheusTLSHandshakeTimeoutEnvVar = "PROMETHEUS_TLS_HANDSHAKE_TIMEOUT"
 	ScrapeIntervalEnvVar                = "KUBECOST_SCRAPE_INTERVAL"
 
-	PrometheusMaxQueryDurationMinutesEnvVar = "PROMETHEUS_MAX_QUERY_DURATION_MINUTES"
-	PrometheusQueryResolutionSecondsEnvVar  = "PROMETHEUS_QUERY_RESOLUTION_SECONDS"
+	ETLMaxPrometheusQueryDurationMinutes = "ETL_MAX_PROMETHEUS_QUERY_DURATION_MINUTES"
 
 	MaxQueryConcurrencyEnvVar = "MAX_QUERY_CONCURRENCY"
+	QueryLoggingFileEnvVar    = "QUERY_LOGGING_FILE"
 	PromClusterIDLabelEnvVar  = "PROM_CLUSTER_ID_LABEL"
 
 	PrometheusHeaderXScopeOrgIdEnvVar = "PROMETHEUS_HEADER_X_SCOPE_ORGID"
@@ -34,9 +34,15 @@ const (
 	DBBasicAuthPassword = "DB_BASIC_AUTH_PW"
 	DBBearerToken       = "DB_BEARER_TOKEN"
 
+	MultiClusterBasicAuthUsername = "MC_BASIC_AUTH_USERNAME"
+	MultiClusterBasicAuthPassword = "MC_BASIC_AUTH_PW"
+	MultiClusterBearerToken       = "MC_BEARER_TOKEN"
+
 	CurrentClusterIdFilterEnabledVar = "CURRENT_CLUSTER_ID_FILTER_ENABLED"
+	ClusterIDEnvVar                  = "CLUSTER_ID"
 
-	KubecostJobNameEnvVar = "KUBECOST_JOB_NAME"
+	KubecostJobNameEnvVar      = "KUBECOST_JOB_NAME"
+	ETLResolutionSecondsEnvVar = "ETL_RESOLUTION_SECONDS"
 )
 
 // IsPrometheusRetryOnRateLimitResponse will attempt to retry if a 429 response is received OR a 400 with a body containing
@@ -102,13 +108,13 @@ func IsKubeRbacProxyEnabled() bool {
 	return env.GetBool(KubeRbacProxyEnabledEnvVar, false)
 }
 
-// GetPrometheusQueryResolution determines the resolution of prom queries. The smaller the
+// GetETLResolution determines the resolution of ETL queries. The smaller the
 // duration, the higher the resolution; the higher the resolution, the more
 // accurate the query results, but the more computationally expensive.
-func GetPrometheusQueryResolution() time.Duration {
-	// Use the configured query resolution, or default to
+func GetETLResolution() time.Duration {
+	// Use the configured ETL resolution, or default to
 	// 5m (i.e. 300s)
-	secs := time.Duration(env.GetInt64(PrometheusQueryResolutionSecondsEnvVar, 300))
+	secs := time.Duration(env.GetInt64(ETLResolutionSecondsEnvVar, 300))
 	return secs * time.Second
 }
 
@@ -121,6 +127,11 @@ func GetMaxQueryConcurrency() int {
 	return maxQueryConcurrency
 }
 
+// GetQueryLoggingFile returns a file location if query logging is enabled. Otherwise, empty string
+func GetQueryLoggingFile() string {
+	return env.Get(QueryLoggingFileEnvVar, "")
+}
+
 func GetDBBasicAuthUsername() string {
 	return env.Get(DBBasicAuthUsername, "")
 }
@@ -133,9 +144,23 @@ func GetDBBearerToken() string {
 	return env.Get(DBBearerToken, "")
 }
 
-func GetPrometheusMaxQueryDuration() time.Duration {
+// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
+func GetMultiClusterBasicAuthUsername() string {
+	return env.Get(MultiClusterBasicAuthUsername, "")
+}
+
+// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
+func GetMultiClusterBasicAuthPassword() string {
+	return env.Get(MultiClusterBasicAuthPassword, "")
+}
+
+func GetMultiClusterBearerToken() string {
+	return env.Get(MultiClusterBearerToken, "")
+}
+
+func GetETLMaxPrometheusQueryDuration() time.Duration {
 	dayMins := 60 * 24
-	mins := time.Duration(env.GetInt64(PrometheusMaxQueryDurationMinutesEnvVar, int64(dayMins)))
+	mins := time.Duration(env.GetInt64(ETLMaxPrometheusQueryDurationMinutes, int64(dayMins)))
 	return mins * time.Minute
 }
 
@@ -144,11 +169,17 @@ func GetPromClusterLabel() string {
 	return env.Get(PromClusterIDLabelEnvVar, "cluster_id")
 }
 
+// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
+// configurable identifier used for multi-cluster metric emission.
+func GetClusterID() string {
+	return env.Get(ClusterIDEnvVar, "")
+}
+
 // GetPromClusterFilter returns environment variable value CurrentClusterIdFilterEnabledVar which
 // represents additional prometheus filter for all metrics for current cluster id
 func GetPromClusterFilter() string {
 	if env.GetBool(CurrentClusterIdFilterEnabledVar, false) {
-		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), env.GetClusterID())
+		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), GetClusterID())
 	}
 	return ""
 }

+ 3 - 4
modules/prometheus-source/pkg/prom/config.go

@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/modules/prometheus-source/pkg/env"
 
@@ -55,9 +54,9 @@ func NewOpenCostPrometheusConfigFromEnv() (*OpenCostPrometheusConfig, error) {
 	jobName := env.GetJobName()
 	scrapeInterval := env.GetScrapeInterval()
 
-	maxQueryDuration := env.GetPrometheusMaxQueryDuration()
+	maxQueryDuration := env.GetETLMaxPrometheusQueryDuration()
 
-	clusterId := coreenv.GetClusterID()
+	clusterId := env.GetClusterID()
 	clusterLabel := env.GetPromClusterLabel()
 	clusterFilter := env.GetPromClusterFilter()
 
@@ -90,7 +89,7 @@ func NewOpenCostPrometheusConfigFromEnv() (*OpenCostPrometheusConfig, error) {
 		}
 	}
 
-	dataResolution := env.GetPrometheusQueryResolution()
+	dataResolution := env.GetETLResolution()
 
 	// Ensuring if data resolution is less than 60s default it to 1m
 	resolutionMinutes := int(dataResolution.Minutes())

+ 1 - 1
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -648,7 +648,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUUsageMax(start, end time.Time) *sou
 	// The parameter after the metric ...{}[<thisone>] should be set to 2x
 	// the resolution, to make sure the irate always has two points to query
 	// in case the Prom scrape duration has been reduced to be equal to the
-	// query resolution.
+	// ETL resolution.
 	const queryFmtCPUUsageMaxSubquery = `max(max_over_time(irate(container_cpu_usage_seconds_total{container!="POD", container!="", %s}[%dm])[%s:%dm])) by (container, pod_name, pod, namespace, node, instance, %s)`
 	// env.GetPromClusterFilter(), doubleResStr, durStr, resStr, env.GetPromClusterLabel()
 

+ 16 - 16
pkg/cloud/alibaba/provider.go

@@ -15,7 +15,7 @@ import (
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/fileutil"
@@ -24,7 +24,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
-	"github.com/opencost/opencost/pkg/env"
+	ocenv "github.com/opencost/opencost/pkg/env"
 	"golang.org/x/exp/slices"
 )
 
@@ -329,10 +329,10 @@ func (alibaba *Alibaba) GetAlibabaAccessKey() (*credentials.AccessKeyCredential,
 	}
 
 	if config.AlibabaServiceKeyName == "" {
-		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
+		config.AlibabaServiceKeyName = ocenv.GetAlibabaAccessKeyID()
 	}
 	if config.AlibabaServiceKeySecret == "" {
-		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
+		config.AlibabaServiceKeySecret = ocenv.GetAlibabaAccessKeySecret()
 	}
 
 	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
@@ -341,8 +341,8 @@ func (alibaba *Alibaba) GetAlibabaAccessKey() (*credentials.AccessKeyCredential,
 		if err != nil {
 			return nil, fmt.Errorf("unable to set the Alibaba Cloud key/secret from config file %w", err)
 		}
-		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
-		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
+		config.AlibabaServiceKeyName = ocenv.GetAlibabaAccessKeyID()
+		config.AlibabaServiceKeySecret = ocenv.GetAlibabaAccessKeySecret()
 	}
 
 	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
@@ -636,13 +636,13 @@ func (alibaba *Alibaba) loadAlibabaAuthSecretAndSetEnv(force bool) error {
 		return fmt.Errorf("failed to unmarshall access key id and access key secret with err: %w", err)
 	}
 
-	err = coreenv.Set(env.AlibabaAccessKeyIDEnvVar, ak.AccessKeyID)
+	err = env.Set(ocenv.AlibabaAccessKeyIDEnvVar, ak.AccessKeyID)
 	if err != nil {
-		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeyIDEnvVar, err)
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", ocenv.AlibabaAccessKeyIDEnvVar, err)
 	}
-	err = coreenv.Set(env.AlibabaAccessKeySecretEnvVar, ak.SecretAccessKey)
+	err = env.Set(ocenv.AlibabaAccessKeySecretEnvVar, ak.SecretAccessKey)
 	if err != nil {
-		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeySecretEnvVar, err)
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", ocenv.AlibabaAccessKeySecretEnvVar, err)
 	}
 
 	alibaba.accessKey = &credentials.AccessKeyCredential{
@@ -655,7 +655,7 @@ func (alibaba *Alibaba) loadAlibabaAuthSecretAndSetEnv(force bool) error {
 // Regions returns a current supported list of Alibaba regions
 func (alibaba *Alibaba) Regions() []string {
 
-	regionOverrides := env.GetRegionOverrideList()
+	regionOverrides := ocenv.GetRegionOverrideList()
 
 	if len(regionOverrides) > 0 {
 		log.Debugf("Overriding Alibaba regions with configured region list: %+v", regionOverrides)
@@ -680,7 +680,7 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 
 	// Set it to environment clusterID if not set at this point
 	if clusterName == "" {
-		clusterName = coreenv.GetClusterID()
+		clusterName = ocenv.GetClusterID()
 	}
 
 	m := make(map[string]string)
@@ -688,7 +688,7 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.AlibabaProvider
 	m["project"] = alibaba.ClusterAccountId
 	m["region"] = alibaba.ClusterRegion
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = ocenv.GetClusterID()
 	return m, nil
 }
 
@@ -731,8 +731,8 @@ func (alibaba *Alibaba) UpdateConfig(r io.Reader, updateType string) (*models.Cu
 			}
 		}
 
-		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
+		if ocenv.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(ocenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -1391,7 +1391,7 @@ func determinePVRegion(pv *clustercache.PersistentVolume) string {
 		}
 	}
 
-	regionOverrides := env.GetRegionOverrideList()
+	regionOverrides := ocenv.GetRegionOverrideList()
 	regions := alibabaRegions
 
 	if len(regionOverrides) > 0 {

+ 20 - 20
pkg/cloud/aws/provider.go

@@ -21,7 +21,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/env"
 	errs "github.com/opencost/opencost/core/pkg/errors"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
@@ -29,7 +29,7 @@ import (
 	"github.com/opencost/opencost/core/pkg/util/fileutil"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
-	"github.com/opencost/opencost/pkg/env"
+	ocenv "github.com/opencost/opencost/pkg/env"
 
 	awsSDK "github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
@@ -465,10 +465,10 @@ func (aws *AWS) GetAWSAccessKey() (*AWSAccessKey, error) {
 	}
 	//Look for service key values in env if not present in config
 	if config.ServiceKeyName == "" {
-		config.ServiceKeyName = env.GetAWSAccessKeyID()
+		config.ServiceKeyName = ocenv.GetAWSAccessKeyID()
 	}
 	if config.ServiceKeySecret == "" {
-		config.ServiceKeySecret = env.GetAWSAccessKeySecret()
+		config.ServiceKeySecret = ocenv.GetAWSAccessKeySecret()
 	}
 
 	if config.ServiceKeyName == "" && config.ServiceKeySecret == "" {
@@ -581,8 +581,8 @@ func (aws *AWS) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 			}
 		}
 
-		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
+		if ocenv.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(ocenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -800,8 +800,8 @@ func (aws *AWS) getRegionPricing(nodeList []*clustercache.Node) (*http.Response,
 
 	pricingURL += "index.json"
 
-	if env.GetAWSPricingURL() != "" { // Allow override of pricing URL
-		pricingURL = env.GetAWSPricingURL()
+	if ocenv.GetAWSPricingURL() != "" { // Allow override of pricing URL
+		pricingURL = ocenv.GetAWSPricingURL()
 	}
 
 	log.Infof("starting download of \"%s\", which is quite large ...", pricingURL)
@@ -1457,17 +1457,17 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 	// Determine cluster name
 	clusterName := c.ClusterName
 	if clusterName == "" {
-		awsClusterID := env.GetAWSClusterID()
+		awsClusterID := ocenv.GetAWSClusterID()
 		if awsClusterID != "" {
 			log.Infof("Returning \"%s\" as ClusterName", awsClusterID)
 			clusterName = awsClusterID
-			log.Warnf("Warning - %s will be deprecated in a future release. Use %s instead", env.AWSClusterIDEnvVar, coreenv.ClusterIDEnvVar)
-		} else if clusterName = coreenv.GetClusterID(); clusterName != "" {
-			log.DedupedInfof(5, "Setting cluster name to %s from %s ", clusterName, coreenv.ClusterIDEnvVar)
+			log.Warnf("Warning - %s will be deprecated in a future release. Use %s instead", ocenv.AWSClusterIDEnvVar, ocenv.ClusterIDEnvVar)
+		} else if clusterName = ocenv.GetClusterID(); clusterName != "" {
+			log.DedupedInfof(5, "Setting cluster name to %s from %s ", clusterName, ocenv.ClusterIDEnvVar)
 		} else {
 			clusterName = defaultClusterName
 			log.DedupedWarningf(5, "Unable to detect cluster name - using default of %s", defaultClusterName)
-			log.DedupedWarningf(5, "Please set cluster name through configmap or via %s env var", coreenv.ClusterIDEnvVar)
+			log.DedupedWarningf(5, "Please set cluster name through configmap or via %s env var", ocenv.ClusterIDEnvVar)
 		}
 	}
 
@@ -1483,8 +1483,8 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.AWSProvider
 	m["account"] = clusterAccountID
 	m["region"] = awsProvider.ClusterRegion
-	m["id"] = coreenv.GetClusterID()
-	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
+	m["id"] = ocenv.GetClusterID()
+	m["remoteReadEnabled"] = strconv.FormatBool(ocenv.IsRemoteEnabled())
 	m["provisioner"] = awsProvider.clusterProvisioner
 	return m, nil
 }
@@ -1502,11 +1502,11 @@ func (aws *AWS) ConfigureAuth() error {
 func (aws *AWS) ConfigureAuthWith(config *models.CustomPricing) error {
 	accessKeyID, accessKeySecret := aws.getAWSAuth(false, config)
 	if accessKeyID != "" && accessKeySecret != "" { // credentials may exist on the actual AWS node-- if so, use those. If not, override with the service key
-		err := coreenv.Set(env.AWSAccessKeyIDEnvVar, accessKeyID)
+		err := env.Set(ocenv.AWSAccessKeyIDEnvVar, accessKeyID)
 		if err != nil {
 			return err
 		}
-		err = coreenv.Set(env.AWSAccessKeySecretEnvVar, accessKeySecret)
+		err = env.Set(ocenv.AWSAccessKeySecretEnvVar, accessKeySecret)
 		if err != nil {
 			return err
 		}
@@ -1536,7 +1536,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 	}
 
 	// 3. Fall back to env vars
-	if env.GetAWSAccessKeyID() == "" || env.GetAWSAccessKeySecret() == "" {
+	if ocenv.GetAWSAccessKeyID() == "" || ocenv.GetAWSAccessKeySecret() == "" {
 		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  false,
@@ -1547,7 +1547,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 			Status:  true,
 		})
 	}
-	return env.GetAWSAccessKeyID(), env.GetAWSAccessKeySecret()
+	return ocenv.GetAWSAccessKeyID(), ocenv.GetAWSAccessKeySecret()
 }
 
 // Load once and cache the result (even on failure). This is an install time secret, so
@@ -2452,7 +2452,7 @@ func (aws *AWS) CombinedDiscountForNode(instanceType string, isPreemptible bool,
 // Regions returns a predefined list of AWS regions
 func (aws *AWS) Regions() []string {
 
-	regionOverrides := env.GetRegionOverrideList()
+	regionOverrides := ocenv.GetRegionOverrideList()
 
 	if len(regionOverrides) > 0 {
 		log.Debugf("Overriding AWS regions with configured region list: %+v", regionOverrides)

+ 2 - 3
pkg/cloud/azure/provider.go

@@ -20,7 +20,6 @@ import (
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/core/pkg/log"
@@ -1508,7 +1507,7 @@ func (az *Azure) ClusterInfo() (map[string]string, error) {
 	m["account"] = az.ClusterAccountID
 	m["region"] = az.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	return m, nil
 
 }
@@ -1565,7 +1564,7 @@ func (az *Azure) UpdateConfig(r io.Reader, updateType string) (*models.CustomPri
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return fmt.Errorf("error updating cluster metadata: %s", err)
 			}

+ 1 - 1
pkg/cloud/azure/storagebillingparser.go

@@ -62,7 +62,7 @@ func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, re
 
 	if env.IsAzureDownloadBillingDataToDisk() {
 		// clean up old files that have been saved to disk before downloading new ones
-		localPath := env.GetAzureDownloadBillingDataPath()
+		localPath := filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "db", "cloudcost")
 		if _, err := asbp.deleteFilesOlderThan7d(localPath); err != nil {
 			log.Warnf("CloudCost: Azure: ParseBillingData: failed to remove the following stale files: %v", err)
 		}

+ 4 - 1
pkg/cloud/config/controller.go

@@ -3,6 +3,7 @@ package config
 import (
 	"fmt"
 	"os"
+	"path/filepath"
 	"sync"
 	"time"
 
@@ -14,6 +15,8 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 )
 
+const configFile = "cloud-configurations.json"
+
 // Controller manages the cloud.Config using config Watcher(s) to track various configuration
 // methods. To do this it has a map of config watchers mapped on configuration source and a list Observers that it updates
 // upon any change detected from the config watchers.
@@ -30,7 +33,7 @@ func NewController(providerConfig models.ProviderConfig) *Controller {
 	watchers := GetCloudBillingWatchers(providerConfig)
 
 	storage := &FileControllerStorage{
-		path: env.GetCloudCostConfigControllerStateFile(),
+		path: filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), configFile),
 	}
 
 	ic := &Controller{

+ 0 - 2
pkg/cloud/config/controller_test.go

@@ -11,8 +11,6 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 )
 
-var configFile = "test.json"
-
 // Baseline valid config
 var validAthenaConf = &aws.AthenaConfiguration{
 	Bucket:     "bucket",

+ 7 - 2
pkg/cloud/config/watcher.go

@@ -175,7 +175,7 @@ func (cfw *ConfigFileWatcher) GetConfigs() []cloud.KeyedConfig {
 		}
 
 		var key map[string]string
-		err2 := loadFile(env.GetGCPAuthSecretFilePath(), &key)
+		err2 := loadFile(env.GetConfigPathWithDefault("/models/")+"key.json", &key)
 		if err2 != nil {
 			log.Errorf("ConfigFileWatcher: GCP: %s", err2)
 		}
@@ -239,8 +239,13 @@ type MultiCloudWatcher struct {
 }
 
 func (mcw *MultiCloudWatcher) GetConfigs() []cloud.KeyedConfig {
+	var multiConfigPath string
 
-	multiConfigPath := env.GetCloudCostConfigPath()
+	if env.IsKubernetesEnabled() {
+		multiConfigPath = path.Join(env.GetConfigPathWithDefault("/var/configs"), cloudIntegrationSecretPath)
+	} else {
+		multiConfigPath = env.GetCloudCostConfigPath()
+	}
 	exists, err := fileutil.FileExists(multiConfigPath)
 	if err != nil {
 		log.Errorf("MultiCloudWatcher:  error checking file at '%s': %s", multiConfigPath, err.Error())

+ 8 - 5
pkg/cloud/gcp/provider.go

@@ -14,7 +14,6 @@ import (
 	"sync"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -186,7 +185,9 @@ func (gcp *GCP) GetManagementPlatform() (string, error) {
 
 // Attempts to load a GCP auth secret and copy the contents to the key file.
 func (*GCP) loadGCPAuthSecret() {
-	keyPath := env.GetGCPAuthSecretFilePath()
+	path := env.GetConfigPathWithDefault("/models/")
+
+	keyPath := path + "key.json"
 	keyExists, _ := fileutil.FileExists(keyPath)
 	if keyExists {
 		log.Info("GCP Auth Key already exists, no need to load from secret")
@@ -238,7 +239,9 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 					return err
 				}
 
-				keyPath := env.GetGCPAuthSecretFilePath()
+				path := env.GetConfigPathWithDefault("/models/")
+
+				keyPath := path + "key.json"
 				err = os.WriteFile(keyPath, j, 0644)
 				if err != nil {
 					return err
@@ -281,7 +284,7 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -320,7 +323,7 @@ func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 	m["account"] = gcp.ClusterAccountID
 	m["project"] = gcp.ClusterProjectID
 	m["provisioner"] = gcp.clusterProvisioner
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
 	return m, nil
 }

+ 2 - 3
pkg/cloud/oracle/provider.go

@@ -7,7 +7,6 @@ import (
 	"sync"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
@@ -49,7 +48,7 @@ func (o *Oracle) ClusterInfo() (map[string]string, error) {
 	m["account"] = o.ClusterAccountID
 	m["region"] = o.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	return m, nil
 }
 
@@ -181,7 +180,7 @@ func (o *Oracle) UpdateConfig(r io.Reader, _ string) (*models.CustomPricing, err
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), o.getClusterName(pricing))
+			err := utils.UpdateClusterMeta(env.GetClusterID(), o.getClusterName(pricing))
 			if err != nil {
 				return err
 			}

+ 1 - 2
pkg/cloud/otc/provider.go

@@ -7,7 +7,6 @@ import (
 	"strings"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
@@ -356,7 +355,7 @@ func (otc *OTC) ClusterInfo() (map[string]string, error) {
 	m["account"] = c.ProjectID
 	m["region"] = otc.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	return m, nil
 }
 

+ 47 - 22
pkg/cloud/provider/cloud_test.go

@@ -11,8 +11,6 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/clusters"
-	"github.com/opencost/opencost/core/pkg/env"
-	"github.com/opencost/opencost/core/pkg/storage"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/cloud/provider"
@@ -24,9 +22,10 @@ import (
 )
 
 const (
-	providerIDMap = "spec.providerID"
-	nameMap       = "metadata.name"
-	labelMapFoo   = "metadata.labels.foo"
+	providerIDMap  = "spec.providerID"
+	nameMap        = "metadata.name"
+	labelMapFoo    = "metadata.labels.foo"
+	labelMapFooBar = "metadata.labels.foo.bar"
 )
 
 func TestRegionValueFromMapField(t *testing.T) {
@@ -103,7 +102,9 @@ func TestPVPriceFromCSV(t *testing.T) {
 	pv := &clustercache.PersistentVolume{}
 	pv.Name = nameWant
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	wantPrice := "0.1337"
 	c := &provider.CSVProvider{
@@ -135,7 +136,9 @@ func TestPVPriceFromCSVStorageClass(t *testing.T) {
 	pv.Name = nameWant
 	pv.Spec.StorageClassName = storageClassWant
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	wantPrice := "0.1338"
 	c := &provider.CSVProvider{
@@ -166,7 +169,9 @@ func TestNodePriceFromCSVWithGPU(t *testing.T) {
 	labelFooWant := "labelfoo"
 	wantGPU := "2"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -258,9 +263,11 @@ func TestNodePriceFromCSVWithGPULabels(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv(env.ConfigPathEnvVar, configPath)
+	t.Setenv("CONFIG_PATH", configPath)
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = "providerid"
@@ -325,9 +332,11 @@ func TestRKE2NodePriceFromCSVWithGPULabels(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv(env.ConfigPathEnvVar, configPath)
+	t.Setenv("CONFIG_PATH", configPath)
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = "providerid"
@@ -370,7 +379,9 @@ func TestRKE2NodePriceFromCSVWithGPULabels(t *testing.T) {
 func TestNodePriceFromCSVSpecialChar(t *testing.T) {
 	nameWant := "gke-standard-cluster-1-pool-1-91dc432d-cg69"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.Name = nameWant
@@ -405,7 +416,9 @@ func TestNodePriceFromCSV(t *testing.T) {
 	nameWant := "gke-standard-cluster-1-pool-1-91dc432d-cg69"
 	labelFooWant := "labelfoo"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -465,7 +478,9 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 	nameWant := "foo"
 	labelFooWant := "labelfoo"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -656,9 +671,11 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv(env.ConfigPathEnvVar, configPath)
+	t.Setenv("CONFIG_PATH", configPath)
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -688,9 +705,11 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 }
 
 func TestSourceMatchesFromCSV(t *testing.T) {
-	os.Setenv(env.ConfigPathEnvVar, "../../../configs")
+	os.Setenv("CONFIG_PATH", "../../../configs")
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -767,7 +786,9 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 	n.Labels[v1.LabelTopologyRegion] = "eastus2"
 	wantPrice := "0.13370357"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -795,7 +816,9 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 func TestNodePriceFromCSVMixed(t *testing.T) {
 	labelFooWant := "OnDemand"
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	n := &clustercache.Node{}
 	n.Labels = make(map[string]string)
@@ -856,7 +879,9 @@ func TestNodePriceFromCSVByClass(t *testing.T) {
 	wantpricefloat := 0.13370357
 	wantPrice := fmt.Sprintf("%f", (math.Round(wantpricefloat*1000000) / 1000000))
 
-	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
+	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		LocalConfigPath: "./",
+	})
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",

+ 2 - 2
pkg/cloud/provider/customprovider.go

@@ -8,13 +8,13 @@ import (
 	"sync"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 type NodePrice struct {
@@ -142,7 +142,7 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.CustomProvider
 	m["region"] = cp.ClusterRegion
 	m["account"] = cp.ClusterAccountID
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	return m, nil
 }
 

+ 88 - 3
pkg/cloud/provider/provider.go

@@ -34,6 +34,22 @@ import (
 	"github.com/opencost/opencost/pkg/util/watcher"
 )
 
+// ClusterName returns the name defined in cluster info, defaulting to the
+// CLUSTER_ID environment variable
+func ClusterName(p models.Provider) string {
+	info, err := p.ClusterInfo()
+	if err != nil {
+		return env.GetClusterID()
+	}
+
+	name, ok := info["name"]
+	if !ok {
+		return env.GetClusterID()
+	}
+
+	return name
+}
+
 // CustomPricesEnabled returns the boolean equivalent of the cloup provider's custom prices flag,
 // indicating whether or not the cluster is using custom pricing.
 func CustomPricesEnabled(p models.Provider) bool {
@@ -61,6 +77,77 @@ func ConfigWatcherFor(p models.Provider) *watcher.ConfigMapWatcher {
 	}
 }
 
+// AllocateIdleByDefault returns true if the application settings specify to allocate idle by default
+func AllocateIdleByDefault(p models.Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		return false
+	}
+
+	return config.DefaultIdle == "true"
+}
+
+// SharedNamespace returns a list of names of shared namespaces, as defined in the application settings
+func SharedNamespaces(p models.Provider) []string {
+	namespaces := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return namespaces
+	}
+	if config.SharedNamespaces == "" {
+		return namespaces
+	}
+	// trim spaces so that "kube-system, kubecost" is equivalent to "kube-system,kubecost"
+	for _, ns := range strings.Split(config.SharedNamespaces, ",") {
+		namespaces = append(namespaces, strings.Trim(ns, " "))
+	}
+
+	return namespaces
+}
+
+// SharedLabel returns the configured set of shared labels as a parallel tuple of keys to values; e.g.
+// for app:kubecost,type:staging this returns (["app", "type"], ["kubecost", "staging"]) in order to
+// match the signature of the NewSharedResourceInfo
+func SharedLabels(p models.Provider) ([]string, []string) {
+	names := []string{}
+	values := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return names, values
+	}
+
+	if config.SharedLabelNames == "" || config.SharedLabelValues == "" {
+		return names, values
+	}
+
+	ks := strings.Split(config.SharedLabelNames, ",")
+	vs := strings.Split(config.SharedLabelValues, ",")
+	if len(ks) != len(vs) {
+		log.Warnf("Shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
+		return names, values
+	}
+
+	for i := range ks {
+		names = append(names, strings.Trim(ks[i], " "))
+		values = append(values, strings.Trim(vs[i], " "))
+	}
+
+	return names, values
+}
+
+// ShareTenancyCosts returns true if the application settings specify to share
+// tenancy costs by default.
+func ShareTenancyCosts(p models.Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		return false
+	}
+
+	return config.ShareTenancyCosts == "true"
+}
+
 // NewProvider looks at the nodespec or provider metadata server to decide which provider to instantiate.
 func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.ConfigFileManager) (models.Provider, error) {
 	getAllNodesFunc := func() ([]*clustercache.Node, error) {
@@ -72,15 +159,13 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 	}
 
 	var nodes []*clustercache.Node
-
-	if env.HasKubernetesResourceAccess() {
+	if !env.IsETLReadOnlyMode() {
 		// the error can be ignored because getAllNodesFunc only errors if nodes is empty, a case which we explicitly
 		// handle by checking the length of nodes below
 		nodes, _ = retry.Retry(context.Background(), getAllNodesFunc, 10, time.Second)
 	} else {
 		nodes, _ = getAllNodesFunc()
 	}
-
 	if len(nodes) == 0 {
 		log.Infof("Could not locate any nodes for cluster.")
 		return &CustomProvider{

+ 8 - 2
pkg/cloud/provider/providerconfig.go

@@ -7,7 +7,6 @@ import (
 	"strconv"
 	"sync"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud/alibaba"
@@ -19,6 +18,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/otc"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 const closedSourceConfigMount = "models/"
@@ -35,7 +35,7 @@ type ProviderConfig struct {
 
 // NewProviderConfig creates a new ConfigFile and returns the ProviderConfig
 func NewProviderConfig(configManager *config.ConfigFileManager, fileName string) *ProviderConfig {
-	configFile := configManager.ConfigFileAt(coreenv.GetPathFromConfig(fileName))
+	configFile := configManager.ConfigFileAt(configPathFor(fileName))
 	pc := &ProviderConfig{
 		lock:          new(sync.Mutex),
 		configManager: configManager,
@@ -272,6 +272,12 @@ func DefaultPricing() *models.CustomPricing {
 	}
 }
 
+// Returns the configuration directory concatenated with a specific config file name
+func configPathFor(filename string) string {
+	path := env.GetConfigPathWithDefault("/models/")
+	return gopath.Join(path, filename)
+}
+
 // Gives the config file name in a full qualified file name
 func filenameInConfigPath(fqfn string) string {
 	_, fileName := gopath.Split(fqfn)

+ 2 - 3
pkg/cloud/scaleway/provider.go

@@ -8,7 +8,6 @@ import (
 	"strings"
 	"sync"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
@@ -304,7 +303,7 @@ func (scw *Scaleway) ClusterInfo() (map[string]string, error) {
 	m["region"] = scw.ClusterRegion
 	m["account"] = scw.ClusterAccountID
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
-	m["id"] = coreenv.GetClusterID()
+	m["id"] = env.GetClusterID()
 	return m, nil
 
 }
@@ -336,7 +335,7 @@ func (c *Scaleway) UpdateConfig(r io.Reader, updateType string) (*models.CustomP
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}

+ 1 - 1
pkg/cloudcost/pipelineservice.go

@@ -142,7 +142,7 @@ func (s *PipelineService) GetCloudCostRepairHandler() func(w http.ResponseWriter
 
 		var window opencost.Window
 		if windowStr != "" {
-			win, err := opencost.ParseWindowUTC(windowStr)
+			win, err := opencost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
 			if err != nil {
 				http.Error(w, fmt.Sprintf("Invalid parameter: %s", err), http.StatusBadRequest)
 				return

+ 2 - 2
pkg/clustercache/clustercache.go

@@ -56,7 +56,7 @@ func NewKubernetesClusterCacheV1(client kubernetes.Interface) cc.ClusterCache {
 	batchClient := client.BatchV1().RESTClient()
 	pdbClient := client.PolicyV1().RESTClient()
 
-	installNamespace := env.GetOpencostNamespace()
+	installNamespace := env.GetInstallNamespace()
 	log.Infof("NAMESPACE: %s", installNamespace)
 
 	kcc := &KubernetesClusterCache{
@@ -80,7 +80,7 @@ func NewKubernetesClusterCacheV1(client kubernetes.Interface) cc.ClusterCache {
 	// Wait for each caching watcher to initialize
 	cancel := make(chan struct{})
 	var wg sync.WaitGroup
-	if env.HasKubernetesResourceAccess() {
+	if !env.IsETLReadOnlyMode() {
 		wg.Add(14)
 		go initializeCache(kcc.namespaceWatch, &wg, cancel)
 		go initializeCache(kcc.nodeWatch, &wg, cancel)

+ 4 - 2
pkg/clustercache/clustercache2.go

@@ -54,8 +54,9 @@ func NewKubernetesClusterCacheV2(clientset kubernetes.Interface) *KubernetesClus
 func (kcc *KubernetesClusterCacheV2) Run() {
 	var wg sync.WaitGroup
 
-	wg.Add(14)
-	if env.HasKubernetesResourceAccess() {
+	if !env.IsETLReadOnlyMode() {
+		wg.Add(14)
+
 		kcc.namespaceStore.Watch(kcc.stopCh, wg.Done)
 		kcc.nodeStore.Watch(kcc.stopCh, wg.Done)
 		kcc.persistentVolumeClaimStore.Watch(kcc.stopCh, wg.Done)
@@ -71,6 +72,7 @@ func (kcc *KubernetesClusterCacheV2) Run() {
 		kcc.jobStore.Watch(kcc.stopCh, wg.Done)
 		kcc.pdbStore.Watch(kcc.stopCh, wg.Done)
 	}
+
 	wg.Wait()
 }
 

+ 11 - 5
pkg/cmd/agent/agent.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"path"
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/clusters"
@@ -74,7 +75,12 @@ func Execute(opts *AgentOpts) error {
 	}
 
 	// Create ConfigFileManager for synchronization of shared configuration
-	confManager := config.NewConfigFileManager(nil)
+	confManager := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		BucketStoreConfig: env.GetConfigBucketFile(),
+		LocalConfigPath:   "/",
+	})
+
+	configPrefix := env.GetConfigPathWithDefault(env.DefaultConfigMountPath)
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
 	cloudProvider, err := provider.NewProvider(clusterCache, cloudProviderKey, confManager)
@@ -87,7 +93,7 @@ func Execute(opts *AgentOpts) error {
 
 	var clusterInfoProvider clusters.ClusterInfoProvider
 	if env.IsExportClusterInfoEnabled() {
-		clusterInfoConf := confManager.ConfigFileAt(env.GetClusterInfoFilePath())
+		clusterInfoConf := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
 		clusterInfoProvider = costmodel.NewClusterInfoWriteOnRequest(localClusterInfo, clusterInfoConf)
 	} else {
 		clusterInfoProvider = localClusterInfo
@@ -123,14 +129,14 @@ func Execute(opts *AgentOpts) error {
 	}
 
 	// Append the pricing config watcher
-	installNamespace := env.GetOpencostNamespace()
-	configWatchers := watcher.NewConfigMapWatchers(k8sClient, installNamespace)
+	kubecostNamespace := env.GetInstallNamespace()
+	configWatchers := watcher.NewConfigMapWatchers(k8sClient, kubecostNamespace)
 	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.Watch()
 
 	// Initialize cluster exporting if it's enabled
 	if env.IsExportClusterCacheEnabled() {
-		cacheLocation := confManager.ConfigFileAt(env.GetClusterCacheFilePath())
+		cacheLocation := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-cache.json"))
 		clusterExporter = cluster.NewClusterExporter(clusterCache, cacheLocation, ClusterExportInterval)
 		clusterExporter.Run()
 	}

+ 3 - 3
pkg/cmd/commands.go

@@ -16,7 +16,7 @@ const (
 	// commandRoot is the root command used to route to sub-commands
 	commandRoot string = "root"
 
-	// CommandCostModel is the command used to execute the metrics emission and cost model querying
+	// CommandCostModel is the command used to execute the metrics emission and ETL pipeline
 	CommandCostModel string = "cost-model"
 
 	// CommandAgent executes the application in agent mode, which provides only metrics exporting.
@@ -96,7 +96,7 @@ func newRootCommand(costModelCmd *cobra.Command, cmds ...*cobra.Command) *cobra.
 
 // default open-source cost-model command
 func newCostModelCommand() *cobra.Command {
-	config := costmodel.DefaultConfig()
+	opts := &costmodel.CostModelOpts{}
 
 	cmCmd := &cobra.Command{
 		Use:   CommandCostModel,
@@ -105,7 +105,7 @@ func newCostModelCommand() *cobra.Command {
 			// Init logging here so cobra/viper has processed the command line args and flags
 			// otherwise only envvars are available during init
 			log.InitLogging(true)
-			return costmodel.Execute(config)
+			return costmodel.Execute(opts)
 		},
 	}
 

+ 0 - 31
pkg/cmd/costmodel/config.go

@@ -1,31 +0,0 @@
-package costmodel
-
-import (
-	"github.com/opencost/opencost/core/pkg/log"
-	"github.com/opencost/opencost/pkg/env"
-)
-
-// Config contain configuration options that can be passed to the Execute() method
-type Config struct {
-	Port                   int
-	KubernetesEnabled      bool
-	CarbonEstimatesEnabled bool
-	CloudCostEnabled       bool
-	CustomCostEnabled      bool
-}
-
-func DefaultConfig() *Config {
-	return &Config{
-		Port:                   env.GetOpencostAPIPort(),
-		KubernetesEnabled:      env.IsKubernetesEnabled(),
-		CarbonEstimatesEnabled: env.IsCarbonEstimatesEnabled(),
-		CloudCostEnabled:       env.IsCloudCostEnabled(),
-	}
-}
-
-func (c *Config) log() {
-	log.Infof("Kubernetes enabled: %t", c.KubernetesEnabled)
-	log.Infof("Carbon Estimates enabled: %t", c.CarbonEstimatesEnabled)
-	log.Infof("Cloud Costs enabled: %t", c.CloudCostEnabled)
-	log.Infof("Custom Costs enabled: %t", c.CustomCostEnabled)
-}

+ 77 - 12
pkg/cmd/costmodel/costmodel.go

@@ -4,10 +4,11 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/http/pprof"
 	"time"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/core/pkg/util/apiutil"
+	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/customcost"
@@ -23,17 +24,25 @@ import (
 	"github.com/opencost/opencost/pkg/metrics"
 )
 
-func Execute(conf *Config) error {
+// CostModelOpts contain configuration options that can be passed to the Execute() method
+type CostModelOpts struct {
+	// Stubbed for future configuration
+}
+
+func Healthz(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+	w.WriteHeader(200)
+	w.Header().Set("Content-Length", "0")
+	w.Header().Set("Content-Type", "text/plain")
+}
+
+func Execute(opts *CostModelOpts) error {
 	log.Infof("Starting cost-model version %s", version.FriendlyVersion())
-	if conf == nil {
-		conf = DefaultConfig()
-	}
-	conf.log()
+	log.Infof("Kubernetes enabled: %t", env.IsKubernetesEnabled())
 
 	router := httprouter.New()
 	var a *costmodel.Accesses
 	var cp models.Provider
-	if conf.KubernetesEnabled {
+	if env.IsKubernetesEnabled() {
 		a = costmodel.Initialize(router)
 		err := StartExportWorker(context.Background(), a.Model)
 		if err != nil {
@@ -44,7 +53,7 @@ func Execute(conf *Config) error {
 		router.GET("/allocation", a.ComputeAllocationHandler)
 		router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
 		router.GET("/assets", a.ComputeAssetsHandler)
-		if conf.CarbonEstimatesEnabled {
+		if env.IsCarbonEstimatesEnabled() {
 			router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
 		}
 
@@ -52,7 +61,8 @@ func Execute(conf *Config) error {
 		cp = a.CloudProvider
 	}
 
-	if conf.CloudCostEnabled {
+	log.Infof("Cloud Costs enabled: %t", env.IsCloudCostEnabled())
+	if env.IsCloudCostEnabled() {
 		var providerConfig models.ProviderConfig
 		if cp != nil {
 			providerConfig = provider.ExtractConfigFromProviders(cp)
@@ -60,8 +70,9 @@ func Execute(conf *Config) error {
 		costmodel.InitializeCloudCost(router, providerConfig)
 	}
 
+	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
 	var customCostPipelineService *customcost.PipelineService
-	if conf.CloudCostEnabled {
+	if env.IsCustomCostEnabled() {
 		customCostPipelineService = costmodel.InitializeCustomCost(router)
 	}
 
@@ -69,7 +80,20 @@ func Execute(conf *Config) error {
 	// valid for CustomCostPipelineService to be nil
 	router.GET("/customCost/status", customCostPipelineService.GetCustomCostStatusHandler())
 
-	apiutil.ApplyContainerDiagnosticEndpoints(router)
+	router.GET("/healthz", Healthz)
+
+	router.GET("/logs/level", GetLogLevel)
+	router.POST("/logs/level", SetLogLevel)
+
+	if env.IsPProfEnabled() {
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
+		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
+		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
+	}
 
 	rootMux := http.NewServeMux()
 	rootMux.Handle("/", router)
@@ -77,7 +101,7 @@ func Execute(conf *Config) error {
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)
 	handler := cors.AllowAll().Handler(telemetryHandler)
 
-	return http.ListenAndServe(fmt.Sprint(":", conf.Port), errors.PanicHandlerMiddleware(handler))
+	return http.ListenAndServe(fmt.Sprint(":", env.GetAPIPort()), errors.PanicHandlerMiddleware(handler))
 }
 
 func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) error {
@@ -114,3 +138,44 @@ func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) err
 	}()
 	return nil
 }
+
+type LogLevelRequestResponse struct {
+	Level string `json:"level"`
+}
+
+func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	level := log.GetLogLevel()
+	llrr := LogLevelRequestResponse{
+		Level: level,
+	}
+
+	body, err := json.Marshal(llrr)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to retrive log level"), http.StatusInternalServerError)
+		return
+	}
+	_, err = w.Write(body)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
+		return
+	}
+}
+
+func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	params := LogLevelRequestResponse{}
+	err := json.NewDecoder(r.Body).Decode(&params)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
+		return
+	}
+
+	err = log.SetLogLevel(params.Level)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}

+ 53 - 3
pkg/config/configmanager.go

@@ -1,11 +1,44 @@
 package config
 
 import (
+	"os"
 	"sync"
 
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/storage"
 )
 
+//--------------------------------------------------------------------------
+//  ConfigFileManagerOpts
+//--------------------------------------------------------------------------
+
+// ConfigFileManagerOpts describes how to configure the ConfigFileManager for
+// serving configuration files
+type ConfigFileManagerOpts struct {
+	// BucketStoreConfig is the local file location for the configuration used to
+	// write and read configuration data to/from the bucket. The format of this
+	// configuration file should be compatible with storage.NewBucketStorage
+	BucketStoreConfig string
+
+	// LocalConfigPath provides a backup location for storing the configuration
+	// files
+	LocalConfigPath string
+}
+
+// IsBucketStorageEnabled returns true if bucket storage is enabled.
+func (cfmo *ConfigFileManagerOpts) IsBucketStorageEnabled() bool {
+	return cfmo.BucketStoreConfig != ""
+}
+
+// DefaultConfigFileManagerOpts returns the default configuration options for the
+// config file manager
+func DefaultConfigFileManagerOpts() *ConfigFileManagerOpts {
+	return &ConfigFileManagerOpts{
+		BucketStoreConfig: "",
+		LocalConfigPath:   "/",
+	}
+}
+
 //--------------------------------------------------------------------------
 //  ConfigFileManager
 //--------------------------------------------------------------------------
@@ -19,9 +52,26 @@ type ConfigFileManager struct {
 }
 
 // NewConfigFileManager creates a new backing storage and configuration file manager
-func NewConfigFileManager(configStore storage.Storage) *ConfigFileManager {
-	if configStore == nil {
-		configStore = storage.NewFileStorage("/")
+func NewConfigFileManager(opts *ConfigFileManagerOpts) *ConfigFileManager {
+	if opts == nil {
+		opts = DefaultConfigFileManagerOpts()
+	}
+
+	var configStore storage.Storage
+	if opts.IsBucketStorageEnabled() {
+		bucketConfig, err := os.ReadFile(opts.BucketStoreConfig)
+		if err != nil {
+			log.Warnf("Failed to initialize config bucket storage: %s", err)
+		} else {
+			bucketStore, err := storage.NewBucketStorage(bucketConfig)
+			if err != nil {
+				log.Warnf("Failed to create config bucket storage: %s", err)
+			} else {
+				configStore = bucketStore
+			}
+		}
+	} else {
+		configStore = storage.NewFileStorage(opts.LocalConfigPath)
 	}
 
 	return &ConfigFileManager{

+ 9 - 9
pkg/costmodel/allocation_helpers.go

@@ -7,12 +7,12 @@ import (
 	"strings"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/env"
 	"k8s.io/apimachinery/pkg/labels"
 )
 
@@ -108,7 +108,7 @@ func applyPodResults(window opencost.Window, resolution time.Duration, podMap ma
 
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		namespace := res.Namespace
@@ -914,7 +914,7 @@ func applyNetworkAllocation(podMap map[podKey]*pod, resNetworkGiB []*source.Netw
 	for _, res := range resNetworkCostPerGiB {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		costPerGiBByCluster[cluster] = res.Data[0].Value
@@ -1643,7 +1643,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 	for _, res := range resNodeCostPerCPUHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := res.Node
@@ -1679,7 +1679,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 	for _, res := range resNodeCostPerRAMGiBHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := res.Node
@@ -1715,7 +1715,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 	for _, res := range resNodeCostPerGPUHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := res.Node
@@ -1751,7 +1751,7 @@ func applyNodeSpot(nodeMap map[nodeKey]*nodePricing, resNodeIsSpot []*source.Nod
 	for _, res := range resNodeIsSpot {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := res.Node
@@ -2050,7 +2050,7 @@ func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvK
 	for _, res := range resPVCInfo {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		namespace := res.Namespace
@@ -2110,7 +2110,7 @@ func buildPodPVCMap(podPVCMap map[podKey][]*pvc, pvMap map[pvKey]*pv, pvcMap map
 	for _, res := range resPodPVCAllocation {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		namespace := res.Namespace

+ 14 - 15
pkg/costmodel/cluster.go

@@ -6,7 +6,6 @@ import (
 	"strings"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"golang.org/x/exp/slices"
 
@@ -167,7 +166,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageBytes {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -209,7 +208,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -237,7 +236,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -264,7 +263,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedAvg {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -291,7 +290,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedMax {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -318,7 +317,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalActiveMins {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -366,7 +365,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resPVStorageClass {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -764,7 +763,7 @@ func pvCosts(
 	for _, result := range resActiveMins {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -800,7 +799,7 @@ func pvCosts(
 	for _, result := range resPVSize {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -832,7 +831,7 @@ func pvCosts(
 	for _, result := range resPVCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -876,7 +875,7 @@ func pvCosts(
 	for _, result := range resPVUsedAvg {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		claimName := result.PersistentVolumeClaim
@@ -897,7 +896,7 @@ func pvCosts(
 
 			thatCluster := thatRes.Cluster
 			if thatCluster == "" {
-				thatCluster = coreenv.GetClusterID()
+				thatCluster = env.GetClusterID()
 			}
 
 			thatVolumeName := thatRes.VolumeName
@@ -943,7 +942,7 @@ func pvCosts(
 	for _, result := range resPVUsedMax {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		claimName := result.PersistentVolumeClaim
@@ -963,7 +962,7 @@ func pvCosts(
 		for _, thatRes := range resPVCInfo {
 			thatCluster := thatRes.Cluster
 			if thatCluster == "" {
-				thatCluster = coreenv.GetClusterID()
+				thatCluster = env.GetClusterID()
 			}
 
 			thatVolumeName := thatRes.VolumeName

+ 17 - 17
pkg/costmodel/cluster_helpers.go

@@ -5,7 +5,6 @@ import (
 	"strconv"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 
@@ -13,6 +12,7 @@ import (
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 // mergeTypeMaps takes two maps of (cluster name, node name) -> node type
@@ -49,7 +49,7 @@ func buildCPUCostMap(
 	for _, result := range resNodeCPUCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -117,7 +117,7 @@ func buildRAMCostMap(
 	for _, result := range resNodeRAMCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -188,7 +188,7 @@ func buildGPUCostMap(
 	for _, result := range resNodeGPUCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -251,7 +251,7 @@ func buildGPUCountMap(resNodeGPUCount []*source.NodeGPUCountResult) map[NodeIden
 	for _, result := range resNodeGPUCount {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -280,7 +280,7 @@ func buildCPUCoresMap(resNodeCPUCores []*source.NodeCPUCoresCapacityResult) map[
 	for _, result := range resNodeCPUCores {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -307,7 +307,7 @@ func buildRAMBytesMap(resNodeRAMBytes []*source.NodeRAMBytesCapacityResult) map[
 	for _, result := range resNodeRAMBytes {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -342,7 +342,7 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.NodeCPUModeTotalResult)
 	for _, result := range resNodeCPUModeTotal {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := result.Node
@@ -448,7 +448,7 @@ func buildRAMUserPctMap(resNodeRAMUserPct []*source.NodeRAMUserPercentResult) ma
 	for _, result := range resNodeRAMUserPct {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -477,7 +477,7 @@ func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.NodeRAMSystemPercentResu
 	for _, result := range resNodeRAMSystemPct {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Instance
@@ -509,7 +509,7 @@ type activeData struct {
 func clusterManagementKeyGen(result *source.ClusterManagementDurationResult) (ClusterManagementIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	provisionerName := result.Provisioner
@@ -528,7 +528,7 @@ func clusterManagementValues(result *source.ClusterManagementDurationResult) []*
 func nodeKeyGen(result *source.NodeActiveMinutesResult) (NodeIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	name := result.Node
@@ -552,7 +552,7 @@ func nodeValues(result *source.NodeActiveMinutesResult) []*util.Vector {
 func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	namespace := result.Namespace
@@ -577,7 +577,7 @@ func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdent
 	return LoadBalancerIdentifier{
 		Cluster:   cluster,
 		Namespace: namespace,
-		Name:      fmt.Sprintf("%s/%s", namespace, name), // TODO: this is kept for backwards-compatibility, but not good,
+		Name:      fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good,
 		IngressIP: ingressIp,
 	}, true
 }
@@ -627,7 +627,7 @@ func buildPreemptibleMap(
 	for _, result := range resIsSpot {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		name := result.Node
@@ -666,7 +666,7 @@ func buildAssetsPVCMap(resPVCInfo []*source.PVCInfoResult) map[DiskIdentifier]*D
 	for _, result := range resPVCInfo {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		volumeName := result.VolumeName
@@ -716,7 +716,7 @@ func buildLabelsMap(
 	for _, result := range resLabels {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 
 		node := result.Node

+ 6 - 6
pkg/costmodel/costmodel.go

@@ -12,13 +12,13 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/core/pkg/clusters"
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/env"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -134,7 +134,7 @@ func (cd *CostData) GetController() (name string, kind string, hasController boo
 
 func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
 	// Cluster ID is specific to the source cluster
-	clusterID := coreenv.GetClusterID()
+	clusterID := env.GetClusterID()
 	cp := cm.Provider
 	ds := cm.DataSource
 	mq := ds.Metrics()
@@ -1266,7 +1266,7 @@ func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer
 		namespace := service.Namespace
 		name := service.Name
 		key := serviceKey{
-			Cluster:   coreenv.GetClusterID(),
+			Cluster:   env.GetClusterID(),
 			Namespace: namespace,
 			Service:   name,
 		}
@@ -1590,7 +1590,7 @@ func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration,
 
 				_, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
 				if err != nil {
-					log.Errorf("Allocation: error updating asset resource totals for %s: %s", assetSet.Window, err)
+					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
 				}
 			}
 
@@ -1649,7 +1649,7 @@ func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration,
 
 			_, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
 			if err != nil {
-				log.Errorf("Allocation: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
+				log.Errorf("ETL: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
 			}
 
 		}
@@ -1759,7 +1759,7 @@ func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost
 	for key, assetTotal := range assetTotals {
 		allocTotal, ok := allocTotals[key]
 		if !ok {
-			log.Warnf("Allocation: did not find allocations for asset key: %s", key)
+			log.Warnf("ETL: did not find allocations for asset key: %s", key)
 
 			// Use a zero-value set of totals. This indicates either (1) an
 			// error computing totals, or (2) that no allocations ran on the

+ 8 - 8
pkg/costmodel/key.go

@@ -3,13 +3,13 @@ package costmodel
 import (
 	"fmt"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 func newResultPodKey(cluster string, namespace string, pod string) (podKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -65,7 +65,7 @@ func newNamespaceKey(cluster, namespace string) namespaceKey {
 
 func newResultNamespaceKey(cluster string, namespace string) (namespaceKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -97,7 +97,7 @@ func newControllerKey(cluster, namespace, controllerKind, controller string) con
 
 func newResultControllerKey(cluster, namespace, controller, controllerKind string) (controllerKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -131,7 +131,7 @@ func newServiceKey(cluster, namespace, service string) serviceKey {
 
 func newResultServiceKey(cluster, namespace, service string) (serviceKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -163,7 +163,7 @@ func newNodeKey(cluster, node string) nodeKey {
 
 func newResultNodeKey(cluster string, node string) (nodeKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if node == "" {
@@ -199,7 +199,7 @@ func newPVCKey(cluster, namespace, persistentVolumeClaim string) pvcKey {
 // clusterLabel, which we expect may not exist, but has a default value.)
 func newResultPVCKey(cluster, namespace, pvc string) (pvcKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -231,7 +231,7 @@ func newPVKey(cluster, persistentVolume string) pvKey {
 
 func newResultPVKey(cluster, pv string) (pvKey, error) {
 	if cluster == "" {
-		cluster = coreenv.GetClusterID()
+		cluster = env.GetClusterID()
 	}
 	if pv == "" {
 		return pvKey{}, fmt.Errorf("persistentvolume is required")

+ 2 - 2
pkg/costmodel/networkinsight.go

@@ -4,10 +4,10 @@ import (
 	"fmt"
 	"time"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
+	"github.com/opencost/opencost/pkg/env"
 )
 
 func (cm *CostModel) ComputeNetworkInsights(start, end time.Time) (*opencost.NetworkInsightSet, error) {
@@ -138,7 +138,7 @@ func applyNetworkCosts(
 
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = coreenv.GetClusterID()
+			cluster = env.GetClusterID()
 		}
 		namespace := res.Namespace
 		pod := res.Pod

+ 1 - 2
pkg/costmodel/nodeclientconfig.go

@@ -8,7 +8,6 @@ import (
 	"os"
 	"strings"
 
-	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	nodes "github.com/opencost/opencost/core/pkg/nodestats"
 	"github.com/opencost/opencost/pkg/env"
@@ -20,7 +19,7 @@ const (
 )
 
 func NewNodeClientConfigFromEnv() (*nodes.NodeClientConfig, error) {
-	clusterId := coreenv.GetClusterID()
+	clusterId := env.GetClusterID()
 	concurrentPollers := defaultConcurrentPollers
 	insecure := env.IsNodeStatsInsecure()
 	certFile := env.GetNodeStatsCertFile()

+ 23 - 8
pkg/costmodel/router.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
 	"os"
+	"path"
 	"reflect"
 	"strconv"
 	"strings"
@@ -348,7 +349,7 @@ func (a *Accesses) GetInstallNamespace(w http.ResponseWriter, r *http.Request, _
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	ns := env.GetOpencostNamespace()
+	ns := env.GetInstallNamespace()
 	w.Write([]byte(ns))
 }
 
@@ -396,7 +397,7 @@ func (a *Accesses) GetInstallInfo(w http.ResponseWriter, r *http.Request, _ http
 }
 
 func GetKubecostContainers(kubeClientSet kubernetes.Interface) ([]ContainerInfo, error) {
-	pods, err := kubeClientSet.CoreV1().Pods(env.GetOpencostNamespace()).List(context.Background(), metav1.ListOptions{
+	pods, err := kubeClientSet.CoreV1().Pods(env.GetInstallNamespace()).List(context.Background(), metav1.ListOptions{
 		LabelSelector: "app=cost-analyzer",
 		FieldSelector: "status.phase=Running",
 		Limit:         1,
@@ -432,7 +433,7 @@ func (a *Accesses) AddServiceKey(w http.ResponseWriter, r *http.Request, ps http
 
 	key := r.PostForm.Get("key")
 	k := []byte(key)
-	err := os.WriteFile(env.GetGCPAuthSecretFilePath(), k, 0644)
+	err := os.WriteFile(path.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "key.json"), k, 0644)
 	if err != nil {
 		fmt.Fprintf(w, "Error writing service key: %s", err)
 	}
@@ -473,7 +474,12 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	k8sCache.Run()
 
 	// Create ConfigFileManager for synchronization of shared configuration
-	confManager := config.NewConfigFileManager(nil)
+	confManager := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
+		BucketStoreConfig: env.GetConfigBucketFile(),
+		LocalConfigPath:   "/",
+	})
+
+	configPrefix := env.GetConfigPathWithDefault("/var/configs/")
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
 	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
@@ -484,7 +490,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	// ClusterInfo Provider to provide the cluster map with local and remote cluster data
 	var clusterInfoProvider clusters.ClusterInfoProvider
 	if env.IsClusterInfoFileEnabled() {
-		clusterInfoFile := confManager.ConfigFileAt(env.GetClusterInfoFilePath())
+		clusterInfoFile := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
 		clusterInfoProvider = NewConfiguredClusterInfoProvider(clusterInfoFile)
 	} else {
 		clusterInfoProvider = NewLocalClusterInfoProvider(kubeClientset, cloudProvider)
@@ -510,7 +516,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	}
 	if env.IsCollectorDataSourceEnabled() {
 		fn = func() (source.OpenCostDataSource, error) {
-			store := storage.GetDefaultStorage()
+			store := getStorage()
 			nodeStatConf, err := NewNodeClientConfigFromEnv()
 			if err != nil {
 				return nil, fmt.Errorf("failed to get node client config: %w", err)
@@ -543,9 +549,9 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	}
 
 	// Append the pricing config watcher
-	installNamespace := env.GetOpencostNamespace()
+	kubecostNamespace := env.GetInstallNamespace()
 
-	configWatchers := watcher.NewConfigMapWatchers(kubeClientset, installNamespace, additionalConfigWatchers...)
+	configWatchers := watcher.NewConfigMapWatchers(kubeClientset, kubecostNamespace, additionalConfigWatchers...)
 	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 	configWatchers.Watch()
@@ -603,6 +609,15 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	return a
 }
 
+func getStorage() storage.Storage {
+	var store storage.Storage
+	pvMountPath := env.GetPVMountPath()
+	if pvMountPath != "" {
+		store = storage.NewFileStorage(pvMountPath)
+	}
+	return store
+}
+
 // InitializeCloudCost Initializes Cloud Cost pipeline and querier and registers endpoints
 func InitializeCloudCost(router *httprouter.Router, providerConfig models.ProviderConfig) {
 	log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())

+ 0 - 79
pkg/env/cloudcost.go

@@ -1,79 +0,0 @@
-package env
-
-import (
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-const (
-	CloudCostConfigControllerStateFile = "cloud-configurations.json"
-	CloudIntegrationConfigFile         = "cloud-integration.json"
-	AzureBillingDataDownloadPath       = "db/cloudcost"
-)
-
-const (
-	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
-	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
-	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
-	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
-	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
-
-	CustomCostEnabledEnvVar         = "CUSTOM_COST_ENABLED"
-	CustomCostQueryWindowDaysEnvVar = "CUSTOM_COST_QUERY_WINDOW_DAYS"
-
-	PluginConfigDirEnvVar     = "PLUGIN_CONFIG_DIR"
-	PluginExecutableDirEnvVar = "PLUGIN_EXECUTABLE_DIR"
-
-	AzureDownloadBillingDataToDiskEnvVar = "AZURE_DOWNLOAD_BILLING_DATA_TO_DISK"
-)
-
-func IsCloudCostEnabled() bool {
-	return env.GetBool(CloudCostEnabledEnvVar, false)
-}
-
-func IsCustomCostEnabled() bool {
-	return env.GetBool(CustomCostEnabledEnvVar, false)
-}
-
-func GetCloudCostConfigPath() string {
-	return env.GetPathFromConfig(CloudIntegrationConfigFile)
-}
-
-func GetCloudCostMonthToDateInterval() int {
-	return env.GetInt(CloudCostMonthToDateIntervalVar, 6)
-}
-
-func GetCloudCostRefreshRateHours() int64 {
-	return env.GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
-}
-
-func GetCloudCostQueryWindowDays() int64 {
-	return env.GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
-}
-
-func GetCustomCostQueryWindowHours() int64 {
-	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 1)
-}
-
-func GetCustomCostQueryWindowDays() int64 {
-	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 7)
-}
-
-func GetCloudCostRunWindowDays() int64 {
-	return env.GetInt64(CloudCostRunWindowDaysEnvVar, 3)
-}
-
-func GetPluginConfigDir() string {
-	return env.Get(PluginConfigDirEnvVar, "/opt/opencost/plugin/config")
-}
-
-func GetPluginExecutableDir() string {
-	return env.Get(PluginExecutableDirEnvVar, "/opt/opencost/plugin/bin")
-}
-
-func GetAzureDownloadBillingDataPath() string {
-	return env.GetPathFromConfig(AzureBillingDataDownloadPath)
-}
-
-func GetCloudCostConfigControllerStateFile() string {
-	return env.GetPathFromConfig(CloudCostConfigControllerStateFile)
-}

+ 0 - 38
pkg/env/cloudcost_test.go

@@ -1,38 +0,0 @@
-package env
-
-import (
-	"testing"
-
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-func TestGetCloudCostConfigPath(t *testing.T) {
-	tests := []struct {
-		name string
-		want string
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is 'cloud-integration.json'",
-			want: "/var/configs/cloud-integration.json",
-		},
-		{
-			name: "Ensure the value is 'cloud-integration.json' when CLOUD_COST_CONFIG_PATH is set to ''",
-			want: "/test/cloud-integration.json",
-			pre: func() {
-				env.Set(env.ConfigPathEnvVar, "/test")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetCloudCostConfigPath(); got != tt.want {
-				t.Errorf("GetCloudCostConfigPath() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}

+ 0 - 81
pkg/env/costmodel_test.go

@@ -1,81 +0,0 @@
-package env
-
-import (
-	"os"
-	"testing"
-)
-
-func TestGetExportCSVMaxDays(t *testing.T) {
-	tests := []struct {
-		name string
-		want int
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is 90d",
-			want: 90,
-		},
-		{
-			name: "Ensure the value is 30 when EXPORT_CSV_MAX_DAYS is set to 30",
-			want: 30,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "30")
-			},
-		},
-		{
-			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to empty string",
-			want: 90,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "")
-			},
-		},
-		{
-			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to invalid value",
-			want: 90,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "foo")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetExportCSVMaxDays(); got != tt.want {
-				t.Errorf("GetExportCSVMaxDays() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
-func TestGetKubernetesEnabled(t *testing.T) {
-	tests := []struct {
-		name string
-		want bool
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is false",
-			want: false,
-		},
-		{
-			name: "Ensure the value is true when KUBERNETES_PORT has a value",
-			want: true,
-			pre: func() {
-				os.Setenv("KUBERNETES_PORT", "tcp://10.43.0.1:443")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := IsKubernetesEnabled(); got != tt.want {
-				t.Errorf("IsKubernetesEnabled() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}

+ 239 - 46
pkg/env/costmodel.go → pkg/env/costmodelenv.go

@@ -1,25 +1,17 @@
 package env
 
 import (
-	"github.com/opencost/opencost/core/pkg/env"
-)
+	"time"
 
-// FilePaths
-const (
-	ClusterInfoFile = "cluster-info.json"
-	ClusterCacheFile
-	GCPAuthSecretFile = "key.json"
-	MetricConfigFile  = "metrics.json"
+	"github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
 )
 
-// Env Variables
 const (
-	// Open configs
-
-	// We assume that Kubernetes is enabled if there is a KUBERNETES_PORT environment variable present
-	KubernetesEnabledEnvVar = "KUBERNETES_PORT"
+	APIPortEnvVar          = "API_PORT"
+	NetworkCostsPortEnvVar = "NETWORK_COSTS_PORT"
 
-	// Cloud Provider
 	AWSAccessKeyIDEnvVar     = "AWS_ACCESS_KEY_ID"
 	AWSAccessKeySecretEnvVar = "AWS_SECRET_ACCESS_KEY"
 	AWSClusterIDEnvVar       = "AWS_CLUSTER_ID"
@@ -28,23 +20,26 @@ const (
 	AlibabaAccessKeyIDEnvVar     = "ALIBABA_ACCESS_KEY_ID"
 	AlibabaAccessKeySecretEnvVar = "ALIBABA_SECRET_ACCESS_KEY"
 
-	AzureOfferIDEnvVar        = "AZURE_OFFER_ID"
-	AzureBillingAccountEnvVar = "AZURE_BILLING_ACCOUNT"
-
-	OCIPricingURL = "OCI_PRICING_URL"
-
-	ClusterProfileEnvVar    = "CLUSTER_PROFILE"
-	RemoteEnabledEnvVar     = "REMOTE_WRITE_ENABLED"
-	RemotePWEnvVar          = "REMOTE_WRITE_PASSWORD"
-	SQLAddressEnvVar        = "SQL_ADDRESS"
-	UseCSVProviderEnvVar    = "USE_CSV_PROVIDER"
-	UseCustomProviderEnvVar = "USE_CUSTOM_PROVIDER"
-	CSVRegionEnvVar         = "CSV_REGION"
-	CSVEndpointEnvVar       = "CSV_ENDPOINT"
-	CSVPathEnvVar           = "CSV_PATH"
-
+	AzureOfferIDEnvVar                   = "AZURE_OFFER_ID"
+	AzureBillingAccountEnvVar            = "AZURE_BILLING_ACCOUNT"
+	AzureDownloadBillingDataToDiskEnvVar = "AZURE_DOWNLOAD_BILLING_DATA_TO_DISK"
+
+	ReleaseNameEnvVar                = "RELEASE_NAME"
+	PodNameEnvVar                    = "POD_NAME"
+	ClusterIDEnvVar                  = "CLUSTER_ID"
+	ClusterProfileEnvVar             = "CLUSTER_PROFILE"
+	RemoteEnabledEnvVar              = "REMOTE_WRITE_ENABLED"
+	RemotePWEnvVar                   = "REMOTE_WRITE_PASSWORD"
+	SQLAddressEnvVar                 = "SQL_ADDRESS"
+	UseCSVProviderEnvVar             = "USE_CSV_PROVIDER"
+	UseCustomProviderEnvVar          = "USE_CUSTOM_PROVIDER"
+	CSVRegionEnvVar                  = "CSV_REGION"
+	CSVEndpointEnvVar                = "CSV_ENDPOINT"
+	CSVPathEnvVar                    = "CSV_PATH"
+	ConfigPathEnvVar                 = "CONFIG_PATH"
 	CloudProviderAPIKeyEnvVar        = "CLOUD_PROVIDER_API_KEY"
 	CollectorDataSourceEnabledEnvVar = "COLLECTOR_DATA_SOURCE_ENABLED"
+	PVMountPath                      = "PV_MOUNT_PATH"
 
 	EmitPodAnnotationsMetricEnvVar       = "EMIT_POD_ANNOTATIONS_METRIC"
 	EmitNamespaceAnnotationsMetricEnvVar = "EMIT_NAMESPACE_ANNOTATIONS_METRIC"
@@ -53,18 +48,29 @@ const (
 	EmitKsmV1MetricsEnvVar = "EMIT_KSM_V1_METRICS"
 	EmitKsmV1MetricsOnly   = "EMIT_KSM_V1_METRICS_ONLY"
 
+	PProfEnabledEnvVar = "PPROF_ENABLED"
+
 	LogCollectionEnabledEnvVar    = "LOG_COLLECTION_ENABLED"
 	ProductAnalyticsEnabledEnvVar = "PRODUCT_ANALYTICS_ENABLED"
 	ErrorReportingEnabledEnvVar   = "ERROR_REPORTING_ENABLED"
 	ValuesReportingEnabledEnvVar  = "VALUES_REPORTING_ENABLED"
 
+	KubeRbacProxyEnabled = "KUBE_RBAC_PROXY_ENABLED"
+
+	KubeConfigPathEnvVar = "KUBECONFIG_PATH"
+
+	UTCOffsetEnvVar = "UTC_OFFSET"
+
 	PricingConfigmapName = "PRICING_CONFIGMAP_NAME"
 	MetricsConfigmapName = "METRICS_CONFIGMAP_NAME"
 
-	ClusterInfoFileEnabledEnvVar = "CLUSTER_INFO_FILE_ENABLED"
+	ClusterInfoFileEnabledEnvVar  = "CLUSTER_INFO_FILE_ENABLED"
+	ClusterCacheFileEnabledEnvVar = "CLUSTER_CACHE_FILE_ENABLED"
 
 	IngestPodUIDEnvVar = "INGEST_POD_UID"
 
+	ETLReadOnlyMode = "ETL_READ_ONLY"
+
 	AllocationNodeLabelsEnabled = "ALLOCATION_NODE_LABELS_ENABLED"
 
 	AssetIncludeLocalDiskCostEnvVar = "ASSET_INCLUDE_LOCAL_DISK_COST"
@@ -76,20 +82,55 @@ const (
 	ExportCSVLabelsAll  = "EXPORT_CSV_LABELS_ALL"
 	ExportCSVMaxDays    = "EXPORT_CSV_MAX_DAYS"
 
+	ExportBucketConfigFileEnvVar = "EXPORT_BUCKET_CONFIG_FILE"
+
 	DataRetentionDailyResolutionDaysEnvVar   = "DATA_RETENTION_DAILY_RESOLUTION_DAYS"
 	DataRetentionHourlyResolutionHoursEnvVar = "DATA_RETENTION_HOURLY_RESOLUTION_HOURS"
 
+	// We assume that Kubernetes is enabled if there is a KUBERNETES_PORT environment variable present
+	KubernetesEnabledEnvVar         = "KUBERNETES_PORT"
+	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
+	CloudCostConfigPath             = "CLOUD_COST_CONFIG_PATH"
+	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
+	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
+	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
+	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
+
+	CustomCostEnabledEnvVar          = "CUSTOM_COST_ENABLED"
+	CustomCostQueryWindowDaysEnvVar  = "CUSTOM_COST_QUERY_WINDOW_DAYS"
+	CustomCostRefreshRateHoursEnvVar = "CUSTOM_COST_REFRESH_RATE_HOURS"
+
+	PluginConfigDirEnvVar     = "PLUGIN_CONFIG_DIR"
+	PluginExecutableDirEnvVar = "PLUGIN_EXECUTABLE_DIR"
+
+	OCIPricingURL = "OCI_PRICING_URL"
+
 	CarbonEstimatesEnabledEnvVar = "CARBON_ESTIMATES_ENABLED"
 
-	KubernetesResourceAccessEnvVar = "KUBERNETES_RESOURCE_ACCESS"
-	UseCacheV1                     = "USE_CACHE_V1"
+	UseCacheV1 = "USE_CACHE_V1"
+
+	InstallNamespaceEnvVar = "INSTALL_NAMESPACE"
+	ConfigBucketEnvVar     = "CONFIG_BUCKET"
+
+	// Node Stats Client Configuration
+	NodeStatsForceKubeProxyEnvVar = "NODESTATS_FORCE_KUBE_PROXY"
+	NodeStatsLocalProxyEnvVar     = "NODESTATS_LOCAL_PROXY"
+	NodeStatsInsecureEnvVar       = "NODESTATS_INSECURE"
+	NodeStatsCertFileEnvVar       = "NODESTATS_CERT_FILE"
+	NodeStatsKeyFileEnvVar        = "NODESTATS_KEY_FILE"
+
+	// Deprecated
+	KubecostNamespaceEnvVar    = "KUBECOST_NAMESPACE"
+	KubecostConfigBucketEnvVar = "KUBECOST_CONFIG_BUCKET"
 
 	// Cloud provider override
 	CloudProviderVar = "CLOUD_PROVIDER"
 )
 
-func GetGCPAuthSecretFilePath() string {
-	return env.GetPathFromConfig(GCPAuthSecretFile)
+const DefaultConfigMountPath = "/var/configs"
+
+func IsETLReadOnlyMode() bool {
+	return env.GetBool(ETLReadOnlyMode, false)
 }
 
 func GetExportCSVFile() string {
@@ -104,22 +145,36 @@ func GetExportCSVLabelsList() []string {
 	return env.GetList(ExportCSVLabelsList, ",")
 }
 
+func IsPProfEnabled() bool {
+	return env.GetBool(PProfEnabledEnvVar, false)
+}
+
 func GetExportCSVMaxDays() int {
 	return env.GetInt(ExportCSVMaxDays, 90)
 }
 
+// GetAPIPort returns the environment variable value for APIPortEnvVar which
+// is the port number the API is available on.
+func GetAPIPort() int {
+	return env.GetInt(APIPortEnvVar, 9003)
+}
+
+// GetConfigBucketFile returns a file location for a mounted bucket configuration which is used to store
+// a subset of configurations that require sharing via remote storage.
+func GetConfigBucketFile() string {
+	return env.Get(ConfigBucketEnvVar, env.Get(KubecostConfigBucketEnvVar, ""))
+}
+
 // IsClusterInfoFileEnabled returns true if the cluster info is read from a file or pulled from the local
 // cloud provider and kubernetes.
 func IsClusterInfoFileEnabled() bool {
 	return env.GetBool(ClusterInfoFileEnabledEnvVar, false)
 }
 
-func GetClusterInfoFilePath() string {
-	return env.GetPathFromConfig(ClusterInfoFile)
-}
-
-func GetClusterCacheFilePath() string {
-	return env.GetPathFromConfig(ClusterCacheFile)
+// IsClusterCacheFileEnabled returns true if the kubernetes cluster data is read from a file or pulled from the local
+// kubernetes API.
+func IsClusterCacheFileEnabled() bool {
+	return env.GetBool(ClusterCacheFileEnabledEnvVar, false)
 }
 
 func GetPricingConfigmapName() string {
@@ -219,12 +274,34 @@ func IsAzureDownloadBillingDataToDisk() bool {
 	return env.GetBool(AzureDownloadBillingDataToDiskEnvVar, true)
 }
 
+// GetInstallNamespace returns the environment variable value that is set for the kubernetes namespace
+// this service is installed in.
+func GetInstallNamespace() string {
+	return env.Get(InstallNamespaceEnvVar, env.Get(KubecostNamespaceEnvVar, "opencost"))
+}
+
+// GetPodName returns the name of the current running pod. If this environment variable is not set,
+// empty string is returned.
+func GetPodName() string {
+	return env.Get(PodNameEnvVar, "")
+}
+
 // GetClusterProfile returns the environment variable value for ClusterProfileEnvVar which
 // represents the cluster profile configured for
 func GetClusterProfile() string {
 	return env.Get(ClusterProfileEnvVar, "development")
 }
 
+// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
+// configurable identifier used for multi-cluster metric emission.
+func GetClusterID() string {
+	return env.Get(ClusterIDEnvVar, "")
+}
+
+func IsKubeRbacProxyEnabled() bool {
+	return env.GetBool(KubeRbacProxyEnabled, false)
+}
+
 // IsRemoteEnabled returns the environment variable value for RemoteEnabledEnvVar which represents whether
 // or not remote write is enabled for prometheus for use with SQL backed persistent storage.
 func IsRemoteEnabled() bool {
@@ -273,12 +350,28 @@ func GetCSVPath() string {
 	return env.Get(CSVPathEnvVar, "")
 }
 
+// GetCostAnalyzerVolumeMountPath is an alias of GetConfigPath, which returns the mount path for the
+// Cost Analyzer volume, which stores configs, persistent data, etc.
+func GetCostAnalyzerVolumeMountPath() string {
+	return GetConfigPathWithDefault(DefaultConfigMountPath)
+}
+
+// GetConfigPath returns the environment variable value for ConfigPathEnvVar which represents the cost
+// model configuration path
+func GetConfigPathWithDefault(defaultValue string) string {
+	return env.Get(ConfigPathEnvVar, defaultValue)
+}
+
 // GetCloudProviderAPI returns the environment variable value for CloudProviderAPIEnvVar which represents
 // the API key provided for the cloud provider.
 func GetCloudProviderAPIKey() string {
 	return env.Get(CloudProviderAPIKeyEnvVar, "")
 }
 
+func GetPVMountPath() string {
+	return env.Get(PVMountPath, "")
+}
+
 // IsCollectorDataSourceEnabeled returns the environment variable which enables a source.OpencostDatasource which does not use uses Prometheus
 func IsCollectorDataSourceEnabled() bool {
 	return env.GetBool(CollectorDataSourceEnabledEnvVar, false)
@@ -305,6 +398,26 @@ func IsValuesReportingEnabled() bool {
 	return env.GetBool(ValuesReportingEnabledEnvVar, true)
 }
 
+// GetKubeConfigPath returns the environment variable value for KubeConfigPathEnvVar
+func GetKubeConfigPath() string {
+	return env.Get(KubeConfigPathEnvVar, "")
+}
+
+// GetUTCOffset returns the environment variable value for UTCOffset
+func GetUTCOffset() string {
+	return env.Get(UTCOffsetEnvVar, "")
+}
+
+// GetParsedUTCOffset returns the duration of the configured UTC offset
+func GetParsedUTCOffset() time.Duration {
+	offset, err := timeutil.ParseUTCOffset(GetUTCOffset())
+	if err != nil {
+		log.Warnf("Failed to parse UTC offset: %s", err)
+		return time.Duration(0)
+	}
+	return offset
+}
+
 // IsIngestingPodUID returns the env variable from ingestPodUID, which alters the
 // contents of podKeys in Allocation
 func IsIngestingPodUID() bool {
@@ -341,16 +454,65 @@ func IsKubernetesEnabled() bool {
 	return env.Get(KubernetesEnabledEnvVar, "") != ""
 }
 
+func IsCloudCostEnabled() bool {
+	return env.GetBool(CloudCostEnabledEnvVar, false)
+}
+
+func IsCustomCostEnabled() bool {
+	return env.GetBool(CustomCostEnabledEnvVar, false)
+}
+
+func GetCloudCostConfigPath() string {
+	return env.Get(CloudCostConfigPath, "cloud-integration.json")
+}
+
+func GetCloudCostMonthToDateInterval() int {
+	return env.GetInt(CloudCostMonthToDateIntervalVar, 6)
+}
+
+func GetCloudCostRefreshRateHours() int64 {
+	return env.GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
+}
+
+func GetCloudCostQueryWindowDays() int64 {
+	return env.GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCustomCostQueryWindowHours() int64 {
+	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 1)
+}
+
+func GetCustomCostQueryWindowDays() int64 {
+	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCloudCostRunWindowDays() int64 {
+	return env.GetInt64(CloudCostRunWindowDaysEnvVar, 3)
+}
+
 func GetOCIPricingURL() string {
 	return env.Get(OCIPricingURL, "https://apexapps.oracle.com/pls/apex/cetools/api/v1/products")
 }
 
+func GetPluginConfigDir() string {
+	return env.Get(PluginConfigDirEnvVar, "/opt/opencost/plugin/config")
+}
+
+func GetPluginExecutableDir() string {
+	return env.Get(PluginExecutableDirEnvVar, "/opt/opencost/plugin/bin")
+}
+
+func GetCustomCostRefreshRateHours() string {
+	return env.Get(CustomCostRefreshRateHoursEnvVar, "12h")
+}
+
 func IsCarbonEstimatesEnabled() bool {
 	return env.GetBool(CarbonEstimatesEnabledEnvVar, false)
 }
 
-// HasKubernetesResourceAccess can be set to false if Opencost is run without access to the kubernetes resources
-func HasKubernetesResourceAccess() bool { return env.GetBool(KubernetesResourceAccessEnvVar, true) }
+func GetExportBucketConfigFile() string {
+	return env.Get(ExportBucketConfigFileEnvVar, "")
+}
 
 // GetUseCacheV1 is a temporary flag to allow users to opt-in to using the old cache
 // Mainly for comparison purposes
@@ -358,11 +520,42 @@ func GetUseCacheV1() bool {
 	return env.GetBool(UseCacheV1, false)
 }
 
+func GetReleaseName() string {
+	return env.Get(ReleaseNameEnvVar, "kubecost")
+}
+
+func GetNetworkCostsPort() int {
+	return env.GetInt(NetworkCostsPortEnvVar, 3001)
+}
+
+// IsNodeStatsForceKubeProxy returns true if the node stats client should force the kube proxy direct end
+// point formatting
+func IsNodeStatsForceKubeProxy() bool {
+	return env.GetBool(NodeStatsForceKubeProxyEnvVar, false)
+}
+
+// GetNodeStatsLocalProxy returns the fully qualified local proxy endpoint for the node stats client IFF the proxyAPI
+// is selected.
+func GetNodeStatsLocalProxy() string {
+	return env.Get(NodeStatsLocalProxyEnvVar, "")
+}
+
+// IsNodeStatsInsecure returns true if the node stats client should skip TLS verification
+func IsNodeStatsInsecure() bool {
+	return env.GetBool(NodeStatsInsecureEnvVar, false)
+}
+
+// GetNodeStatsCertFile returns the path of the cert file
+func GetNodeStatsCertFile() string {
+	return env.Get(NodeStatsCertFileEnvVar, "")
+}
+
+// GetNodeStatsKeyFile returns the path of the key file
+func GetNodeStatsKeyFile() string {
+	return env.Get(NodeStatsKeyFileEnvVar, "")
+}
+
 // GetCloudProvider returns the explicitly set cloud provider from environment variable
 func GetCloudProvider() string {
 	return env.Get(CloudProviderVar, "")
 }
-
-func GetMetricConfigFile() string {
-	return env.GetPathFromConfig(MetricConfigFile)
-}

+ 236 - 0
pkg/env/costmodelenv_test.go

@@ -0,0 +1,236 @@
+package env
+
+import (
+	"os"
+	"testing"
+)
+
+func TestGetAPIPort(t *testing.T) {
+	tests := []struct {
+		name string
+		want int
+		pre  func()
+	}{
+		{
+			name: "Ensure the default API port '9003'",
+			want: 9003,
+		},
+		{
+			name: "Ensure the default API port '9003' when API_PORT is set to ''",
+			want: 9003,
+			pre: func() {
+				os.Setenv("API_PORT", "")
+			},
+		},
+		{
+			name: "Ensure the API port '9004' when API_PORT is set to '9004'",
+			want: 9004,
+			pre: func() {
+				os.Setenv("API_PORT", "9004")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetAPIPort(); got != tt.want {
+				t.Errorf("GetAPIPort() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}
+
+func TestGetExportCSVMaxDays(t *testing.T) {
+	tests := []struct {
+		name string
+		want int
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is 90d",
+			want: 90,
+		},
+		{
+			name: "Ensure the value is 30 when EXPORT_CSV_MAX_DAYS is set to 30",
+			want: 30,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "30")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to empty string",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to invalid value",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "foo")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetExportCSVMaxDays(); got != tt.want {
+				t.Errorf("GetExportCSVMaxDays() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestGetKubernetesEnabled(t *testing.T) {
+	tests := []struct {
+		name string
+		want bool
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is false",
+			want: false,
+		},
+		{
+			name: "Ensure the value is true when KUBERNETES_PORT has a value",
+			want: true,
+			pre: func() {
+				os.Setenv("KUBERNETES_PORT", "tcp://10.43.0.1:443")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsKubernetesEnabled(); got != tt.want {
+				t.Errorf("IsKubernetesEnabled() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}
+
+func TestGetCloudCostConfigPath(t *testing.T) {
+	tests := []struct {
+		name string
+		want string
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is 'cloud-integration.json'",
+			want: "cloud-integration.json",
+		},
+		{
+			name: "Ensure the value is 'cloud-integration.json' when CLOUD_COST_CONFIG_PATH is set to ''",
+			want: "cloud-integration.json",
+			pre: func() {
+				os.Setenv("CLOUD_COST_CONFIG_PATH", "")
+			},
+		},
+		{
+			name: "Ensure the value is 'flying-pig.json' when CLOUD_COST_CONFIG_PATH is set to 'flying-pig.json'",
+			want: "flying-pig.json",
+			pre: func() {
+				os.Setenv("CLOUD_COST_CONFIG_PATH", "flying-pig.json")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetCloudCostConfigPath(); got != tt.want {
+				t.Errorf("GetCloudCostConfigPath() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}
+
+func TestEnvVarsWithBackup(t *testing.T) {
+	t.Run("test install namespace env var", func(t *testing.T) {
+		t.Setenv(InstallNamespaceEnvVar, "test-namespace")
+		t.Setenv(KubecostNamespaceEnvVar, "kubecost-test-namespace")
+
+		ns := GetInstallNamespace()
+		if ns != "test-namespace" {
+			t.Errorf("Expected install namespace to be 'test-namespace', got '%s'", ns)
+		}
+	})
+	t.Run("test kubecost namespace env var", func(t *testing.T) {
+		t.Setenv(KubecostNamespaceEnvVar, "kc-test-namespace")
+
+		ns := GetInstallNamespace()
+
+		if ns != "kc-test-namespace" {
+			t.Errorf("Expected install namespace to be 'kc-test-namespace', got '%s'", ns)
+		}
+	})
+
+	t.Run("test default install namespace", func(t *testing.T) {
+		t.Setenv(InstallNamespaceEnvVar, "test-namespace")
+
+		ns := GetInstallNamespace()
+
+		if ns != "test-namespace" {
+			t.Errorf("Expected default install namespace to be 'test-namespace', got '%s'", ns)
+		}
+	})
+
+	t.Run("test default install namespace", func(t *testing.T) {
+		ns := GetInstallNamespace()
+
+		if ns != "opencost" {
+			t.Errorf("Expected default install namespace to be 'opencost', got '%s'", ns)
+		}
+	})
+
+	t.Run("test config bucket file with both", func(t *testing.T) {
+		t.Setenv(ConfigBucketEnvVar, "test-bucket")
+		t.Setenv(KubecostConfigBucketEnvVar, "kc-test-bucket")
+
+		configBucketFile := GetConfigBucketFile()
+
+		if configBucketFile != "test-bucket" {
+			t.Errorf("Expected config bucket file to be 'test-bucket', got '%s'", configBucketFile)
+		}
+	})
+
+	t.Run("test config bucket file with kc", func(t *testing.T) {
+		t.Setenv(KubecostConfigBucketEnvVar, "kc-test-bucket")
+
+		configBucketFile := GetConfigBucketFile()
+
+		if configBucketFile != "kc-test-bucket" {
+			t.Errorf("Expected config bucket file to be 'kc-test-bucket', got '%s'", configBucketFile)
+		}
+	})
+
+	t.Run("test config bucket file with single", func(t *testing.T) {
+		t.Setenv(ConfigBucketEnvVar, "test-bucket")
+
+		configBucketFile := GetConfigBucketFile()
+
+		if configBucketFile != "test-bucket" {
+			t.Errorf("Expected config bucket file to be 'test-bucket', got '%s'", configBucketFile)
+		}
+	})
+
+	t.Run("test config bucket file with both", func(t *testing.T) {
+		configBucketFile := GetConfigBucketFile()
+
+		if configBucketFile != "" {
+			t.Errorf("Expected config bucket file to be '', got '%s'", configBucketFile)
+		}
+	})
+
+}

+ 0 - 0
pkg/env/kubemetrics.go → pkg/env/kubemetricsenv.go


+ 0 - 41
pkg/env/nodestats.go

@@ -1,41 +0,0 @@
-package env
-
-import (
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-const (
-	// Node Stats Client Configuration
-	NodeStatsForceKubeProxyEnvVar = "NODESTATS_FORCE_KUBE_PROXY"
-	NodeStatsLocalProxyEnvVar     = "NODESTATS_LOCAL_PROXY"
-	NodeStatsInsecureEnvVar       = "NODESTATS_INSECURE"
-	NodeStatsCertFileEnvVar       = "NODESTATS_CERT_FILE"
-	NodeStatsKeyFileEnvVar        = "NODESTATS_KEY_FILE"
-)
-
-// IsNodeStatsForceKubeProxy returns true if the node stats client should force the kube proxy direct end
-// point formatting
-func IsNodeStatsForceKubeProxy() bool {
-	return env.GetBool(NodeStatsForceKubeProxyEnvVar, false)
-}
-
-// GetNodeStatsLocalProxy returns the fully qualified local proxy endpoint for the node stats client IFF the proxyAPI
-// is selected.
-func GetNodeStatsLocalProxy() string {
-	return env.Get(NodeStatsLocalProxyEnvVar, "")
-}
-
-// IsNodeStatsInsecure returns true if the node stats client should skip TLS verification
-func IsNodeStatsInsecure() bool {
-	return env.GetBool(NodeStatsInsecureEnvVar, false)
-}
-
-// GetNodeStatsCertFile returns the path of the cert file
-func GetNodeStatsCertFile() string {
-	return env.Get(NodeStatsCertFileEnvVar, "")
-}
-
-// GetNodeStatsKeyFile returns the path of the key file
-func GetNodeStatsKeyFile() string {
-	return env.Get(NodeStatsKeyFileEnvVar, "")
-}

+ 0 - 1
pkg/env/nodestats_test.go

@@ -1 +0,0 @@
-package env

+ 0 - 44
pkg/env/opencost.go

@@ -1,44 +0,0 @@
-package env
-
-import (
-	"time"
-
-	"github.com/opencost/opencost/core/pkg/env"
-	"github.com/opencost/opencost/core/pkg/log"
-	"github.com/opencost/opencost/core/pkg/util/timeutil"
-)
-
-// Environment variables specific to the running of opencost
-const (
-	DefaultAPIPort           = 9003
-	defaultOpencostNamespace = "opencost"
-)
-
-const (
-	UTCOffsetEnvVar = "UTC_OFFSET"
-)
-
-func GetOpencostAPIPort() int {
-	return env.GetAPIPortWithDefault(DefaultAPIPort)
-}
-
-// GetOpencostNamespace returns the environment variable value that is set for the kubernetes namespace
-// this service is installed in.
-func GetOpencostNamespace() string {
-	return env.GetInstallNamespace(defaultOpencostNamespace)
-}
-
-// GetUTCOffset returns the environment variable value for UTCOffset
-func GetUTCOffset() string {
-	return env.Get(UTCOffsetEnvVar, "")
-}
-
-// GetParsedUTCOffset returns the duration of the configured UTC offset
-func GetParsedUTCOffset() time.Duration {
-	offset, err := timeutil.ParseUTCOffset(GetUTCOffset())
-	if err != nil {
-		log.Warnf("Failed to parse UTC offset: %s", err)
-		return time.Duration(0)
-	}
-	return offset
-}

+ 0 - 47
pkg/env/opencost_test.go

@@ -1,47 +0,0 @@
-package env
-
-import (
-	"fmt"
-	"os"
-	"testing"
-
-	"github.com/opencost/opencost/core/pkg/env"
-)
-
-func TestGetAPIPort(t *testing.T) {
-	tests := []struct {
-		name string
-		want int
-		pre  func()
-	}{
-		{
-			name: "Ensure the default API port '9003'",
-			want: 9003,
-		},
-		{
-			name: fmt.Sprintf("Ensure the default API port '9003' when %s is set to ''", env.APIPortEnvVar),
-			want: 9003,
-			pre: func() {
-				os.Setenv(env.APIPortEnvVar, "")
-			},
-		},
-		{
-			name: fmt.Sprintf("Ensure the API port '9004' when %s is set to '9004'", env.APIPortEnvVar),
-			want: 9004,
-			pre: func() {
-				os.Setenv(env.APIPortEnvVar, "9004")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetOpencostAPIPort(); got != tt.want {
-				t.Errorf("GetAPIPort() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}

+ 2 - 1
pkg/metrics/metricsconfig.go

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"path"
 	"sync"
 
 	"github.com/opencost/opencost/pkg/env"
@@ -12,7 +13,7 @@ import (
 
 var (
 	metricsConfigLock = new(sync.Mutex)
-	metricsFilePath   = env.GetMetricConfigFile()
+	metricsFilePath   = path.Join(env.GetCostAnalyzerVolumeMountPath(), "metrics.json")
 )
 
 type MetricsConfig struct {