Jelajahi Sumber

Revert "Revert "Code Clean Up"" (#3270)

Signed-off-by: Sean Holcomb <seanholcomb@gmail.com>
Sean Holcomb 9 bulan lalu
induk
melakukan
1e1643d24b
69 mengubah file dengan 875 tambahan dan 1653 penghapusan
  1. 1 1
      core/pkg/diagnostics/exporter/exporter.go
  2. 53 0
      core/pkg/env/core.go
  3. 2 2
      core/pkg/exporter/exporter_test.go
  4. 7 2
      core/pkg/exporter/pathing/bingenpath.go
  5. 3 3
      core/pkg/exporter/pathing/eventpath.go
  6. 24 30
      core/pkg/exporter/pathing/path_test.go
  7. 1 1
      core/pkg/heartbeat/exporter/exporter.go
  8. 2 2
      core/pkg/heartbeat/exporter/heartbeat_test.go
  9. 1 1
      core/pkg/opencost/allocation.go
  10. 24 24
      core/pkg/opencost/allocation_test.go
  11. 3 288
      core/pkg/opencost/asset.go
  12. 0 203
      core/pkg/opencost/asset_test.go
  13. 9 9
      core/pkg/opencost/exporter/exporter_test.go
  14. 1 1
      core/pkg/opencost/exporter/exporters.go
  15. 4 4
      core/pkg/opencost/opencost_codecs_test.go
  16. 0 66
      core/pkg/opencost/status.go
  17. 2 43
      core/pkg/opencost/totals.go
  18. 15 5
      core/pkg/pipelines/name.go
  19. 31 0
      core/pkg/storage/storefactory.go
  20. 32 0
      core/pkg/util/apiutil/apiutil.go
  21. 51 0
      core/pkg/util/apiutil/loglevel.go
  22. 1 1
      core/pkg/util/timeutil/timeutil.go
  23. 2 1
      modules/collector-source/pkg/collector/config.go
  24. 0 5
      modules/collector-source/pkg/env/collectorenv.go
  25. 10 41
      modules/prometheus-source/pkg/env/promenv.go
  26. 4 3
      modules/prometheus-source/pkg/prom/config.go
  27. 1 1
      modules/prometheus-source/pkg/prom/metricsquerier.go
  28. 16 16
      pkg/cloud/alibaba/provider.go
  29. 20 20
      pkg/cloud/aws/provider.go
  30. 3 2
      pkg/cloud/azure/provider.go
  31. 1 1
      pkg/cloud/azure/storagebillingparser.go
  32. 1 4
      pkg/cloud/config/controller.go
  33. 2 0
      pkg/cloud/config/controller_test.go
  34. 2 7
      pkg/cloud/config/watcher.go
  35. 5 8
      pkg/cloud/gcp/provider.go
  36. 3 2
      pkg/cloud/oracle/provider.go
  37. 2 1
      pkg/cloud/otc/provider.go
  38. 22 47
      pkg/cloud/provider/cloud_test.go
  39. 2 2
      pkg/cloud/provider/customprovider.go
  40. 3 88
      pkg/cloud/provider/provider.go
  41. 2 8
      pkg/cloud/provider/providerconfig.go
  42. 3 2
      pkg/cloud/scaleway/provider.go
  43. 1 1
      pkg/cloudcost/pipelineservice.go
  44. 2 2
      pkg/clustercache/clustercache.go
  45. 2 4
      pkg/clustercache/clustercache2.go
  46. 5 11
      pkg/cmd/agent/agent.go
  47. 3 3
      pkg/cmd/commands.go
  48. 31 0
      pkg/cmd/costmodel/config.go
  49. 12 77
      pkg/cmd/costmodel/costmodel.go
  50. 3 53
      pkg/config/configmanager.go
  51. 9 9
      pkg/costmodel/allocation_helpers.go
  52. 15 14
      pkg/costmodel/cluster.go
  53. 17 17
      pkg/costmodel/cluster_helpers.go
  54. 6 6
      pkg/costmodel/costmodel.go
  55. 8 8
      pkg/costmodel/key.go
  56. 2 2
      pkg/costmodel/networkinsight.go
  57. 2 1
      pkg/costmodel/nodeclientconfig.go
  58. 8 23
      pkg/costmodel/router.go
  59. 79 0
      pkg/env/cloudcost.go
  60. 38 0
      pkg/env/cloudcost_test.go
  61. 46 239
      pkg/env/costmodel.go
  62. 81 0
      pkg/env/costmodel_test.go
  63. 0 236
      pkg/env/costmodelenv_test.go
  64. 0 0
      pkg/env/kubemetrics.go
  65. 41 0
      pkg/env/nodestats.go
  66. 1 0
      pkg/env/nodestats_test.go
  67. 44 0
      pkg/env/opencost.go
  68. 47 0
      pkg/env/opencost_test.go
  69. 1 2
      pkg/metrics/metricsconfig.go

+ 1 - 1
core/pkg/diagnostics/exporter/exporter.go

@@ -10,7 +10,7 @@ import (
 
 // NewDiagnosticExporter creates a new `StorageExporter[DiagnosticsRunReport]` instance for exporting diagnostic run events.
 func NewDiagnosticExporter(clusterId string, applicationName string, storage storage.Storage) exporter.EventExporter[diagnostics.DiagnosticsRunReport] {
-	pathing, err := pathing.NewEventStoragePathFormatter("federated", clusterId, diagnostics.DiagnosticsEventName, applicationName)
+	pathing, err := pathing.NewEventStoragePathFormatter(applicationName, clusterId, diagnostics.DiagnosticsEventName)
 	if err != nil {
 		log.Errorf("failed to create pathing formatter: %v", err)
 		return nil

+ 53 - 0
core/pkg/env/core.go

@@ -0,0 +1,53 @@
+package env
+
+import (
+	"path"
+)
+
+const DefaultConfigPath = "/var/configs"
+const DefaultStorageFile = "federated-store.yaml"
+
+const (
+	APIPortEnvVar    = "API_PORT"
+	ClusterIDEnvVar  = "CLUSTER_ID"
+	ConfigPathEnvVar = "CONFIG_PATH"
+
+	PProfEnabledEnvVar = "PPROF_ENABLED"
+
+	InstallNamespaceEnvVar = "INSTALL_NAMESPACE"
+)
+
+// GetAPIPortWithDefault returns the environment variable value for APIPortEnvVar which
+// is the port number the API is available on, falling back to def when unset.
+func GetAPIPortWithDefault(def int) int {
+	return GetInt(APIPortEnvVar, def)
+}
+
+// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
+// configurable identifier used for multi-cluster metric emission.
+func GetClusterID() string {
+	return Get(ClusterIDEnvVar, "")
+}
+
+// GetConfigPath returns the environment variable value for ConfigPathEnvVar which represents the cost
+// model configuration path
+func GetConfigPath() string {
+	return Get(ConfigPathEnvVar, DefaultConfigPath)
+}
+
+func GetPathFromConfig(subPaths ...string) string {
+	subPath := path.Join(subPaths...)
+	return path.Join(GetConfigPath(), subPath)
+}
+
+func GetDefaultStorageConfigFilePath() string {
+	return path.Join(GetConfigPath(), DefaultStorageFile)
+}
+
+func IsPProfEnabled() bool {
+	return GetBool(PProfEnabledEnvVar, false)
+}
+
+func GetInstallNamespace(def string) string {
+	return Get(InstallNamespaceEnvVar, def)
+}

+ 2 - 2
core/pkg/exporter/exporter_test.go

@@ -24,7 +24,7 @@ type TestData struct {
 func TestStorageExporters(t *testing.T) {
 	t.Run("test event storage exporter", func(t *testing.T) {
 		store := storage.NewMemoryStorage()
-		p, err := pathing.NewEventStoragePathFormatter("federated", TestClusterId, TestEventName)
+		p, err := pathing.NewEventStoragePathFormatter("root", TestClusterId, TestEventName)
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -65,7 +65,7 @@ func TestStorageExporters(t *testing.T) {
 	t.Run("test compute storage exporter", func(t *testing.T) {
 		res := 24 * time.Hour
 		store := storage.NewMemoryStorage()
-		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, &res)
+		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, &res)
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}

+ 7 - 2
core/pkg/exporter/pathing/bingenpath.go

@@ -11,13 +11,14 @@ import (
 )
 
 const (
+	defaultRootDir string = "federated"
 	baseStorageDir string = "etl/bingen"
 )
 
 // BingenStoragePathFormatter is an implementation of the StoragePathFormatter interface for
 // a cluster separated storage path of the format:
 //
-//	<root>/federated/<cluster>/etl/bingen/<pipeline>/<resolution>/<epoch-start>-<epoch-end>
+//	<root>/<cluster>/etl/bingen/<pipeline>/<resolution>/<epoch-start>-<epoch-end>
 type BingenStoragePathFormatter struct {
 	rootDir    string
 	clusterId  string
@@ -25,6 +26,10 @@ type BingenStoragePathFormatter struct {
 	resolution string
 }
 
+func NewDefaultStoragePathFormatter(clusterId, pipeline string, resolution *time.Duration) (StoragePathFormatter[opencost.Window], error) {
+	return NewBingenStoragePathFormatter(defaultRootDir, clusterId, pipeline, resolution)
+}
+
 // NewBingenStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
 // with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
 // structure, provide a `nil` resolution.
@@ -68,7 +73,7 @@ func (bsf *BingenStoragePathFormatter) Dir() string {
 
 // ToFullPath returns the full path to a file name within the storage directory using the format:
 //
-//	<root>/federated/<cluster>/etl/bingen/<pipeline>/<resolution>/<prefix>.<start-epoch>-<end-epoch>
+//	<root>/<cluster>/etl/bingen/<pipeline>/<resolution>/<prefix>.<start-epoch>-<end-epoch>
 func (bsf *BingenStoragePathFormatter) ToFullPath(prefix string, window opencost.Window, fileExt string) string {
 	fileName := toBingenFileName(prefix, window, fileExt)
 

+ 3 - 3
core/pkg/exporter/pathing/eventpath.go

@@ -14,7 +14,7 @@ const EventStorageTimeFormat = "20060102150405"
 // EventStoragePathFormatter is an implementation of the StoragePathFormatter interface for
 // a cluster separated storage path of the format:
 //
-//	<root>/federated/<cluster>/<event>/<sub-paths...>/YYYYMMDDHHmmss
+//	<root>/<cluster>/<event>/<sub-paths...>/YYYYMMDDHHmmss
 type EventStoragePathFormatter struct {
 	rootDir   string
 	clusterId string
@@ -22,7 +22,7 @@ type EventStoragePathFormatter struct {
 	subPaths  []string
 }
 
-// NewBingenStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
+// NewEventStoragePathFormatter creates a StoragePathFormatter for a cluster separated storage path
 // with the given root directory, cluster id, pipeline, and resolution. To omit the resolution directory
 // structure, provide a `nil` resolution.
 func NewEventStoragePathFormatter(rootDir, clusterId, event string, subPaths ...string) (StoragePathFormatter[time.Time], error) {
@@ -65,7 +65,7 @@ func (espf *EventStoragePathFormatter) Dir() string {
 
 // ToFullPath returns the full path to a file name within the storage directory using the format:
 //
-//	<root>/federated/<cluster>/<event>/YYYYMMDDHHmm.json
+//	<root>/<cluster>/<event>/YYYYMMDDHHmm.json
 func (espf *EventStoragePathFormatter) ToFullPath(prefix string, timestamp time.Time, fileExt string) string {
 	fileName := toEventFileName(prefix, timestamp, fileExt)
 

+ 24 - 30
core/pkg/exporter/pathing/path_test.go

@@ -1,6 +1,7 @@
 package pathing
 
 import (
+	"fmt"
 	"testing"
 	"time"
 
@@ -10,7 +11,6 @@ import (
 func TestBingenPathFormatter(t *testing.T) {
 	type testCase struct {
 		name       string
-		rootPath   string
 		clusterID  string
 		pipeline   string
 		resolution *time.Duration
@@ -21,63 +21,57 @@ func TestBingenPathFormatter(t *testing.T) {
 	testCases := []testCase{
 		{
 			name:       "no resolution",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: nil,
 			prefix:     "",
-			expected:   "federated/cluster-a/etl/bingen/allocation/1704110400-1704114000",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1704110400-1704114000", defaultRootDir, baseStorageDir),
 		},
 		{
 			name:       "with resolution",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{1 * time.Hour}[0],
 			prefix:     "",
-			expected:   "federated/cluster-a/etl/bingen/allocation/1h/1704110400-1704114000",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1h/1704110400-1704114000", defaultRootDir, baseStorageDir),
 		},
 		{
 			name:       "no resolution with prefix",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: nil,
 			prefix:     "test",
-			expected:   "federated/cluster-a/etl/bingen/allocation/test.1704110400-1704114000",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/test.1704110400-1704114000", defaultRootDir, baseStorageDir),
 		},
 		{
 			name:       "with resolution with prefix",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{1 * time.Hour}[0],
 			prefix:     "test",
-			expected:   "federated/cluster-a/etl/bingen/allocation/1h/test.1704110400-1704114000",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1h/test.1704110400-1704114000", defaultRootDir, baseStorageDir),
 		},
 		{
 			name:       "daily resolution",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{24 * time.Hour}[0],
 			prefix:     "",
-			expected:   "federated/cluster-a/etl/bingen/allocation/1d/1704110400-1704196800",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1d/1704110400-1704196800", defaultRootDir, baseStorageDir),
 		},
 		{
 			name:       "weekly resolution",
-			rootPath:   "federated",
 			clusterID:  "cluster-a",
 			pipeline:   "allocation",
 			resolution: &[]time.Duration{7 * 24 * time.Hour}[0],
 			prefix:     "",
-			expected:   "federated/cluster-a/etl/bingen/allocation/1w/1704110400-1704715200",
+			expected:   fmt.Sprintf("%s/cluster-a/%s/allocation/1w/1704110400-1704715200", defaultRootDir, baseStorageDir),
 		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			pathing, err := NewBingenStoragePathFormatter(tc.rootPath, tc.clusterID, tc.pipeline, tc.resolution)
+			pathing, err := NewDefaultStoragePathFormatter(tc.clusterID, tc.pipeline, tc.resolution)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -111,83 +105,83 @@ func TestEventPathFormatter(t *testing.T) {
 	testCases := []testCase{
 		{
 			name:      "with root path with file extension",
-			rootPath:  "/tmp/federated",
+			rootPath:  "/tmp/root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "/tmp/federated/cluster-a/heartbeat/20240101124000.json",
+			expected:  "/tmp/root/cluster-a/heartbeat/20240101124000.json",
 		},
 		{
 			name:      "with file extension",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "federated/cluster-a/heartbeat/20240101124000.json",
+			expected:  "root/cluster-a/heartbeat/20240101124000.json",
 		},
 		{
 			name:      "with root path with file extension with sub-paths",
-			rootPath:  "/tmp/federated",
+			rootPath:  "/tmp/root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo", "bar"},
 			prefix:    "",
 			fileExt:   "json",
-			expected:  "/tmp/federated/cluster-a/heartbeat/foo/bar/20240101124000.json",
+			expected:  "/tmp/root/cluster-a/heartbeat/foo/bar/20240101124000.json",
 		},
 		{
 			name:      "without file extension",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "",
 			fileExt:   "",
-			expected:  "federated/cluster-a/heartbeat/20240101124000",
+			expected:  "root/cluster-a/heartbeat/20240101124000",
 		},
 		{
 			name:      "with prefix with file extension",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "test",
 			fileExt:   "json",
-			expected:  "federated/cluster-a/heartbeat/test.20240101124000.json",
+			expected:  "root/cluster-a/heartbeat/test.20240101124000.json",
 		},
 		{
 			name:      "with prefix with file extension with sub-paths",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo", "bar", "baz"},
 			prefix:    "test",
 			fileExt:   "json",
-			expected:  "federated/cluster-a/heartbeat/foo/bar/baz/test.20240101124000.json",
+			expected:  "root/cluster-a/heartbeat/foo/bar/baz/test.20240101124000.json",
 		},
 		{
 			name:      "with prefix without file extension",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{},
 			prefix:    "test",
 			fileExt:   "",
-			expected:  "federated/cluster-a/heartbeat/test.20240101124000",
+			expected:  "root/cluster-a/heartbeat/test.20240101124000",
 		},
 		{
 			name:      "with prefix without file extension with sub-paths",
-			rootPath:  "federated",
+			rootPath:  "root",
 			clusterID: "cluster-a",
 			event:     "heartbeat",
 			subPaths:  []string{"foo"},
 			prefix:    "test",
 			fileExt:   "",
-			expected:  "federated/cluster-a/heartbeat/foo/test.20240101124000",
+			expected:  "root/cluster-a/heartbeat/foo/test.20240101124000",
 		},
 	}
 

+ 1 - 1
core/pkg/heartbeat/exporter/exporter.go

@@ -10,7 +10,7 @@ import (
 
 // NewHeartbeatExporter creates a new `StorageExporter[Heartbeat]` instance for exporting Heartbeat events.
 func NewHeartbeatExporter(clusterId string, applicationName string, storage storage.Storage) exporter.EventExporter[heartbeat.Heartbeat] {
-	pathing, err := pathing.NewEventStoragePathFormatter("federated", clusterId, heartbeat.HeartbeatEventName, applicationName)
+	pathing, err := pathing.NewEventStoragePathFormatter(applicationName, clusterId, heartbeat.HeartbeatEventName)
 	if err != nil {
 		log.Errorf("failed to create pathing formatter: %v", err)
 		return nil

+ 2 - 2
core/pkg/heartbeat/exporter/heartbeat_test.go

@@ -47,7 +47,7 @@ func TestHeartbeatExporter(t *testing.T) {
 	time.Sleep(10 * time.Second)
 	controller.Stop()
 
-	files, _ := store.List(path.Join("federated", MockClusterId, heartbeat.HeartbeatEventName, MockApplicationName))
+	files, _ := store.List(path.Join(MockApplicationName, MockClusterId, heartbeat.HeartbeatEventName))
 	if len(files) == 0 {
 		t.Fatal("No files found in storage")
 	}
@@ -61,7 +61,7 @@ func TestHeartbeatExporter(t *testing.T) {
 	lastCheck := time.Time{}
 
 	for _, f := range fileNames {
-		fpath := filepath.Join("federated", MockClusterId, "heartbeat", MockApplicationName, f)
+		fpath := filepath.Join(MockApplicationName, MockClusterId, heartbeat.HeartbeatEventName, f)
 		data, err := store.Read(fpath)
 		if err != nil {
 			t.Fatalf("Failed to read file %s: %v", fpath, err)

+ 1 - 1
core/pkg/opencost/allocation.go

@@ -2249,7 +2249,7 @@ func computeShareCoeffs(aggregateBy []string, options *AllocationAggregationOpti
 		if coeffs[a] > 0 && total > 0 {
 			coeffs[a] /= total
 		} else {
-			log.Warnf("ETL: invalid values for shared coefficients: %v, %v", coeffs[a], total)
+			log.Warnf("Allocation: invalid values for shared coefficients: %v, %v", coeffs[a], total)
 			coeffs[a] = 0.0
 		}
 	}

+ 24 - 24
core/pkg/opencost/allocation_test.go

@@ -527,7 +527,7 @@ func TestAllocationSet_generateKey(t *testing.T) {
 }
 
 func TestNewAllocationSet(t *testing.T) {
-	// TODO niko/etl
+	// TODO niko
 }
 
 func assertAllocationSetTotals(t *testing.T, as *AllocationSet, msg string, err error, length int, totalCost float64) {
@@ -662,7 +662,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 
 	// 3  Share idle
 	// 3a AggregationProperties=(Namespace) ShareIdle=ShareWeighted
-	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko/etl)
+	// 3b AggregationProperties=(Namespace) ShareIdle=ShareEven (TODO niko)
 
 	// 4  Share resources
 	// 4a Share namespace ShareEven
@@ -2032,19 +2032,19 @@ func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
 	}
 }
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Clone(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Delete(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_End(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_IdleAllocations(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Insert(t *testing.T) {}
 
 // Asserts that all Allocations within an AllocationSet have a Window that
@@ -2297,34 +2297,34 @@ func TestParcInsert(t *testing.T) {
 	}
 }
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_IsEmpty(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Length(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Map(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_MarshalJSON(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Resolution(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Seconds(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Set(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_Start(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestAllocationSet_TotalCost(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 //func TestNewAllocationSetRange(t *testing.T) {}
 
 func TestAllocationSetRange_AccumulateRepeat(t *testing.T) {
@@ -2779,16 +2779,16 @@ func TestAllocationSetRange_AccumulateBy_Month(t *testing.T) {
 	}
 }
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_AggregateBy(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Append(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Each(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Get(t *testing.T) {}
 
 func TestAllocationSetRange_InsertRange(t *testing.T) {
@@ -2958,7 +2958,7 @@ func TestAllocationSetRange_InsertRange(t *testing.T) {
 	}
 }
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Length(t *testing.T) {}
 
 func TestAllocationSetRange_MarshalJSON(t *testing.T) {
@@ -3016,10 +3016,10 @@ func TestAllocationSetRange_MarshalJSON(t *testing.T) {
 	}
 }
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Slice(t *testing.T) {}
 
-// TODO niko/etl
+// TODO niko
 // func TestAllocationSetRange_Window(t *testing.T) {}
 
 func TestAllocationSetRange_Start(t *testing.T) {

+ 3 - 288
core/pkg/opencost/asset.go

@@ -69,190 +69,6 @@ type Asset interface {
 	fmt.Stringer
 }
 
-// AssetToExternalAllocation converts the given asset to an Allocation, given
-// the Properties to use to aggregate, and the mapping from Allocation property
-// to Asset label. For example, consider this asset:
-//
-// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TODO.
-//
-//	  Cloud {
-//		   TotalCost: 10.00,
-//		   Labels{
-//	      "kubernetes_namespace":"monitoring",
-//		     "env":"prod"
-//		   }
-//	  }
-//
-// Given the following parameters, we expect to return:
-//
-//  1. single-prop full match
-//     aggregateBy = ["namespace"]
-//     => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  2. multi-prop full match
-//     aggregateBy = ["namespace", "label:env"]
-//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-//     => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  3. multi-prop partial match
-//     aggregateBy = ["namespace", "label:foo"]
-//     => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  4. no match
-//     aggregateBy = ["cluster"]
-//     => nil, err
-//
-// TODO:
-//
-//	  Cloud {
-//		   TotalCost: 10.00,
-//		   Labels{
-//	      "kubernetes_namespace":"monitoring",
-//		     "env":"prod"
-//		   }
-//	  }
-//
-// Given the following parameters, we expect to return:
-//
-//  1. single-prop full match
-//     aggregateBy = ["namespace"]
-//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-//     => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  2. multi-prop full match
-//     aggregateBy = ["namespace", "label:env"]
-//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-//     => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  3. multi-prop partial match
-//     aggregateBy = ["namespace", "label:foo"]
-//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-//     => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
-//
-//  4. no match
-//     aggregateBy = ["cluster"]
-//     allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-//     => nil, err
-//
-// (See asset_test.go for assertions of these examples and more.)
-func AssetToExternalAllocation(asset Asset, aggregateBy []string, labelConfig *LabelConfig) (*Allocation, error) {
-	if asset == nil {
-		return nil, fmt.Errorf("asset is nil")
-	}
-
-	// Use default label config if one is not provided.
-	if labelConfig == nil {
-		labelConfig = NewLabelConfig()
-	}
-
-	// names will collect the slash-separated names accrued by iterating over
-	// aggregateBy and checking the relevant labels.
-	names := []string{}
-
-	// match records whether or not a match was found in the Asset labels,
-	// such that is can genuinely be turned into an external Allocation.
-	match := false
-
-	// props records the relevant Properties to set on the resultant Allocation
-	props := AllocationProperties{}
-
-	// For each aggregation parameter, try to find a match in the asset's
-	// labels, using the labelConfig to translate. For an aggregation parameter
-	// defined by a label (e.g. "label:app") this is simple: look for the label
-	// and use it (e.g. if "app" is a defined label on the asset, then use its
-	// value). For an aggregation parameter defined by a non-label property
-	// (e.g. "namespace") this requires using the labelConfig to look up the
-	// label name associated with that property and to use the value under that
-	// label, if set (e.g. if the aggregation property is "namespace" and the
-	// labelConfig is configured with "namespace_external_label" => "kubens"
-	// and the asset has label "kubens":"kubecost", then file the asset as an
-	// external cost under "kubecost").
-	for _, aggBy := range aggregateBy {
-		name := labelConfig.GetExternalAllocationName(asset.GetLabels(), aggBy)
-
-		if name == "" {
-			// No matching label has been defined in the cost-analyzer label config
-			// relating to the given aggregateBy property.
-			names = append(names, UnallocatedSuffix)
-			continue
-		} else {
-			names = append(names, name)
-			match = true
-
-			// Default labels to an empty map, if necessary
-			if props.Labels == nil {
-				props.Labels = map[string]string{}
-			}
-
-			// Set the corresponding property on props
-			switch aggBy {
-			case AllocationClusterProp:
-				props.Cluster = name
-			case AllocationNodeProp:
-				props.Node = name
-			case AllocationNamespaceProp:
-				props.Namespace = name
-			case AllocationControllerKindProp:
-				props.ControllerKind = name
-			case AllocationControllerProp:
-				props.Controller = name
-			case AllocationPodProp:
-				props.Pod = name
-			case AllocationContainerProp:
-				props.Container = name
-			case AllocationServiceProp:
-				props.Services = []string{name}
-			case AllocationDeploymentProp:
-				props.Controller = name
-				props.ControllerKind = "deployment"
-			case AllocationStatefulSetProp:
-				props.Controller = name
-				props.ControllerKind = "statefulset"
-			case AllocationDaemonSetProp:
-				props.Controller = name
-				props.ControllerKind = "daemonset"
-			case AllocationDepartmentProp:
-				props.Labels[labelConfig.DepartmentLabel] = name
-			case AllocationEnvironmentProp:
-				props.Labels[labelConfig.EnvironmentLabel] = name
-			case AllocationOwnerProp:
-				props.Labels[labelConfig.OwnerLabel] = name
-			case AllocationProductProp:
-				props.Labels[labelConfig.ProductLabel] = name
-			case AllocationTeamProp:
-				props.Labels[labelConfig.TeamLabel] = name
-			default:
-				if strings.HasPrefix(aggBy, "label:") {
-					// Set the corresponding label in props
-					labelName := strings.TrimPrefix(aggBy, "label:")
-					labelValue := strings.TrimPrefix(name, labelName+"=")
-					props.Labels[labelName] = labelValue
-				}
-			}
-		}
-	}
-
-	// If not a signle aggregation property generated a matching label property,
-	// then consider the asset ineligible to be treated as an external allocation.
-	if !match {
-		return nil, fmt.Errorf("asset does not qualify as an external allocation")
-	}
-
-	// Use naming to label as an external allocation. See IsExternal() for more.
-	names = append(names, ExternalSuffix)
-
-	// TODO: external allocation: efficiency?
-	// TODO: external allocation: resource totals?
-	return &Allocation{
-		Name:         strings.Join(names, "/"),
-		Properties:   &props,
-		Window:       asset.GetWindow().Clone(),
-		Start:        asset.GetStart(),
-		End:          asset.GetEnd(),
-		ExternalCost: asset.TotalCost(),
-	}, nil
-}
-
 // key is used to determine uniqueness of an Asset, for instance during Insert
 // to determine if two Assets should be combined. Passing `nil` `aggregateBy` indicates
 // that all available `AssetProperty` keys should be used. Passing empty `aggregateBy` indicates that
@@ -1239,7 +1055,7 @@ func (d *Disk) Minutes() float64 {
 	windowMins := d.Window.Minutes()
 
 	if diskMins > windowMins {
-		log.Warnf("Asset ETL: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
+		log.Warnf("Asset: Disk.Minutes exceeds window: %.2f > %.2f", diskMins, windowMins)
 		diskMins = windowMins
 	}
 
@@ -1699,7 +1515,7 @@ func (n *Network) Minutes() float64 {
 	windowMins := n.Window.Minutes()
 
 	if netMins > windowMins {
-		log.Warnf("Asset ETL: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
+		log.Warnf("Asset: Network.Minutes exceeds window: %.2f > %.2f", netMins, windowMins)
 		netMins = windowMins
 	}
 
@@ -2019,7 +1835,7 @@ func (n *Node) Minutes() float64 {
 	windowMins := n.Window.Minutes()
 
 	if nodeMins > windowMins {
-		log.Warnf("Asset ETL: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
+		log.Warnf("Asset: Node.Minutes exceeds window: %.2f > %.2f", nodeMins, windowMins)
 		nodeMins = windowMins
 	}
 
@@ -3186,107 +3002,6 @@ func (as *AssetSet) FindMatch(query Asset, aggregateBy []string, labelConfig *La
 	return nil, fmt.Errorf("Asset not found to match %s on %v", query, aggregateBy)
 }
 
-// ReconciliationMatch attempts to find an exact match in the AssetSet on
-// (Category, ProviderID). If a match is found, it returns the Asset with the
-// intent to adjust it. If no match exists, it attempts to find one on only
-// (ProviderID). If that match is found, it returns the Asset with the intent
-// to insert the associated Cloud cost.
-func (as *AssetSet) ReconciliationMatch(query Asset) (Asset, bool, error) {
-	// Full match means matching on (Category, ProviderID)
-	fullMatchProps := []string{string(AssetCategoryProp), string(AssetProviderIDProp)}
-	fullMatchKey, err := key(query, fullMatchProps, nil)
-
-	// This should never happen because we are using enumerated Properties,
-	// but the check is here in case that changes
-	if err != nil {
-		return nil, false, err
-	}
-
-	// Partial match means matching only on (ProviderID)
-	providerIDMatchProps := []string{string(AssetProviderIDProp)}
-	providerIDMatchKey, err := key(query, providerIDMatchProps, nil)
-
-	// This should never happen because we are using enumerated Properties,
-	// but the check is here in case that changes
-	if err != nil {
-		return nil, false, err
-	}
-
-	var providerIDMatch Asset
-	for _, asset := range as.Assets {
-		// Ignore cloud assets when looking for reconciliation matches
-		if asset.Type() == CloudAssetType {
-			continue
-		}
-		if k, err := key(asset, fullMatchProps, nil); err != nil {
-			return nil, false, err
-		} else if k == fullMatchKey {
-			log.DedupedInfof(10, "Asset ETL: Reconciliation[rcnw]: ReconcileRange Match: %s", fullMatchKey)
-			return asset, true, nil
-		}
-		if k, err := key(asset, providerIDMatchProps, nil); err != nil {
-			return nil, false, err
-		} else if k == providerIDMatchKey {
-			// Found a partial match. Save it until after all other options
-			// have been checked for full matches.
-			providerIDMatch = asset
-		}
-	}
-
-	// No full match was found, so return partial match, if found.
-	if providerIDMatch != nil {
-		return providerIDMatch, false, nil
-	}
-
-	return nil, false, fmt.Errorf("Asset not found to match %s", query)
-}
-
-// ReconciliationMatchMap returns a map of the calling AssetSet's Assets, by provider id and category. This data structure
-// allows for reconciliation matching to be done in constant time and prevents duplicate reconciliation.
-func (as *AssetSet) ReconciliationMatchMap() map[string]map[string]Asset {
-	matchMap := make(map[string]map[string]Asset)
-
-	if as == nil {
-		return matchMap
-	}
-
-	for _, asset := range as.Assets {
-		if asset == nil {
-			continue
-		}
-		props := asset.GetProperties()
-		// Ignore assets that cannot be matched when looking for reconciliation matches
-		if props == nil || props.ProviderID == "" {
-			continue
-		}
-
-		// we can't guarantee case in providerID for Azure provider to have map working for all providers,
-		// lower casing providerID  while creating reconciliation map
-		providerID := strings.ToLower(props.ProviderID)
-		if _, ok := matchMap[providerID]; !ok {
-			matchMap[providerID] = make(map[string]Asset)
-		}
-
-		// Check if a match is already in the map
-		if duplicateAsset, ok := matchMap[providerID][props.Category]; ok {
-			log.DedupedWarningf(5, "duplicate asset found when reconciling for %s", props.ProviderID)
-			// if one asset already has adjustment use that one
-			if duplicateAsset.GetAdjustment() == 0 && asset.GetAdjustment() != 0 {
-				matchMap[providerID][props.Category] = asset
-			} else if duplicateAsset.GetAdjustment() != 0 && asset.GetAdjustment() == 0 {
-				matchMap[providerID][props.Category] = duplicateAsset
-				// otherwise use the one with the higher cost
-			} else if duplicateAsset.TotalCost() < asset.TotalCost() {
-				matchMap[providerID][props.Category] = asset
-			}
-		} else {
-			matchMap[providerID][props.Category] = asset
-		}
-
-	}
-	return matchMap
-}
-
 // Get returns the Asset in the AssetSet at the given key, or nil and false
 // if no Asset exists for the given key
 func (as *AssetSet) Get(key string) (Asset, bool) {

+ 0 - 203
core/pkg/opencost/asset_test.go

@@ -913,32 +913,6 @@ func TestAssetSet_InsertMatchingWindow(t *testing.T) {
 	}
 }
 
-func TestAssetSet_ReconciliationMatchMap(t *testing.T) {
-	endYesterday := time.Now().UTC().Truncate(day)
-	startYesterday := endYesterday.Add(-day)
-
-	as := GenerateMockAssetSet(startYesterday, day)
-	matchMap := as.ReconciliationMatchMap()
-
-	// Determine the number of assets by provider ID
-	assetCountByProviderId := make(map[string]int, len(matchMap))
-	for _, a := range as.Assets {
-		if a == nil || a.GetProperties() == nil || a.GetProperties().ProviderID == "" {
-			return
-		}
-		if _, ok := assetCountByProviderId[a.GetProperties().ProviderID]; !ok {
-			assetCountByProviderId[a.GetProperties().ProviderID] = 0
-		}
-		assetCountByProviderId[a.GetProperties().ProviderID] += 1
-	}
-
-	for k, count := range assetCountByProviderId {
-		if len(matchMap[k]) != count {
-			t.Errorf("AssetSet.ReconciliationMatchMap: incorrect asset count for provider id: %s", k)
-		}
-	}
-}
-
 func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 	endYesterday := time.Now().UTC().Truncate(day)
 	startYesterday := endYesterday.Add(-day)
@@ -1058,183 +1032,6 @@ func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 	}, nil)
 }
 
-func TestAssetToExternalAllocation(t *testing.T) {
-	var asset Asset
-	var alloc *Allocation
-	var err error
-
-	labelConfig := NewLabelConfig()
-
-	_, err = AssetToExternalAllocation(asset, []string{"namespace"}, labelConfig)
-	if err == nil {
-		t.Fatalf("expected error due to nil asset; no error returned")
-	}
-
-	// Consider this Asset:
-	//   Cloud {
-	// 	   TotalCost: 10.00,
-	// 	   Labels{
-	//       "kubernetes_namespace":"monitoring",
-	// 	     "env":"prod"
-	// 	   }
-	//   }
-	cloud := NewCloud(ComputeCategory, "abc123", start1, start2, windows[0])
-	cloud.SetLabels(map[string]string{
-		"kubernetes_namespace":        "monitoring",
-		"env":                         "prod",
-		"app":                         "cost-analyzer",
-		"kubernetes_label_app":        "app",
-		"kubernetes_label_department": "department",
-		"kubernetes_label_env":        "env",
-		"kubernetes_label_owner":      "owner",
-		"kubernetes_label_team":       "team",
-	})
-	cloud.Cost = 10.00
-	asset = cloud
-
-	_, err = AssetToExternalAllocation(asset, []string{"namespace"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	_, err = AssetToExternalAllocation(asset, nil, nil)
-	if err == nil {
-		t.Fatalf("expected error due to nil aggregateBy; no error returned")
-	}
-
-	// Given the following parameters, we expect to return:
-	//
-	//   1) single-prop full match
-	//   aggregateBy = ["namespace"]
-	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-	//   => Allocation{Name: "monitoring", ExternalCost: 10.00, TotalCost: 10.00}, nil
-	//
-	//   2) multi-prop full match
-	//   aggregateBy = ["namespace", "label:env"]
-	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-	//   => Allocation{Name: "monitoring/env=prod", ExternalCost: 10.00, TotalCost: 10.00}, nil
-	//
-	//   3) multi-prop partial match
-	//   aggregateBy = ["namespace", "label:foo"]
-	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-	//   => Allocation{Name: "monitoring/__unallocated__", ExternalCost: 10.00, TotalCost: 10.00}, nil
-	//
-	//	 4) label alias match(es)
-	//	 aggregateBy = ["product", "deployment", "environment", "owner", "team"]
-	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-	//   => Allocation{Name: "app/department/env/owner/team", ExternalCost: 10.00, TotalCost: 10.00}, nil
-	//
-	//   5) no match
-	//   aggregateBy = ["cluster"]
-	//   allocationPropertyLabels = {"namespace":"kubernetes_namespace"}
-	//   => nil, err
-
-	// 1) single-prop full match
-	alloc, err = AssetToExternalAllocation(asset, []string{"namespace"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if alloc.Name != "monitoring/__external__" {
-		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/__external__", alloc.Name)
-	}
-	if ns := alloc.Properties.Namespace; ns != "monitoring" {
-		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s'", "monitoring", ns)
-	}
-	if alloc.ExternalCost != 10.00 {
-		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
-	}
-	if alloc.TotalCost() != 10.00 {
-		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
-	}
-
-	// 2) multi-prop full match
-	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:env"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if alloc.Name != "monitoring/env=prod/__external__" {
-		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/env=prod/__external__", alloc.Name)
-	}
-	if ns := alloc.Properties.Namespace; ns != "monitoring" {
-		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s' (%s)", "monitoring", ns, err)
-	}
-	if ls := alloc.Properties.Labels; len(ls) == 0 || ls["env"] != "prod" {
-		t.Fatalf("expected external allocation with AllocationProperties.Labels[\"env\"] '%s'; got '%s' (%s)", "prod", ls["env"], err)
-	}
-	if alloc.ExternalCost != 10.00 {
-		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
-	}
-	if alloc.TotalCost() != 10.00 {
-		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
-	}
-
-	// 3) multi-prop partial match
-	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:foo"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if alloc.Name != "monitoring/__unallocated__/__external__" {
-		t.Fatalf("expected external allocation with name '%s'; got '%s'", "monitoring/__unallocated__/__external__", alloc.Name)
-	}
-	if ns := alloc.Properties.Namespace; ns != "monitoring" {
-		t.Fatalf("expected external allocation with AllocationProperties.Namespace '%s'; got '%s' (%s)", "monitoring", ns, err)
-	}
-	if alloc.ExternalCost != 10.00 {
-		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
-	}
-	if alloc.TotalCost() != 10.00 {
-		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
-	}
-
-	// 4) label alias match(es)
-	alloc, err = AssetToExternalAllocation(asset, []string{"product", "department", "environment", "owner", "team"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if alloc.Name != "app/department/env/owner/team/__external__" {
-		t.Fatalf("expected external allocation with name '%s'; got '%s'", "app/department/env/owner/team/__external__", alloc.Name)
-	}
-	if alloc.Properties.Labels[labelConfig.ProductLabel] != "app" {
-		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.ProductLabel, "app", alloc.Properties.Labels[labelConfig.ProductLabel])
-	}
-	if alloc.Properties.Labels[labelConfig.DepartmentLabel] != "department" {
-		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.DepartmentLabel, "department", alloc.Properties.Labels[labelConfig.DepartmentLabel])
-	}
-	if alloc.Properties.Labels[labelConfig.EnvironmentLabel] != "env" {
-		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.EnvironmentLabel, "env", alloc.Properties.Labels[labelConfig.EnvironmentLabel])
-	}
-	if alloc.Properties.Labels[labelConfig.OwnerLabel] != "owner" {
-		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.OwnerLabel, "owner", alloc.Properties.Labels[labelConfig.OwnerLabel])
-	}
-	if alloc.Properties.Labels[labelConfig.TeamLabel] != "team" {
-		t.Fatalf("expected external allocation with label %s equal to %s; got %s", labelConfig.TeamLabel, "team", alloc.Properties.Labels[labelConfig.TeamLabel])
-	}
-	if alloc.ExternalCost != 10.00 {
-		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
-	}
-	if alloc.TotalCost() != 10.00 {
-		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
-	}
-
-	// 5) no match
-	_, err = AssetToExternalAllocation(asset, []string{"cluster"}, nil)
-	if err == nil {
-		t.Fatalf("expected 'no match' error")
-	}
-
-	// other cases
-
-	alloc, err = AssetToExternalAllocation(asset, []string{"namespace", "label:app"}, nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if alloc.ExternalCost != 10.00 {
-		t.Fatalf("expected external allocation with ExternalCost %f; got %f", 10.00, alloc.ExternalCost)
-	}
-	if alloc.TotalCost() != 10.00 {
-		t.Fatalf("expected external allocation with TotalCost %f; got %f", 10.00, alloc.TotalCost())
-	}
-}
-
 func TestAssetSetRange_Start(t *testing.T) {
 	tests := []struct {
 		name string

+ 9 - 9
core/pkg/opencost/exporter/exporter_test.go

@@ -144,7 +144,7 @@ func TestExporters(t *testing.T) {
 	t.Run("allocation exporter", func(t *testing.T) {
 		allocSource := NewMockAllocationSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -173,7 +173,7 @@ func TestExporters(t *testing.T) {
 	t.Run("asset exporter", func(t *testing.T) {
 		assetSource := NewMockAssetSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -202,7 +202,7 @@ func TestExporters(t *testing.T) {
 	t.Run("network insight exporter", func(t *testing.T) {
 		netInsightSource := NewMockNetworkInsightSource()
 		memStore := storage.NewMemoryStorage()
-		p, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		p, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create path formatter: %v", err)
 		}
@@ -264,15 +264,15 @@ func TestPipelineExportControllers(t *testing.T) {
 		time.Sleep(time.Second + (750 * time.Millisecond))
 		exportControllers.Stop()
 
-		allocPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		allocPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create allocations path formatter: %v", err)
 		}
-		assetPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		assetPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create assets path formatter: %v", err)
 		}
-		netPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		netPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create net insights path formatter: %v", err)
 		}
@@ -300,15 +300,15 @@ func TestPipelineExportControllers(t *testing.T) {
 		time.Sleep(time.Second + (750 * time.Millisecond))
 		exportControllers.Stop()
 
-		allocPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
+		allocPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AllocationPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create allocations path formatter: %v", err)
 		}
-		assetPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
+		assetPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.AssetsPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create assets path formatter: %v", err)
 		}
-		netPath, err := pathing.NewBingenStoragePathFormatter("federated", TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
+		netPath, err := pathing.NewDefaultStoragePathFormatter(TestClusterId, pipelines.NetworkInsightPipelineName, ptr(TestResolution))
 		if err != nil {
 			t.Fatalf("failed to create net insights path formatter: %v", err)
 		}

+ 1 - 1
core/pkg/opencost/exporter/exporters.go

@@ -24,7 +24,7 @@ func NewComputePipelineExporter[T any, U export.BinaryMarshalerPtr[T], S validat
 		return nil, fmt.Errorf("failed to extract pipeline name for type: %s", typeutil.TypeOf[T]())
 	}
 
-	pathing, err := pathing.NewBingenStoragePathFormatter("federated", clusterId, pipelineName, &resolution)
+	pathing, err := pathing.NewDefaultStoragePathFormatter(clusterId, pipelineName, &resolution)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create path formatter: %w", err)
 	}

+ 4 - 4
core/pkg/opencost/opencost_codecs_test.go

@@ -6,11 +6,11 @@ import (
 )
 
 func TestAllocation_BinaryEncoding(t *testing.T) {
-	// TODO niko/etl
+	// TODO niko
 }
 
 func TestAllocationSet_BinaryEncoding(t *testing.T) {
-	// TODO niko/etl
+	// TODO niko
 }
 
 func BenchmarkAllocationSetRange_BinaryEncoding(b *testing.B) {
@@ -193,11 +193,11 @@ func TestAny_BinaryEncoding(t *testing.T) {
 }
 
 func TestAsset_BinaryEncoding(t *testing.T) {
-	// TODO niko/etl
+	// TODO niko
 }
 
 func TestAssetSet_BinaryEncoding(t *testing.T) {
-	// TODO niko/etl
+	// TODO niko
 }
 
 func TestAssetSetRange_BinaryEncoding(t *testing.T) {

+ 0 - 66
core/pkg/opencost/status.go

@@ -1,66 +0,0 @@
-package opencost
-
-import "time"
-
-// ETLStatus describes ETL metadata
-type ETLStatus struct {
-	Coverage                   Window           `json:"coverage"`
-	LastRun                    time.Time        `json:"lastRun"`
-	Progress                   float64          `json:"progress"`
-	RefreshRate                string           `json:"refreshRate"`
-	Resolution                 string           `json:"resolution"`
-	MaxPrometheusQueryDuration string           `json:"maxPrometheusQueryDuration"`
-	StartTime                  time.Time        `json:"startTime"`
-	UTCOffset                  string           `json:"utcOffset"`
-	Backup                     *DirectoryStatus `json:"backup,omitempty"`
-}
-
-// DirectoryStatus describes metadata of a directory of files
-type DirectoryStatus struct {
-	Path         string       `json:"path"`
-	Size         string       `json:"size"`
-	LastModified time.Time    `json:"lastModified"`
-	FileCount    int          `json:"fileCount"`
-	Files        []FileStatus `json:"files"`
-}
-
-// FileStatus describes the metadata of a single file
-type FileStatus struct {
-	Name         string            `json:"name"`
-	Size         string            `json:"size"`
-	LastModified time.Time         `json:"lastModified"`
-	IsRepairing  bool              `json:"isRepairing"`
-	Details      map[string]string `json:"details,omitempty"`
-	Errors       []string          `json:"errors,omitempty"`
-	Warnings     []string          `json:"warnings,omitempty"`
-}
-
-// CloudStatus describes CloudStore metadata
-type CloudStatus struct {
-	ConnectionStatus string                `json:"cloudConnectionStatus"`
-	ProviderType     string                `json:"providerType"`
-	CloudUsage       *CloudAssetStatus     `json:"cloudUsage,omitempty"`
-	Reconciliation   *ReconciliationStatus `json:"reconciliation,omitempty"`
-}
-
-// CloudAssetStatus describes CloudAsset metadata of a CloudStore
-type CloudAssetStatus struct {
-	Coverage    Window    `json:"coverage"`
-	LastRun     time.Time `json:"lastRun"`
-	NextRun     time.Time `json:"nextRun"`
-	Progress    float64   `json:"progress"`
-	RefreshRate string    `json:"refreshRate"`
-	Resolution  string    `json:"resolution"`
-	StartTime   time.Time `json:"startTime"`
-}
-
-// ReconciliationStatus describes Reconciliation metadata of a CloudStore
-type ReconciliationStatus struct {
-	Coverage    Window    `json:"coverage"`
-	LastRun     time.Time `json:"lastRun"`
-	NextRun     time.Time `json:"nextRun"`
-	Progress    float64   `json:"progress"`
-	RefreshRate string    `json:"refreshRate"`
-	Resolution  string    `json:"resolution"`
-	StartTime   time.Time `json:"startTime"`
-}

+ 2 - 43
core/pkg/opencost/totals.go

@@ -488,7 +488,7 @@ func ComputeAssetTotals(as *AssetSet, byAsset bool) map[string]*AssetTotals {
 		if isAttached {
 			// Record attached volume data at the cluster and node level, using
 			// name matching to distinguish from PersistentVolumes.
-			// TODO can we make a stronger match at the underlying ETL layer?
+			// TODO can we make a stronger match at the underlying costmodel layer?
 			arts[key].Count++
 			arts[key].AttachedVolumeCost += disk.Cost
 			arts[key].AttachedVolumeCostAdjustment += disk.Adjustment
@@ -573,47 +573,6 @@ type AllocationTotalsStore interface {
 	SetAllocationTotalsByNode(start, end time.Time, rts map[string]*AllocationTotals)
 }
 
-// UpdateAllocationTotalsStore updates an AllocationTotalsStore
-// by totaling the given AllocationSet and saving the totals.
-func UpdateAllocationTotalsStore(arts AllocationTotalsStore, as *AllocationSet) (*AllocationTotalsSet, error) {
-	if arts == nil {
-		return nil, errors.New("cannot update nil AllocationTotalsStore")
-	}
-
-	if as == nil {
-		return nil, errors.New("cannot update AllocationTotalsStore from nil AllocationSet")
-	}
-
-	if as.Window.IsOpen() {
-		return nil, errors.New("cannot update AllocationTotalsStore from AllocationSet with open window")
-	}
-
-	start := *as.Window.Start()
-	end := *as.Window.End()
-
-	artsByCluster := ComputeAllocationTotals(as, AllocationClusterProp)
-	arts.SetAllocationTotalsByCluster(start, end, artsByCluster)
-
-	artsByNode := ComputeAllocationTotals(as, AllocationNodeProp)
-	arts.SetAllocationTotalsByNode(start, end, artsByNode)
-
-	log.Debugf("ETL: Allocation: updated resource totals for %s", as.Window)
-
-	win := NewClosedWindow(start, end)
-
-	abc := map[string]*AllocationTotals{}
-	for key, val := range artsByCluster {
-		abc[key] = val.Clone()
-	}
-
-	abn := map[string]*AllocationTotals{}
-	for key, val := range artsByNode {
-		abn[key] = val.Clone()
-	}
-
-	return NewAllocationTotalsSet(win, abc, abn), nil
-}
-
 // AssetTotalsStore allows for storing (i.e. setting and getting)
 // AssetTotals by cluster and by node.
 type AssetTotalsStore interface {
@@ -647,7 +606,7 @@ func UpdateAssetTotalsStore(arts AssetTotalsStore, as *AssetSet) (*AssetTotalsSe
 	artsByNode := ComputeAssetTotals(as, true)
 	arts.SetAssetTotalsByNode(start, end, artsByNode)
 
-	log.Debugf("ETL: Asset: updated resource totals for %s", as.Window)
+	log.Debugf("Asset: updated resource totals for %s", as.Window)
 
 	win := NewClosedWindow(start, end)
 

+ 15 - 5
core/pkg/pipelines/name.go

@@ -1,16 +1,21 @@
 package pipelines
 
 import (
+	"github.com/opencost/opencost/core/pkg/diagnostics"
+	"github.com/opencost/opencost/core/pkg/heartbeat"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/typeutil"
 )
 
 const (
-	AllocationPipelineName     string = "allocations"
-	AssetsPipelineName         string = "assets"
-	CloudCostsPipelineName     string = "cloudcosts"
-	NetworkInsightPipelineName string = "networkinsights"
-	CustomCostsPipelineName    string = "customcosts"
+	AllocationPipelineName        string = "allocations"
+	AssetsPipelineName            string = "assets"
+	CloudCostsPipelineName        string = "cloudcosts"
+	NetworkInsightPipelineName    string = "networkinsights"
+	CustomCostsPipelineName       string = "customcosts"
+	TurbonomicActionsPipelineName string = "turbonomicactions"
+	HeartbeatPipelineName         string = "heartbeat"
+	DiagnosticsPipelineName       string = "diagnostics"
 )
 
 var nameByType map[string]string
@@ -29,6 +34,9 @@ func init() {
 	networkInsightSetKey := typeutil.TypeOf[opencost.NetworkInsightSet]()
 	networkInsightKey := typeutil.TypeOf[opencost.NetworkInsight]()
 
+	heartbeatKey := typeutil.TypeOf[heartbeat.Heartbeat]()
+	diagnosticsKey := typeutil.TypeOf[diagnostics.DiagnosticsRunReport]()
+
 	nameByType = map[string]string{
 		allocSetKey:          AllocationPipelineName,
 		allocKey:             AllocationPipelineName,
@@ -38,6 +46,8 @@ func init() {
 		cloudCostKey:         CloudCostsPipelineName,
 		networkInsightSetKey: NetworkInsightPipelineName,
 		networkInsightKey:    NetworkInsightPipelineName,
+		heartbeatKey:         HeartbeatPipelineName,
+		diagnosticsKey:       DiagnosticsPipelineName,
 	}
 }
 

+ 31 - 0
core/pkg/storage/storefactory.go

@@ -0,0 +1,31 @@
+package storage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+// GetDefaultStorage initializes the default shared storage which is required for kubecost; it panics on failure.
+func GetDefaultStorage() Storage {
+	store, err := InitializeStorage(env.GetDefaultStorageConfigFilePath())
+	if err != nil {
+		panic(fmt.Sprintf("failed to initialize default storage: %s", err.Error())) // fail fast: storage is a hard startup requirement
+	}
+	return store
+}
+
+// InitializeStorage creates a storage from the config file at the given path.
+func InitializeStorage(configPath string) (Storage, error) {
+	storageConfig, err := os.ReadFile(configPath) // raw bucket-storage config bytes, parsed by NewBucketStorage
+	if err != nil {
+		return nil, fmt.Errorf("failed to read file '%s': %w", configPath, err)
+	}
+	store, err := NewBucketStorage(storageConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create storage from config '%s': %w", configPath, err)
+	}
+
+	return store, nil
+}

+ 32 - 0
core/pkg/util/apiutil/apiutil.go

@@ -0,0 +1,32 @@
+package apiutil
+
+import (
+	"net/http"
+	"net/http/pprof"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+func ApplyContainerDiagnosticEndpoints(router *httprouter.Router) { // registers health, log-level, and (optionally) pprof routes
+	router.HandlerFunc("GET", "/healthz", healthz)
+
+	router.GET("/logs/level", GetLogLevel)
+	router.POST("/logs/level", SetLogLevel)
+
+	if env.IsPProfEnabled() { // pprof endpoints are opt-in via environment flag
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
+		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
+		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
+	}
+}
+
+func healthz(w http.ResponseWriter, _ *http.Request) {
+	w.Header().Set("Content-Length", "0")  // headers must be set BEFORE WriteHeader,
+	w.Header().Set("Content-Type", "text/plain") // otherwise net/http silently ignores them
+	w.WriteHeader(http.StatusOK)
+}

+ 51 - 0
core/pkg/util/apiutil/loglevel.go

@@ -0,0 +1,51 @@
+package apiutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/core/pkg/log"
+)
+
+type LogLevelRequestResponse struct { // shared payload for GET (response body) and POST (request body) on /logs/level
+	Level string `json:"level"`
+}
+
+func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { // GET /logs/level: report the current log level as JSON
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	level := log.GetLogLevel()
+	llrr := LogLevelRequestResponse{
+		Level: level,
+	}
+
+	body, err := json.Marshal(llrr)
+	if err != nil {
+		http.Error(w, "unable to retrieve log level", http.StatusInternalServerError) // constant message: Sprintf was a no-op (also fixes "retrive" typo)
+		return
+	}
+	_, err = w.Write(body)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
+		return
+	}
+}
+
+func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { // POST /logs/level: change the process log level at runtime
+	params := LogLevelRequestResponse{}
+	err := json.NewDecoder(r.Body).Decode(&params) // expects a body of the form {"level":"debug"}
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
+		return
+	}
+
+	err = log.SetLogLevel(params.Level)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}

+ 1 - 1
core/pkg/util/timeutil/timeutil.go

@@ -117,7 +117,7 @@ func ParseUTCOffset(offsetStr string) (time.Duration, error) {
 	return offset, nil
 }
 
-// FormatStoreResolution provides a clean notation for ETL store resolutions.
+// FormatStoreResolution provides a clean notation for store resolutions.
 // e.g. daily => 1d; hourly => 1h
 func FormatStoreResolution(dur time.Duration) string {
 	if dur >= (7 * 24 * time.Hour) {

+ 2 - 1
modules/collector-source/pkg/collector/config.go

@@ -1,6 +1,7 @@
 package collector
 
 import (
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/modules/collector-source/pkg/env"
 	"github.com/opencost/opencost/modules/collector-source/pkg/util"
 )
@@ -29,7 +30,7 @@ func NewOpenCostCollectorConfigFromEnv() CollectorConfig {
 			},
 		},
 		ScrapeInterval: env.GetCollectorScrapeIntervalSeconds(),
-		ClusterID:      env.GetClusterID(),
+		ClusterID:      coreenv.GetClusterID(),
 		NetworkPort:    env.GetNetworkPort(),
 	}
 }

+ 0 - 5
modules/collector-source/pkg/env/collectorenv.go

@@ -5,7 +5,6 @@ import (
 )
 
 const (
-	ClusterIDEnvVar                 = "CLUSTER_ID"
 	NetworkPortEnvVar               = "NETWORK_PORT"
 	Collector10mResolutionRetention = "COLLECTOR_10M_RESOLUTION_RETENTION"
 	Collector1hResolutionRetention  = "COLLECTOR_1H_RESOLUTION_RETENTION"
@@ -13,10 +12,6 @@ const (
 	CollectorScrapeInterval         = "COLLECTOR_SCRAPE_INTERVAL"
 )
 
-func GetClusterID() string {
-	return env.Get(ClusterIDEnvVar, "")
-}
-
 func GetNetworkPort() int {
 	return env.GetInt(NetworkPortEnvVar, 3001)
 }

+ 10 - 41
modules/prometheus-source/pkg/env/promenv.go

@@ -20,10 +20,10 @@ const (
 	PrometheusTLSHandshakeTimeoutEnvVar = "PROMETHEUS_TLS_HANDSHAKE_TIMEOUT"
 	ScrapeIntervalEnvVar                = "KUBECOST_SCRAPE_INTERVAL"
 
-	ETLMaxPrometheusQueryDurationMinutes = "ETL_MAX_PROMETHEUS_QUERY_DURATION_MINUTES"
+	PrometheusMaxQueryDurationMinutesEnvVar = "PROMETHEUS_MAX_QUERY_DURATION_MINUTES"
+	PrometheusQueryResolutionSecondsEnvVar  = "PROMETHEUS_QUERY_RESOLUTION_SECONDS"
 
 	MaxQueryConcurrencyEnvVar = "MAX_QUERY_CONCURRENCY"
-	QueryLoggingFileEnvVar    = "QUERY_LOGGING_FILE"
 	PromClusterIDLabelEnvVar  = "PROM_CLUSTER_ID_LABEL"
 
 	PrometheusHeaderXScopeOrgIdEnvVar = "PROMETHEUS_HEADER_X_SCOPE_ORGID"
@@ -34,15 +34,9 @@ const (
 	DBBasicAuthPassword = "DB_BASIC_AUTH_PW"
 	DBBearerToken       = "DB_BEARER_TOKEN"
 
-	MultiClusterBasicAuthUsername = "MC_BASIC_AUTH_USERNAME"
-	MultiClusterBasicAuthPassword = "MC_BASIC_AUTH_PW"
-	MultiClusterBearerToken       = "MC_BEARER_TOKEN"
-
 	CurrentClusterIdFilterEnabledVar = "CURRENT_CLUSTER_ID_FILTER_ENABLED"
-	ClusterIDEnvVar                  = "CLUSTER_ID"
 
-	KubecostJobNameEnvVar      = "KUBECOST_JOB_NAME"
-	ETLResolutionSecondsEnvVar = "ETL_RESOLUTION_SECONDS"
+	KubecostJobNameEnvVar = "KUBECOST_JOB_NAME"
 )
 
 // IsPrometheusRetryOnRateLimitResponse will attempt to retry if a 429 response is received OR a 400 with a body containing
@@ -108,13 +102,13 @@ func IsKubeRbacProxyEnabled() bool {
 	return env.GetBool(KubeRbacProxyEnabledEnvVar, false)
 }
 
-// GetETLResolution determines the resolution of ETL queries. The smaller the
+// GetPrometheusQueryResolution determines the resolution of prom queries. The smaller the
 // duration, the higher the resolution; the higher the resolution, the more
 // accurate the query results, but the more computationally expensive.
-func GetETLResolution() time.Duration {
-	// Use the configured ETL resolution, or default to
+func GetPrometheusQueryResolution() time.Duration {
+	// Use the configured query resolution, or default to
 	// 5m (i.e. 300s)
-	secs := time.Duration(env.GetInt64(ETLResolutionSecondsEnvVar, 300))
+	secs := time.Duration(env.GetInt64(PrometheusQueryResolutionSecondsEnvVar, 300))
 	return secs * time.Second
 }
 
@@ -127,11 +121,6 @@ func GetMaxQueryConcurrency() int {
 	return maxQueryConcurrency
 }
 
-// GetQueryLoggingFile returns a file location if query logging is enabled. Otherwise, empty string
-func GetQueryLoggingFile() string {
-	return env.Get(QueryLoggingFileEnvVar, "")
-}
-
 func GetDBBasicAuthUsername() string {
 	return env.Get(DBBasicAuthUsername, "")
 }
@@ -144,23 +133,9 @@ func GetDBBearerToken() string {
 	return env.Get(DBBearerToken, "")
 }
 
-// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
-func GetMultiClusterBasicAuthUsername() string {
-	return env.Get(MultiClusterBasicAuthUsername, "")
-}
-
-// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
-func GetMultiClusterBasicAuthPassword() string {
-	return env.Get(MultiClusterBasicAuthPassword, "")
-}
-
-func GetMultiClusterBearerToken() string {
-	return env.Get(MultiClusterBearerToken, "")
-}
-
-func GetETLMaxPrometheusQueryDuration() time.Duration {
+func GetPrometheusMaxQueryDuration() time.Duration {
 	dayMins := 60 * 24
-	mins := time.Duration(env.GetInt64(ETLMaxPrometheusQueryDurationMinutes, int64(dayMins)))
+	mins := time.Duration(env.GetInt64(PrometheusMaxQueryDurationMinutesEnvVar, int64(dayMins)))
 	return mins * time.Minute
 }
 
@@ -169,17 +144,11 @@ func GetPromClusterLabel() string {
 	return env.Get(PromClusterIDLabelEnvVar, "cluster_id")
 }
 
-// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
-// configurable identifier used for multi-cluster metric emission.
-func GetClusterID() string {
-	return env.Get(ClusterIDEnvVar, "")
-}
-
 // GetPromClusterFilter returns environment variable value CurrentClusterIdFilterEnabledVar which
 // represents additional prometheus filter for all metrics for current cluster id
 func GetPromClusterFilter() string {
 	if env.GetBool(CurrentClusterIdFilterEnabledVar, false) {
-		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), GetClusterID())
+		return fmt.Sprintf("%s=\"%s\"", GetPromClusterLabel(), env.GetClusterID())
 	}
 	return ""
 }

+ 4 - 3
modules/prometheus-source/pkg/prom/config.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/modules/prometheus-source/pkg/env"
 
@@ -54,9 +55,9 @@ func NewOpenCostPrometheusConfigFromEnv() (*OpenCostPrometheusConfig, error) {
 	jobName := env.GetJobName()
 	scrapeInterval := env.GetScrapeInterval()
 
-	maxQueryDuration := env.GetETLMaxPrometheusQueryDuration()
+	maxQueryDuration := env.GetPrometheusMaxQueryDuration()
 
-	clusterId := env.GetClusterID()
+	clusterId := coreenv.GetClusterID()
 	clusterLabel := env.GetPromClusterLabel()
 	clusterFilter := env.GetPromClusterFilter()
 
@@ -89,7 +90,7 @@ func NewOpenCostPrometheusConfigFromEnv() (*OpenCostPrometheusConfig, error) {
 		}
 	}
 
-	dataResolution := env.GetETLResolution()
+	dataResolution := env.GetPrometheusQueryResolution()
 
 	// Ensuring if data resolution is less than 60s default it to 1m
 	resolutionMinutes := int(dataResolution.Minutes())

+ 1 - 1
modules/prometheus-source/pkg/prom/metricsquerier.go

@@ -648,7 +648,7 @@ func (pds *PrometheusMetricsQuerier) QueryCPUUsageMax(start, end time.Time) *sou
 	// The parameter after the metric ...{}[<thisone>] should be set to 2x
 	// the resolution, to make sure the irate always has two points to query
 	// in case the Prom scrape duration has been reduced to be equal to the
-	// ETL resolution.
+	// query resolution.
 	const queryFmtCPUUsageMaxSubquery = `max(max_over_time(irate(container_cpu_usage_seconds_total{container!="POD", container!="", %s}[%dm])[%s:%dm])) by (container, pod_name, pod, namespace, node, instance, %s)`
 	// env.GetPromClusterFilter(), doubleResStr, durStr, resStr, env.GetPromClusterLabel()
 

+ 16 - 16
pkg/cloud/alibaba/provider.go

@@ -15,7 +15,7 @@ import (
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	"github.com/opencost/opencost/core/pkg/env"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/fileutil"
@@ -24,7 +24,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
-	ocenv "github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/env"
 	"golang.org/x/exp/slices"
 )
 
@@ -329,10 +329,10 @@ func (alibaba *Alibaba) GetAlibabaAccessKey() (*credentials.AccessKeyCredential,
 	}
 
 	if config.AlibabaServiceKeyName == "" {
-		config.AlibabaServiceKeyName = ocenv.GetAlibabaAccessKeyID()
+		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
 	}
 	if config.AlibabaServiceKeySecret == "" {
-		config.AlibabaServiceKeySecret = ocenv.GetAlibabaAccessKeySecret()
+		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
 	}
 
 	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
@@ -341,8 +341,8 @@ func (alibaba *Alibaba) GetAlibabaAccessKey() (*credentials.AccessKeyCredential,
 		if err != nil {
 			return nil, fmt.Errorf("unable to set the Alibaba Cloud key/secret from config file %w", err)
 		}
-		config.AlibabaServiceKeyName = ocenv.GetAlibabaAccessKeyID()
-		config.AlibabaServiceKeySecret = ocenv.GetAlibabaAccessKeySecret()
+		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
+		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
 	}
 
 	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
@@ -636,13 +636,13 @@ func (alibaba *Alibaba) loadAlibabaAuthSecretAndSetEnv(force bool) error {
 		return fmt.Errorf("failed to unmarshall access key id and access key secret with err: %w", err)
 	}
 
-	err = env.Set(ocenv.AlibabaAccessKeyIDEnvVar, ak.AccessKeyID)
+	err = coreenv.Set(env.AlibabaAccessKeyIDEnvVar, ak.AccessKeyID)
 	if err != nil {
-		return fmt.Errorf("failed to set environment variable: %s with err: %w", ocenv.AlibabaAccessKeyIDEnvVar, err)
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeyIDEnvVar, err)
 	}
-	err = env.Set(ocenv.AlibabaAccessKeySecretEnvVar, ak.SecretAccessKey)
+	err = coreenv.Set(env.AlibabaAccessKeySecretEnvVar, ak.SecretAccessKey)
 	if err != nil {
-		return fmt.Errorf("failed to set environment variable: %s with err: %w", ocenv.AlibabaAccessKeySecretEnvVar, err)
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeySecretEnvVar, err)
 	}
 
 	alibaba.accessKey = &credentials.AccessKeyCredential{
@@ -655,7 +655,7 @@ func (alibaba *Alibaba) loadAlibabaAuthSecretAndSetEnv(force bool) error {
 // Regions returns a current supported list of Alibaba regions
 func (alibaba *Alibaba) Regions() []string {
 
-	regionOverrides := ocenv.GetRegionOverrideList()
+	regionOverrides := env.GetRegionOverrideList()
 
 	if len(regionOverrides) > 0 {
 		log.Debugf("Overriding Alibaba regions with configured region list: %+v", regionOverrides)
@@ -680,7 +680,7 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 
 	// Set it to environment clusterID if not set at this point
 	if clusterName == "" {
-		clusterName = ocenv.GetClusterID()
+		clusterName = coreenv.GetClusterID()
 	}
 
 	m := make(map[string]string)
@@ -688,7 +688,7 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.AlibabaProvider
 	m["project"] = alibaba.ClusterAccountId
 	m["region"] = alibaba.ClusterRegion
-	m["id"] = ocenv.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 }
 
@@ -731,8 +731,8 @@ func (alibaba *Alibaba) UpdateConfig(r io.Reader, updateType string) (*models.Cu
 			}
 		}
 
-		if ocenv.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(ocenv.GetClusterID(), c.ClusterName)
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -1391,7 +1391,7 @@ func determinePVRegion(pv *clustercache.PersistentVolume) string {
 		}
 	}
 
-	regionOverrides := ocenv.GetRegionOverrideList()
+	regionOverrides := env.GetRegionOverrideList()
 	regions := alibabaRegions
 
 	if len(regionOverrides) > 0 {

+ 20 - 20
pkg/cloud/aws/provider.go

@@ -21,7 +21,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
-	"github.com/opencost/opencost/core/pkg/env"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	errs "github.com/opencost/opencost/core/pkg/errors"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
@@ -29,7 +29,7 @@ import (
 	"github.com/opencost/opencost/core/pkg/util/fileutil"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
-	ocenv "github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/env"
 
 	awsSDK "github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
@@ -465,10 +465,10 @@ func (aws *AWS) GetAWSAccessKey() (*AWSAccessKey, error) {
 	}
 	//Look for service key values in env if not present in config
 	if config.ServiceKeyName == "" {
-		config.ServiceKeyName = ocenv.GetAWSAccessKeyID()
+		config.ServiceKeyName = env.GetAWSAccessKeyID()
 	}
 	if config.ServiceKeySecret == "" {
-		config.ServiceKeySecret = ocenv.GetAWSAccessKeySecret()
+		config.ServiceKeySecret = env.GetAWSAccessKeySecret()
 	}
 
 	if config.ServiceKeyName == "" && config.ServiceKeySecret == "" {
@@ -581,8 +581,8 @@ func (aws *AWS) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 			}
 		}
 
-		if ocenv.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(ocenv.GetClusterID(), c.ClusterName)
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -800,8 +800,8 @@ func (aws *AWS) getRegionPricing(nodeList []*clustercache.Node) (*http.Response,
 
 	pricingURL += "index.json"
 
-	if ocenv.GetAWSPricingURL() != "" { // Allow override of pricing URL
-		pricingURL = ocenv.GetAWSPricingURL()
+	if env.GetAWSPricingURL() != "" { // Allow override of pricing URL
+		pricingURL = env.GetAWSPricingURL()
 	}
 
 	log.Infof("starting download of \"%s\", which is quite large ...", pricingURL)
@@ -1457,17 +1457,17 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 	// Determine cluster name
 	clusterName := c.ClusterName
 	if clusterName == "" {
-		awsClusterID := ocenv.GetAWSClusterID()
+		awsClusterID := env.GetAWSClusterID()
 		if awsClusterID != "" {
 			log.Infof("Returning \"%s\" as ClusterName", awsClusterID)
 			clusterName = awsClusterID
-			log.Warnf("Warning - %s will be deprecated in a future release. Use %s instead", ocenv.AWSClusterIDEnvVar, ocenv.ClusterIDEnvVar)
-		} else if clusterName = ocenv.GetClusterID(); clusterName != "" {
-			log.DedupedInfof(5, "Setting cluster name to %s from %s ", clusterName, ocenv.ClusterIDEnvVar)
+			log.Warnf("Warning - %s will be deprecated in a future release. Use %s instead", env.AWSClusterIDEnvVar, coreenv.ClusterIDEnvVar)
+		} else if clusterName = coreenv.GetClusterID(); clusterName != "" {
+			log.DedupedInfof(5, "Setting cluster name to %s from %s ", clusterName, coreenv.ClusterIDEnvVar)
 		} else {
 			clusterName = defaultClusterName
 			log.DedupedWarningf(5, "Unable to detect cluster name - using default of %s", defaultClusterName)
-			log.DedupedWarningf(5, "Please set cluster name through configmap or via %s env var", ocenv.ClusterIDEnvVar)
+			log.DedupedWarningf(5, "Please set cluster name through configmap or via %s env var", coreenv.ClusterIDEnvVar)
 		}
 	}
 
@@ -1483,8 +1483,8 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.AWSProvider
 	m["account"] = clusterAccountID
 	m["region"] = awsProvider.ClusterRegion
-	m["id"] = ocenv.GetClusterID()
-	m["remoteReadEnabled"] = strconv.FormatBool(ocenv.IsRemoteEnabled())
+	m["id"] = coreenv.GetClusterID()
+	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
 	m["provisioner"] = awsProvider.clusterProvisioner
 	return m, nil
 }
@@ -1502,11 +1502,11 @@ func (aws *AWS) ConfigureAuth() error {
 func (aws *AWS) ConfigureAuthWith(config *models.CustomPricing) error {
 	accessKeyID, accessKeySecret := aws.getAWSAuth(false, config)
 	if accessKeyID != "" && accessKeySecret != "" { // credentials may exist on the actual AWS node-- if so, use those. If not, override with the service key
-		err := env.Set(ocenv.AWSAccessKeyIDEnvVar, accessKeyID)
+		err := coreenv.Set(env.AWSAccessKeyIDEnvVar, accessKeyID)
 		if err != nil {
 			return err
 		}
-		err = env.Set(ocenv.AWSAccessKeySecretEnvVar, accessKeySecret)
+		err = coreenv.Set(env.AWSAccessKeySecretEnvVar, accessKeySecret)
 		if err != nil {
 			return err
 		}
@@ -1536,7 +1536,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 	}
 
 	// 3. Fall back to env vars
-	if ocenv.GetAWSAccessKeyID() == "" || ocenv.GetAWSAccessKeySecret() == "" {
+	if env.GetAWSAccessKeyID() == "" || env.GetAWSAccessKeySecret() == "" {
 		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  false,
@@ -1547,7 +1547,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 			Status:  true,
 		})
 	}
-	return ocenv.GetAWSAccessKeyID(), ocenv.GetAWSAccessKeySecret()
+	return env.GetAWSAccessKeyID(), env.GetAWSAccessKeySecret()
 }
 
 // Load once and cache the result (even on failure). This is an install time secret, so
@@ -2452,7 +2452,7 @@ func (aws *AWS) CombinedDiscountForNode(instanceType string, isPreemptible bool,
 // Regions returns a predefined list of AWS regions
 func (aws *AWS) Regions() []string {
 
-	regionOverrides := ocenv.GetRegionOverrideList()
+	regionOverrides := env.GetRegionOverrideList()
 
 	if len(regionOverrides) > 0 {
 		log.Debugf("Overriding AWS regions with configured region list: %+v", regionOverrides)

+ 3 - 2
pkg/cloud/azure/provider.go

@@ -20,6 +20,7 @@ import (
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/core/pkg/log"
@@ -1507,7 +1508,7 @@ func (az *Azure) ClusterInfo() (map[string]string, error) {
 	m["account"] = az.ClusterAccountID
 	m["region"] = az.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 
 }
@@ -1564,7 +1565,7 @@ func (az *Azure) UpdateConfig(r io.Reader, updateType string) (*models.CustomPri
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return fmt.Errorf("error updating cluster metadata: %s", err)
 			}

+ 1 - 1
pkg/cloud/azure/storagebillingparser.go

@@ -62,7 +62,7 @@ func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, re
 
 	if env.IsAzureDownloadBillingDataToDisk() {
 		// clean up old files that have been saved to disk before downloading new ones
-		localPath := filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "db", "cloudcost")
+		localPath := env.GetAzureDownloadBillingDataPath()
 		if _, err := asbp.deleteFilesOlderThan7d(localPath); err != nil {
 			log.Warnf("CloudCost: Azure: ParseBillingData: failed to remove the following stale files: %v", err)
 		}

+ 1 - 4
pkg/cloud/config/controller.go

@@ -3,7 +3,6 @@ package config
 import (
 	"fmt"
 	"os"
-	"path/filepath"
 	"sync"
 	"time"
 
@@ -15,8 +14,6 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 )
 
-const configFile = "cloud-configurations.json"
-
 // Controller manages the cloud.Config using config Watcher(s) to track various configuration
 // methods. To do this it has a map of config watchers mapped on configuration source and a list Observers that it updates
 // upon any change detected from the config watchers.
@@ -33,7 +30,7 @@ func NewController(providerConfig models.ProviderConfig) *Controller {
 	watchers := GetCloudBillingWatchers(providerConfig)
 
 	storage := &FileControllerStorage{
-		path: filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), configFile),
+		path: env.GetCloudCostConfigControllerStateFile(),
 	}
 
 	ic := &Controller{

+ 2 - 0
pkg/cloud/config/controller_test.go

@@ -11,6 +11,8 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 )
 
+var configFile = "test.json"
+
 // Baseline valid config
 var validAthenaConf = &aws.AthenaConfiguration{
 	Bucket:     "bucket",

+ 2 - 7
pkg/cloud/config/watcher.go

@@ -175,7 +175,7 @@ func (cfw *ConfigFileWatcher) GetConfigs() []cloud.KeyedConfig {
 		}
 
 		var key map[string]string
-		err2 := loadFile(env.GetConfigPathWithDefault("/models/")+"key.json", &key)
+		err2 := loadFile(env.GetGCPAuthSecretFilePath(), &key)
 		if err2 != nil {
 			log.Errorf("ConfigFileWatcher: GCP: %s", err2)
 		}
@@ -239,13 +239,8 @@ type MultiCloudWatcher struct {
 }
 
 func (mcw *MultiCloudWatcher) GetConfigs() []cloud.KeyedConfig {
-	var multiConfigPath string
 
-	if env.IsKubernetesEnabled() {
-		multiConfigPath = path.Join(env.GetConfigPathWithDefault("/var/configs"), cloudIntegrationSecretPath)
-	} else {
-		multiConfigPath = env.GetCloudCostConfigPath()
-	}
+	multiConfigPath := env.GetCloudCostConfigPath()
 	exists, err := fileutil.FileExists(multiConfigPath)
 	if err != nil {
 		log.Errorf("MultiCloudWatcher:  error checking file at '%s': %s", multiConfigPath, err.Error())

+ 5 - 8
pkg/cloud/gcp/provider.go

@@ -14,6 +14,7 @@ import (
 	"sync"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -185,9 +186,7 @@ func (gcp *GCP) GetManagementPlatform() (string, error) {
 
 // Attempts to load a GCP auth secret and copy the contents to the key file.
 func (*GCP) loadGCPAuthSecret() {
-	path := env.GetConfigPathWithDefault("/models/")
-
-	keyPath := path + "key.json"
+	keyPath := env.GetGCPAuthSecretFilePath()
 	keyExists, _ := fileutil.FileExists(keyPath)
 	if keyExists {
 		log.Info("GCP Auth Key already exists, no need to load from secret")
@@ -239,9 +238,7 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 					return err
 				}
 
-				path := env.GetConfigPathWithDefault("/models/")
-
-				keyPath := path + "key.json"
+				keyPath := env.GetGCPAuthSecretFilePath()
 				err = os.WriteFile(keyPath, j, 0644)
 				if err != nil {
 					return err
@@ -284,7 +281,7 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}
@@ -323,7 +320,7 @@ func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 	m["account"] = gcp.ClusterAccountID
 	m["project"] = gcp.ClusterProjectID
 	m["provisioner"] = gcp.clusterProvisioner
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
 	return m, nil
 }

+ 3 - 2
pkg/cloud/oracle/provider.go

@@ -7,6 +7,7 @@ import (
 	"sync"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
@@ -48,7 +49,7 @@ func (o *Oracle) ClusterInfo() (map[string]string, error) {
 	m["account"] = o.ClusterAccountID
 	m["region"] = o.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 }
 
@@ -180,7 +181,7 @@ func (o *Oracle) UpdateConfig(r io.Reader, _ string) (*models.CustomPricing, err
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(env.GetClusterID(), o.getClusterName(pricing))
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), o.getClusterName(pricing))
 			if err != nil {
 				return err
 			}

+ 2 - 1
pkg/cloud/otc/provider.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
@@ -355,7 +356,7 @@ func (otc *OTC) ClusterInfo() (map[string]string, error) {
 	m["account"] = c.ProjectID
 	m["region"] = otc.ClusterRegion
 	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 }
 

+ 22 - 47
pkg/cloud/provider/cloud_test.go

@@ -11,6 +11,8 @@ import (
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/clusters"
+	"github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/storage"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/cloud/provider"
@@ -22,10 +24,9 @@ import (
 )
 
 const (
-	providerIDMap  = "spec.providerID"
-	nameMap        = "metadata.name"
-	labelMapFoo    = "metadata.labels.foo"
-	labelMapFooBar = "metadata.labels.foo.bar"
+	providerIDMap = "spec.providerID"
+	nameMap       = "metadata.name"
+	labelMapFoo   = "metadata.labels.foo"
 )
 
 func TestRegionValueFromMapField(t *testing.T) {
@@ -102,9 +103,7 @@ func TestPVPriceFromCSV(t *testing.T) {
 	pv := &clustercache.PersistentVolume{}
 	pv.Name = nameWant
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	wantPrice := "0.1337"
 	c := &provider.CSVProvider{
@@ -136,9 +135,7 @@ func TestPVPriceFromCSVStorageClass(t *testing.T) {
 	pv.Name = nameWant
 	pv.Spec.StorageClassName = storageClassWant
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	wantPrice := "0.1338"
 	c := &provider.CSVProvider{
@@ -169,9 +166,7 @@ func TestNodePriceFromCSVWithGPU(t *testing.T) {
 	labelFooWant := "labelfoo"
 	wantGPU := "2"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -263,11 +258,9 @@ func TestNodePriceFromCSVWithGPULabels(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv("CONFIG_PATH", configPath)
+	t.Setenv(env.ConfigPathEnvVar, configPath)
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = "providerid"
@@ -332,11 +325,9 @@ func TestRKE2NodePriceFromCSVWithGPULabels(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv("CONFIG_PATH", configPath)
+	t.Setenv(env.ConfigPathEnvVar, configPath)
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = "providerid"
@@ -379,9 +370,7 @@ func TestRKE2NodePriceFromCSVWithGPULabels(t *testing.T) {
 func TestNodePriceFromCSVSpecialChar(t *testing.T) {
 	nameWant := "gke-standard-cluster-1-pool-1-91dc432d-cg69"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.Name = nameWant
@@ -416,9 +405,7 @@ func TestNodePriceFromCSV(t *testing.T) {
 	nameWant := "gke-standard-cluster-1-pool-1-91dc432d-cg69"
 	labelFooWant := "labelfoo"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -478,9 +465,7 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 	nameWant := "foo"
 	labelFooWant := "labelfoo"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.SpecProviderID = providerIDWant
@@ -671,11 +656,9 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 	}
 
 	t.Logf("Setting Config Path to: %s", configPath)
-	t.Setenv("CONFIG_PATH", configPath)
+	t.Setenv(env.ConfigPathEnvVar, configPath)
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -705,11 +688,9 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 }
 
 func TestSourceMatchesFromCSV(t *testing.T) {
-	os.Setenv("CONFIG_PATH", "../../../configs")
+	os.Setenv(env.ConfigPathEnvVar, "../../../configs")
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -786,9 +767,7 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 	n.Labels[v1.LabelTopologyRegion] = "eastus2"
 	wantPrice := "0.13370357"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",
@@ -816,9 +795,7 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 func TestNodePriceFromCSVMixed(t *testing.T) {
 	labelFooWant := "OnDemand"
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	n := &clustercache.Node{}
 	n.Labels = make(map[string]string)
@@ -879,9 +856,7 @@ func TestNodePriceFromCSVByClass(t *testing.T) {
 	wantpricefloat := 0.13370357
 	wantPrice := fmt.Sprintf("%f", (math.Round(wantpricefloat*1000000) / 1000000))
 
-	confMan := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		LocalConfigPath: "./",
-	})
+	confMan := config.NewConfigFileManager(storage.NewFileStorage("./"))
 
 	c := &provider.CSVProvider{
 		CSVLocation: "../../../configs/pricing_schema_case.csv",

+ 2 - 2
pkg/cloud/provider/customprovider.go

@@ -8,13 +8,13 @@ import (
 	"sync"
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 type NodePrice struct {
@@ -142,7 +142,7 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 	m["provider"] = opencost.CustomProvider
 	m["region"] = cp.ClusterRegion
 	m["account"] = cp.ClusterAccountID
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 }
 

+ 3 - 88
pkg/cloud/provider/provider.go

@@ -34,22 +34,6 @@ import (
 	"github.com/opencost/opencost/pkg/util/watcher"
 )
 
-// ClusterName returns the name defined in cluster info, defaulting to the
-// CLUSTER_ID environment variable
-func ClusterName(p models.Provider) string {
-	info, err := p.ClusterInfo()
-	if err != nil {
-		return env.GetClusterID()
-	}
-
-	name, ok := info["name"]
-	if !ok {
-		return env.GetClusterID()
-	}
-
-	return name
-}
-
 // CustomPricesEnabled returns the boolean equivalent of the cloup provider's custom prices flag,
 // indicating whether or not the cluster is using custom pricing.
 func CustomPricesEnabled(p models.Provider) bool {
@@ -77,77 +61,6 @@ func ConfigWatcherFor(p models.Provider) *watcher.ConfigMapWatcher {
 	}
 }
 
-// AllocateIdleByDefault returns true if the application settings specify to allocate idle by default
-func AllocateIdleByDefault(p models.Provider) bool {
-	config, err := p.GetConfig()
-	if err != nil {
-		return false
-	}
-
-	return config.DefaultIdle == "true"
-}
-
-// SharedNamespace returns a list of names of shared namespaces, as defined in the application settings
-func SharedNamespaces(p models.Provider) []string {
-	namespaces := []string{}
-
-	config, err := p.GetConfig()
-	if err != nil {
-		return namespaces
-	}
-	if config.SharedNamespaces == "" {
-		return namespaces
-	}
-	// trim spaces so that "kube-system, kubecost" is equivalent to "kube-system,kubecost"
-	for _, ns := range strings.Split(config.SharedNamespaces, ",") {
-		namespaces = append(namespaces, strings.Trim(ns, " "))
-	}
-
-	return namespaces
-}
-
-// SharedLabel returns the configured set of shared labels as a parallel tuple of keys to values; e.g.
-// for app:kubecost,type:staging this returns (["app", "type"], ["kubecost", "staging"]) in order to
-// match the signature of the NewSharedResourceInfo
-func SharedLabels(p models.Provider) ([]string, []string) {
-	names := []string{}
-	values := []string{}
-
-	config, err := p.GetConfig()
-	if err != nil {
-		return names, values
-	}
-
-	if config.SharedLabelNames == "" || config.SharedLabelValues == "" {
-		return names, values
-	}
-
-	ks := strings.Split(config.SharedLabelNames, ",")
-	vs := strings.Split(config.SharedLabelValues, ",")
-	if len(ks) != len(vs) {
-		log.Warnf("Shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
-		return names, values
-	}
-
-	for i := range ks {
-		names = append(names, strings.Trim(ks[i], " "))
-		values = append(values, strings.Trim(vs[i], " "))
-	}
-
-	return names, values
-}
-
-// ShareTenancyCosts returns true if the application settings specify to share
-// tenancy costs by default.
-func ShareTenancyCosts(p models.Provider) bool {
-	config, err := p.GetConfig()
-	if err != nil {
-		return false
-	}
-
-	return config.ShareTenancyCosts == "true"
-}
-
 // NewProvider looks at the nodespec or provider metadata server to decide which provider to instantiate.
 func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.ConfigFileManager) (models.Provider, error) {
 	getAllNodesFunc := func() ([]*clustercache.Node, error) {
@@ -159,13 +72,15 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 	}
 
 	var nodes []*clustercache.Node
-	if !env.IsETLReadOnlyMode() {
+
+	if env.HasKubernetesResourceAccess() {
 		// the error can be ignored because getAllNodesFunc only errors if nodes is empty, a case which we explicitly
 		// handle by checking the length of nodes below
 		nodes, _ = retry.Retry(context.Background(), getAllNodesFunc, 10, time.Second)
 	} else {
 		nodes, _ = getAllNodesFunc()
 	}
+
 	if len(nodes) == 0 {
 		log.Infof("Could not locate any nodes for cluster.")
 		return &CustomProvider{

+ 2 - 8
pkg/cloud/provider/providerconfig.go

@@ -7,6 +7,7 @@ import (
 	"strconv"
 	"sync"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/util/json"
 	"github.com/opencost/opencost/pkg/cloud/alibaba"
@@ -18,7 +19,6 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/otc"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/config"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 const closedSourceConfigMount = "models/"
@@ -35,7 +35,7 @@ type ProviderConfig struct {
 
 // NewProviderConfig creates a new ConfigFile and returns the ProviderConfig
 func NewProviderConfig(configManager *config.ConfigFileManager, fileName string) *ProviderConfig {
-	configFile := configManager.ConfigFileAt(configPathFor(fileName))
+	configFile := configManager.ConfigFileAt(coreenv.GetPathFromConfig(fileName))
 	pc := &ProviderConfig{
 		lock:          new(sync.Mutex),
 		configManager: configManager,
@@ -272,12 +272,6 @@ func DefaultPricing() *models.CustomPricing {
 	}
 }
 
-// Returns the configuration directory concatenated with a specific config file name
-func configPathFor(filename string) string {
-	path := env.GetConfigPathWithDefault("/models/")
-	return gopath.Join(path, filename)
-}
-
 // Gives the config file name in a full qualified file name
 func filenameInConfigPath(fqfn string) string {
 	_, fileName := gopath.Split(fqfn)

+ 3 - 2
pkg/cloud/scaleway/provider.go

@@ -8,6 +8,7 @@ import (
 	"strings"
 	"sync"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 
@@ -303,7 +304,7 @@ func (scw *Scaleway) ClusterInfo() (map[string]string, error) {
 	m["region"] = scw.ClusterRegion
 	m["account"] = scw.ClusterAccountID
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
-	m["id"] = env.GetClusterID()
+	m["id"] = coreenv.GetClusterID()
 	return m, nil
 
 }
@@ -335,7 +336,7 @@ func (c *Scaleway) UpdateConfig(r io.Reader, updateType string) (*models.CustomP
 		}
 
 		if env.IsRemoteEnabled() {
-			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			err := utils.UpdateClusterMeta(coreenv.GetClusterID(), c.ClusterName)
 			if err != nil {
 				return err
 			}

+ 1 - 1
pkg/cloudcost/pipelineservice.go

@@ -142,7 +142,7 @@ func (s *PipelineService) GetCloudCostRepairHandler() func(w http.ResponseWriter
 
 		var window opencost.Window
 		if windowStr != "" {
-			win, err := opencost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
+			win, err := opencost.ParseWindowUTC(windowStr)
 			if err != nil {
 				http.Error(w, fmt.Sprintf("Invalid parameter: %s", err), http.StatusBadRequest)
 				return

+ 2 - 2
pkg/clustercache/clustercache.go

@@ -56,7 +56,7 @@ func NewKubernetesClusterCacheV1(client kubernetes.Interface) cc.ClusterCache {
 	batchClient := client.BatchV1().RESTClient()
 	pdbClient := client.PolicyV1().RESTClient()
 
-	installNamespace := env.GetInstallNamespace()
+	installNamespace := env.GetOpencostNamespace()
 	log.Infof("NAMESPACE: %s", installNamespace)
 
 	kcc := &KubernetesClusterCache{
@@ -80,7 +80,7 @@ func NewKubernetesClusterCacheV1(client kubernetes.Interface) cc.ClusterCache {
 	// Wait for each caching watcher to initialize
 	cancel := make(chan struct{})
 	var wg sync.WaitGroup
-	if !env.IsETLReadOnlyMode() {
+	if env.HasKubernetesResourceAccess() {
 		wg.Add(14)
 		go initializeCache(kcc.namespaceWatch, &wg, cancel)
 		go initializeCache(kcc.nodeWatch, &wg, cancel)

+ 2 - 4
pkg/clustercache/clustercache2.go

@@ -54,9 +54,8 @@ func NewKubernetesClusterCacheV2(clientset kubernetes.Interface) *KubernetesClus
 func (kcc *KubernetesClusterCacheV2) Run() {
 	var wg sync.WaitGroup
 
-	if !env.IsETLReadOnlyMode() {
-		wg.Add(14)
-
+	wg.Add(14)
+	if env.HasKubernetesResourceAccess() {
 		kcc.namespaceStore.Watch(kcc.stopCh, wg.Done)
 		kcc.nodeStore.Watch(kcc.stopCh, wg.Done)
 		kcc.persistentVolumeClaimStore.Watch(kcc.stopCh, wg.Done)
@@ -72,7 +71,6 @@ func (kcc *KubernetesClusterCacheV2) Run() {
 		kcc.jobStore.Watch(kcc.stopCh, wg.Done)
 		kcc.pdbStore.Watch(kcc.stopCh, wg.Done)
 	}
-
 	wg.Wait()
 }
 

+ 5 - 11
pkg/cmd/agent/agent.go

@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-	"path"
 	"time"
 
 	"github.com/opencost/opencost/core/pkg/clusters"
@@ -75,12 +74,7 @@ func Execute(opts *AgentOpts) error {
 	}
 
 	// Create ConfigFileManager for synchronization of shared configuration
-	confManager := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		BucketStoreConfig: env.GetConfigBucketFile(),
-		LocalConfigPath:   "/",
-	})
-
-	configPrefix := env.GetConfigPathWithDefault(env.DefaultConfigMountPath)
+	confManager := config.NewConfigFileManager(nil)
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
 	cloudProvider, err := provider.NewProvider(clusterCache, cloudProviderKey, confManager)
@@ -93,7 +87,7 @@ func Execute(opts *AgentOpts) error {
 
 	var clusterInfoProvider clusters.ClusterInfoProvider
 	if env.IsExportClusterInfoEnabled() {
-		clusterInfoConf := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
+		clusterInfoConf := confManager.ConfigFileAt(env.GetClusterInfoFilePath())
 		clusterInfoProvider = costmodel.NewClusterInfoWriteOnRequest(localClusterInfo, clusterInfoConf)
 	} else {
 		clusterInfoProvider = localClusterInfo
@@ -129,14 +123,14 @@ func Execute(opts *AgentOpts) error {
 	}
 
 	// Append the pricing config watcher
-	kubecostNamespace := env.GetInstallNamespace()
-	configWatchers := watcher.NewConfigMapWatchers(k8sClient, kubecostNamespace)
+	installNamespace := env.GetOpencostNamespace()
+	configWatchers := watcher.NewConfigMapWatchers(k8sClient, installNamespace)
 	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.Watch()
 
 	// Initialize cluster exporting if it's enabled
 	if env.IsExportClusterCacheEnabled() {
-		cacheLocation := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-cache.json"))
+		cacheLocation := confManager.ConfigFileAt(env.GetClusterCacheFilePath())
 		clusterExporter = cluster.NewClusterExporter(clusterCache, cacheLocation, ClusterExportInterval)
 		clusterExporter.Run()
 	}

+ 3 - 3
pkg/cmd/commands.go

@@ -16,7 +16,7 @@ const (
 	// commandRoot is the root command used to route to sub-commands
 	commandRoot string = "root"
 
-	// CommandCostModel is the command used to execute the metrics emission and ETL pipeline
+	// CommandCostModel is the command used to execute the metrics emission and cost model querying
 	CommandCostModel string = "cost-model"
 
 	// CommandAgent executes the application in agent mode, which provides only metrics exporting.
@@ -96,7 +96,7 @@ func newRootCommand(costModelCmd *cobra.Command, cmds ...*cobra.Command) *cobra.
 
 // default open-source cost-model command
 func newCostModelCommand() *cobra.Command {
-	opts := &costmodel.CostModelOpts{}
+	config := costmodel.DefaultConfig()
 
 	cmCmd := &cobra.Command{
 		Use:   CommandCostModel,
@@ -105,7 +105,7 @@ func newCostModelCommand() *cobra.Command {
 			// Init logging here so cobra/viper has processed the command line args and flags
 			// otherwise only envvars are available during init
 			log.InitLogging(true)
-			return costmodel.Execute(opts)
+			return costmodel.Execute(config)
 		},
 	}
 

+ 32 - 0
pkg/cmd/costmodel/config.go

@@ -0,0 +1,32 @@
+package costmodel
+
+import (
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/pkg/env"
+)
+
+// Config contain configuration options that can be passed to the Execute() method
+type Config struct {
+	Port                   int
+	KubernetesEnabled      bool
+	CarbonEstimatesEnabled bool
+	CloudCostEnabled       bool
+	CustomCostEnabled      bool
+}
+
+func DefaultConfig() *Config {
+	return &Config{
+		Port:                   env.GetOpencostAPIPort(),
+		KubernetesEnabled:      env.IsKubernetesEnabled(),
+		CarbonEstimatesEnabled: env.IsCarbonEstimatesEnabled(),
+		CloudCostEnabled:       env.IsCloudCostEnabled(),
+		CustomCostEnabled:      env.IsCustomCostEnabled(),
+	}
+}
+
+func (c *Config) log() {
+	log.Infof("Kubernetes enabled: %t", c.KubernetesEnabled)
+	log.Infof("Carbon Estimates enabled: %t", c.CarbonEstimatesEnabled)
+	log.Infof("Cloud Costs enabled: %t", c.CloudCostEnabled)
+	log.Infof("Custom Costs enabled: %t", c.CustomCostEnabled)
+}

+ 12 - 77
pkg/cmd/costmodel/costmodel.go

@@ -4,11 +4,10 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-	"net/http/pprof"
 	"time"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/core/pkg/util/apiutil"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/customcost"
@@ -24,25 +23,17 @@ import (
 	"github.com/opencost/opencost/pkg/metrics"
 )
 
-// CostModelOpts contain configuration options that can be passed to the Execute() method
-type CostModelOpts struct {
-	// Stubbed for future configuration
-}
-
-func Healthz(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
-	w.WriteHeader(200)
-	w.Header().Set("Content-Length", "0")
-	w.Header().Set("Content-Type", "text/plain")
-}
-
-func Execute(opts *CostModelOpts) error {
+func Execute(conf *Config) error {
 	log.Infof("Starting cost-model version %s", version.FriendlyVersion())
-	log.Infof("Kubernetes enabled: %t", env.IsKubernetesEnabled())
+	if conf == nil {
+		conf = DefaultConfig()
+	}
+	conf.log()
 
 	router := httprouter.New()
 	var a *costmodel.Accesses
 	var cp models.Provider
-	if env.IsKubernetesEnabled() {
+	if conf.KubernetesEnabled {
 		a = costmodel.Initialize(router)
 		err := StartExportWorker(context.Background(), a.Model)
 		if err != nil {
@@ -53,7 +44,7 @@ func Execute(opts *CostModelOpts) error {
 		router.GET("/allocation", a.ComputeAllocationHandler)
 		router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
 		router.GET("/assets", a.ComputeAssetsHandler)
-		if env.IsCarbonEstimatesEnabled() {
+		if conf.CarbonEstimatesEnabled {
 			router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
 		}
 
@@ -61,8 +52,7 @@ func Execute(opts *CostModelOpts) error {
 		cp = a.CloudProvider
 	}
 
-	log.Infof("Cloud Costs enabled: %t", env.IsCloudCostEnabled())
-	if env.IsCloudCostEnabled() {
+	if conf.CloudCostEnabled {
 		var providerConfig models.ProviderConfig
 		if cp != nil {
 			providerConfig = provider.ExtractConfigFromProviders(cp)
@@ -70,9 +60,8 @@ func Execute(opts *CostModelOpts) error {
 		costmodel.InitializeCloudCost(router, providerConfig)
 	}
 
-	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
 	var customCostPipelineService *customcost.PipelineService
-	if env.IsCustomCostEnabled() {
+	if conf.CustomCostEnabled {
 		customCostPipelineService = costmodel.InitializeCustomCost(router)
 	}
 
@@ -80,20 +69,7 @@ func Execute(opts *CostModelOpts) error {
 	// valid for CustomCostPipelineService to be nil
 	router.GET("/customCost/status", customCostPipelineService.GetCustomCostStatusHandler())
 
-	router.GET("/healthz", Healthz)
-
-	router.GET("/logs/level", GetLogLevel)
-	router.POST("/logs/level", SetLogLevel)
-
-	if env.IsPProfEnabled() {
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
-		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
-		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
-		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
-	}
+	apiutil.ApplyContainerDiagnosticEndpoints(router)
 
 	rootMux := http.NewServeMux()
 	rootMux.Handle("/", router)
@@ -101,7 +77,7 @@ func Execute(opts *CostModelOpts) error {
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)
 	handler := cors.AllowAll().Handler(telemetryHandler)
 
-	return http.ListenAndServe(fmt.Sprint(":", env.GetAPIPort()), errors.PanicHandlerMiddleware(handler))
+	return http.ListenAndServe(fmt.Sprint(":", conf.Port), errors.PanicHandlerMiddleware(handler))
 }
 
 func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) error {
@@ -138,44 +114,3 @@ func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) err
 	}()
 	return nil
 }
-
-type LogLevelRequestResponse struct {
-	Level string `json:"level"`
-}
-
-func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	level := log.GetLogLevel()
-	llrr := LogLevelRequestResponse{
-		Level: level,
-	}
-
-	body, err := json.Marshal(llrr)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to retrive log level"), http.StatusInternalServerError)
-		return
-	}
-	_, err = w.Write(body)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
-		return
-	}
-}
-
-func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
-	params := LogLevelRequestResponse{}
-	err := json.NewDecoder(r.Body).Decode(&params)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
-		return
-	}
-
-	err = log.SetLogLevel(params.Level)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
-		return
-	}
-	w.WriteHeader(http.StatusOK)
-}

+ 3 - 53
pkg/config/configmanager.go

@@ -1,44 +1,11 @@
 package config
 
 import (
-	"os"
 	"sync"
 
-	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/storage"
 )
 
-//--------------------------------------------------------------------------
-//  ConfigFileManagerOpts
-//--------------------------------------------------------------------------
-
-// ConfigFileManagerOpts describes how to configure the ConfigFileManager for
-// serving configuration files
-type ConfigFileManagerOpts struct {
-	// BucketStoreConfig is the local file location for the configuration used to
-	// write and read configuration data to/from the bucket. The format of this
-	// configuration file should be compatible with storage.NewBucketStorage
-	BucketStoreConfig string
-
-	// LocalConfigPath provides a backup location for storing the configuration
-	// files
-	LocalConfigPath string
-}
-
-// IsBucketStorageEnabled returns true if bucket storage is enabled.
-func (cfmo *ConfigFileManagerOpts) IsBucketStorageEnabled() bool {
-	return cfmo.BucketStoreConfig != ""
-}
-
-// DefaultConfigFileManagerOpts returns the default configuration options for the
-// config file manager
-func DefaultConfigFileManagerOpts() *ConfigFileManagerOpts {
-	return &ConfigFileManagerOpts{
-		BucketStoreConfig: "",
-		LocalConfigPath:   "/",
-	}
-}
-
 //--------------------------------------------------------------------------
 //  ConfigFileManager
 //--------------------------------------------------------------------------
@@ -52,26 +19,9 @@ type ConfigFileManager struct {
 }
 
 // NewConfigFileManager creates a new backing storage and configuration file manager
-func NewConfigFileManager(opts *ConfigFileManagerOpts) *ConfigFileManager {
-	if opts == nil {
-		opts = DefaultConfigFileManagerOpts()
-	}
-
-	var configStore storage.Storage
-	if opts.IsBucketStorageEnabled() {
-		bucketConfig, err := os.ReadFile(opts.BucketStoreConfig)
-		if err != nil {
-			log.Warnf("Failed to initialize config bucket storage: %s", err)
-		} else {
-			bucketStore, err := storage.NewBucketStorage(bucketConfig)
-			if err != nil {
-				log.Warnf("Failed to create config bucket storage: %s", err)
-			} else {
-				configStore = bucketStore
-			}
-		}
-	} else {
-		configStore = storage.NewFileStorage(opts.LocalConfigPath)
+func NewConfigFileManager(configStore storage.Storage) *ConfigFileManager {
+	if configStore == nil {
+		configStore = storage.NewFileStorage("/")
 	}
 
 	return &ConfigFileManager{

+ 9 - 9
pkg/costmodel/allocation_helpers.go

@@ -7,12 +7,12 @@ import (
 	"strings"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/pkg/cloud/provider"
-	"github.com/opencost/opencost/pkg/env"
 	"k8s.io/apimachinery/pkg/labels"
 )
 
@@ -108,7 +108,7 @@ func applyPodResults(window opencost.Window, resolution time.Duration, podMap ma
 
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		namespace := res.Namespace
@@ -914,7 +914,7 @@ func applyNetworkAllocation(podMap map[podKey]*pod, resNetworkGiB []*source.Netw
 	for _, res := range resNetworkCostPerGiB {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		costPerGiBByCluster[cluster] = res.Data[0].Value
@@ -1643,7 +1643,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 	for _, res := range resNodeCostPerCPUHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := res.Node
@@ -1679,7 +1679,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 	for _, res := range resNodeCostPerRAMGiBHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := res.Node
@@ -1715,7 +1715,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 	for _, res := range resNodeCostPerGPUHr {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := res.Node
@@ -1751,7 +1751,7 @@ func applyNodeSpot(nodeMap map[nodeKey]*nodePricing, resNodeIsSpot []*source.Nod
 	for _, res := range resNodeIsSpot {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := res.Node
@@ -2050,7 +2050,7 @@ func buildPVCMap(resolution time.Duration, pvcMap map[pvcKey]*pvc, pvMap map[pvK
 	for _, res := range resPVCInfo {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		namespace := res.Namespace
@@ -2110,7 +2110,7 @@ func buildPodPVCMap(podPVCMap map[podKey][]*pvc, pvMap map[pvKey]*pv, pvcMap map
 	for _, res := range resPodPVCAllocation {
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		namespace := res.Namespace

+ 15 - 14
pkg/costmodel/cluster.go

@@ -6,6 +6,7 @@ import (
 	"strings"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 	"golang.org/x/exp/slices"
 
@@ -166,7 +167,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageBytes {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -208,7 +209,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -236,7 +237,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -263,7 +264,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedAvg {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -290,7 +291,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalStorageUsedMax {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -317,7 +318,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resLocalActiveMins {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -365,7 +366,7 @@ func ClusterDisks(dataSource source.OpenCostDataSource, cp models.Provider, star
 	for _, result := range resPVStorageClass {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -763,7 +764,7 @@ func pvCosts(
 	for _, result := range resActiveMins {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -799,7 +800,7 @@ func pvCosts(
 	for _, result := range resPVSize {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -831,7 +832,7 @@ func pvCosts(
 	for _, result := range resPVCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.PersistentVolume
@@ -875,7 +876,7 @@ func pvCosts(
 	for _, result := range resPVUsedAvg {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		claimName := result.PersistentVolumeClaim
@@ -896,7 +897,7 @@ func pvCosts(
 
 			thatCluster := thatRes.Cluster
 			if thatCluster == "" {
-				thatCluster = env.GetClusterID()
+				thatCluster = coreenv.GetClusterID()
 			}
 
 			thatVolumeName := thatRes.VolumeName
@@ -942,7 +943,7 @@ func pvCosts(
 	for _, result := range resPVUsedMax {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		claimName := result.PersistentVolumeClaim
@@ -962,7 +963,7 @@ func pvCosts(
 		for _, thatRes := range resPVCInfo {
 			thatCluster := thatRes.Cluster
 			if thatCluster == "" {
-				thatCluster = env.GetClusterID()
+				thatCluster = coreenv.GetClusterID()
 			}
 
 			thatVolumeName := thatRes.VolumeName

+ 17 - 17
pkg/costmodel/cluster_helpers.go

@@ -5,6 +5,7 @@ import (
 	"strconv"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/provider"
 
@@ -12,7 +13,6 @@ import (
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 // mergeTypeMaps takes two maps of (cluster name, node name) -> node type
@@ -49,7 +49,7 @@ func buildCPUCostMap(
 	for _, result := range resNodeCPUCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -117,7 +117,7 @@ func buildRAMCostMap(
 	for _, result := range resNodeRAMCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -188,7 +188,7 @@ func buildGPUCostMap(
 	for _, result := range resNodeGPUCost {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -251,7 +251,7 @@ func buildGPUCountMap(resNodeGPUCount []*source.NodeGPUCountResult) map[NodeIden
 	for _, result := range resNodeGPUCount {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -280,7 +280,7 @@ func buildCPUCoresMap(resNodeCPUCores []*source.NodeCPUCoresCapacityResult) map[
 	for _, result := range resNodeCPUCores {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -307,7 +307,7 @@ func buildRAMBytesMap(resNodeRAMBytes []*source.NodeRAMBytesCapacityResult) map[
 	for _, result := range resNodeRAMBytes {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -342,7 +342,7 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*source.NodeCPUModeTotalResult)
 	for _, result := range resNodeCPUModeTotal {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := result.Node
@@ -448,7 +448,7 @@ func buildRAMUserPctMap(resNodeRAMUserPct []*source.NodeRAMUserPercentResult) ma
 	for _, result := range resNodeRAMUserPct {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -477,7 +477,7 @@ func buildRAMSystemPctMap(resNodeRAMSystemPct []*source.NodeRAMSystemPercentResu
 	for _, result := range resNodeRAMSystemPct {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Instance
@@ -509,7 +509,7 @@ type activeData struct {
 func clusterManagementKeyGen(result *source.ClusterManagementDurationResult) (ClusterManagementIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	provisionerName := result.Provisioner
@@ -528,7 +528,7 @@ func clusterManagementValues(result *source.ClusterManagementDurationResult) []*
 func nodeKeyGen(result *source.NodeActiveMinutesResult) (NodeIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	name := result.Node
@@ -552,7 +552,7 @@ func nodeValues(result *source.NodeActiveMinutesResult) []*util.Vector {
 func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdentifier, bool) {
 	cluster := result.Cluster
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	namespace := result.Namespace
@@ -577,7 +577,7 @@ func loadBalancerKeyGen(result *source.LBActiveMinutesResult) (LoadBalancerIdent
 	return LoadBalancerIdentifier{
 		Cluster:   cluster,
 		Namespace: namespace,
-		Name:      fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good,
+		Name:      fmt.Sprintf("%s/%s", namespace, name), // TODO: this is kept for backwards-compatibility, but not good,
 		IngressIP: ingressIp,
 	}, true
 }
@@ -627,7 +627,7 @@ func buildPreemptibleMap(
 	for _, result := range resIsSpot {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		name := result.Node
@@ -666,7 +666,7 @@ func buildAssetsPVCMap(resPVCInfo []*source.PVCInfoResult) map[DiskIdentifier]*D
 	for _, result := range resPVCInfo {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		volumeName := result.VolumeName
@@ -716,7 +716,7 @@ func buildLabelsMap(
 	for _, result := range resLabels {
 		cluster := result.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 
 		node := result.Node

+ 6 - 6
pkg/costmodel/costmodel.go

@@ -12,13 +12,13 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/clustercache"
 	"github.com/opencost/opencost/core/pkg/clusters"
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
 	"github.com/opencost/opencost/core/pkg/util"
 	"github.com/opencost/opencost/core/pkg/util/promutil"
 	costAnalyzerCloud "github.com/opencost/opencost/pkg/cloud/models"
-	"github.com/opencost/opencost/pkg/env"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -134,7 +134,7 @@ func (cd *CostData) GetController() (name string, kind string, hasController boo
 
 func (cm *CostModel) ComputeCostData(start, end time.Time) (map[string]*CostData, error) {
 	// Cluster ID is specific to the source cluster
-	clusterID := env.GetClusterID()
+	clusterID := coreenv.GetClusterID()
 	cp := cm.Provider
 	ds := cm.DataSource
 	mq := ds.Metrics()
@@ -1266,7 +1266,7 @@ func (cm *CostModel) GetLBCost() (map[serviceKey]*costAnalyzerCloud.LoadBalancer
 		namespace := service.Namespace
 		name := service.Name
 		key := serviceKey{
-			Cluster:   env.GetClusterID(),
+			Cluster:   coreenv.GetClusterID(),
 			Namespace: namespace,
 			Service:   name,
 		}
@@ -1590,7 +1590,7 @@ func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration,
 
 				_, err := opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
 				if err != nil {
-					log.Errorf("ETL: error updating asset resource totals for %s: %s", assetSet.Window, err)
+					log.Errorf("Allocation: error updating asset resource totals for %s: %s", assetSet.Window, err)
 				}
 			}
 
@@ -1649,7 +1649,7 @@ func (cm *CostModel) QueryAllocation(window opencost.Window, step time.Duration,
 
 			_, err = opencost.UpdateAssetTotalsStore(totalsStore, assetSet)
 			if err != nil {
-				log.Errorf("ETL: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
+				log.Errorf("Allocation: error updating asset resource totals for %s: %s", opencost.NewClosedWindow(*asr.Window().Start(), *asr.Window().End()), err)
 			}
 
 		}
@@ -1759,7 +1759,7 @@ func computeIdleAllocations(allocSet *opencost.AllocationSet, assetSet *opencost
 	for key, assetTotal := range assetTotals {
 		allocTotal, ok := allocTotals[key]
 		if !ok {
-			log.Warnf("ETL: did not find allocations for asset key: %s", key)
+			log.Warnf("Allocation: did not find allocations for asset key: %s", key)
 
 			// Use a zero-value set of totals. This indicates either (1) an
 			// error computing totals, or (2) that no allocations ran on the

+ 8 - 8
pkg/costmodel/key.go

@@ -3,13 +3,13 @@ package costmodel
 import (
 	"fmt"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/opencost"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 func newResultPodKey(cluster string, namespace string, pod string) (podKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -65,7 +65,7 @@ func newNamespaceKey(cluster, namespace string) namespaceKey {
 
 func newResultNamespaceKey(cluster string, namespace string) (namespaceKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -97,7 +97,7 @@ func newControllerKey(cluster, namespace, controllerKind, controller string) con
 
 func newResultControllerKey(cluster, namespace, controller, controllerKind string) (controllerKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -131,7 +131,7 @@ func newServiceKey(cluster, namespace, service string) serviceKey {
 
 func newResultServiceKey(cluster, namespace, service string) (serviceKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -163,7 +163,7 @@ func newNodeKey(cluster, node string) nodeKey {
 
 func newResultNodeKey(cluster string, node string) (nodeKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if node == "" {
@@ -199,7 +199,7 @@ func newPVCKey(cluster, namespace, persistentVolumeClaim string) pvcKey {
 // clusterLabel, which we expect may not exist, but has a default value.)
 func newResultPVCKey(cluster, namespace, pvc string) (pvcKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 
 	if namespace == "" {
@@ -231,7 +231,7 @@ func newPVKey(cluster, persistentVolume string) pvKey {
 
 func newResultPVKey(cluster, pv string) (pvKey, error) {
 	if cluster == "" {
-		cluster = env.GetClusterID()
+		cluster = coreenv.GetClusterID()
 	}
 	if pv == "" {
 		return pvKey{}, fmt.Errorf("persistentvolume is required")

+ 2 - 2
pkg/costmodel/networkinsight.go

@@ -4,10 +4,10 @@ import (
 	"fmt"
 	"time"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/source"
-	"github.com/opencost/opencost/pkg/env"
 )
 
 func (cm *CostModel) ComputeNetworkInsights(start, end time.Time) (*opencost.NetworkInsightSet, error) {
@@ -138,7 +138,7 @@ func applyNetworkCosts(
 
 		cluster := res.Cluster
 		if cluster == "" {
-			cluster = env.GetClusterID()
+			cluster = coreenv.GetClusterID()
 		}
 		namespace := res.Namespace
 		pod := res.Pod

+ 2 - 1
pkg/costmodel/nodeclientconfig.go

@@ -8,6 +8,7 @@ import (
 	"os"
 	"strings"
 
+	coreenv "github.com/opencost/opencost/core/pkg/env"
 	"github.com/opencost/opencost/core/pkg/log"
 	nodes "github.com/opencost/opencost/core/pkg/nodestats"
 	"github.com/opencost/opencost/pkg/env"
@@ -19,7 +20,7 @@ const (
 )
 
 func NewNodeClientConfigFromEnv() (*nodes.NodeClientConfig, error) {
-	clusterId := env.GetClusterID()
+	clusterId := coreenv.GetClusterID()
 	concurrentPollers := defaultConcurrentPollers
 	insecure := env.IsNodeStatsInsecure()
 	certFile := env.GetNodeStatsCertFile()

+ 8 - 23
pkg/costmodel/router.go

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"net/http"
 	"os"
-	"path"
 	"reflect"
 	"strconv"
 	"strings"
@@ -349,7 +348,7 @@ func (a *Accesses) GetInstallNamespace(w http.ResponseWriter, r *http.Request, _
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
 
-	ns := env.GetInstallNamespace()
+	ns := env.GetOpencostNamespace()
 	w.Write([]byte(ns))
 }
 
@@ -397,7 +396,7 @@ func (a *Accesses) GetInstallInfo(w http.ResponseWriter, r *http.Request, _ http
 }
 
 func GetKubecostContainers(kubeClientSet kubernetes.Interface) ([]ContainerInfo, error) {
-	pods, err := kubeClientSet.CoreV1().Pods(env.GetInstallNamespace()).List(context.Background(), metav1.ListOptions{
+	pods, err := kubeClientSet.CoreV1().Pods(env.GetOpencostNamespace()).List(context.Background(), metav1.ListOptions{
 		LabelSelector: "app=cost-analyzer",
 		FieldSelector: "status.phase=Running",
 		Limit:         1,
@@ -433,7 +432,7 @@ func (a *Accesses) AddServiceKey(w http.ResponseWriter, r *http.Request, ps http
 
 	key := r.PostForm.Get("key")
 	k := []byte(key)
-	err := os.WriteFile(path.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "key.json"), k, 0644)
+	err := os.WriteFile(env.GetGCPAuthSecretFilePath(), k, 0644)
 	if err != nil {
 		fmt.Fprintf(w, "Error writing service key: %s", err)
 	}
@@ -474,12 +473,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	k8sCache.Run()
 
 	// Create ConfigFileManager for synchronization of shared configuration
-	confManager := config.NewConfigFileManager(&config.ConfigFileManagerOpts{
-		BucketStoreConfig: env.GetConfigBucketFile(),
-		LocalConfigPath:   "/",
-	})
-
-	configPrefix := env.GetConfigPathWithDefault("/var/configs/")
+	confManager := config.NewConfigFileManager(nil)
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
 	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
@@ -490,7 +484,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	// ClusterInfo Provider to provide the cluster map with local and remote cluster data
 	var clusterInfoProvider clusters.ClusterInfoProvider
 	if env.IsClusterInfoFileEnabled() {
-		clusterInfoFile := confManager.ConfigFileAt(path.Join(configPrefix, "cluster-info.json"))
+		clusterInfoFile := confManager.ConfigFileAt(env.GetClusterInfoFilePath())
 		clusterInfoProvider = NewConfiguredClusterInfoProvider(clusterInfoFile)
 	} else {
 		clusterInfoProvider = NewLocalClusterInfoProvider(kubeClientset, cloudProvider)
@@ -516,7 +510,7 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	}
 	if env.IsCollectorDataSourceEnabled() {
 		fn = func() (source.OpenCostDataSource, error) {
-			store := getStorage()
+			store := storage.GetDefaultStorage()
 			nodeStatConf, err := NewNodeClientConfigFromEnv()
 			if err != nil {
 				return nil, fmt.Errorf("failed to get node client config: %w", err)
@@ -549,9 +543,9 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	}
 
 	// Append the pricing config watcher
-	kubecostNamespace := env.GetInstallNamespace()
+	installNamespace := env.GetOpencostNamespace()
 
-	configWatchers := watcher.NewConfigMapWatchers(kubeClientset, kubecostNamespace, additionalConfigWatchers...)
+	configWatchers := watcher.NewConfigMapWatchers(kubeClientset, installNamespace, additionalConfigWatchers...)
 	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 	configWatchers.Watch()
@@ -609,15 +603,6 @@ func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.
 	return a
 }
 
-func getStorage() storage.Storage {
-	var store storage.Storage
-	pvMountPath := env.GetPVMountPath()
-	if pvMountPath != "" {
-		store = storage.NewFileStorage(pvMountPath)
-	}
-	return store
-}
-
 // InitializeCloudCost Initializes Cloud Cost pipeline and querier and registers endpoints
 func InitializeCloudCost(router *httprouter.Router, providerConfig models.ProviderConfig) {
 	log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())

+ 79 - 0
pkg/env/cloudcost.go

@@ -0,0 +1,79 @@
+package env
+
+import (
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+const (
+	CloudCostConfigControllerStateFile = "cloud-configurations.json"
+	CloudIntegrationConfigFile         = "cloud-integration.json"
+	AzureBillingDataDownloadPath       = "db/cloudcost"
+)
+
+const (
+	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
+	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
+	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
+	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
+	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
+
+	CustomCostEnabledEnvVar         = "CUSTOM_COST_ENABLED"
+	CustomCostQueryWindowDaysEnvVar = "CUSTOM_COST_QUERY_WINDOW_DAYS"
+
+	PluginConfigDirEnvVar     = "PLUGIN_CONFIG_DIR"
+	PluginExecutableDirEnvVar = "PLUGIN_EXECUTABLE_DIR"
+
+	AzureDownloadBillingDataToDiskEnvVar = "AZURE_DOWNLOAD_BILLING_DATA_TO_DISK"
+)
+
+func IsCloudCostEnabled() bool {
+	return env.GetBool(CloudCostEnabledEnvVar, false)
+}
+
+func IsCustomCostEnabled() bool {
+	return env.GetBool(CustomCostEnabledEnvVar, false)
+}
+
+func GetCloudCostConfigPath() string {
+	return env.GetPathFromConfig(CloudIntegrationConfigFile)
+}
+
+func GetCloudCostMonthToDateInterval() int {
+	return env.GetInt(CloudCostMonthToDateIntervalVar, 6)
+}
+
+func GetCloudCostRefreshRateHours() int64 {
+	return env.GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
+}
+
+func GetCloudCostQueryWindowDays() int64 {
+	return env.GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCustomCostQueryWindowHours() int64 {
+	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 1)
+}
+
+func GetCustomCostQueryWindowDays() int64 {
+	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCloudCostRunWindowDays() int64 {
+	return env.GetInt64(CloudCostRunWindowDaysEnvVar, 3)
+}
+
+func GetPluginConfigDir() string {
+	return env.Get(PluginConfigDirEnvVar, "/opt/opencost/plugin/config")
+}
+
+func GetPluginExecutableDir() string {
+	return env.Get(PluginExecutableDirEnvVar, "/opt/opencost/plugin/bin")
+}
+
+func GetAzureDownloadBillingDataPath() string {
+	return env.GetPathFromConfig(AzureBillingDataDownloadPath)
+}
+
+func GetCloudCostConfigControllerStateFile() string {
+	return env.GetPathFromConfig(CloudCostConfigControllerStateFile)
+}

+ 38 - 0
pkg/env/cloudcost_test.go

@@ -0,0 +1,38 @@
+package env
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+func TestGetCloudCostConfigPath(t *testing.T) {
+	tests := []struct {
+		name string
+		want string
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is 'cloud-integration.json'",
+			want: "/var/configs/cloud-integration.json",
+		},
+		{
+			name: "Ensure the path is '/test/cloud-integration.json' when CONFIG_PATH is set to '/test'",
+			want: "/test/cloud-integration.json",
+			pre: func() {
+				env.Set(env.ConfigPathEnvVar, "/test")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetCloudCostConfigPath(); got != tt.want {
+				t.Errorf("GetCloudCostConfigPath() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}

+ 46 - 239
pkg/env/costmodelenv.go → pkg/env/costmodel.go

@@ -1,17 +1,25 @@
 package env
 
 import (
-	"time"
-
 	"github.com/opencost/opencost/core/pkg/env"
-	"github.com/opencost/opencost/core/pkg/log"
-	"github.com/opencost/opencost/core/pkg/util/timeutil"
 )
 
+// FilePaths
+const (
+	ClusterInfoFile = "cluster-info.json"
+	ClusterCacheFile  = "cluster-cache.json" // was valueless: in a Go const block that repeats the previous value ("cluster-info.json"); confirm intended filename
+	GCPAuthSecretFile = "key.json"
+	MetricConfigFile  = "metrics.json"
+)
+
+// Env Variables
 const (
-	APIPortEnvVar          = "API_PORT"
-	NetworkCostsPortEnvVar = "NETWORK_COSTS_PORT"
+	// Open configs
+
+	// We assume that Kubernetes is enabled if there is a KUBERNETES_PORT environment variable present
+	KubernetesEnabledEnvVar = "KUBERNETES_PORT"
 
+	// Cloud Provider
 	AWSAccessKeyIDEnvVar     = "AWS_ACCESS_KEY_ID"
 	AWSAccessKeySecretEnvVar = "AWS_SECRET_ACCESS_KEY"
 	AWSClusterIDEnvVar       = "AWS_CLUSTER_ID"
@@ -20,26 +28,23 @@ const (
 	AlibabaAccessKeyIDEnvVar     = "ALIBABA_ACCESS_KEY_ID"
 	AlibabaAccessKeySecretEnvVar = "ALIBABA_SECRET_ACCESS_KEY"
 
-	AzureOfferIDEnvVar                   = "AZURE_OFFER_ID"
-	AzureBillingAccountEnvVar            = "AZURE_BILLING_ACCOUNT"
-	AzureDownloadBillingDataToDiskEnvVar = "AZURE_DOWNLOAD_BILLING_DATA_TO_DISK"
-
-	ReleaseNameEnvVar                = "RELEASE_NAME"
-	PodNameEnvVar                    = "POD_NAME"
-	ClusterIDEnvVar                  = "CLUSTER_ID"
-	ClusterProfileEnvVar             = "CLUSTER_PROFILE"
-	RemoteEnabledEnvVar              = "REMOTE_WRITE_ENABLED"
-	RemotePWEnvVar                   = "REMOTE_WRITE_PASSWORD"
-	SQLAddressEnvVar                 = "SQL_ADDRESS"
-	UseCSVProviderEnvVar             = "USE_CSV_PROVIDER"
-	UseCustomProviderEnvVar          = "USE_CUSTOM_PROVIDER"
-	CSVRegionEnvVar                  = "CSV_REGION"
-	CSVEndpointEnvVar                = "CSV_ENDPOINT"
-	CSVPathEnvVar                    = "CSV_PATH"
-	ConfigPathEnvVar                 = "CONFIG_PATH"
+	AzureOfferIDEnvVar        = "AZURE_OFFER_ID"
+	AzureBillingAccountEnvVar = "AZURE_BILLING_ACCOUNT"
+
+	OCIPricingURL = "OCI_PRICING_URL"
+
+	ClusterProfileEnvVar    = "CLUSTER_PROFILE"
+	RemoteEnabledEnvVar     = "REMOTE_WRITE_ENABLED"
+	RemotePWEnvVar          = "REMOTE_WRITE_PASSWORD"
+	SQLAddressEnvVar        = "SQL_ADDRESS"
+	UseCSVProviderEnvVar    = "USE_CSV_PROVIDER"
+	UseCustomProviderEnvVar = "USE_CUSTOM_PROVIDER"
+	CSVRegionEnvVar         = "CSV_REGION"
+	CSVEndpointEnvVar       = "CSV_ENDPOINT"
+	CSVPathEnvVar           = "CSV_PATH"
+
 	CloudProviderAPIKeyEnvVar        = "CLOUD_PROVIDER_API_KEY"
 	CollectorDataSourceEnabledEnvVar = "COLLECTOR_DATA_SOURCE_ENABLED"
-	PVMountPath                      = "PV_MOUNT_PATH"
 
 	EmitPodAnnotationsMetricEnvVar       = "EMIT_POD_ANNOTATIONS_METRIC"
 	EmitNamespaceAnnotationsMetricEnvVar = "EMIT_NAMESPACE_ANNOTATIONS_METRIC"
@@ -48,29 +53,18 @@ const (
 	EmitKsmV1MetricsEnvVar = "EMIT_KSM_V1_METRICS"
 	EmitKsmV1MetricsOnly   = "EMIT_KSM_V1_METRICS_ONLY"
 
-	PProfEnabledEnvVar = "PPROF_ENABLED"
-
 	LogCollectionEnabledEnvVar    = "LOG_COLLECTION_ENABLED"
 	ProductAnalyticsEnabledEnvVar = "PRODUCT_ANALYTICS_ENABLED"
 	ErrorReportingEnabledEnvVar   = "ERROR_REPORTING_ENABLED"
 	ValuesReportingEnabledEnvVar  = "VALUES_REPORTING_ENABLED"
 
-	KubeRbacProxyEnabled = "KUBE_RBAC_PROXY_ENABLED"
-
-	KubeConfigPathEnvVar = "KUBECONFIG_PATH"
-
-	UTCOffsetEnvVar = "UTC_OFFSET"
-
 	PricingConfigmapName = "PRICING_CONFIGMAP_NAME"
 	MetricsConfigmapName = "METRICS_CONFIGMAP_NAME"
 
-	ClusterInfoFileEnabledEnvVar  = "CLUSTER_INFO_FILE_ENABLED"
-	ClusterCacheFileEnabledEnvVar = "CLUSTER_CACHE_FILE_ENABLED"
+	ClusterInfoFileEnabledEnvVar = "CLUSTER_INFO_FILE_ENABLED"
 
 	IngestPodUIDEnvVar = "INGEST_POD_UID"
 
-	ETLReadOnlyMode = "ETL_READ_ONLY"
-
 	AllocationNodeLabelsEnabled = "ALLOCATION_NODE_LABELS_ENABLED"
 
 	AssetIncludeLocalDiskCostEnvVar = "ASSET_INCLUDE_LOCAL_DISK_COST"
@@ -82,55 +76,20 @@ const (
 	ExportCSVLabelsAll  = "EXPORT_CSV_LABELS_ALL"
 	ExportCSVMaxDays    = "EXPORT_CSV_MAX_DAYS"
 
-	ExportBucketConfigFileEnvVar = "EXPORT_BUCKET_CONFIG_FILE"
-
 	DataRetentionDailyResolutionDaysEnvVar   = "DATA_RETENTION_DAILY_RESOLUTION_DAYS"
 	DataRetentionHourlyResolutionHoursEnvVar = "DATA_RETENTION_HOURLY_RESOLUTION_HOURS"
 
-	// We assume that Kubernetes is enabled if there is a KUBERNETES_PORT environment variable present
-	KubernetesEnabledEnvVar         = "KUBERNETES_PORT"
-	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
-	CloudCostConfigPath             = "CLOUD_COST_CONFIG_PATH"
-	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
-	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
-	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
-	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
-
-	CustomCostEnabledEnvVar          = "CUSTOM_COST_ENABLED"
-	CustomCostQueryWindowDaysEnvVar  = "CUSTOM_COST_QUERY_WINDOW_DAYS"
-	CustomCostRefreshRateHoursEnvVar = "CUSTOM_COST_REFRESH_RATE_HOURS"
-
-	PluginConfigDirEnvVar     = "PLUGIN_CONFIG_DIR"
-	PluginExecutableDirEnvVar = "PLUGIN_EXECUTABLE_DIR"
-
-	OCIPricingURL = "OCI_PRICING_URL"
-
 	CarbonEstimatesEnabledEnvVar = "CARBON_ESTIMATES_ENABLED"
 
-	UseCacheV1 = "USE_CACHE_V1"
-
-	InstallNamespaceEnvVar = "INSTALL_NAMESPACE"
-	ConfigBucketEnvVar     = "CONFIG_BUCKET"
-
-	// Node Stats Client Configuration
-	NodeStatsForceKubeProxyEnvVar = "NODESTATS_FORCE_KUBE_PROXY"
-	NodeStatsLocalProxyEnvVar     = "NODESTATS_LOCAL_PROXY"
-	NodeStatsInsecureEnvVar       = "NODESTATS_INSECURE"
-	NodeStatsCertFileEnvVar       = "NODESTATS_CERT_FILE"
-	NodeStatsKeyFileEnvVar        = "NODESTATS_KEY_FILE"
-
-	// Deprecated
-	KubecostNamespaceEnvVar    = "KUBECOST_NAMESPACE"
-	KubecostConfigBucketEnvVar = "KUBECOST_CONFIG_BUCKET"
+	KubernetesResourceAccessEnvVar = "KUBERNETES_RESOURCE_ACCESS"
+	UseCacheV1                     = "USE_CACHE_V1"
 
 	// Cloud provider override
 	CloudProviderVar = "CLOUD_PROVIDER"
 )
 
-const DefaultConfigMountPath = "/var/configs"
-
-func IsETLReadOnlyMode() bool {
-	return env.GetBool(ETLReadOnlyMode, false)
+func GetGCPAuthSecretFilePath() string {
+	return env.GetPathFromConfig(GCPAuthSecretFile)
 }
 
 func GetExportCSVFile() string {
@@ -145,36 +104,22 @@ func GetExportCSVLabelsList() []string {
 	return env.GetList(ExportCSVLabelsList, ",")
 }
 
-func IsPProfEnabled() bool {
-	return env.GetBool(PProfEnabledEnvVar, false)
-}
-
 func GetExportCSVMaxDays() int {
 	return env.GetInt(ExportCSVMaxDays, 90)
 }
 
-// GetAPIPort returns the environment variable value for APIPortEnvVar which
-// is the port number the API is available on.
-func GetAPIPort() int {
-	return env.GetInt(APIPortEnvVar, 9003)
-}
-
-// GetConfigBucketFile returns a file location for a mounted bucket configuration which is used to store
-// a subset of configurations that require sharing via remote storage.
-func GetConfigBucketFile() string {
-	return env.Get(ConfigBucketEnvVar, env.Get(KubecostConfigBucketEnvVar, ""))
-}
-
 // IsClusterInfoFileEnabled returns true if the cluster info is read from a file or pulled from the local
 // cloud provider and kubernetes.
 func IsClusterInfoFileEnabled() bool {
 	return env.GetBool(ClusterInfoFileEnabledEnvVar, false)
 }
 
-// IsClusterCacheFileEnabled returns true if the kubernetes cluster data is read from a file or pulled from the local
-// kubernetes API.
-func IsClusterCacheFileEnabled() bool {
-	return env.GetBool(ClusterCacheFileEnabledEnvVar, false)
+func GetClusterInfoFilePath() string {
+	return env.GetPathFromConfig(ClusterInfoFile)
+}
+
+func GetClusterCacheFilePath() string {
+	return env.GetPathFromConfig(ClusterCacheFile)
 }
 
 func GetPricingConfigmapName() string {
@@ -274,34 +219,12 @@ func IsAzureDownloadBillingDataToDisk() bool {
 	return env.GetBool(AzureDownloadBillingDataToDiskEnvVar, true)
 }
 
-// GetInstallNamespace returns the environment variable value that is set for the kubernetes namespace
-// this service is installed in.
-func GetInstallNamespace() string {
-	return env.Get(InstallNamespaceEnvVar, env.Get(KubecostNamespaceEnvVar, "opencost"))
-}
-
-// GetPodName returns the name of the current running pod. If this environment variable is not set,
-// empty string is returned.
-func GetPodName() string {
-	return env.Get(PodNameEnvVar, "")
-}
-
 // GetClusterProfile returns the environment variable value for ClusterProfileEnvVar which
 // represents the cluster profile configured for
 func GetClusterProfile() string {
 	return env.Get(ClusterProfileEnvVar, "development")
 }
 
-// GetClusterID returns the environment variable value for ClusterIDEnvVar which represents the
-// configurable identifier used for multi-cluster metric emission.
-func GetClusterID() string {
-	return env.Get(ClusterIDEnvVar, "")
-}
-
-func IsKubeRbacProxyEnabled() bool {
-	return env.GetBool(KubeRbacProxyEnabled, false)
-}
-
 // IsRemoteEnabled returns the environment variable value for RemoteEnabledEnvVar which represents whether
 // or not remote write is enabled for prometheus for use with SQL backed persistent storage.
 func IsRemoteEnabled() bool {
@@ -350,28 +273,12 @@ func GetCSVPath() string {
 	return env.Get(CSVPathEnvVar, "")
 }
 
-// GetCostAnalyzerVolumeMountPath is an alias of GetConfigPath, which returns the mount path for the
-// Cost Analyzer volume, which stores configs, persistent data, etc.
-func GetCostAnalyzerVolumeMountPath() string {
-	return GetConfigPathWithDefault(DefaultConfigMountPath)
-}
-
-// GetConfigPath returns the environment variable value for ConfigPathEnvVar which represents the cost
-// model configuration path
-func GetConfigPathWithDefault(defaultValue string) string {
-	return env.Get(ConfigPathEnvVar, defaultValue)
-}
-
 // GetCloudProviderAPI returns the environment variable value for CloudProviderAPIEnvVar which represents
 // the API key provided for the cloud provider.
 func GetCloudProviderAPIKey() string {
 	return env.Get(CloudProviderAPIKeyEnvVar, "")
 }
 
-func GetPVMountPath() string {
-	return env.Get(PVMountPath, "")
-}
-
 // IsCollectorDataSourceEnabeled returns the environment variable which enables a source.OpencostDatasource which does not use uses Prometheus
 func IsCollectorDataSourceEnabled() bool {
 	return env.GetBool(CollectorDataSourceEnabledEnvVar, false)
@@ -398,26 +305,6 @@ func IsValuesReportingEnabled() bool {
 	return env.GetBool(ValuesReportingEnabledEnvVar, true)
 }
 
-// GetKubeConfigPath returns the environment variable value for KubeConfigPathEnvVar
-func GetKubeConfigPath() string {
-	return env.Get(KubeConfigPathEnvVar, "")
-}
-
-// GetUTCOffset returns the environment variable value for UTCOffset
-func GetUTCOffset() string {
-	return env.Get(UTCOffsetEnvVar, "")
-}
-
-// GetParsedUTCOffset returns the duration of the configured UTC offset
-func GetParsedUTCOffset() time.Duration {
-	offset, err := timeutil.ParseUTCOffset(GetUTCOffset())
-	if err != nil {
-		log.Warnf("Failed to parse UTC offset: %s", err)
-		return time.Duration(0)
-	}
-	return offset
-}
-
 // IsIngestingPodUID returns the env variable from ingestPodUID, which alters the
 // contents of podKeys in Allocation
 func IsIngestingPodUID() bool {
@@ -454,65 +341,16 @@ func IsKubernetesEnabled() bool {
 	return env.Get(KubernetesEnabledEnvVar, "") != ""
 }
 
-func IsCloudCostEnabled() bool {
-	return env.GetBool(CloudCostEnabledEnvVar, false)
-}
-
-func IsCustomCostEnabled() bool {
-	return env.GetBool(CustomCostEnabledEnvVar, false)
-}
-
-func GetCloudCostConfigPath() string {
-	return env.Get(CloudCostConfigPath, "cloud-integration.json")
-}
-
-func GetCloudCostMonthToDateInterval() int {
-	return env.GetInt(CloudCostMonthToDateIntervalVar, 6)
-}
-
-func GetCloudCostRefreshRateHours() int64 {
-	return env.GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
-}
-
-func GetCloudCostQueryWindowDays() int64 {
-	return env.GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
-}
-
-func GetCustomCostQueryWindowHours() int64 {
-	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 1)
-}
-
-func GetCustomCostQueryWindowDays() int64 {
-	return env.GetInt64(CustomCostQueryWindowDaysEnvVar, 7)
-}
-
-func GetCloudCostRunWindowDays() int64 {
-	return env.GetInt64(CloudCostRunWindowDaysEnvVar, 3)
-}
-
 func GetOCIPricingURL() string {
 	return env.Get(OCIPricingURL, "https://apexapps.oracle.com/pls/apex/cetools/api/v1/products")
 }
 
-func GetPluginConfigDir() string {
-	return env.Get(PluginConfigDirEnvVar, "/opt/opencost/plugin/config")
-}
-
-func GetPluginExecutableDir() string {
-	return env.Get(PluginExecutableDirEnvVar, "/opt/opencost/plugin/bin")
-}
-
-func GetCustomCostRefreshRateHours() string {
-	return env.Get(CustomCostRefreshRateHoursEnvVar, "12h")
-}
-
 func IsCarbonEstimatesEnabled() bool {
 	return env.GetBool(CarbonEstimatesEnabledEnvVar, false)
 }
 
-func GetExportBucketConfigFile() string {
-	return env.Get(ExportBucketConfigFileEnvVar, "")
-}
+// HasKubernetesResourceAccess can be set to false if Opencost is run without access to the kubernetes resources
+func HasKubernetesResourceAccess() bool { return env.GetBool(KubernetesResourceAccessEnvVar, true) }
 
 // GetUseCacheV1 is a temporary flag to allow users to opt-in to using the old cache
 // Mainly for comparison purposes
@@ -520,42 +358,11 @@ func GetUseCacheV1() bool {
 	return env.GetBool(UseCacheV1, false)
 }
 
-func GetReleaseName() string {
-	return env.Get(ReleaseNameEnvVar, "kubecost")
-}
-
-func GetNetworkCostsPort() int {
-	return env.GetInt(NetworkCostsPortEnvVar, 3001)
-}
-
-// IsNodeStatsForceKubeProxy returns true if the node stats client should force the kube proxy direct end
-// point formatting
-func IsNodeStatsForceKubeProxy() bool {
-	return env.GetBool(NodeStatsForceKubeProxyEnvVar, false)
-}
-
-// GetNodeStatsLocalProxy returns the fully qualified local proxy endpoint for the node stats client IFF the proxyAPI
-// is selected.
-func GetNodeStatsLocalProxy() string {
-	return env.Get(NodeStatsLocalProxyEnvVar, "")
-}
-
-// IsNodeStatsInsecure returns true if the node stats client should skip TLS verification
-func IsNodeStatsInsecure() bool {
-	return env.GetBool(NodeStatsInsecureEnvVar, false)
-}
-
-// GetNodeStatsCertFile returns the path of the cert file
-func GetNodeStatsCertFile() string {
-	return env.Get(NodeStatsCertFileEnvVar, "")
-}
-
-// GetNodeStatsKeyFile returns the path of the key file
-func GetNodeStatsKeyFile() string {
-	return env.Get(NodeStatsKeyFileEnvVar, "")
-}
-
 // GetCloudProvider returns the explicitly set cloud provider from environment variable
 func GetCloudProvider() string {
 	return env.Get(CloudProviderVar, "")
 }
+
+func GetMetricConfigFile() string {
+	return env.GetPathFromConfig(MetricConfigFile)
+}

+ 81 - 0
pkg/env/costmodel_test.go

@@ -0,0 +1,81 @@
+package env
+
+import (
+	"os"
+	"testing"
+)
+
+func TestGetExportCSVMaxDays(t *testing.T) {
+	tests := []struct {
+		name string
+		want int
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is 90d",
+			want: 90,
+		},
+		{
+			name: "Ensure the value is 30 when EXPORT_CSV_MAX_DAYS is set to 30",
+			want: 30,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "30")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to empty string",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to invalid value",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "foo")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetExportCSVMaxDays(); got != tt.want {
+				t.Errorf("GetExportCSVMaxDays() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestGetKubernetesEnabled(t *testing.T) {
+	tests := []struct {
+		name string
+		want bool
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is false",
+			want: false,
+		},
+		{
+			name: "Ensure the value is true when KUBERNETES_PORT has a value",
+			want: true,
+			pre: func() {
+				os.Setenv("KUBERNETES_PORT", "tcp://10.43.0.1:443")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := IsKubernetesEnabled(); got != tt.want {
+				t.Errorf("IsKubernetesEnabled() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}

+ 0 - 236
pkg/env/costmodelenv_test.go

@@ -1,236 +0,0 @@
-package env
-
-import (
-	"os"
-	"testing"
-)
-
-func TestGetAPIPort(t *testing.T) {
-	tests := []struct {
-		name string
-		want int
-		pre  func()
-	}{
-		{
-			name: "Ensure the default API port '9003'",
-			want: 9003,
-		},
-		{
-			name: "Ensure the default API port '9003' when API_PORT is set to ''",
-			want: 9003,
-			pre: func() {
-				os.Setenv("API_PORT", "")
-			},
-		},
-		{
-			name: "Ensure the API port '9004' when API_PORT is set to '9004'",
-			want: 9004,
-			pre: func() {
-				os.Setenv("API_PORT", "9004")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetAPIPort(); got != tt.want {
-				t.Errorf("GetAPIPort() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}
-
-func TestGetExportCSVMaxDays(t *testing.T) {
-	tests := []struct {
-		name string
-		want int
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is 90d",
-			want: 90,
-		},
-		{
-			name: "Ensure the value is 30 when EXPORT_CSV_MAX_DAYS is set to 30",
-			want: 30,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "30")
-			},
-		},
-		{
-			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to empty string",
-			want: 90,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "")
-			},
-		},
-		{
-			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to invalid value",
-			want: 90,
-			pre: func() {
-				os.Setenv("EXPORT_CSV_MAX_DAYS", "foo")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetExportCSVMaxDays(); got != tt.want {
-				t.Errorf("GetExportCSVMaxDays() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
-func TestGetKubernetesEnabled(t *testing.T) {
-	tests := []struct {
-		name string
-		want bool
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is false",
-			want: false,
-		},
-		{
-			name: "Ensure the value is true when KUBERNETES_PORT has a value",
-			want: true,
-			pre: func() {
-				os.Setenv("KUBERNETES_PORT", "tcp://10.43.0.1:443")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := IsKubernetesEnabled(); got != tt.want {
-				t.Errorf("IsKubernetesEnabled() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}
-
-func TestGetCloudCostConfigPath(t *testing.T) {
-	tests := []struct {
-		name string
-		want string
-		pre  func()
-	}{
-		{
-			name: "Ensure the default value is 'cloud-integration.json'",
-			want: "cloud-integration.json",
-		},
-		{
-			name: "Ensure the value is 'cloud-integration.json' when CLOUD_COST_CONFIG_PATH is set to ''",
-			want: "cloud-integration.json",
-			pre: func() {
-				os.Setenv("CLOUD_COST_CONFIG_PATH", "")
-			},
-		},
-		{
-			name: "Ensure the value is 'flying-pig.json' when CLOUD_COST_CONFIG_PATH is set to 'flying-pig.json'",
-			want: "flying-pig.json",
-			pre: func() {
-				os.Setenv("CLOUD_COST_CONFIG_PATH", "flying-pig.json")
-			},
-		},
-	}
-	for _, tt := range tests {
-		if tt.pre != nil {
-			tt.pre()
-		}
-		t.Run(tt.name, func(t *testing.T) {
-			if got := GetCloudCostConfigPath(); got != tt.want {
-				t.Errorf("GetCloudCostConfigPath() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-
-}
-
-func TestEnvVarsWithBackup(t *testing.T) {
-	t.Run("test install namespace env var", func(t *testing.T) {
-		t.Setenv(InstallNamespaceEnvVar, "test-namespace")
-		t.Setenv(KubecostNamespaceEnvVar, "kubecost-test-namespace")
-
-		ns := GetInstallNamespace()
-		if ns != "test-namespace" {
-			t.Errorf("Expected install namespace to be 'test-namespace', got '%s'", ns)
-		}
-	})
-	t.Run("test kubecost namespace env var", func(t *testing.T) {
-		t.Setenv(KubecostNamespaceEnvVar, "kc-test-namespace")
-
-		ns := GetInstallNamespace()
-
-		if ns != "kc-test-namespace" {
-			t.Errorf("Expected install namespace to be 'kc-test-namespace', got '%s'", ns)
-		}
-	})
-
-	t.Run("test default install namespace", func(t *testing.T) {
-		t.Setenv(InstallNamespaceEnvVar, "test-namespace")
-
-		ns := GetInstallNamespace()
-
-		if ns != "test-namespace" {
-			t.Errorf("Expected default install namespace to be 'test-namespace', got '%s'", ns)
-		}
-	})
-
-	t.Run("test default install namespace", func(t *testing.T) {
-		ns := GetInstallNamespace()
-
-		if ns != "opencost" {
-			t.Errorf("Expected default install namespace to be 'opencost', got '%s'", ns)
-		}
-	})
-
-	t.Run("test config bucket file with both", func(t *testing.T) {
-		t.Setenv(ConfigBucketEnvVar, "test-bucket")
-		t.Setenv(KubecostConfigBucketEnvVar, "kc-test-bucket")
-
-		configBucketFile := GetConfigBucketFile()
-
-		if configBucketFile != "test-bucket" {
-			t.Errorf("Expected config bucket file to be 'test-bucket', got '%s'", configBucketFile)
-		}
-	})
-
-	t.Run("test config bucket file with kc", func(t *testing.T) {
-		t.Setenv(KubecostConfigBucketEnvVar, "kc-test-bucket")
-
-		configBucketFile := GetConfigBucketFile()
-
-		if configBucketFile != "kc-test-bucket" {
-			t.Errorf("Expected config bucket file to be 'kc-test-bucket', got '%s'", configBucketFile)
-		}
-	})
-
-	t.Run("test config bucket file with single", func(t *testing.T) {
-		t.Setenv(ConfigBucketEnvVar, "test-bucket")
-
-		configBucketFile := GetConfigBucketFile()
-
-		if configBucketFile != "test-bucket" {
-			t.Errorf("Expected config bucket file to be 'test-bucket', got '%s'", configBucketFile)
-		}
-	})
-
-	t.Run("test config bucket file with both", func(t *testing.T) {
-		configBucketFile := GetConfigBucketFile()
-
-		if configBucketFile != "" {
-			t.Errorf("Expected config bucket file to be '', got '%s'", configBucketFile)
-		}
-	})
-
-}

+ 0 - 0
pkg/env/kubemetricsenv.go → pkg/env/kubemetrics.go


+ 41 - 0
pkg/env/nodestats.go

@@ -0,0 +1,41 @@
+package env
+
+import (
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+const (
+	// Node Stats Client Configuration
+	NodeStatsForceKubeProxyEnvVar = "NODESTATS_FORCE_KUBE_PROXY"
+	NodeStatsLocalProxyEnvVar     = "NODESTATS_LOCAL_PROXY"
+	NodeStatsInsecureEnvVar       = "NODESTATS_INSECURE"
+	NodeStatsCertFileEnvVar       = "NODESTATS_CERT_FILE"
+	NodeStatsKeyFileEnvVar        = "NODESTATS_KEY_FILE"
+)
+
+// IsNodeStatsForceKubeProxy returns true if the node stats client should force the kube proxy direct end
+// point formatting
+func IsNodeStatsForceKubeProxy() bool {
+	return env.GetBool(NodeStatsForceKubeProxyEnvVar, false)
+}
+
+// GetNodeStatsLocalProxy returns the fully qualified local proxy endpoint for the node stats client IFF the proxyAPI
+// is selected.
+func GetNodeStatsLocalProxy() string {
+	return env.Get(NodeStatsLocalProxyEnvVar, "")
+}
+
+// IsNodeStatsInsecure returns true if the node stats client should skip TLS verification
+func IsNodeStatsInsecure() bool {
+	return env.GetBool(NodeStatsInsecureEnvVar, false)
+}
+
+// GetNodeStatsCertFile returns the path of the cert file
+func GetNodeStatsCertFile() string {
+	return env.Get(NodeStatsCertFileEnvVar, "")
+}
+
+// GetNodeStatsKeyFile returns the path of the key file
+func GetNodeStatsKeyFile() string {
+	return env.Get(NodeStatsKeyFileEnvVar, "")
+}

+ 1 - 0
pkg/env/nodestats_test.go

@@ -0,0 +1 @@
+package env

+ 44 - 0
pkg/env/opencost.go

@@ -0,0 +1,44 @@
+package env
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/env"
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
+)
+
+// Environment variables specific to the running of opencost
+const (
+	DefaultAPIPort           = 9003
+	defaultOpencostNamespace = "opencost"
+)
+
+const (
+	UTCOffsetEnvVar = "UTC_OFFSET"
+)
+
+func GetOpencostAPIPort() int {
+	return env.GetAPIPortWithDefault(DefaultAPIPort)
+}
+
+// GetOpencostNamespace returns the environment variable value that is set for the kubernetes namespace
+// this service is installed in.
+func GetOpencostNamespace() string {
+	return env.GetInstallNamespace(defaultOpencostNamespace)
+}
+
+// GetUTCOffset returns the environment variable value for UTCOffset
+func GetUTCOffset() string {
+	return env.Get(UTCOffsetEnvVar, "")
+}
+
+// GetParsedUTCOffset returns the duration of the configured UTC offset
+func GetParsedUTCOffset() time.Duration {
+	offset, err := timeutil.ParseUTCOffset(GetUTCOffset())
+	if err != nil {
+		log.Warnf("Failed to parse UTC offset: %s", err)
+		return time.Duration(0)
+	}
+	return offset
+}

+ 47 - 0
pkg/env/opencost_test.go

@@ -0,0 +1,47 @@
+package env
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/opencost/opencost/core/pkg/env"
+)
+
+func TestGetAPIPort(t *testing.T) {
+	tests := []struct {
+		name string
+		want int
+		pre  func()
+	}{
+		{
+			name: "Ensure the default API port '9003'",
+			want: 9003,
+		},
+		{
+			name: fmt.Sprintf("Ensure the default API port '9003' when %s is set to ''", env.APIPortEnvVar),
+			want: 9003,
+			pre: func() {
+				os.Setenv(env.APIPortEnvVar, "")
+			},
+		},
+		{
+			name: fmt.Sprintf("Ensure the API port '9004' when %s is set to '9004'", env.APIPortEnvVar),
+			want: 9004,
+			pre: func() {
+				os.Setenv(env.APIPortEnvVar, "9004")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetOpencostAPIPort(); got != tt.want {
+				t.Errorf("GetOpencostAPIPort() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+
+}

+ 1 - 2
pkg/metrics/metricsconfig.go

@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
-	"path"
 	"sync"
 
 	"github.com/opencost/opencost/pkg/env"
@@ -13,7 +12,7 @@ import (
 
 var (
 	metricsConfigLock = new(sync.Mutex)
-	metricsFilePath   = path.Join(env.GetCostAnalyzerVolumeMountPath(), "metrics.json")
+	metricsFilePath   = env.GetMetricConfigFile()
 )
 
 type MetricsConfig struct {