2
0
Эх сурвалжийг харах

Open Source Cloud Cost

Signed-off-by: Sean Holcomb <seanholcomb@gmail.com>
Sean Holcomb 2 жил өмнө
parent
commit
f9a0d15dd2
62 өөрчлөгдсөн 5250 нэмэгдсэн , 261 устгасан
  1. 7 3
      go.mod
  2. 13 3
      go.sum
  3. 6 6
      pkg/cloud/alibaba/authorizer.go
  4. 12 7
      pkg/cloud/alibaba/boaconfiguration.go
  5. 2 2
      pkg/cloud/alibaba/boaconfiguration_test.go
  6. 6 4
      pkg/cloud/alibaba/boaquerier.go
  7. 1 1
      pkg/cloud/authorizer.go
  8. 18 13
      pkg/cloud/aws/athenaconfiguration.go
  9. 2 2
      pkg/cloud/aws/athenaconfiguration_test.go
  10. 1 11
      pkg/cloud/aws/athenaintegration.go
  11. 6 4
      pkg/cloud/aws/athenaquerier.go
  12. 14 14
      pkg/cloud/aws/authorizer.go
  13. 3 3
      pkg/cloud/aws/authorizer_test.go
  14. 12 7
      pkg/cloud/aws/s3configuration.go
  15. 5 2
      pkg/cloud/aws/s3connection.go
  16. 2 2
      pkg/cloud/aws/s3connection_test.go
  17. 1 2
      pkg/cloud/aws/s3selectintegration.go
  18. 1 2
      pkg/cloud/aws/s3selectquerier.go
  19. 6 6
      pkg/cloud/azure/authorizer.go
  20. 2 5
      pkg/cloud/azure/azurestorageintegration.go
  21. 20 9
      pkg/cloud/azure/storagebillingparser.go
  22. 16 11
      pkg/cloud/azure/storageconfiguration.go
  23. 2 2
      pkg/cloud/azure/storageconfiguration_test.go
  24. 5 2
      pkg/cloud/azure/storageconnection.go
  25. 0 12
      pkg/cloud/cloudcostintegration.go
  26. 2 1
      pkg/cloud/config.go
  27. 291 0
      pkg/cloud/config/configurations.go
  28. 290 0
      pkg/cloud/config/configurations_test.go
  29. 305 0
      pkg/cloud/config/controller.go
  30. 160 0
      pkg/cloud/config/controller_handlers.go
  31. 871 0
      pkg/cloud/config/controller_test.go
  32. 95 0
      pkg/cloud/config/mock.go
  33. 14 0
      pkg/cloud/config/observer.go
  34. 351 0
      pkg/cloud/config/watcher.go
  35. 9 9
      pkg/cloud/gcp/authorizer.go
  36. 13 8
      pkg/cloud/gcp/bigqueryconfiguration.go
  37. 2 2
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  38. 2 1
      pkg/cloud/gcp/bigqueryintegration.go
  39. 15 3
      pkg/cloud/gcp/bigqueryquerier.go
  40. 28 0
      pkg/cloud/provider/providerconfig.go
  41. 207 0
      pkg/cloudcost/ingestionmanager.go
  42. 342 0
      pkg/cloudcost/ingestor.go
  43. 96 0
      pkg/cloudcost/integration.go
  44. 103 0
      pkg/cloudcost/memoryrepository.go
  45. 194 0
      pkg/cloudcost/pipelineservice.go
  46. 89 0
      pkg/cloudcost/querier.go
  47. 370 0
      pkg/cloudcost/queryservice.go
  48. 16 0
      pkg/cloudcost/repository.go
  49. 229 0
      pkg/cloudcost/repositoryquerier.go
  50. 24 0
      pkg/cloudcost/status.go
  51. 107 0
      pkg/cloudcost/view.go
  52. 18 0
      pkg/cmd/costmodel/costmodel.go
  53. 47 35
      pkg/costmodel/router.go
  54. 32 0
      pkg/env/costmodelenv.go
  55. 190 63
      pkg/kubecost/cloudcost.go
  56. 1 1
      pkg/kubecost/cloudcost_test.go
  57. 14 3
      pkg/kubecost/cloudcostprops.go
  58. 77 0
      pkg/kubecost/costmetric.go
  59. 23 0
      pkg/kubecost/query.go
  60. 229 0
      pkg/kubecost/window.go
  61. 209 0
      pkg/proto/http.go
  62. 22 0
      pkg/proto/proto.go

+ 7 - 3
go.mod

@@ -49,13 +49,15 @@ require (
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/viper v1.8.1
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.4
 	go.etcd.io/bbolt v1.3.5
+	go.opentelemetry.io/otel v1.19.0
 	golang.org/x/exp v0.0.0-20221031165847-c99f073a8326
 	golang.org/x/oauth2 v0.6.0
 	golang.org/x/sync v0.1.0
 	golang.org/x/text v0.13.0
 	google.golang.org/api v0.114.0
+	google.golang.org/protobuf v1.29.1
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.25.3
 	k8s.io/apimachinery v0.25.3
@@ -99,7 +101,8 @@ require (
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.6 // indirect
 	github.com/go-openapi/swag v0.21.1 // indirect
@@ -154,6 +157,8 @@ require (
 	github.com/subosito/gotenv v1.2.0 // indirect
 	github.com/zeebo/xxh3 v1.0.2 // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
 	golang.org/x/mod v0.8.0 // indirect
@@ -166,7 +171,6 @@ require (
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 // indirect
 	google.golang.org/grpc v1.53.0 // indirect
-	google.golang.org/protobuf v1.29.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect

+ 13 - 3
go.sum

@@ -261,8 +261,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
@@ -660,8 +663,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -709,6 +713,12 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=

+ 6 - 6
pkg/cloud/alibaba/authorizer.go

@@ -5,7 +5,7 @@ import (
 
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -13,7 +13,7 @@ const AccessKeyAuthorizerType = "AlibabaAccessKey"
 
 // Authorizer provide *bssopenapi.Client for Alibaba cloud BOS for Billing related SDK calls
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	GetCredentials() (auth.Credential, error)
 }
 
@@ -36,7 +36,7 @@ type AccessKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["accessKeyID"] = ak.AccessKeyID
 	fmap["accessKeySecret"] = ak.AccessKeySecret
 	return json.Marshal(fmap)
@@ -52,7 +52,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -70,10 +70,10 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
 		AccessKeyID:     ak.AccessKeyID,
-		AccessKeySecret: config.Redacted,
+		AccessKeySecret: cloud.Redacted,
 	}
 }
 

+ 12 - 7
pkg/cloud/alibaba/boaconfiguration.go

@@ -3,7 +3,8 @@ package alibaba
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -36,7 +37,7 @@ func (bc *BOAConfiguration) Validate() error {
 	return nil
 }
 
-func (bc *BOAConfiguration) Equals(config config.Config) bool {
+func (bc *BOAConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -65,7 +66,7 @@ func (bc *BOAConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (bc *BOAConfiguration) Sanitize() config.Config {
+func (bc *BOAConfiguration) Sanitize() cloud.Config {
 	return &BOAConfiguration{
 		Account:    bc.Account,
 		Region:     bc.Region,
@@ -77,6 +78,10 @@ func (bc *BOAConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
 }
 
+func (bc *BOAConfiguration) Provider() string {
+	return kubecost.AlibabaProvider
+}
+
 func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -86,13 +91,13 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	bc.Account = account
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -102,7 +107,7 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -111,7 +116,7 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) cloud.KeyedConfig {
 	if acc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -97,7 +97,7 @@ func TestBoaConfiguration_Validate(t *testing.T) {
 func TestBOAConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     BOAConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 6 - 4
pkg/cloud/alibaba/boaquerier.go

@@ -4,11 +4,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
-
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
 	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
 )
@@ -25,10 +23,14 @@ type BoaQuerier struct {
 }
 
 func (bq *BoaQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if bq.ConnectionStatus.String() == "" {
+		bq.ConnectionStatus = cloud.InitialStatus
+	}
 	return bq.ConnectionStatus
 }
 
-func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+func (bq *BoaQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*BoaQuerier)
 	if !ok {
 		return false

+ 1 - 1
pkg/cloud/config/authorizer.go → pkg/cloud/authorizer.go

@@ -1,4 +1,4 @@
-package config
+package cloud
 
 import (
 	"fmt"

+ 18 - 13
pkg/cloud/aws/athenaconfiguration.go

@@ -3,7 +3,8 @@ package aws
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -55,7 +56,7 @@ func (ac *AthenaConfiguration) Validate() error {
 	return nil
 }
 
-func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+func (ac *AthenaConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -105,7 +106,7 @@ func (ac *AthenaConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (ac *AthenaConfiguration) Sanitize() config.Config {
+func (ac *AthenaConfiguration) Sanitize() cloud.Config {
 	return &AthenaConfiguration{
 		Bucket:     ac.Bucket,
 		Region:     ac.Region,
@@ -122,6 +123,10 @@ func (ac *AthenaConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
 }
 
+func (ac *AthenaConfiguration) Provider() string {
+	return kubecost.AWSProvider
+}
+
 func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -131,45 +136,45 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	bucket, err := cloud.GetInterfaceValue[string](fmap, "bucket")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Bucket = bucket
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Region = region
 
-	database, err := config.GetInterfaceValue[string](fmap, "database")
+	database, err := cloud.GetInterfaceValue[string](fmap, "database")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Database = database
 
 	if _, ok := fmap["catalog"]; ok {
-		catalog, err := config.GetInterfaceValue[string](fmap, "catalog")
+		catalog, err := cloud.GetInterfaceValue[string](fmap, "catalog")
 		if err != nil {
 			return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 		}
 		ac.Catalog = catalog
 	}
 
-	table, err := config.GetInterfaceValue[string](fmap, "table")
+	table, err := cloud.GetInterfaceValue[string](fmap, "table")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Table = table
 
-	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
+	workgroup, err := cloud.GetInterfaceValue[string](fmap, "workgroup")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Workgroup = workgroup
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
@@ -179,7 +184,7 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
@@ -190,7 +195,7 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 
 // ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
 // legacy behavior
-func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) cloud.KeyedConfig {
 	if aai.IsEmpty() {
 		return nil
 	}
@@ -213,7 +218,7 @@ func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
 		}
 	}
 
-	var config config.KeyedConfig
+	var config cloud.KeyedConfig
 	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
 		config = &AthenaConfiguration{
 			Bucket:     aai.AthenaBucketName,

+ 2 - 2
pkg/cloud/aws/athenaconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -184,7 +184,7 @@ func TestAthenaConfiguration_Validate(t *testing.T) {
 func TestAthenaConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     AthenaConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 1 - 11
pkg/cloud/aws/athenaintegration.go

@@ -11,7 +11,6 @@ import (
 	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
-	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 const LabelColumnPrefix = "resource_tags_user_"
@@ -154,7 +153,7 @@ func (ai *AthenaIntegration) GetCloudCost(start, end time.Time) (*kubecost.Cloud
 	`
 	aqi.Query = fmt.Sprintf(queryStr, columnStr, ai.Table, whereClause, groupByStr)
 
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, ai.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, ai.Key())
 	if err != nil {
 		return nil, err
 	}
@@ -442,12 +441,3 @@ func (ai *AthenaIntegration) GetConnectionStatusFromResult(result cloud.EmptyChe
 	}
 	return cloud.SuccessfulConnection
 }
-
-func (ai *AthenaIntegration) GetConnectionStatus() string {
-	// initialize status if it has not done so; this can happen if the integration is inactive
-	if ai.ConnectionStatus.String() == "" {
-		ai.ConnectionStatus = cloud.InitialStatus
-	}
-
-	return ai.ConnectionStatus.String()
-}

+ 6 - 4
pkg/cloud/aws/athenaquerier.go

@@ -8,12 +8,10 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
-
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/athena"
 	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/stringutil"
@@ -25,10 +23,14 @@ type AthenaQuerier struct {
 }
 
 func (aq *AthenaQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if aq.ConnectionStatus.String() == "" {
+		aq.ConnectionStatus = cloud.InitialStatus
+	}
 	return aq.ConnectionStatus
 }
 
-func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+func (aq *AthenaQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*AthenaQuerier)
 	if !ok {
 		return false

+ 14 - 14
pkg/cloud/aws/authorizer.go

@@ -8,7 +8,7 @@ import (
 	awsconfig "github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -18,7 +18,7 @@ const AssumeRoleAuthorizerType = "AWSAssumeRole"
 
 // Authorizer implementations provide aws.Config for AWS SDK calls
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	CreateAWSConfig(string) (aws.Config, error)
 }
 
@@ -45,7 +45,7 @@ type AccessKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["id"] = ak.ID
 	fmap["secret"] = ak.Secret
 	return json.Marshal(fmap)
@@ -70,7 +70,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -88,10 +88,10 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
 		ID:     ak.ID,
-		Secret: config.Redacted,
+		Secret: cloud.Redacted,
 	}
 }
 
@@ -115,7 +115,7 @@ type ServiceAccount struct{}
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 1)
-	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
 	return json.Marshal(fmap)
 }
 
@@ -124,7 +124,7 @@ func (sa *ServiceAccount) Validate() error {
 	return nil
 }
 
-func (sa *ServiceAccount) Equals(config config.Config) bool {
+func (sa *ServiceAccount) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -136,7 +136,7 @@ func (sa *ServiceAccount) Equals(config config.Config) bool {
 	return true
 }
 
-func (sa *ServiceAccount) Sanitize() config.Config {
+func (sa *ServiceAccount) Sanitize() cloud.Config {
 	return &ServiceAccount{}
 }
 
@@ -157,7 +157,7 @@ type AssumeRole struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
 	fmap["roleARN"] = ara.RoleARN
 	fmap["authorizer"] = ara.Authorizer
 	return json.Marshal(fmap)
@@ -173,7 +173,7 @@ func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	roleARN, err := cloud.GetInterfaceValue[string](fmap, "roleARN")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -183,7 +183,7 @@ func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
 	}
@@ -218,7 +218,7 @@ func (ara *AssumeRole) Validate() error {
 	return nil
 }
 
-func (ara *AssumeRole) Equals(config config.Config) bool {
+func (ara *AssumeRole) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -243,7 +243,7 @@ func (ara *AssumeRole) Equals(config config.Config) bool {
 	return true
 }
 
-func (ara *AssumeRole) Sanitize() config.Config {
+func (ara *AssumeRole) Sanitize() cloud.Config {
 	return &AssumeRole{
 		Authorizer: ara.Authorizer.Sanitize().(Authorizer),
 		RoleARN:    ara.RoleARN,

+ 3 - 3
pkg/cloud/aws/authorizer_test.go

@@ -3,7 +3,7 @@ package aws
 import (
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 )
 
 func TestAuthorizerJSON_Sanitize(t *testing.T) {
@@ -19,7 +19,7 @@ func TestAuthorizerJSON_Sanitize(t *testing.T) {
 			},
 			expected: &AccessKey{
 				ID:     "ID",
-				Secret: config.Redacted,
+				Secret: cloud.Redacted,
 			},
 		},
 		"Service Account": {
@@ -37,7 +37,7 @@ func TestAuthorizerJSON_Sanitize(t *testing.T) {
 			expected: &AssumeRole{
 				Authorizer: &AccessKey{
 					ID:     "ID",
-					Secret: config.Redacted,
+					Secret: cloud.Redacted,
 				},
 				RoleARN: "role arn",
 			},

+ 12 - 7
pkg/cloud/aws/s3configuration.go

@@ -4,7 +4,8 @@ import (
 	"fmt"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -42,7 +43,7 @@ func (s3c *S3Configuration) Validate() error {
 	return nil
 }
 
-func (s3c *S3Configuration) Equals(config config.Config) bool {
+func (s3c *S3Configuration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -76,7 +77,7 @@ func (s3c *S3Configuration) Equals(config config.Config) bool {
 	return true
 }
 
-func (s3c *S3Configuration) Sanitize() config.Config {
+func (s3c *S3Configuration) Sanitize() cloud.Config {
 	return &S3Configuration{
 		Bucket:     s3c.Bucket,
 		Region:     s3c.Region,
@@ -89,6 +90,10 @@ func (s3c *S3Configuration) Key() string {
 	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
 }
 
+func (s3c *S3Configuration) Provider() string {
+	return kubecost.AWSProvider
+}
+
 func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -98,19 +103,19 @@ func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	bucket, err := cloud.GetInterfaceValue[string](fmap, "bucket")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
 	s3c.Bucket = bucket
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
 	s3c.Region = region
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
@@ -120,7 +125,7 @@ func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}

+ 5 - 2
pkg/cloud/aws/s3connection.go

@@ -6,7 +6,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/opencost/opencost/pkg/cloud"
-	"github.com/opencost/opencost/pkg/cloud/config"
 )
 
 type S3Connection struct {
@@ -15,10 +14,14 @@ type S3Connection struct {
 }
 
 func (s3c *S3Connection) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if s3c.ConnectionStatus.String() == "" {
+		s3c.ConnectionStatus = cloud.InitialStatus
+	}
 	return s3c.ConnectionStatus
 }
 
-func (s3c *S3Connection) Equals(config config.Config) bool {
+func (s3c *S3Connection) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*S3Connection)
 	if !ok {
 		return false

+ 2 - 2
pkg/cloud/aws/s3connection_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -105,7 +105,7 @@ func TestS3Configuration_Validate(t *testing.T) {
 func TestS3Configuration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     S3Configuration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 1 - 2
pkg/cloud/aws/s3selectintegration.go

@@ -10,7 +10,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
-	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 const S3SelectDateLayout = "2006-01-02T15:04:05Z"
@@ -58,7 +57,7 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 	ccsr, err := kubecost.NewCloudCostSetRange(
 		start,
 		end,
-		timeutil.Day,
+		kubecost.AccumulateOptionDay,
 		s3si.Key(),
 	)
 	if err != nil {

+ 1 - 2
pkg/cloud/aws/s3selectquerier.go

@@ -13,7 +13,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/opencost/opencost/pkg/cloud"
-	"github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/util/stringutil"
 )
 
@@ -22,7 +21,7 @@ type S3SelectQuerier struct {
 	connectionStatus cloud.ConnectionStatus
 }
 
-func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+func (s3sq *S3SelectQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*S3SelectQuerier)
 	if !ok {
 		return false

+ 6 - 6
pkg/cloud/azure/authorizer.go

@@ -5,13 +5,13 @@ import (
 	"fmt"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 )
 
 const AccessKeyAuthorizerType = "AzureAccessKey"
 
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	GetBlobCredentials() (azblob.Credential, error)
 }
 
@@ -32,7 +32,7 @@ type AccessKey struct {
 
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["accessKey"] = ak.AccessKey
 	fmap["account"] = ak.Account
 	return json.Marshal(fmap)
@@ -48,7 +48,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -67,9 +67,9 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
-		AccessKey: config.Redacted,
+		AccessKey: cloud.Redacted,
 		Account:   ak.Account,
 	}
 }

+ 2 - 5
pkg/cloud/azure/azurestorageintegration.go

@@ -4,23 +4,21 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 type AzureStorageIntegration struct {
 	AzureStorageBillingParser
-	ConnectionStatus cloud.ConnectionStatus
 }
 
 func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*kubecost.CloudCostSetRange, error) {
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, asi.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, asi.Key())
 	if err != nil {
 		return nil, err
 	}
 
-	status, err := asi.ParseBillingData(start, end, func(abv *BillingRowValues) error {
+	err = asi.ParseBillingData(start, end, func(abv *BillingRowValues) error {
 		s := abv.Date
 		e := abv.Date.Add(timeutil.Day)
 		window := kubecost.NewWindow(&s, &e)
@@ -77,7 +75,6 @@ func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*kubecos
 		return nil
 	})
 	if err != nil {
-		asi.ConnectionStatus = status
 		return nil, err
 	}
 	return ccsr, nil

+ 20 - 9
pkg/cloud/azure/storagebillingparser.go

@@ -11,7 +11,6 @@ import (
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/log"
 )
 
@@ -20,7 +19,7 @@ type AzureStorageBillingParser struct {
 	StorageConnection
 }
 
-func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+func (asbp *AzureStorageBillingParser) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*AzureStorageBillingParser)
 	if !ok {
 		return false
@@ -30,33 +29,45 @@ func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
 
 type AzureBillingResultFunc func(*BillingRowValues) error
 
-func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) error {
 	err := asbp.Validate()
 	if err != nil {
-		return cloud.InvalidConfiguration, err
+		asbp.ConnectionStatus = cloud.InvalidConfiguration
+		return err
 	}
 
 	containerURL, err := asbp.getContainer()
 	if err != nil {
-		return cloud.FailedConnection, err
+		asbp.ConnectionStatus = cloud.FailedConnection
+		return err
 	}
 	ctx := context.Background()
 	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
 	if err != nil {
-		return cloud.FailedConnection, err
+		asbp.ConnectionStatus = cloud.FailedConnection
+		return err
 	}
+
+	if len(blobNames) == 0 && asbp.ConnectionStatus != cloud.SuccessfulConnection {
+		asbp.ConnectionStatus = cloud.MissingData
+		return nil
+	}
+
 	for _, blobName := range blobNames {
 		blobBytes, err2 := asbp.DownloadBlob(blobName, containerURL, ctx)
 		if err2 != nil {
-			return cloud.FailedConnection, err2
+			asbp.ConnectionStatus = cloud.FailedConnection
+			return err2
 		}
 		err2 = asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn)
 		if err2 != nil {
-			return cloud.ParseError, err2
+			asbp.ConnectionStatus = cloud.ParseError
+			return err2
 		}
 
 	}
-	return cloud.SuccessfulConnection, nil
+	asbp.ConnectionStatus = cloud.SuccessfulConnection
+	return nil
 }
 
 func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {

+ 16 - 11
pkg/cloud/azure/storageconfiguration.go

@@ -3,7 +3,8 @@ package azure
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -43,7 +44,7 @@ func (sc *StorageConfiguration) Validate() error {
 	return nil
 }
 
-func (sc *StorageConfiguration) Equals(config config.Config) bool {
+func (sc *StorageConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -85,7 +86,7 @@ func (sc *StorageConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (sc *StorageConfiguration) Sanitize() config.Config {
+func (sc *StorageConfiguration) Sanitize() cloud.Config {
 	return &StorageConfiguration{
 		SubscriptionID: sc.SubscriptionID,
 		Account:        sc.Account,
@@ -105,6 +106,10 @@ func (sc *StorageConfiguration) Key() string {
 	return key
 }
 
+func (sc *StorageConfiguration) Provider() string {
+	return kubecost.AzureProvider
+}
+
 func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -114,41 +119,41 @@ func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	subscriptionID, err := cloud.GetInterfaceValue[string](fmap, "subscriptionID")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.SubscriptionID = subscriptionID
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Account = account
 
-	container, err := config.GetInterfaceValue[string](fmap, "container")
+	container, err := cloud.GetInterfaceValue[string](fmap, "container")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Container = container
 
-	path, err := config.GetInterfaceValue[string](fmap, "path")
+	path, err := cloud.GetInterfaceValue[string](fmap, "path")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Path = path
 
-	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	cloudValue, err := cloud.GetInterfaceValue[string](fmap, "cloud")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
-	sc.Cloud = cloud
+	sc.Cloud = cloudValue
 
 	authAny, ok := fmap["authorizer"]
 	if !ok {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -157,7 +162,7 @@ func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) cloud.KeyedConfig {
 	if asc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/azure/storageconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -145,7 +145,7 @@ func TestStorageConfiguration_Validate(t *testing.T) {
 func TestStorageConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     StorageConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 5 - 2
pkg/cloud/azure/storageconnection.go

@@ -9,7 +9,6 @@ import (
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/log"
 )
 
@@ -20,10 +19,14 @@ type StorageConnection struct {
 }
 
 func (sc *StorageConnection) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if sc.ConnectionStatus.String() == "" {
+		sc.ConnectionStatus = cloud.InitialStatus
+	}
 	return sc.ConnectionStatus
 }
 
-func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+func (sc *StorageConnection) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*StorageConnection)
 	if !ok {
 		return false

+ 0 - 12
pkg/cloud/cloudcostintegration.go

@@ -1,12 +0,0 @@
-package cloud
-
-import (
-	"time"
-
-	"github.com/opencost/opencost/pkg/kubecost"
-)
-
-// CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
-type CloudCostIntegration interface {
-	GetCloudCost(time.Time, time.Time) (*kubecost.CloudCostSetRange, error)
-}

+ 2 - 1
pkg/cloud/config/config.go → pkg/cloud/config.go

@@ -1,4 +1,4 @@
-package config
+package cloud
 
 import (
 	"fmt"
@@ -17,6 +17,7 @@ type Config interface {
 type KeyedConfig interface {
 	Config
 	Key() string
+	Provider() string
 }
 
 type KeyedConfigWatcher interface {

+ 291 - 0
pkg/cloud/config/configurations.go

@@ -0,0 +1,291 @@
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// MultiCloudConfig struct is used to unmarshal cloud configs for each provider out of cloud-integration file
+// Deprecated: v1.104 use Configurations
+type MultiCloudConfig struct {
+	AzureConfigs   []azure.AzureStorageConfig `json:"azure"`
+	GCPConfigs     []gcp.BigQueryConfig       `json:"gcp"`
+	AWSConfigs     []aws.AwsAthenaInfo        `json:"aws"`
+	AlibabaConfigs []alibaba.AlibabaInfo      `json:"alibaba"`
+}
+
+func (mcc MultiCloudConfig) loadConfigurations(configs *Configurations) {
+	// Load AWS configs
+	for _, awsConfig := range mcc.AWSConfigs {
+		kc := aws.ConvertAwsAthenaInfoToConfig(awsConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting AWS config %s", err.Error())
+		}
+
+	}
+
+	// Load GCP configs
+	for _, gcpConfig := range mcc.GCPConfigs {
+		kc := gcp.ConvertBigQueryConfigToConfig(gcpConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting GCP config %s", err.Error())
+		}
+	}
+
+	// Load Azure configs
+	for _, azureConfig := range mcc.AzureConfigs {
+		kc := azure.ConvertAzureStorageConfigToConfig(azureConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting Azure config %s", err.Error())
+		}
+	}
+
+	// Load Alibaba Cloud Configs
+	for _, aliCloudConfig := range mcc.AlibabaConfigs {
+		kc := alibaba.ConvertAlibabaInfoToConfig(aliCloudConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting Alibaba config %s", err.Error())
+		}
+	}
+}
+
+// Configurations is a general use container for all configuration types
+type Configurations struct {
+	AWS     *AWSConfigs     `json:"aws,omitempty"`
+	GCP     *GCPConfigs     `json:"gcp,omitempty"`
+	Azure   *AzureConfigs   `json:"azure,omitempty"`
+	Alibaba *AlibabaConfigs `json:"alibaba,omitempty"`
+}
+
+// UnmarshalJSON custom json unmarshalling to maintain support for MultiCloudConfig format
+func (c *Configurations) UnmarshalJSON(bytes []byte) error {
+	// Attempt to unmarshal into old config object
+	multiConfig := &MultiCloudConfig{}
+	err := json.Unmarshal(bytes, multiConfig)
+	// If unmarshal is successful, move values into config and return
+	if err == nil {
+		multiConfig.loadConfigurations(c)
+		return nil
+	}
+	// Create inline type to gain access to default Unmarshalling
+	type ConfUnmarshaller *Configurations
+	var conf ConfUnmarshaller = c
+	return json.Unmarshal(bytes, conf)
+}
+
+func (c *Configurations) Equals(that *Configurations) bool {
+	if c == nil && that == nil {
+		return true
+	}
+	if c == nil || that == nil {
+		return false
+	}
+
+	if !c.AWS.Equals(that.AWS) {
+		return false
+	}
+
+	if !c.GCP.Equals(that.GCP) {
+		return false
+	}
+
+	if !c.Azure.Equals(that.Azure) {
+		return false
+	}
+
+	if !c.Alibaba.Equals(that.Alibaba) {
+		return false
+	}
+
+	return true
+}
+
+func (c *Configurations) Insert(keyedConfig cloud.Config) error {
+	switch keyedConfig.(type) {
+	case *aws.AthenaConfiguration:
+		if c.AWS == nil {
+			c.AWS = &AWSConfigs{}
+		}
+		c.AWS.Athena = append(c.AWS.Athena, keyedConfig.(*aws.AthenaConfiguration))
+	case *aws.S3Configuration:
+		if c.AWS == nil {
+			c.AWS = &AWSConfigs{}
+		}
+		c.AWS.S3 = append(c.AWS.S3, keyedConfig.(*aws.S3Configuration))
+	case *gcp.BigQueryConfiguration:
+		if c.GCP == nil {
+			c.GCP = &GCPConfigs{}
+		}
+		c.GCP.BigQuery = append(c.GCP.BigQuery, keyedConfig.(*gcp.BigQueryConfiguration))
+	case *azure.StorageConfiguration:
+		if c.Azure == nil {
+			c.Azure = &AzureConfigs{}
+		}
+		c.Azure.Storage = append(c.Azure.Storage, keyedConfig.(*azure.StorageConfiguration))
+	case *alibaba.BOAConfiguration:
+		if c.Alibaba == nil {
+			c.Alibaba = &AlibabaConfigs{}
+		}
+		c.Alibaba.BOA = append(c.Alibaba.BOA, keyedConfig.(*alibaba.BOAConfiguration))
+	default:
+		return fmt.Errorf("Configurations: Insert: failed to insert config of type: %T", keyedConfig)
+	}
+	return nil
+}
+
+func (c *Configurations) ToSlice() []cloud.KeyedConfig {
+	var keyedConfigs []cloud.KeyedConfig
+	if c.AWS != nil {
+		for _, athenaConfig := range c.AWS.Athena {
+			keyedConfigs = append(keyedConfigs, athenaConfig)
+		}
+
+		for _, s3Config := range c.AWS.S3 {
+			keyedConfigs = append(keyedConfigs, s3Config)
+		}
+	}
+
+	if c.GCP != nil {
+		for _, bigQueryConfig := range c.GCP.BigQuery {
+			keyedConfigs = append(keyedConfigs, bigQueryConfig)
+		}
+	}
+
+	if c.Azure != nil {
+		for _, azureStorageConfig := range c.Azure.Storage {
+			keyedConfigs = append(keyedConfigs, azureStorageConfig)
+		}
+	}
+
+	if c.Alibaba != nil {
+		for _, boaConfig := range c.Alibaba.BOA {
+			keyedConfigs = append(keyedConfigs, boaConfig)
+		}
+	}
+
+	return keyedConfigs
+
+}
+
+type AWSConfigs struct {
+	Athena []*aws.AthenaConfiguration `json:"athena,omitempty"`
+	S3     []*aws.S3Configuration     `json:"s3,omitempty"`
+}
+
+func (ac *AWSConfigs) Equals(that *AWSConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check Athena
+	if len(ac.Athena) != len(that.Athena) {
+		return false
+	}
+	for i, thisAthena := range ac.Athena {
+		thatAthena := that.Athena[i]
+		if !thisAthena.Equals(thatAthena) {
+			return false
+		}
+	}
+
+	// Check S3
+	if len(ac.S3) != len(that.S3) {
+		return false
+	}
+	for i, thisS3 := range ac.S3 {
+		thatS3 := that.S3[i]
+		if !thisS3.Equals(thatS3) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type GCPConfigs struct {
+	BigQuery []*gcp.BigQueryConfiguration `json:"bigQuery,omitempty"`
+}
+
+func (gc *GCPConfigs) Equals(that *GCPConfigs) bool {
+	if gc == nil && that == nil {
+		return true
+	}
+	if gc == nil || that == nil {
+		return false
+	}
+	// Check BigQuery
+	if len(gc.BigQuery) != len(that.BigQuery) {
+		return false
+	}
+	for i, thisBigQuery := range gc.BigQuery {
+		thatBigQuery := that.BigQuery[i]
+		if !thisBigQuery.Equals(thatBigQuery) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type AzureConfigs struct {
+	Storage []*azure.StorageConfiguration `json:"storage,omitempty"`
+}
+
+func (ac *AzureConfigs) Equals(that *AzureConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check Storage
+	if len(ac.Storage) != len(that.Storage) {
+		return false
+	}
+	for i, thisStorage := range ac.Storage {
+		thatStorage := that.Storage[i]
+		if !thisStorage.Equals(thatStorage) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type AlibabaConfigs struct {
+	BOA []*alibaba.BOAConfiguration `json:"boa,omitempty"`
+}
+
+func (ac *AlibabaConfigs) Equals(that *AlibabaConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check BOA
+	if len(ac.BOA) != len(that.BOA) {
+		return false
+	}
+	for i, thisBOA := range ac.BOA {
+		thatBOA := that.BOA[i]
+		if !thisBOA.Equals(thatBOA) {
+			return false
+		}
+	}
+
+	return true
+}

+ 290 - 0
pkg/cloud/config/configurations_test.go

@@ -0,0 +1,290 @@
+package config
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+)
+
+var (
+	azureMultiCloudConf = MultiCloudConfig{
+		AzureConfigs: []azure.AzureStorageConfig{
+			{
+				SubscriptionId: "subscriptionID",
+				AccountName:    "accountName",
+				AccessKey:      "accessKey",
+				ContainerName:  "containerName",
+				ContainerPath:  "containerPath",
+				AzureCloud:     "azureCloud",
+			},
+		},
+	}
+	azureConfiguration = &Configurations{
+		Azure: &AzureConfigs{
+			Storage: []*azure.StorageConfiguration{
+				{
+					SubscriptionID: "subscriptionID",
+					Account:        "accountName",
+					Container:      "containerName",
+					Path:           "containerPath",
+					Cloud:          "azureCloud",
+					Authorizer: &azure.AccessKey{
+						AccessKey: "accessKey",
+						Account:   "accountName",
+					},
+				},
+			},
+		},
+	}
+
+	GCPKeyMultiCloudConf = MultiCloudConfig{
+		GCPConfigs: []gcp.BigQueryConfig{
+			{
+				ProjectID:          "projectID",
+				BillingDataDataset: "dataset.table",
+				Key: map[string]string{
+					"key": "value",
+				},
+			},
+		},
+	}
+
+	GCPKeyConfigurations = Configurations{
+		GCP: &GCPConfigs{BigQuery: []*gcp.BigQueryConfiguration{{
+			ProjectID: "projectID",
+			Dataset:   "dataset",
+			Table:     "table",
+			Authorizer: &gcp.ServiceAccountKey{
+				Key: map[string]string{
+					"key": "value",
+				},
+			},
+		},
+		}},
+	}
+
+	GCPWIMultiCloudConf = MultiCloudConfig{
+		GCPConfigs: []gcp.BigQueryConfig{
+			{
+				ProjectID:          "projectID",
+				BillingDataDataset: "dataset.table",
+				Key:                nil,
+			},
+		},
+	}
+
+	GCPWIConfigurations = Configurations{
+		GCP: &GCPConfigs{BigQuery: []*gcp.BigQueryConfiguration{{
+			ProjectID:  "projectID",
+			Dataset:    "dataset",
+			Table:      "table",
+			Authorizer: &gcp.WorkloadIdentity{},
+		},
+		}},
+	}
+
+	AWSAthenaKeyMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AthenaDatabase:   "database",
+				AthenaTable:      "table",
+				AthenaWorkgroup:  "workgroup",
+				ServiceKeyName:   "id",
+				ServiceKeySecret: "secret",
+				AccountID:        "account",
+				MasterPayerARN:   "",
+			},
+		},
+	}
+
+	AWSAthenaKeyConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			Athena: []*aws.AthenaConfiguration{
+				{
+					Bucket:    "bucket",
+					Region:    "region",
+					Database:  "database",
+					Table:     "table",
+					Workgroup: "workgroup",
+					Account:   "account",
+					Authorizer: &aws.AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+				},
+			},
+		},
+	}
+
+	AWSAthenaAssumeRoleServiceAccountMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AthenaDatabase:   "database",
+				AthenaTable:      "table",
+				AthenaWorkgroup:  "workgroup",
+				AccountID:        "account",
+				MasterPayerARN:   "roleArn",
+			},
+		},
+	}
+
+	AWSAthenaAssumeRoleServiceAccountConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			Athena: []*aws.AthenaConfiguration{
+				{
+					Bucket:    "bucket",
+					Region:    "region",
+					Database:  "database",
+					Table:     "table",
+					Workgroup: "workgroup",
+					Account:   "account",
+					Authorizer: &aws.AssumeRole{
+						Authorizer: &aws.ServiceAccount{},
+						RoleARN:    "roleArn",
+					},
+				},
+			},
+		},
+	}
+	AWSS3ServiceAccountMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AccountID:        "account",
+				MasterPayerARN:   "",
+			},
+		},
+	}
+
+	AWSS3ServiceAccountConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			S3: []*aws.S3Configuration{
+				{
+					Bucket:     "bucket",
+					Region:     "region",
+					Account:    "account",
+					Authorizer: &aws.ServiceAccount{},
+				},
+			},
+		},
+	}
+
+	AWSS3AssumeRoleAccessKeyMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AccountID:        "account",
+				ServiceKeyName:   "id",
+				ServiceKeySecret: "secret",
+				MasterPayerARN:   "roleARN",
+			},
+		},
+	}
+	AWSS3AssumeRoleAccessKeyConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			S3: []*aws.S3Configuration{
+				{
+					Bucket:  "bucket",
+					Region:  "region",
+					Account: "account",
+					Authorizer: &aws.AssumeRole{
+						Authorizer: &aws.AccessKey{
+							ID:     "id",
+							Secret: "secret",
+						},
+						RoleARN: "roleARN",
+					},
+				},
+			},
+		},
+	}
+)
+
+func TestConfigurations_UnmarshalJSON(t *testing.T) {
+	tests := map[string]struct {
+		input    any
+		expected *Configurations
+	}{
+		"Azure Storage AccessKey": {
+			input:    azureConfiguration,
+			expected: azureConfiguration,
+		},
+		"Azure Storage AccessKey Conversion": {
+			input:    azureMultiCloudConf,
+			expected: azureConfiguration,
+		},
+		"GCP BigQuery ServiceAccountKey": {
+			input:    GCPKeyConfigurations,
+			expected: &GCPKeyConfigurations,
+		},
+		"GCP BigQuery ServiceAccountKey Conversion": {
+			input:    GCPKeyMultiCloudConf,
+			expected: &GCPKeyConfigurations,
+		},
+		"GCP BigQuery Workload Identity ": {
+			input:    &GCPWIConfigurations,
+			expected: &GCPWIConfigurations,
+		},
+		"GCP BigQuery Workload Identity Conversion": {
+			input:    GCPWIMultiCloudConf,
+			expected: &GCPWIConfigurations,
+		},
+		"AWS Athena Access Key": {
+			input:    AWSAthenaKeyConfigurations,
+			expected: AWSAthenaKeyConfigurations,
+		},
+		"AWS Athena Access Key Conversion": {
+			input:    AWSAthenaKeyMultiCloudConfig,
+			expected: AWSAthenaKeyConfigurations,
+		},
+		"AWS Athena Assume Role Service Account": {
+			input:    AWSAthenaAssumeRoleServiceAccountConfigurations,
+			expected: AWSAthenaAssumeRoleServiceAccountConfigurations,
+		},
+		"AWS Athena Assume Role Service Account Conversion": {
+			input:    AWSAthenaAssumeRoleServiceAccountMultiCloudConfig,
+			expected: AWSAthenaAssumeRoleServiceAccountConfigurations,
+		},
+		"AWS S3 Service Account": {
+			input:    AWSS3ServiceAccountConfigurations,
+			expected: AWSS3ServiceAccountConfigurations,
+		},
+		"AWS S3 Service Account Conversion": {
+			input:    AWSS3ServiceAccountMultiCloudConfig,
+			expected: AWSS3ServiceAccountConfigurations,
+		},
+		"AWS S3 Assume Role Access Key": {
+			input:    AWSS3AssumeRoleAccessKeyConfigurations,
+			expected: AWSS3AssumeRoleAccessKeyConfigurations,
+		},
+		"AWS S3 Assume Role Service Access Key": {
+			input:    AWSS3AssumeRoleAccessKeyMultiCloudConfig,
+			expected: AWSS3AssumeRoleAccessKeyConfigurations,
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			b, err := json.Marshal(tt.input)
+			if err != nil {
+				t.Fatalf("failed to marshal input")
+			}
+			actual := &Configurations{}
+			err = json.Unmarshal(b, actual)
+			if err != nil && tt.expected != nil {
+				t.Fatalf("Unmarshal failed with error %s", err.Error())
+			}
+			if !tt.expected.Equals(actual) {
+				t.Fatalf("actual Configuration did not match expected")
+			}
+		})
+	}
+}

+ 305 - 0
pkg/cloud/config/controller.go

@@ -0,0 +1,305 @@
+package config
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+// configID identifies the source and the ID of a configuration to handle duplicate configs from multiple sources
+type configID struct {
+	source ConfigSource
+	key    string
+}
+
+func (cid configID) Equals(that configID) bool {
+	return cid.source == that.source && cid.key == that.key
+}
+
+func newConfigID(source, key string) configID {
+	return configID{
+		source: GetConfigSource(source),
+		key:    key,
+	}
+}
+
+type Status struct {
+	Source ConfigSource
+	Key    string
+	Active bool
+	Valid  bool
+	Config cloud.KeyedConfig
+}
+
+// Controller manages the cloud.Config using config Watcher(s) to track various configuration
+// methods. To do this it has a map of config watchers mapped on configuration source and a list of Observers that it updates
+// upon any change detected from the config watchers.
+type Controller struct {
+	statuses  map[configID]*Status
+	observers []Observer
+	watchers  map[ConfigSource]cloud.KeyedConfigWatcher
+}
+
+// NewController initializes a Config Controller
+func NewController(cp models.Provider) *Controller {
+	providerConfig := provider.ExtractConfigFromProviders(cp)
+	watchers := GetCloudBillingWatchers(providerConfig)
+	ic := &Controller{
+		statuses: make(map[configID]*Status),
+		watchers: watchers,
+	}
+
+	ic.load()
+	ic.pullWatchers()
+
+	go func() {
+		ticker := timeutil.NewJobTicker()
+		defer ticker.Close()
+
+		for {
+			ticker.TickIn(10 * time.Second)
+
+			<-ticker.Ch
+
+			ic.pullWatchers()
+		}
+	}()
+
+	return ic
+}
+
+func (c *Controller) EnableConfig(key, source string) error {
+	cID := newConfigID(source, key)
+	cs, ok := c.statuses[cID]
+	if !ok {
+		return fmt.Errorf("Controller: EnableConfig: config with key %s from source %s does not exist", key, source)
+	}
+	if cs.Active {
+		return fmt.Errorf("Controller: EnableConfig: config with key %s from source %s is already active", key, source)
+	}
+
+	// check for configurations with the same configuration key that are already active.
+	for confID, confStat := range c.statuses {
+		if confID.key != key || confID.source == cID.source {
+			continue
+		}
+
+		// if active disable
+		if confStat.Active == true {
+			confStat.Active = false
+		}
+	}
+
+	cs.Active = true
+	c.putConfig(cs.Config)
+	c.save()
+	return nil
+}
+
+// DisableConfig updates a config status if it was enabled
+func (c *Controller) DisableConfig(key, source string) error {
+	iID := newConfigID(source, key)
+	is, ok := c.statuses[iID]
+	if !ok {
+		return fmt.Errorf("Controller: DisableConfig: config with key %s from source %s does not exist", key, source)
+	}
+	if !is.Active {
+		return fmt.Errorf("Controller: DisableConfig: config with key %s from source %s is already disabled", key, source)
+	}
+
+	is.Active = false
+	c.deleteConfig(iID.key)
+	c.save()
+	return nil
+}
+
+// DeleteConfig removes a config from the statuses and deletes the config on all observers if it was active
+func (c *Controller) DeleteConfig(key, source string) error {
+	id := newConfigID(source, key)
+	is, ok := c.statuses[id]
+	if !ok {
+		return fmt.Errorf("Controller: DeleteConfig: config with key %s from source %s does not exist", key, source)
+	}
+
+	// delete config on observers if active
+	if is.Active {
+		c.deleteConfig(id.key)
+	}
+	delete(c.statuses, id)
+	c.save()
+	return nil
+}
+
+// pullWatchers retrieves configs from watchers and updates configs according to priority of sources
+func (c *Controller) pullWatchers() {
+
+	for source, watcher := range c.watchers {
+		for _, conf := range watcher.GetConfigs() {
+			key := conf.Key()
+			cID := configID{
+				source: source,
+				key:    key,
+			}
+
+			err := conf.Validate()
+			valid := err == nil
+
+			status := Status{
+				Key:    key,
+				Source: source,
+				Active: valid, // active if valid, for now
+				Valid:  valid,
+				Config: conf,
+			}
+
+			// Check existing configs for matching key and source
+			if existingStatus, ok := c.statuses[cID]; ok {
+				// if config has not changed continue
+				if existingStatus.Config.Equals(conf) {
+					continue
+				}
+				// if existing CS is active then it should be replaced by the updated config
+				if existingStatus.Active {
+					if status.Valid {
+						c.putConfig(conf)
+					} else {
+						// if active config is being overwritten by an invalid one, delete the config, as it will not be active
+						c.deleteConfig(key)
+					}
+					c.statuses[cID] = &status
+					continue
+				}
+			}
+
+			// At this point we know that the config from this watcher has changed
+
+	// handle a config with a new unique key for a source or an updated config from a source which was inactive before
+			if valid {
+				for matchID, matchCS := range c.statuses {
+					// skip matching configs
+					if matchID.Equals(cID) {
+						continue
+					}
+
+					if matchCS.Active {
+						// if source is non-multi-cloud disable all other non-multi-cloud sourced configs
+						if cID.source == HelmSource || cID.source == ConfigFileSource {
+							if matchID.source == HelmSource || matchID.source == ConfigFileSource {
+								matchCS.Active = false
+								c.deleteConfig(matchID.key)
+							}
+						}
+
+						// check for configs with the same key that are active
+						if matchID.key == key {
+							// If source has higher priority disable other active configs
+							matchCS.Active = false
+							c.deleteConfig(matchID.key)
+						}
+					}
+				}
+			}
+
+			// update config and put to observers if active
+			c.statuses[cID] = &status
+			if status.Active {
+				c.putConfig(conf)
+			}
+		}
+	}
+}
+
+// todo implement when building config api and persistence is necessary
+func (c *Controller) load() {}
+
+// todo implement when building config api and persistence is necessary
+func (c *Controller) save() {}
+
+func (c *Controller) ExportConfigs(key string) (*Configurations, error) {
+	configs := new(Configurations)
+
+	activeConfigs := make(map[string]cloud.Config)
+	for iID, cs := range c.statuses {
+		if cs.Active {
+			activeConfigs[iID.key] = cs.Config
+		}
+	}
+	if key != "" {
+		conf, ok := activeConfigs[key]
+		if !ok {
+			return nil, fmt.Errorf("Config with key %s does not exist or is inactive", key)
+		}
+		sanitizedConfig := conf.Sanitize()
+		err := configs.Insert(sanitizedConfig)
+		if err != nil {
+			return nil, fmt.Errorf("failed to insert config: %w", err)
+		}
+		return configs, nil
+	}
+
+	for _, conf := range activeConfigs {
+		sanitizedConfig := conf.Sanitize()
+		err := configs.Insert(sanitizedConfig)
+		if err != nil {
+			return nil, fmt.Errorf("failed to insert config: %w", err)
+		}
+	}
+	return configs, nil
+}
+
+func (c *Controller) getActiveConfigs() map[string]cloud.KeyedConfig {
+	bi := make(map[string]cloud.KeyedConfig)
+	for iID, cs := range c.statuses {
+		if cs.Active {
+			bi[iID.key] = cs.Config
+		}
+	}
+	return bi
+}
+
+// deleteConfig ask observers to remove and stop all processes related to a configuration with a given key
+func (c *Controller) deleteConfig(key string) {
+	var wg sync.WaitGroup
+	for _, obs := range c.observers {
+		observer := obs
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			observer.DeleteConfig(key)
+		}()
+	}
+	wg.Wait()
+}
+
+// RegisterObserver gives out the current active list configs and adds the observer to the push list
+func (c *Controller) RegisterObserver(obs Observer) {
+	obs.SetConfigs(c.getActiveConfigs())
+	c.observers = append(c.observers, obs)
+}
+
+func (c *Controller) GetStatus() []Status {
+	var status []Status
+	for _, intStat := range c.statuses {
+		status = append(status, *intStat)
+	}
+	return status
+}
+
+// putConfig gives observers a new config to handle
+func (c *Controller) putConfig(conf cloud.KeyedConfig) {
+	var wg sync.WaitGroup
+	for _, obs := range c.observers {
+		observer := obs
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			observer.PutConfig(conf)
+		}()
+	}
+	wg.Wait()
+}

+ 160 - 0
pkg/cloud/config/controller_handlers.go

@@ -0,0 +1,160 @@
+package config
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/proto"
+)
+
+var protocol = proto.HTTP()
+
+// cloudCostChecks returns a guard handler that answers 503 when the
+// Controller is nil or cloud cost is disabled; it returns nil when requests
+// may proceed to the real handler.
+func (c *Controller) cloudCostChecks() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If the Controller is nil, always return 503
+	if c == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "ConfigController: is nil", http.StatusServiceUnavailable)
+		}
+	}
+
+	if !env.IsCloudCostEnabled() {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "ConfigController: is not enabled", http.StatusServiceUnavailable)
+		}
+	}
+
+	return nil
+}
+
+// GetExportConfigHandler creates a handler which exports the active, sanitized
+// configurations; an optional 'integrationKey' query parameter restricts the
+// export to a single configuration.
+func (c *Controller) GetExportConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// perform basic checks to ensure that the pipeline can be accessed
+	fn := c.cloudCostChecks()
+	if fn != nil {
+		return fn
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		// Empty key means "export all active configs" (see ExportConfigs).
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		configs, err := c.ExportConfigs(integrationKey)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteDataWithMessage(w, configs, "Configurations have been sanitized to protect secrets")
+	}
+}
+
+// GetEnableConfigHandler creates a handler from a http request which enables an integration via the integrationController
+func (c *Controller) GetEnableConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Short-circuit with the guard handler when the controller is unusable.
+	if guard := c.cloudCostChecks(); guard != nil {
+		return guard
+	}
+
+	// Handler requires both 'integrationKey' and 'source' query parameters.
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		values := r.URL.Query()
+
+		integrationKey := values.Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := values.Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		if err := c.EnableConfig(integrationKey, source); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully enabled integration with key %s from source %s", integrationKey, source))
+	}
+}
+
+// GetDisableConfigHandler creates a handler from a http request which disables an integration via the integrationController
+func (c *Controller) GetDisableConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Short-circuit with the guard handler when the controller is unusable.
+	if guard := c.cloudCostChecks(); guard != nil {
+		return guard
+	}
+
+	// Handler requires both 'integrationKey' and 'source' query parameters.
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		values := r.URL.Query()
+
+		integrationKey := values.Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := values.Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		if err := c.DisableConfig(integrationKey, source); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully disabled integration with key %s from source %s", integrationKey, source))
+	}
+}
+
+// GetDeleteConfigHandler creates a handler from a http request which deletes an integration via the integrationController
+// if there are no other integrations with the given integration key, it also clears the data.
+func (c *Controller) GetDeleteConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// perform basic checks to ensure that the pipeline can be accessed
+	fn := c.cloudCostChecks()
+	if fn != nil {
+		return fn
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := r.URL.Query().Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		err := c.DeleteConfig(integrationKey, source)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully deleted integration with key %s from source %s", integrationKey, source))
+
+		// If another integration still uses this key, report it instead of the
+		// data-deleted message; the remaining integration keeps the data alive.
+		// NOTE(review): this writes a second payload to an already-written
+		// ResponseWriter — confirm protocol.WriteData supports multiple writes.
+		for _, intStat := range c.GetStatus() {
+			if intStat.Key == integrationKey {
+				protocol.WriteData(w, fmt.Sprintf("Found additional integration with integration key %s from source %s. If you wish to delete this data do so manually or delete all integrations with matching keys", integrationKey, intStat.Source))
+				return
+			}
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully deleted cloud cost data with key %s", integrationKey))
+	}
+}

+ 871 - 0
pkg/cloud/config/controller_test.go

@@ -0,0 +1,871 @@
+package config
+
+import (
+	"testing"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+)
+
+// validAthenaConf is the baseline valid config shared by the test cases below.
+var validAthenaConf = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup",
+	Account:    "account",
+	Authorizer: &aws.ServiceAccount{},
+}
+
+// validAthenaConfModifiedProperty has the same key as the baseline but is not
+// equal to it because of the change in the non-keyed property Workgroup.
+var validAthenaConfModifiedProperty = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup1",
+	Account:    "account",
+	Authorizer: &aws.ServiceAccount{},
+}
+
+// invalidAthenaConf has the same key as the baseline but is invalid due to its
+// missing (nil) Authorizer.
+var invalidAthenaConf = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup",
+	Account:    "account",
+	Authorizer: nil,
+}
+
+// validBigQueryConf is a valid config with a different key from the baseline.
+var validBigQueryConf = &gcp.BigQueryConfiguration{
+	ProjectID:  "projectID",
+	Dataset:    "dataset",
+	Table:      "table",
+	Authorizer: &gcp.WorkloadIdentity{},
+}
+
+func TestIntegrationController_pullWatchers(t *testing.T) {
+	testCases := map[string]struct {
+		initialStatuses  []*Status
+		configWatchers   map[ConfigSource]cloudconfig.KeyedConfigWatcher
+		expectedStatuses []*Status
+	}{
+		// Helm Source
+		"Helm Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Helm Source New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false, // this value changed
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Config File
+		"Config File Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Config File New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false, // this value changed
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Multi Cloud
+		"Multi Cloud Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+			},
+		},
+		"Multi Cloud Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: MultiCloudSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Watch Interaction
+		"New Helm, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"Update Helm, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"New Helm Invalid, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Update Helm Invalid, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"New Config File, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"Update Config File, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"New Config File Invalid, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Update Config File Invalid, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Test set up and validation
+			initialStatuses := make(map[configID]*Status)
+			for _, status := range tc.initialStatuses {
+				iID := configID{
+					source: status.Source,
+					key:    status.Key,
+				}
+				if _, ok := initialStatuses[iID]; ok {
+					t.Errorf("invalid test, duplicate initial status with key: %s source: %s", iID.key, iID.source.String())
+				}
+				initialStatuses[iID] = status
+			}
+
+			expectedStatuses := make(map[configID]*Status)
+			for _, status := range tc.expectedStatuses {
+				iID := configID{
+					source: status.Source,
+					key:    status.Key,
+				}
+				if _, ok := expectedStatuses[iID]; ok {
+					t.Errorf("invalid test, duplicate expected status with key: %s source: %s", iID.key, iID.source.String())
+				}
+				expectedStatuses[iID] = status
+			}
+
+			// Initialize controller
+			icd := &Controller{
+				statuses: initialStatuses,
+				watchers: tc.configWatchers,
+			}
+			icd.pullWatchers()
+			if len(icd.statuses) != len(tc.expectedStatuses) {
+				t.Errorf("integration statuses did not have the correct length actual: %d, expected: %d", len(icd.statuses), len(tc.expectedStatuses))
+			}
+
+			for iID, actualStatus := range icd.statuses {
+				expectedStatus, ok := expectedStatuses[iID]
+				if !ok {
+					// continue: dereferencing the missing (nil) expected status
+					// below would panic the test run
+					t.Errorf("expected integration statuses is missing with integration ID: %v", iID)
+					continue
+				}
+
+				// failure here indicates an issue with the configID
+				if actualStatus.Key != expectedStatus.Key {
+					t.Errorf("integration status does not have the correct Key values actual: %s, expected: %s", actualStatus.Key, expectedStatus.Key)
+				}
+
+				// failure here indicates an issue with the configID
+				if actualStatus.Source != expectedStatus.Source {
+					t.Errorf("integration status does not have the correct Source values actual: %s, expected: %s", actualStatus.Source, expectedStatus.Source)
+				}
+
+				if actualStatus.Active != expectedStatus.Active {
+					t.Errorf("integration status does not have the correct Active values actual: %v, expected: %v", actualStatus.Active, expectedStatus.Active)
+				}
+
+				if actualStatus.Valid != expectedStatus.Valid {
+					t.Errorf("integration status does not have the correct Valid values actual: %v, expected: %v", actualStatus.Valid, expectedStatus.Valid)
+				}
+
+				if !actualStatus.Config.Equals(expectedStatus.Config) {
+					t.Errorf("integration status does not have the correct config values actual: %v, expected: %v", actualStatus.Config, expectedStatus.Config)
+				}
+			}
+		})
+	}
+}

+ 95 - 0
pkg/cloud/config/mock.go

@@ -0,0 +1,95 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// MockConfig is a minimal cloud.Config implementation for tests; it carries no
+// state, is always valid, and equals any other MockConfig.
+type MockConfig struct {
+}
+
+// Validate always succeeds; a MockConfig has no state to check.
+func (mc *MockConfig) Validate() error {
+	return nil
+}
+
+// Equals reports whether config is also a *MockConfig; all MockConfigs are
+// considered interchangeable.
+func (mc *MockConfig) Equals(config cloud.Config) bool {
+	_, ok := config.(*MockConfig)
+	return ok
+}
+
+// Sanitize returns a fresh MockConfig; there are no secrets to redact.
+func (mc *MockConfig) Sanitize() cloud.Config {
+	return &MockConfig{}
+}
+
+// MockKeyedConfig implements KeyedConfig it only requires a key to be valid, there is an additional property allowing
+// MockKeyedConfig with the same key to not be equal
+type MockKeyedConfig struct {
+	key      string // the config's identifier, returned by Key()
+	property string // non-keyed property; lets two configs share a key but differ
+	valid    bool   // forces Validate() to fail when false
+}
+
+// NewMockKeyedConfig constructs a MockKeyedConfig with the given key,
+// non-keyed property, and validity flag.
+func NewMockKeyedConfig(key, property string, valid bool) cloud.KeyedConfig {
+	return &MockKeyedConfig{
+		key:      key,
+		property: property,
+		valid:    valid,
+	}
+}
+
+// Validate returns an error when the config was constructed as invalid or is
+// missing its key, and nil otherwise.
+func (mkc *MockKeyedConfig) Validate() error {
+	switch {
+	case !mkc.valid:
+		return fmt.Errorf("MockKeyedConfig: set to invalid")
+	case mkc.key == "":
+		return fmt.Errorf("MockKeyedConfig: missing key")
+	default:
+		return nil
+	}
+}
+
+// Equals reports whether config is a *MockKeyedConfig with identical key,
+// property, and validity flag.
+func (mkc *MockKeyedConfig) Equals(config cloud.Config) bool {
+	other, ok := config.(*MockKeyedConfig)
+	if !ok {
+		return false
+	}
+	return mkc.key == other.key &&
+		mkc.property == other.property &&
+		mkc.valid == other.valid
+}
+
+// Sanitize returns a copy of the config; there are no secrets to redact.
+func (mkc *MockKeyedConfig) Sanitize() cloud.Config {
+	return &MockKeyedConfig{
+		key:      mkc.key,
+		property: mkc.property,
+		valid:    mkc.valid,
+	}
+}
+
+// Key returns the config's identifier.
+func (mkc *MockKeyedConfig) Key() string {
+	return mkc.key
+}
+
+// Provider identifies this config as belonging to the custom provider.
+func (mkc *MockKeyedConfig) Provider() string {
+	return kubecost.CustomProvider
+}
+
+// MockKeyedConfigWatcher is a KeyedConfigWatcher that returns a fixed list of
+// configs, for use in tests.
+type MockKeyedConfigWatcher struct {
+	Integrations []cloud.KeyedConfig
+}
+
+// GetConfigs returns the watcher's fixed list of configs.
+func (mkcw *MockKeyedConfigWatcher) GetConfigs() []cloud.KeyedConfig {
+	return mkcw.Integrations
+}

+ 14 - 0
pkg/cloud/config/observer.go

@@ -0,0 +1,14 @@
+package config
+
+import (
+	"github.com/opencost/opencost/pkg/cloud"
+)
+
+// Observer should be implemented by any struct which needs access to the up-to-date list of active configs
+// that the config Controller provides. Any cloud billing integration used in the application should pass
+// through this interface, and be revoked if it is not included in a Delete call.
+type Observer interface {
+	PutConfig(cloud.KeyedConfig)
+	DeleteConfig(string)
+	SetConfigs(map[string]cloud.KeyedConfig)
+}

+ 351 - 0
pkg/cloud/config/watcher.go

@@ -0,0 +1,351 @@
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/models"
+
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// Paths at which the helm chart mounts configuration secrets.
const authSecretPath = "/var/secrets/service-key.json"
const storageConfigSecretPath = "/var/azure-storage-config/azure-storage-config.json"
const cloudIntegrationSecretPath = "/cloud-integration/cloud-integration.json"

// HelmWatcher surfaces cloud billing configurations from secret files mounted by the helm chart.
type HelmWatcher struct {
	providerConfig models.ProviderConfig
}
+
// GetConfigs checks secret files and config map set via the helm chart for Cloud Billing integrations. Returns
// only one billing integration due to values being shared by different configuration types.
// Precedence: Azure storage secret first, then — only if the auth secret exists — GCP
// BigQuery, then AWS Athena, based on which custom pricing values are populated.
func (hw *HelmWatcher) GetConfigs() []cloud.KeyedConfig {
	var configs []cloud.KeyedConfig

	// error deliberately ignored: a partially populated CustomPricing is still usable below
	customPricing, _ := hw.providerConfig.GetCustomPricingData()

	// check for Azure Storage config in secret file
	exists, err := fileutil.FileExists(storageConfigSecretPath)
	if err != nil {
		log.Errorf("HelmWatcher: AzureStorage: error checking file at '%s': %s", storageConfigSecretPath, err.Error())
	}

	// If file does not exist implies that this configuration method was not used
	if exists {
		result, err2 := ioutil.ReadFile(storageConfigSecretPath)
		if err2 != nil {
			log.Errorf("HelmWatcher: AzureStorage: Error reading file: %s", err2.Error())
			return nil
		}

		asc := &azure.AzureStorageConfig{}
		err2 = json.Unmarshal(result, asc)
		if err2 != nil {
			log.Errorf("HelmWatcher: AzureStorage: Error reading json: %s", err2.Error())
			return nil
		}
		if asc != nil && !asc.IsEmpty() {
			// If subscription id is not set it may be present in the rate card API
			// service key mounted at authSecretPath
			if asc.SubscriptionId == "" {
				ask := &azure.AzureServiceKey{}
				err3 := loadFile(authSecretPath, ask)
				if err3 != nil {
					log.Errorf("HelmWatcher: AzureStorage: AzureRateCard: %s", err3)
				}
				if ask != nil {
					asc.SubscriptionId = ask.SubscriptionID
				}
			}
			// If SubscriptionID is still empty check the customPricing
			if asc.SubscriptionId == "" {
				asc.SubscriptionId = customPricing.AzureSubscriptionID
			}
			kc := azure.ConvertAzureStorageConfigToConfig(*asc)
			configs = append(configs, kc)
			return configs
		}

	}

	exists, err = fileutil.FileExists(authSecretPath)
	if err != nil {
		log.Errorf("HelmWatcher:  error checking file at '%s': %s", authSecretPath, err.Error())
	}

	// If the Auth Secret is not set then the config file watch will be responsible for providing the configurer for the
	// config values present in the CustomPricing object
	if exists {
		if customPricing.BillingDataDataset != "" {
			// Big Query Configuration
			bqc := gcp.BigQueryConfig{
				ProjectID:          customPricing.ProjectID,
				BillingDataDataset: customPricing.BillingDataDataset,
			}

			// the service key is optional; loadFile returns nil error when the file is absent
			key := make(map[string]string)
			err2 := loadFile(authSecretPath, &key)
			if err2 != nil {
				log.Errorf("HelmWatcher: GCP: %s", err2)
			}
			if key != nil && len(key) != 0 {
				bqc.Key = key
			}

			kc := gcp.ConvertBigQueryConfigToConfig(bqc)
			configs = append(configs, kc)
			return configs
		}

		if customPricing.AthenaBucketName != "" {
			aai := aws.AwsAthenaInfo{
				AthenaBucketName: customPricing.AthenaBucketName,
				AthenaRegion:     customPricing.AthenaRegion,
				AthenaDatabase:   customPricing.AthenaDatabase,
				AthenaTable:      customPricing.AthenaTable,
				AthenaWorkgroup:  customPricing.AthenaWorkgroup,
				AccountID:        customPricing.AthenaProjectID,
				MasterPayerARN:   customPricing.MasterPayerARN,
			}

			// If Account ID is blank check ProjectID
			if aai.AccountID == "" {
				aai.AccountID = customPricing.ProjectID
			}

			// credentials come from the mounted auth secret, not the custom pricing config
			var accessKey aws.AWSAccessKey
			err2 := loadFile(authSecretPath, &accessKey)
			if err2 != nil {
				log.Errorf("HelmWatcher: AWS: %s", err2)
			}

			aai.ServiceKeyName = accessKey.AccessKeyID
			aai.ServiceKeySecret = accessKey.SecretAccessKey

			kc := aws.ConvertAwsAthenaInfoToConfig(aai)
			configs = append(configs, kc)
			return configs

		}
	}

	return configs
}
+
+type ConfigFileWatcher struct {
+	providerConfig models.ProviderConfig
+}
+
+// GetConfigs checks secret files and config map set via the helm chart for Cloud Billing integrations. Returns
+// only one billing integration due to values being shared by different configuration types.
+func (cfw *ConfigFileWatcher) GetConfigs() []cloud.KeyedConfig {
+	var configs []cloud.KeyedConfig
+
+	customPricing, _ := cfw.providerConfig.GetCustomPricingData()
+
+	// Detect Azure Storage configuration
+	if customPricing.AzureSubscriptionID != "" {
+		asc := azure.AzureStorageConfig{
+			SubscriptionId: customPricing.AzureSubscriptionID,
+			AccountName:    customPricing.AzureStorageAccount,
+			AccessKey:      customPricing.AzureStorageAccessKey,
+			ContainerName:  customPricing.AzureStorageContainer,
+			ContainerPath:  customPricing.AzureContainerPath,
+			AzureCloud:     customPricing.AzureCloud,
+		}
+		kc := azure.ConvertAzureStorageConfigToConfig(asc)
+		configs = append(configs, kc)
+		return configs
+
+	}
+
+	// Detect Big Query Configuration
+	if customPricing.BillingDataDataset != "" {
+		bqc := gcp.BigQueryConfig{
+			ProjectID:          customPricing.ProjectID,
+			BillingDataDataset: customPricing.BillingDataDataset,
+		}
+
+		var key map[string]string
+		err2 := loadFile(env.GetConfigPathWithDefault("/models/")+"key.json", &key)
+		if err2 != nil {
+			log.Errorf("ConfigFileWatcher: GCP: %s", err2)
+		}
+		if key != nil && len(key) != 0 {
+			bqc.Key = key
+		}
+
+		kc := gcp.ConvertBigQueryConfigToConfig(bqc)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	// Detect AWS configuration
+	if customPricing.AthenaBucketName != "" {
+		aai := aws.AwsAthenaInfo{
+			AthenaBucketName: customPricing.AthenaBucketName,
+			AthenaRegion:     customPricing.AthenaRegion,
+			AthenaDatabase:   customPricing.AthenaDatabase,
+			AthenaTable:      customPricing.AthenaTable,
+			AthenaWorkgroup:  customPricing.AthenaWorkgroup,
+			ServiceKeyName:   customPricing.ServiceKeyName,
+			ServiceKeySecret: customPricing.ServiceKeySecret,
+			AccountID:        customPricing.AthenaProjectID,
+			MasterPayerARN:   customPricing.MasterPayerARN,
+		}
+
+		// If Account ID is blank check ProjectID
+		if aai.AccountID == "" {
+			aai.AccountID = customPricing.ProjectID
+		}
+
+		// If the sample nil service key name is set, zero it out so that it is not
+		// misinterpreted as a real service key.
+		if aai.ServiceKeyName == "AKIXXX" {
+			aai.ServiceKeyName = ""
+		}
+
+		kc := aws.ConvertAwsAthenaInfoToConfig(aai)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	//detect Alibaba Configuration
+
+	if customPricing.AlibabaClusterRegion != "" {
+		aliCloudInfo := alibaba.AlibabaInfo{
+			AlibabaClusterRegion:    customPricing.AlibabaClusterRegion,
+			AlibabaServiceKeyName:   customPricing.AlibabaServiceKeyName,
+			AlibabaServiceKeySecret: customPricing.AlibabaServiceKeySecret,
+			AlibabaAccountID:        customPricing.ProjectID,
+		}
+		kc := alibaba.ConvertAlibabaInfoToConfig(aliCloudInfo)
+		configs = append(configs, kc)
+		return configs
+	}
+	return configs
+}
+
+// MultiCloudWatcher ingests values a MultiCloudConfig from the file pulled in from the secret by the helm chart
+type MultiCloudWatcher struct {
+}
+
+func (mcw *MultiCloudWatcher) GetConfigs() []cloud.KeyedConfig {
+	multiConfigPath := path.Join(env.GetConfigPathWithDefault("/var/configs"), cloudIntegrationSecretPath)
+	exists, err := fileutil.FileExists(multiConfigPath)
+	if err != nil {
+		log.Errorf("MultiCloudWatcher:  error checking file at '%s': %s", multiConfigPath, err.Error())
+	}
+
+	// If config does not exist implies that this configuration method was not used
+	if !exists {
+		// check the original location of secret mount
+		multiConfigPath = path.Join("/var", cloudIntegrationSecretPath)
+		exists, err = fileutil.FileExists(multiConfigPath)
+		if err != nil {
+			log.Errorf("MultiCloudWatcher:  error checking file at '%s': %s", multiConfigPath, err.Error())
+		}
+
+		// If config does not exist implies that this configuration method was not used
+		if !exists {
+			return nil
+		}
+	}
+
+	configurations := &Configurations{}
+	err = loadFile(multiConfigPath, configurations)
+	if err != nil {
+		log.Errorf("MultiCloudWatcher: Error getting file '%s': %s", multiConfigPath, err.Error())
+		return nil
+	}
+
+	return configurations.ToSlice()
+}
+
+func GetCloudBillingWatchers(providerConfig models.ProviderConfig) map[ConfigSource]cloud.KeyedConfigWatcher {
+	watchers := make(map[ConfigSource]cloud.KeyedConfigWatcher, 3)
+	watchers[MultiCloudSource] = &MultiCloudWatcher{}
+	if providerConfig != nil {
+		watchers[HelmSource] = &HelmWatcher{providerConfig: providerConfig}
+		watchers[ConfigFileSource] = &ConfigFileWatcher{providerConfig: providerConfig}
+	}
+
+	return watchers
+}
+
+// loadFile unmarshals the json content of a file into the provided object
+// an empty return with no error indicates that the file did not exist.
+func loadFile[T any](path string, content T) error {
+	exists, err := fileutil.FileExists(path)
+	if err != nil {
+		return fmt.Errorf("loadFile: error checking file at '%s': %s", path, err.Error())
+	}
+
+	// If file does not exist implies that this configuration method was not used
+	if !exists {
+		return nil
+	}
+
+	result, err := ioutil.ReadFile(path)
+	if err != nil {
+		return fmt.Errorf("loadFile: Error reading file: %s", err.Error())
+	}
+
+	err = json.Unmarshal(result, content)
+	if err != nil {
+		return fmt.Errorf("loadFile: Error reading json: %s", err.Error())
+	}
+
+	return nil
+}
+
// ConfigSource is an Enum of the sources; the int value of the Source determines its priority.
type ConfigSource int

const (
	UnknownSource ConfigSource = iota
	ConfigControllerSource
	MultiCloudSource
	ConfigFileSource
	HelmSource
)

// GetConfigSource parses str into a ConfigSource; unrecognized values map to
// UnknownSource. It is the inverse of ConfigSource.String.
func GetConfigSource(str string) ConfigSource {
	switch str {
	case "configController":
		return ConfigControllerSource
	case "configfile":
		return ConfigFileSource
	case "helm":
		return HelmSource
	case "multicloud":
		return MultiCloudSource
	default:
		return UnknownSource
	}
}

// String returns the canonical name of the ConfigSource. Any value without a
// name (including UnknownSource) stringifies as "unknown", so the explicit
// UnknownSource case is folded into the default branch.
func (cs ConfigSource) String() string {
	switch cs {
	case ConfigControllerSource:
		return "configController"
	case ConfigFileSource:
		return "configfile"
	case HelmSource:
		return "helm"
	case MultiCloudSource:
		return "multicloud"
	default:
		return "unknown"
	}
}

+ 9 - 9
pkg/cloud/gcp/authorizer.go

@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"google.golang.org/api/option"
 )
 
@@ -13,7 +13,7 @@ const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"
 
 // Authorizer provide a []option.ClientOption which is used in when creating clients in the GCP SDK
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	CreateGCPClientOptions() ([]option.ClientOption, error)
 }
 
@@ -36,7 +36,7 @@ type ServiceAccountKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 2)
-	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
 	fmap["key"] = gkc.Key
 	return json.Marshal(fmap)
 }
@@ -49,7 +49,7 @@ func (gkc *ServiceAccountKey) Validate() error {
 	return nil
 }
 
-func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+func (gkc *ServiceAccountKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -71,10 +71,10 @@ func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (gkc *ServiceAccountKey) Sanitize() config.Config {
+func (gkc *ServiceAccountKey) Sanitize() cloud.Config {
 	redactedMap := make(map[string]string, len(gkc.Key))
 	for key, _ := range gkc.Key {
-		redactedMap[key] = config.Redacted
+		redactedMap[key] = cloud.Redacted
 	}
 	return &ServiceAccountKey{
 		Key: redactedMap,
@@ -103,7 +103,7 @@ type WorkloadIdentity struct{}
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 1)
-	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
 	return json.Marshal(fmap)
 }
 
@@ -111,7 +111,7 @@ func (wi *WorkloadIdentity) Validate() error {
 	return nil
 }
 
-func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+func (wi *WorkloadIdentity) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -123,7 +123,7 @@ func (wi *WorkloadIdentity) Equals(config config.Config) bool {
 	return true
 }
 
-func (wi *WorkloadIdentity) Sanitize() config.Config {
+func (wi *WorkloadIdentity) Sanitize() cloud.Config {
 	return &WorkloadIdentity{}
 }
 

+ 13 - 8
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -6,7 +6,8 @@ import (
 	"strings"
 
 	"cloud.google.com/go/bigquery"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -43,7 +44,7 @@ func (bqc *BigQueryConfiguration) Validate() error {
 	return nil
 }
 
-func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+func (bqc *BigQueryConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -77,7 +78,7 @@ func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+func (bqc *BigQueryConfiguration) Sanitize() cloud.Config {
 	return &BigQueryConfiguration{
 		ProjectID:  bqc.ProjectID,
 		Dataset:    bqc.Dataset,
@@ -91,6 +92,10 @@ func (bqc *BigQueryConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
 }
 
+func (bqc *BigQueryConfiguration) Provider() string {
+	return kubecost.GCPProvider
+}
+
 func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
 	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
 }
@@ -113,19 +118,19 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	projectID, err := cloud.GetInterfaceValue[string](fmap, "projectID")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
 	bqc.ProjectID = projectID
 
-	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	dataset, err := cloud.GetInterfaceValue[string](fmap, "dataset")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
 	bqc.Dataset = dataset
 
-	table, err := config.GetInterfaceValue[string](fmap, "table")
+	table, err := cloud.GetInterfaceValue[string](fmap, "table")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
@@ -135,7 +140,7 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -143,7 +148,7 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) cloud.KeyedConfig {
 	if bqc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -122,7 +122,7 @@ func TestBigQueryConfiguration_Validate(t *testing.T) {
 func TestBigQueryConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     BigQueryConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 2 - 1
pkg/cloud/gcp/bigqueryintegration.go

@@ -84,7 +84,7 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*k
 
 	// Perform Query and parse values
 
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, bqi.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, bqi.Key())
 	if err != nil {
 		return ccsr, fmt.Errorf("error creating new CloudCostSetRange: %s", err)
 	}
@@ -110,6 +110,7 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*k
 		ccsr.LoadCloudCost(ccl.CloudCost)
 
 	}
+
 	return ccsr, nil
 
 }

+ 15 - 3
pkg/cloud/gcp/bigqueryquerier.go

@@ -5,7 +5,6 @@ import (
 
 	"cloud.google.com/go/bigquery"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 )
 
 type BigQueryQuerier struct {
@@ -14,10 +13,14 @@ type BigQueryQuerier struct {
 }
 
 func (bqq *BigQueryQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if bqq.ConnectionStatus.String() == "" {
+		bqq.ConnectionStatus = cloud.InitialStatus
+	}
 	return bqq.ConnectionStatus
 }
 
-func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
+func (bqq *BigQueryQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*BigQueryQuerier)
 	if !ok {
 		return false
@@ -41,5 +44,14 @@ func (bqq *BigQueryQuerier) Query(ctx context.Context, queryStr string) (*bigque
 	}
 
 	query := client.Query(queryStr)
-	return query.Read(ctx)
+	iter, err := query.Read(ctx)
+
+	// If result is empty and connection status is not already successful update status to missing data
+	if iter == nil && bqq.ConnectionStatus != cloud.SuccessfulConnection {
+		bqq.ConnectionStatus = cloud.MissingData
+		return iter, nil
+	}
+
+	bqq.ConnectionStatus = cloud.SuccessfulConnection
+	return iter, nil
 }

+ 28 - 0
pkg/cloud/provider/providerconfig.go

@@ -7,6 +7,10 @@ import (
 	"strconv"
 	"sync"
 
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/config"
@@ -294,3 +298,27 @@ func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
 	}
 	return defaultPricing, nil
 }
+
// ExtractConfigFromProviders retrieves the ProviderConfig from a models.Provider
// implementation. Returns nil (after logging an error) for a nil provider or an
// unrecognized provider type. A CSVProvider is unwrapped recursively via its
// embedded CustomProvider.
func ExtractConfigFromProviders(prov models.Provider) models.ProviderConfig {
	if prov == nil {
		log.Errorf("cannot extract config from nil provider")
		return nil
	}
	switch p := prov.(type) {
	case *CSVProvider:
		// CSVProvider wraps a CustomProvider; recurse to reach its Config
		return ExtractConfigFromProviders(p.CustomProvider)
	case *CustomProvider:
		return p.Config
	case *gcp.GCP:
		return p.Config
	case *aws.AWS:
		return p.Config
	case *azure.Azure:
		return p.Config
	case *alibaba.Alibaba:
		return p.Config
	default:
		log.Errorf("failed to extract config from provider")
		return nil
	}
}

+ 207 - 0
pkg/cloudcost/ingestionmanager.go

@@ -0,0 +1,207 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
// IngestionManager is a config.Observer which creates Ingestor instances based on the signals that it receives from the
// config.Controller
type IngestionManager struct {
	lock      sync.Mutex           // guards ingestors
	ingestors map[string]*ingestor // active ingestors keyed by integration key
	config    IngestorConfig       // shared configuration applied to every ingestor
	repo      Repository           // destination repository for ingested cloud cost sets
}

// NewIngestionManager creates a new IngestionManager and registers it with the provided integration controller.
// If controller or repo is nil, an inert manager with no ingestors (and no registration) is returned.
func NewIngestionManager(controller *config.Controller, repo Repository, ingConf IngestorConfig) *IngestionManager {
	// return empty ingestion manager if store or integration controller are nil
	if controller == nil || repo == nil {
		return &IngestionManager{
			ingestors: map[string]*ingestor{},
		}
	}

	im := &IngestionManager{
		ingestors: map[string]*ingestor{},
		repo:      repo,
		config:    ingConf,
	}
	// registration causes the controller to drive PutConfig/DeleteConfig/SetConfigs
	controller.RegisterObserver(im)

	return im
}
+
// PutConfig is an imperative function which puts an ingestor for the provided Integration.
// Failures to create the ingestor are logged, not returned.
func (im *IngestionManager) PutConfig(kc cloud.KeyedConfig) {
	im.lock.Lock()
	defer im.lock.Unlock()
	err := im.createIngestor(kc)
	if err != nil {
		log.Errorf("IngestionManager: PutConfig failed to create billing integration: %s", err.Error())
	}
}

// DeleteConfig is an imperative function which removes an ingestor with a matching key.
// No-op when no ingestor exists for the key.
func (im *IngestionManager) DeleteConfig(key string) {
	im.lock.Lock()
	defer im.lock.Unlock()
	im.deleteIngestor(key)
}
+
+// SetConfigs is a declarative function for setting which BillingIntegrations IngestionManager should have ingestors for
+func (im *IngestionManager) SetConfigs(configs map[string]cloud.KeyedConfig) {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	// delete any exiting ingestors
+	for key, _ := range im.ingestors {
+		im.deleteIngestor(key)
+	}
+	// create  ingestors for provided
+	for _, conf := range configs {
+		err := im.createIngestor(conf)
+		if err != nil {
+			log.Errorf("IngestionManager: error creating ingestor: %s", err.Error())
+		}
+	}
+}
+
+func (im *IngestionManager) StartAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	wg.Add(len(im.ingestors))
+	for key := range im.ingestors {
+		ing := im.ingestors[key]
+		go func() {
+			defer wg.Done()
+			ing.Start(false)
+
+		}()
+	}
+	wg.Wait()
+}
+
+func (im *IngestionManager) StopAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	wg.Add(len(im.ingestors))
+	for key := range im.ingestors {
+		ing := im.ingestors[key]
+		go func() {
+			defer wg.Done()
+			ing.Stop()
+		}()
+	}
+	wg.Wait()
+}
+
+func (im *IngestionManager) RebuildAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	wg.Add(len(im.ingestors))
+	for key := range im.ingestors {
+		go func(ing *ingestor) {
+			defer wg.Done()
+			ing.Stop()
+			ing.Start(true)
+
+		}(im.ingestors[key])
+	}
+	wg.Wait()
+}
+
// Rebuild stops the ingestor with the given key and restarts it with a full rebuild
// of its coverage. Errors when no ingestor exists for the key.
func (im *IngestionManager) Rebuild(integrationKey string) error {
	im.lock.Lock()
	defer im.lock.Unlock()
	ing, ok := im.ingestors[integrationKey]
	if !ok {
		return fmt.Errorf("CloudCost: IngestionManager: Rebuild: failed to rebuild, integration with key does not exist: %s", integrationKey)
	}
	ing.Stop()
	ing.Start(true)
	return nil
}

// RepairAll rebuilds the given time range on every ingestor. The builds run
// asynchronously, one goroutine per ingestor; completion is not awaited.
func (im *IngestionManager) RepairAll(start, end time.Time) error {
	im.lock.Lock()
	defer im.lock.Unlock()
	// align the requested range to the ingestor resolution before windowing
	s := kubecost.RoundForward(start, im.config.Resolution)
	e := kubecost.RoundForward(end, im.config.Resolution)
	windows, err := kubecost.GetWindowsForQueryWindow(s, e, im.config.QueryWindow)
	if err != nil {
		return fmt.Errorf("CloudCost: IngestionManager: Repair could not retrieve windows: %s", err.Error())
	}

	for key := range im.ingestors {
		go func(ing *ingestor) {
			for _, window := range windows {
				ing.BuildWindow(*window.Start(), *window.End())
			}
		}(im.ingestors[key])
	}

	return nil
}

// Repair rebuilds the given time range on the single ingestor with the matching key,
// asynchronously. Errors when no ingestor exists for the key.
// NOTE(review): error message says "failed to rebuild"; "failed to repair" looks intended — confirm.
func (im *IngestionManager) Repair(integrationKey string, start, end time.Time) error {
	im.lock.Lock()
	defer im.lock.Unlock()
	s := kubecost.RoundForward(start, im.config.Resolution)
	e := kubecost.RoundForward(end, im.config.Resolution)
	windows, err := kubecost.GetWindowsForQueryWindow(s, e, im.config.QueryWindow)
	if err != nil {
		return fmt.Errorf("CloudCost: IngestionManager: Repair could not retrieve windows: %s", err.Error())
	}
	ing, ok := im.ingestors[integrationKey]
	if !ok {
		return fmt.Errorf("CloudCost: IngestionManager: Repair: failed to rebuild, integration with key does not exist: %s", integrationKey)
	}
	go func(ing *ingestor) {
		for _, window := range windows {
			ing.BuildWindow(*window.Start(), *window.End())
		}
	}(ing)
	return nil
}
+
// deleteIngestor stops then removes an ingestor from the map of ingestors.
// No-op when no ingestor exists for the key. Caller must hold im.lock.
func (im *IngestionManager) deleteIngestor(integrationKey string) {
	ing, ok := im.ingestors[integrationKey]
	if !ok {
		return
	}
	log.Infof("CloudCost: IngestionManager: deleting integration with key: %s", integrationKey)
	// Stop blocks until the ingestor's build/run goroutines have acknowledged the stop
	ing.Stop()

	delete(im.ingestors, integrationKey)
}
+
+// createIngestor stops existing ingestor with matching key then creates and starts and new ingestor
+func (im *IngestionManager) createIngestor(config cloud.KeyedConfig) error {
+	if config == nil {
+		return fmt.Errorf("cannot create ingestor from nil integration")
+	}
+	// delete ingestor with matching key if it exists
+	im.deleteIngestor(config.Key())
+	log.Infof("CloudCost: IngestionManager: creating integration with key: %s", config.Key())
+	ing, err := NewIngestor(im.config, im.repo, config)
+	if err != nil {
+		return fmt.Errorf("IngestionManager: createIngestor: %w", err)
+	}
+
+	ing.Start(false)
+
+	im.ingestors[config.Key()] = ing
+
+	return nil
+}

+ 342 - 0
pkg/cloudcost/ingestor.go

@@ -0,0 +1,342 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
// IngestorStatus includes diagnostic values for a given Ingestor
type IngestorStatus struct {
	Created          time.Time              // when the ingestor was constructed
	LastRun          time.Time              // time of the most recent run
	NextRun          time.Time              // LastRun plus the configured refresh rate
	Runs             int                    // number of completed runs
	Coverage         kubecost.Window        // time range for which data has been ingested
	ConnectionStatus cloud.ConnectionStatus // status reported by the underlying integration
}

// IngestorConfig is a configuration struct for an Ingestor
type IngestorConfig struct {
	MonthToDateRunInterval int           // how often (in runs) the month-to-date window is refreshed
	RefreshRate            time.Duration // interval between runs
	Resolution             time.Duration // granularity that windows are rounded to
	Duration               time.Duration // how far back to build data
	QueryWindow            time.Duration // size of each query block
	RunWindow              time.Duration // range rebuilt on each periodic run
}

// DefaultIngestorConfiguration retrieves an IngestorConfig from env variables
func DefaultIngestorConfiguration() IngestorConfig {
	return IngestorConfig{
		Resolution:             timeutil.Day,
		Duration:               timeutil.Day * time.Duration(env.GetDataRetentionDailyResolutionDays()),
		MonthToDateRunInterval: env.GetCloudCostMonthToDateInterval(),
		RefreshRate:            time.Hour * time.Duration(env.GetCloudCostRefreshRateHours()),
		QueryWindow:            timeutil.Day * time.Duration(env.GetCloudCostQueryWindowDays()),
		RunWindow:              timeutil.Day * time.Duration(env.GetCloudCostRunWindowDays()),
	}
}

// ingestor runs the process for ingesting CloudCost from its CloudCostIntegration and store it in a Repository
type ingestor struct {
	key          string               // integration key identifying this ingestor
	integration  CloudCostIntegration // source of cloud cost data
	config       IngestorConfig
	repo         Repository // destination for ingested cloud cost sets
	runID        string     // random id regenerated on each Start, used in log lines
	lastRun      time.Time
	runs         int
	creationTime time.Time
	coverage     kubecost.Window // union of windows successfully ingested
	coverageLock sync.Mutex      // guards coverage
	isRunning    atomic.Bool     // true between Start and the end of Stop
	isStopping   atomic.Bool     // true while a Stop is in progress
	exitBuildCh  chan string     // signals the build goroutine to exit
	exitRunCh    chan string     // signals the run goroutine to exit
}
+
+// NewIngestor is an initializer for ingestor
+func NewIngestor(ingestorConfig IngestorConfig, repo Repository, config cloud.KeyedConfig) (*ingestor, error) {
+	if repo == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: repository connot be nil")
+	}
+	if config == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: integration connot be nil")
+	}
+	cci := GetIntegrationFromConfig(config)
+	if cci == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: provider integration config was not a valid type: %T", config)
+	}
+	now := time.Now().UTC()
+	midnight := kubecost.RoundForward(now, timeutil.Day)
+	return &ingestor{
+		config:       ingestorConfig,
+		repo:         repo,
+		key:          config.Key(),
+		integration:  cci,
+		creationTime: now,
+		lastRun:      now,
+		coverage:     kubecost.NewClosedWindow(midnight, midnight),
+	}, nil
+}
+
// LoadWindow checks repository coverage for each daily window in [start, end). On the
// first day found missing it builds the ENTIRE [start, end) range via BuildWindow and
// returns; days already present only expand the tracked coverage.
func (ing *ingestor) LoadWindow(start, end time.Time) {
	windows, err := kubecost.GetWindows(start, end, timeutil.Day)
	if err != nil {
		log.Errorf("CloudCost[%s]: ingestor: invalid window %s", ing.key, kubecost.NewWindow(&start, &end))
		return
	}

	for _, window := range windows {
		has, err2 := ing.repo.Has(*window.Start(), ing.key)
		if err2 != nil {
			log.Errorf("CloudCost[%s]: ingestor: error when loading window: %s", ing.key, err2.Error())
		}
		if !has {
			// note: rebuilds the full requested range, not just this missing day
			ing.BuildWindow(start, end)
			return
		}
		ing.expandCoverage(window)
		log.Debugf("CloudCost[%s]: ingestor: skipping build for window %s, coverage already exists", ing.key, window.String())
	}

}
+
// BuildWindow queries the integration for cloud costs in [start, end), persists each
// resulting CloudCostSet to the repository, and expands the tracked coverage.
// Failures are logged; per-set save errors do not abort the remaining sets.
func (ing *ingestor) BuildWindow(start, end time.Time) {
	log.Infof("CloudCost[%s]: ingestor: building window %s", ing.key, kubecost.NewWindow(&start, &end))
	ccsr, err := ing.integration.GetCloudCost(start, end)
	if err != nil {
		log.Errorf("CloudCost[%s]: ingestor: build failed for window %s: %s", ing.key, kubecost.NewWindow(&start, &end), err.Error())
		return
	}
	for _, ccs := range ccsr.CloudCostSets {
		log.Debugf("BuildWindow[%s]: GetCloudCost: writing cloud costs for window %s: %d", ccs.Integration, ccs.Window, len(ccs.CloudCosts))
		err2 := ing.repo.Put(ccs)
		if err2 != nil {
			log.Errorf("CloudCost[%s]: ingestor: failed to save Cloud Cost Set with window %s: %s", ing.key, ccs.GetWindow().String(), err2.Error())
		}
		// coverage expands even if the save failed; the window was still processed
		ing.expandCoverage(ccs.Window)
	}
}
+
// Start launches the ingestor's build and run goroutines. When rebuild is true the
// build re-queries windows even if coverage already exists. A second Start while
// running is a logged no-op (guarded by the isRunning CAS).
func (ing *ingestor) Start(rebuild bool) {

	// If already running, log that and return.
	if !ing.isRunning.CompareAndSwap(false, true) {
		log.Infof("CloudCost: ingestor: is already running")
		return
	}

	// fresh run id for correlating this run's log lines
	ing.runID = stringutil.RandSeq(5)

	// unbuffered: Stop blocks until each goroutine receives its exit message
	ing.exitBuildCh = make(chan string)
	ing.exitRunCh = make(chan string)

	// Build the store once, advancing backward in time from the earliest
	// point of coverage.
	go ing.build(rebuild)

	go ing.run()
}

// Stop signals both the build and run goroutines to exit and blocks until both have
// received the signal, then clears the running/stopping flags so Start can be called
// again. A second Stop while stopping is a logged no-op.
func (ing *ingestor) Stop() {
	// If already stopping, log that and return.
	if !ing.isStopping.CompareAndSwap(false, true) {
		log.Infof("CloudCost: ingestor: is already stopping")
		return
	}

	msg := "Stopping"

	// If the processes are running (and thus there are channels available for
	// stopping them) then stop all sub-processes (i.e. build and run)
	var wg sync.WaitGroup

	if ing.exitBuildCh != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ing.exitBuildCh <- msg
		}()
	}

	if ing.exitRunCh != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ing.exitRunCh <- msg
		}()
	}

	wg.Wait()

	// Declare that the store is officially no longer running. This allows
	// Start to be called again, restarting the store from scratch.
	ing.isRunning.Store(false)
	ing.isStopping.Store(false)
}
+
+// Status returns an IngestorStatus that describes the current state of the ingestor
+func (ing *ingestor) Status() IngestorStatus {
+	return IngestorStatus{
+		Created:          ing.creationTime,
+		LastRun:          ing.lastRun,
+		NextRun:          ing.lastRun.Add(ing.config.RefreshRate).UTC(),
+		Runs:             ing.runs,
+		Coverage:         ing.coverage,
+		ConnectionStatus: ing.integration.GetStatus(),
+	}
+}
+
+func (ing *ingestor) build(rebuild bool) {
+	defer errors.HandlePanic()
+
+	// Profile the full Duration of the build time
+	buildStart := time.Now()
+
+	// Build as far back as the configures build Duration
+	limit := kubecost.RoundBack(time.Now().UTC().Add(-ing.config.Duration), ing.config.Resolution)
+
+	queryWindowStr := timeutil.FormatStoreResolution(ing.config.QueryWindow)
+	log.Infof("CloudCost[%s]: ingestor: build[%s]: Starting build back to %s in blocks of %s", ing.key, ing.runID, limit.String(), queryWindowStr)
+
+	// Start with a window of the configured Duration and ending on the given
+	// start time. Build windows repeating until the window reaches the
+	// given limit time
+
+	// Round end times back to nearest Resolution points in the past,
+	// querying for exactly one interval
+	e := kubecost.RoundBack(time.Now().UTC(), ing.config.Resolution)
+	s := e.Add(-ing.config.QueryWindow)
+
+	// Continue until limit is reached
+	for limit.Before(e) {
+		// If exit instruction is received, log and return
+		select {
+		case <-ing.exitBuildCh:
+			log.Debugf("CloudCost[%s]: ingestor: build[%s]: exiting", ing.key, ing.runID)
+			return
+		default:
+		}
+
+		// Profile the current build step
+		stepStart := time.Now()
+
+		// if rebuild is not specified then check for existing coverage on window
+		if rebuild {
+			ing.BuildWindow(s, e)
+		} else {
+			ing.LoadWindow(s, e)
+		}
+
+		log.Infof("CloudCost[%s]: ingestor: build[%s]:  %s in %v", ing.key, ing.runID, kubecost.NewClosedWindow(s, e), time.Since(stepStart))
+
+		// Shift to next QueryWindow
+		s = s.Add(-ing.config.QueryWindow)
+		if s.Before(limit) {
+			s = limit
+		}
+		e = e.Add(-ing.config.QueryWindow)
+	}
+
+	log.Infof(fmt.Sprintf("CloudCost[%s]: ingestor: build[%s]: completed in %v", ing.key, ing.runID, time.Since(buildStart)))
+
+	// In order to be able to Stop, we have to wait on an exit message
+	// here
+	<-ing.exitBuildCh
+
+}
+
// run is the ingestor's periodic refresh loop. On each tick it re-ingests a
// trailing window of recent data (periodically widened to month-to-date),
// expires data older than the configured retention Duration, contracts
// coverage to match, and reschedules itself. It exits when a message arrives
// on exitRunCh.
func (ing *ingestor) run() {
	defer errors.HandlePanic()

	ticker := timeutil.NewJobTicker()
	defer ticker.Close()
	// Fire immediately so the first refresh happens at startup.
	ticker.TickIn(0)

	for {
		// If an exit instruction is received, break the run loop
		select {
		case <-ing.exitRunCh:
			log.Debugf("CloudCost[%s]: ingestor: Run[%s] exiting", ing.key, ing.runID)
			return
		case <-ticker.Ch:
			// Wait for next tick
		}

		// Start from the last covered time, minus the RunWindow
		start := ing.lastRun
		start = start.Add(-ing.config.RunWindow)

		// Every Nth (determined by the MonthToDateRunInterval) run should be a month to date run. Where the start is
		// truncated to the beginning of its current month this can mean that early in a new month we will build all of
		// last month and the first few days of the current month.
		if ing.runs%ing.config.MonthToDateRunInterval == 0 {
			start = time.Date(start.Year(), start.Month(), 1, 0, 0, 0, 0, time.UTC)
			log.Infof("CloudCost[%s]: ingestor: Run[%s]: running month-to-date update starting at %s", ing.key, ing.runID, start.String())
		}

		// Round start time back to the nearest Resolution point in the past from the
		// last update to the QueryWindow
		s := kubecost.RoundBack(start.UTC(), ing.config.Resolution)
		e := s.Add(ing.config.QueryWindow)

		// Start with a window of the configured Duration and starting on the given
		// start time. Do the following, repeating until the window reaches the
		// current time:
		// 1. Instruct builder to build window
		// 2. Move window forward one Resolution
		for time.Now().After(s) {
			profStart := time.Now()
			ing.BuildWindow(s, e)

			log.Debugf("CloudCost[%s]: ingestor: Run[%s]: completed %s in %v", ing.key, ing.runID, kubecost.NewWindow(&s, &e), time.Since(profStart))

			s = s.Add(ing.config.QueryWindow)
			e = e.Add(ing.config.QueryWindow)
			// prevent builds into the future
			if e.After(time.Now().UTC()) {
				e = kubecost.RoundForward(time.Now().UTC(), ing.config.Resolution)
			}

		}
		ing.lastRun = time.Now().UTC()

		// Drop stored data older than the retention limit and shrink the
		// advertised coverage window accordingly.
		limit := kubecost.RoundBack(time.Now().UTC(), ing.config.Resolution).Add(-ing.config.Duration)
		err := ing.repo.Expire(limit)
		if err != nil {
			log.Errorf("CloudCost: Ingestor: failed to expire Data: %s", err)
		}

		ing.coverageLock.Lock()
		ing.coverage = ing.coverage.ContractStart(limit)
		ing.coverageLock.Unlock()

		ing.runs++

		ticker.TickIn(ing.config.RefreshRate)
	}
}
+
+func (ing *ingestor) expandCoverage(window kubecost.Window) {
+	if window.IsOpen() {
+		return
+	}
+	ing.coverageLock.Lock()
+	defer ing.coverageLock.Unlock()
+
+	coverage := ing.coverage.ExpandStart(*window.Start())
+	coverage = coverage.ExpandEnd(*window.End())
+
+	ing.coverage = coverage
+}

+ 96 - 0
pkg/cloudcost/integration.go

@@ -0,0 +1,96 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
// CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
type CloudCostIntegration interface {
	// GetCloudCost returns the CloudCostSetRange for the range between the
	// two given times.
	GetCloudCost(time.Time, time.Time) (*kubecost.CloudCostSetRange, error)
	// GetStatus reports the current connection status of the integration.
	GetStatus() cloud.ConnectionStatus
}
+
// GetIntegrationFromConfig coverts any valid KeyedConfig into the appropriate BillingIntegration if possible.
// Each provider's configuration may arrive at several levels of wrapping
// (raw configuration, querier, or full integration); whatever is given is
// wrapped up into a complete integration. Unrecognized config types yield nil.
func GetIntegrationFromConfig(kc cloud.KeyedConfig) CloudCostIntegration {
	switch keyedConfig := kc.(type) {
	// AthenaIntegration
	case *aws.AthenaConfiguration:
		return &aws.AthenaIntegration{
			AthenaQuerier: aws.AthenaQuerier{
				AthenaConfiguration: *keyedConfig,
			},
		}
	case *aws.AthenaQuerier:
		return &aws.AthenaIntegration{
			AthenaQuerier: *keyedConfig,
		}
	case *aws.AthenaIntegration:
		return keyedConfig
	// BigQueryIntegration
	case *gcp.BigQueryConfiguration:
		return &gcp.BigQueryIntegration{
			BigQueryQuerier: gcp.BigQueryQuerier{
				BigQueryConfiguration: *keyedConfig,
			},
		}
	case *gcp.BigQueryQuerier:
		return &gcp.BigQueryIntegration{
			BigQueryQuerier: *keyedConfig,
		}
	case *gcp.BigQueryIntegration:
		return keyedConfig
	// AzureStorageIntegration
	case *azure.StorageConfiguration:
		return &azure.AzureStorageIntegration{
			AzureStorageBillingParser: azure.AzureStorageBillingParser{
				StorageConnection: azure.StorageConnection{
					StorageConfiguration: *keyedConfig},
			},
		}
	case *azure.StorageConnection:
		return &azure.AzureStorageIntegration{
			AzureStorageBillingParser: azure.AzureStorageBillingParser{
				StorageConnection: *keyedConfig,
			},
		}
	case *azure.AzureStorageBillingParser:
		return &azure.AzureStorageIntegration{
			AzureStorageBillingParser: *keyedConfig,
		}
	case *azure.AzureStorageIntegration:
		return keyedConfig
	// S3SelectIntegration
	case *aws.S3Configuration:
		return &aws.S3SelectIntegration{
			S3SelectQuerier: aws.S3SelectQuerier{
				S3Connection: aws.S3Connection{
					S3Configuration: *keyedConfig,
				},
			},
		}
	case *aws.S3Connection:
		return &aws.S3SelectIntegration{
			S3SelectQuerier: aws.S3SelectQuerier{
				S3Connection: *keyedConfig,
			},
		}
	case *aws.S3SelectQuerier:
		return &aws.S3SelectIntegration{
			S3SelectQuerier: *keyedConfig,
		}
	case *aws.S3SelectIntegration:
		return keyedConfig
	// Alibaba BOA Integration
	// NOTE(review): BOA configs are recognized but produce no integration —
	// presumably not yet implemented; confirm before relying on this.
	case *alibaba.BOAConfiguration:
		return nil
	default:
		return nil
	}
}

+ 103 - 0
pkg/cloudcost/memoryrepository.go

@@ -0,0 +1,103 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"golang.org/x/exp/maps"
+)
+
// MemoryRepository is an implementation of Repository that uses a map keyed on config key and window start along with a
// RWMutex to make it threadsafe
type MemoryRepository struct {
	// rwLock guards all access to data.
	rwLock sync.RWMutex
	// data maps integration key -> window start time (UTC) -> CloudCostSet.
	data map[string]map[time.Time]*kubecost.CloudCostSet
}
+
+func NewMemoryRepository() *MemoryRepository {
+	return &MemoryRepository{
+		data: make(map[string]map[time.Time]*kubecost.CloudCostSet),
+	}
+}
+
+func (m *MemoryRepository) Has(startTime time.Time, billingIntegration string) (bool, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	billingIntegrationData, ok := m.data[billingIntegration]
+	if !ok {
+		return false, nil
+	}
+
+	_, ook := billingIntegrationData[startTime.UTC()]
+	return ook, nil
+}
+
+func (m *MemoryRepository) Get(startTime time.Time, billingIntegration string) (*kubecost.CloudCostSet, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	billingIntegrationData, ok := m.data[billingIntegration]
+	if !ok {
+		return nil, nil
+	}
+
+	ccs, ook := billingIntegrationData[startTime.UTC()]
+	if !ook {
+		return nil, nil
+	}
+	return ccs.Clone(), nil
+}
+
+func (m *MemoryRepository) Keys() ([]string, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	keys := maps.Keys(m.data)
+	return keys, nil
+}
+
+func (m *MemoryRepository) Put(ccs *kubecost.CloudCostSet) error {
+	m.rwLock.Lock()
+	defer m.rwLock.Unlock()
+
+	if ccs == nil {
+		return fmt.Errorf("MemoryRepository: Put: cannot save nil")
+	}
+
+	if ccs.Window.IsOpen() {
+		return fmt.Errorf("MemoryRepository: Put: cloud cost set has invalid window %s", ccs.Window.String())
+	}
+
+	if ccs.Integration == "" {
+		return fmt.Errorf("MemoryRepository: Put: cloud cost set does not have an integration value")
+	}
+
+	if _, ok := m.data[ccs.Integration]; !ok {
+		m.data[ccs.Integration] = make(map[time.Time]*kubecost.CloudCostSet)
+	}
+
+	m.data[ccs.Integration][ccs.Window.Start().UTC()] = ccs
+	return nil
+}
+
+// Expire deletes all items in the map with a start time before the given limit
+func (m *MemoryRepository) Expire(limit time.Time) error {
+	m.rwLock.Lock()
+	defer m.rwLock.Unlock()
+
+	for key, integration := range m.data {
+		for startTime := range integration {
+			if startTime.Before(limit) {
+				delete(integration, startTime)
+			}
+		}
+		// remove integration if it is now empty
+		if len(integration) == 0 {
+			delete(m.data, key)
+		}
+	}
+	return nil
+}

+ 194 - 0
pkg/cloudcost/pipelineservice.go

@@ -0,0 +1,194 @@
+package cloudcost
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/proto"
+)
+
+var protocol = proto.HTTP()
+
// PipelineService exposes CloudCost pipeline controls and diagnostics endpoints
type PipelineService struct {
	ingestionManager *IngestionManager  // drives ingestion for each configured integration
	store            Repository         // repository the ingestors write into
	configController *config.Controller // source of integration configurations and their statuses
}
+
+// NewPipelineService is a constructor for a PipelineService
+func NewPipelineService(repo Repository, ic *config.Controller, ingConf IngestorConfig) *PipelineService {
+	im := NewIngestionManager(ic, repo, ingConf)
+	return &PipelineService{
+		ingestionManager: im,
+		store:            repo,
+		configController: ic,
+	}
+}
+
+// Status merges status values from the config.Controller and the IngestionManager to give a combined view of that state
+// of configs and their ingestion status
+func (dp *PipelineService) Status() []Status {
+	var statuses []Status
+	// Pull config status from the config controller
+	confStatuses := dp.configController.GetStatus()
+	refreshRate := time.Hour * time.Duration(env.GetCloudCostRefreshRateHours())
+	for _, confStat := range confStatuses {
+		var conf cloudconfig.Config
+		var provider string
+		if confStat.Config != nil {
+			conf = confStat.Config.Sanitize()
+			provider = confStat.Config.Provider()
+		}
+
+		var ingestorStatus IngestorStatus
+		if ing, ok := dp.ingestionManager.ingestors[confStat.Key]; ok {
+			ingestorStatus = ing.Status()
+		}
+
+		// These are the statuses
+		status := Status{
+			Key:              confStat.Key,
+			Source:           confStat.Source.String(),
+			Active:           confStat.Active,
+			Valid:            confStat.Valid,
+			Config:           conf,
+			Provider:         provider,
+			ConnectionStatus: ingestorStatus.ConnectionStatus.String(),
+			LastRun:          ingestorStatus.LastRun,
+			NextRun:          ingestorStatus.NextRun,
+			Runs:             ingestorStatus.Runs,
+			Created:          ingestorStatus.Created,
+			Coverage:         ingestorStatus.Coverage.String(),
+			RefreshRate:      refreshRate.String(),
+		}
+		statuses = append(statuses, status)
+	}
+
+	return statuses
+}
+
+// GetCloudCostRebuildHandler creates a handler from a http request which initiates a rebuild of cloud cost pipeline, if an
+// integrationKey is provided then it only rebuilds the specified billing integration
+func (s *PipelineService) GetCloudCostRebuildHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If Reporting Service is nil, always return 501
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		commit := r.URL.Query().Get("commit") == "true" || r.URL.Query().Get("commit") == "1"
+
+		if !commit {
+			protocol.WriteData(w, "Pass parameter 'commit=true' to confirm Cloud Cost rebuild")
+			return
+		}
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		// If no providerKey argument was provider, restart all Cloud Asset Pipelines
+		if integrationKey == "" {
+			s.ingestionManager.RebuildAll()
+			protocol.WriteData(w, "Rebuilding Cloud Usage For All Providers")
+			return
+		} else {
+			err := s.ingestionManager.Rebuild(integrationKey)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, fmt.Sprintf("Rebuilding Cloud Usage For Provider %s", integrationKey))
+			return
+		}
+	}
+}
+
+// GetCloudCostRepairHandler creates a handler from a http request which initiates a repair of cloud cost for a given window, if an
+// integrationKey is provided then it only repairs the specified integration
+func (s *PipelineService) GetCloudCostRepairHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If Reporting Service is nil, always return 501
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Reporting Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		windowStr := r.URL.Query().Get("window")
+
+		var window kubecost.Window
+		if windowStr != "" {
+			win, err := kubecost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
+			if err != nil {
+				http.Error(w, fmt.Sprintf("Invalid parameter: %s", err), http.StatusBadRequest)
+				return
+			}
+			window = win
+		}
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		// If no providerKey argument was provider, restart all Cloud Asset Pipelines
+		if integrationKey == "" {
+			err := s.ingestionManager.RepairAll(*window.Start(), *window.End())
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, "Rebuilding Cloud Usage For All Providers")
+			return
+		} else {
+			err := s.ingestionManager.Repair(integrationKey, *window.Start(), *window.End())
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, fmt.Sprintf("Rebuilding Cloud Usage For Provider %s", integrationKey))
+			return
+		}
+	}
+}
+
+// GetCloudCostStatusHandler creates a handler from a http request which returns a list of the billing integration status
+func (s *PipelineService) GetCloudCostStatusHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If Reporting Service is nil, always return 501
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Reporting Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		protocol.WriteData(w, s.Status())
+	}
+}

+ 89 - 0
pkg/cloudcost/querier.go

@@ -0,0 +1,89 @@
+package cloudcost
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	filter "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
// Querier allows for querying ranges of CloudCost data
type Querier interface {
	// Query executes the given QueryRequest and returns the matching range.
	Query(QueryRequest, context.Context) (*kubecost.CloudCostSetRange, error)
}

// QueryRequest describes a CloudCost range query.
type QueryRequest struct {
	Start       time.Time                 // start of the queried range
	End         time.Time                 // end of the queried range
	AggregateBy []string                  // CloudCost property names to aggregate on
	Accumulate  kubecost.AccumulateOption // accumulation option applied to the result
	Filter      filter.Filter             // optional filter; nil means no filtering
}

// DefaultChartItemsLength the default max number of items for a ViewGraphDataSet
const DefaultChartItemsLength int = 10
+
// ViewQuerier defines a contract for return View types to the QueryService to service the View Api
type ViewQuerier interface {
	// QueryViewGraph returns the chart data for a view query.
	QueryViewGraph(ViewQueryRequest, context.Context) (ViewGraphData, error)
	// QueryViewTotals returns a combined totals row plus a result count.
	QueryViewTotals(ViewQueryRequest, context.Context) (*ViewTableRow, int, error)
	// QueryViewTable returns the table rows for a view query.
	QueryViewTable(ViewQueryRequest, context.Context) (ViewTableRows, error)
}

// ViewQueryRequest extends QueryRequest with view display options.
type ViewQueryRequest struct {
	QueryRequest
	CostMetricName   kubecost.CostMetricName // which cost metric the view reports
	ChartItemsLength int                     // max number of items in a ViewGraphDataSet
	Offset           int                     // pagination offset
	Limit            int                     // pagination limit; presumably 0 disables paging — confirm in ViewQuerier impls
	SortDirection    SortDirection
	SortColumn       SortField
}
+
// SortDirection a string type that acts as an enumeration of possible request options
type SortDirection string

const (
	SortDirectionNone       SortDirection = ""
	SortDirectionAscending  SortDirection = "asc"
	SortDirectionDescending SortDirection = "desc"
)

// ParseSortDirection provides a resilient way to parse one of the enumerated SortDirection types from a string
// or returns an error if it is not able to. Matching is case-insensitive.
func ParseSortDirection(sortDirection string) (SortDirection, error) {
	switch strings.ToLower(sortDirection) {
	case strings.ToLower(string(SortDirectionAscending)):
		return SortDirectionAscending, nil
	case strings.ToLower(string(SortDirectionDescending)):
		return SortDirectionDescending, nil
	}
	// BUG FIX: the error previously referenced CostMetricName (copy-paste).
	return SortDirectionNone, fmt.Errorf("failed to parse a valid SortDirection from '%s'", sortDirection)
}
+
// SortField a string type that acts as an enumeration of possible request options
type SortField string

const (
	SortFieldNone              SortField = ""
	SortFieldName              SortField = "name"
	SortFieldCost              SortField = "cost"
	SortFieldKubernetesPercent SortField = "kubernetesPercent"
)

// ParseSortField provides a resilient way to parse one of the enumerated SortField types from a string
// or returns an error if it is not able to. Matching is case-insensitive.
func ParseSortField(sortColumn string) (SortField, error) {
	switch strings.ToLower(sortColumn) {
	case strings.ToLower(string(SortFieldName)):
		return SortFieldName, nil
	case strings.ToLower(string(SortFieldCost)):
		return SortFieldCost, nil
	case strings.ToLower(string(SortFieldKubernetesPercent)):
		return SortFieldKubernetesPercent, nil
	}
	// BUG FIX: the error previously referenced CostMetricName (copy-paste).
	return SortFieldNone, fmt.Errorf("failed to parse a valid SortField from '%s'", sortColumn)
}

+ 370 - 0
pkg/cloudcost/queryservice.go

@@ -0,0 +1,370 @@
+package cloudcost
+
+import (
+	"encoding/csv"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/julienschmidt/httprouter"
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/filter21/cloudcost"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"go.opentelemetry.io/otel"
+)
+
// tracerName identifies this package to OpenTelemetry when creating tracers.
// BUG FIX: the path previously misspelled "opencost" as "ooencost".
const tracerName = "github.com/opencost/opencost/pkg/cloudcost"

const (
	// csvFormat is the 'format' query-parameter value that selects CSV output.
	csvFormat = "csv"
)
+
// QueryService surfaces endpoints for accessing CloudCost data in raw form or for display in views
type QueryService struct {
	Querier     Querier     // serves raw CloudCost range queries
	ViewQuerier ViewQuerier // serves view (graph/totals/table) queries
}
+
+func NewQueryService(querier Querier, viewQuerier ViewQuerier) *QueryService {
+	return &QueryService{
+		Querier:     querier,
+		ViewQuerier: viewQuerier,
+	}
+}
+
+func (s *QueryService) GetCloudCostHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.Querier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		resp, err := s.Querier.Query(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
+func (s *QueryService) GetCloudCostViewGraphHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewGraphHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		resp, err := s.ViewQuerier.QueryViewGraph(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
// CloudCostViewTotalsResponse is the JSON payload served by the view totals endpoint.
type CloudCostViewTotalsResponse struct {
	NumResults int           `json:"numResults"` // result count reported by QueryViewTotals
	Combined   *ViewTableRow `json:"combined"`   // single row combining all results
}
+
+func (s *QueryService) GetCloudCostViewTotalsHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewTotalsHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		totals, count, err := s.ViewQuerier.QueryViewTotals(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		resp := CloudCostViewTotalsResponse{
+			NumResults: count,
+			Combined:   totals,
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
+func (s *QueryService) GetCloudCostViewTableHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewTableHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		qp := httputil.NewQueryParams(r.URL.Query())
+		format := qp.Get("format", "json")
+		if strings.HasPrefix(format, csvFormat) {
+			w.Header().Set("Content-Type", "text/csv")
+			w.Header().Set("Transfer-Encoding", "chunked")
+		} else {
+			// By default, send JSON
+			w.Header().Set("Content-Type", "application/json")
+		}
+
+		resp, err := s.ViewQuerier.QueryViewTable(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		defer spanResp.End()
+		if format == csvFormat {
+			window := kubecost.NewClosedWindow(request.Start, request.End)
+			writeCloudCostViewTableRowsAsCSV(w, resp, window.String())
+			return
+		}
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+	}
+}
+
+func parseCloudCostRequest(r *http.Request) (*QueryRequest, error) {
+	qp := httputil.NewQueryParams(r.URL.Query())
+
+	windowStr := qp.Get("window", "")
+	if windowStr == "" {
+		return nil, fmt.Errorf("missing require window param")
+	}
+
+	window, err := kubecost.ParseWindowUTC(windowStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid window parameter: %w", err)
+	}
+	if window.IsOpen() {
+		return nil, fmt.Errorf("invalid window parameter: %s", window.String())
+	}
+
+	aggregateByRaw := qp.GetList("aggregate", ",")
+	aggregateBy := []string{}
+	for _, aggBy := range aggregateByRaw {
+		prop, err := ParseCloudCostProperty(aggBy)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing aggregate by %v", err)
+		}
+		aggregateBy = append(aggregateBy, prop)
+	}
+	if len(aggregateBy) == 0 {
+		aggregateBy = []string{
+			kubecost.CloudCostInvoiceEntityIDProp,
+			kubecost.CloudCostAccountIDProp,
+			kubecost.CloudCostProviderProp,
+			kubecost.CloudCostProviderIDProp,
+			kubecost.CloudCostCategoryProp,
+			kubecost.CloudCostServiceProp,
+		}
+	}
+
+	accumulate := kubecost.ParseAccumulate(qp.Get("accumulate", ""))
+
+	var filter filter21.Filter
+	filterString := qp.Get("filter", "")
+	if filterString != "" {
+		parser := cloudcost.NewCloudCostFilterParser()
+		filter, err = parser.Parse(filterString)
+		if err != nil {
+			return nil, fmt.Errorf("Parsing 'filter' parameter: %s", err)
+		}
+	}
+
+	opts := &QueryRequest{
+		Start:       *window.Start(),
+		End:         *window.End(),
+		AggregateBy: aggregateBy,
+		Accumulate:  accumulate,
+		Filter:      filter,
+	}
+
+	return opts, nil
+}
+
+func ParseCloudCostProperty(text string) (string, error) {
+	switch strings.TrimSpace(strings.ToLower(text)) {
+	case strings.ToLower(kubecost.CloudCostInvoiceEntityIDProp):
+		return kubecost.CloudCostInvoiceEntityIDProp, nil
+	case strings.ToLower(kubecost.CloudCostAccountIDProp):
+		return kubecost.CloudCostAccountIDProp, nil
+	case strings.ToLower(kubecost.CloudCostProviderProp):
+		return kubecost.CloudCostProviderProp, nil
+	case strings.ToLower(kubecost.CloudCostProviderIDProp):
+		return kubecost.CloudCostProviderIDProp, nil
+	case strings.ToLower(kubecost.CloudCostCategoryProp):
+		return kubecost.CloudCostCategoryProp, nil
+	case strings.ToLower(kubecost.CloudCostServiceProp):
+		return kubecost.CloudCostServiceProp, nil
+	}
+
+	if strings.HasPrefix(text, "label:") {
+		label := prom.SanitizeLabelName(strings.TrimSpace(strings.TrimPrefix(text, "label:")))
+		return fmt.Sprintf("label:%s", label), nil
+	}
+
+	return "", fmt.Errorf("invalid cloud cost property: %s", text)
+}
+
+func parseCloudCostViewRequest(r *http.Request) (*ViewQueryRequest, error) {
+	qr, err := parseCloudCostRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	qp := httputil.NewQueryParams(r.URL.Query())
+
+	// parse cost metric
+	costMetricName, err := kubecost.ParseCostMetricName(qp.Get("costMetric", string(kubecost.CostMetricAmortizedNetCost)))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing 'costMetric': %w", err)
+	}
+
+	limit := qp.GetInt("limit", 0)
+	offset := qp.GetInt("offset", 0)
+
+	// parse order
+	order, err := ParseSortDirection(qp.Get("sortByOrder", "desc"))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing 'sortByOrder: %w", err)
+	}
+
+	sortColumn, err := ParseSortField(qp.Get("sortBy", "cost"))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing 'sortBy': %w", err)
+	}
+
+	return &ViewQueryRequest{
+		QueryRequest:     *qr,
+		CostMetricName:   costMetricName,
+		ChartItemsLength: DefaultChartItemsLength,
+		Limit:            limit,
+		Offset:           offset,
+		SortDirection:    order,
+		SortColumn:       sortColumn,
+	}, nil
+}
+
+// CloudCostViewTableRowsToCSV takes the csv writer and writes the ViewTableRows into the writer.
+func CloudCostViewTableRowsToCSV(writer *csv.Writer, ctr ViewTableRows, window string) error {
+	defer writer.Flush()
+	// Write the column headers
+	headers := []string{
+		"Name",
+		"K8s Utilization",
+		"Total",
+		"Window",
+	}
+	err := writer.Write(headers)
+	if err != nil {
+		return fmt.Errorf("CloudCostViewTableRowsToCSV: failed to convert ViewTableRows to csv with error: %w", err)
+	}
+
+	// Write one row per entry in the ViewTableRows
+	for _, row := range ctr {
+		err = writer.Write([]string{
+			row.Name,
+			fmt.Sprintf("%.3f", row.KubernetesPercent),
+			fmt.Sprintf("%.3f", row.Cost),
+			window,
+		})
+		if err != nil {
+			return fmt.Errorf("CloudCostViewTableRowsToCSV: failed to convert ViewTableRows to csv with error: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func writeCloudCostViewTableRowsAsCSV(w http.ResponseWriter, ctr ViewTableRows, window string) {
+	writer := csv.NewWriter(w)
+
+	err := CloudCostViewTableRowsToCSV(writer, ctr, window)
+	if err != nil {
+		protocol.WriteError(w, protocol.InternalServerError(err.Error()))
+		return
+	}
+}

+ 16 - 0
pkg/cloudcost/repository.go

@@ -0,0 +1,16 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// Repository is an interface for storing and retrieving CloudCost data
+type Repository interface {
+	Has(time.Time, string) (bool, error)
+	Get(time.Time, string) (*kubecost.CloudCostSet, error)
+	Keys() ([]string, error)
+	Put(*kubecost.CloudCostSet) error
+	Expire(time.Time) error
+}

+ 229 - 0
pkg/cloudcost/repositoryquerier.go

@@ -0,0 +1,229 @@
+package cloudcost
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// RepositoryQuerier is an implementation of Querier and ViewQuerier which pulls directly from a Repository
+type RepositoryQuerier struct {
+	repo Repository
+}
+
+func NewRepositoryQuerier(repo Repository) *RepositoryQuerier {
+	return &RepositoryQuerier{repo: repo}
+}
+
+func (rq *RepositoryQuerier) Query(request QueryRequest, ctx context.Context) (*kubecost.CloudCostSetRange, error) {
+	repoKeys, err := rq.repo.Keys()
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to get list of keys from repository: %w", err)
+	}
+
+	// create filter
+	compiler := kubecost.NewCloudCostMatchCompiler()
+	matcher, err := compiler.Compile(request.Filter)
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to compile filters: %w", err)
+	}
+
+	// Create a Cloud Cost Set Range in the resolution of the repository
+	ccsr, err := kubecost.NewCloudCostSetRange(request.Start, request.End, kubecost.AccumulateOptionDay, "")
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to create Cloud Cost Set Range: %w", err)
+	}
+	for _, cloudCostSet := range ccsr.CloudCostSets {
+		// Setting AggregationProperties determines how CloudCosts are keyed and aggregated on Insert
+		cloudCostSet.AggregationProperties = request.AggregateBy
+		for _, key := range repoKeys {
+			ccs, err := rq.repo.Get(cloudCostSet.Window.Start().UTC(), key)
+			if err != nil {
+				log.Errorf("RepositoryQuerier: Query: %s", err.Error())
+				continue
+			}
+			if ccs == nil {
+				continue
+			}
+
+			for _, cc := range ccs.CloudCosts {
+				if matcher.Matches(cc) {
+					cloudCostSet.Insert(cc)
+				}
+			}
+		}
+	}
+
+	if request.Accumulate != kubecost.AccumulateOptionNone {
+		ccsr, err = ccsr.Accumulate(request.Accumulate)
+		if err != nil {
+			return nil, fmt.Errorf("RepositoryQuerier: Query: error accumulating: %w", err)
+		}
+	}
+
+	return ccsr, nil
+}
+
+func (rq *RepositoryQuerier) QueryViewGraph(request ViewQueryRequest, ctx context.Context) (ViewGraphData, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewGraph: query failed: %w", err)
+	}
+
+	if ccasr.IsEmpty() {
+		return make([]*ViewGraphDataSet, 0), nil
+	}
+	var sets ViewGraphData
+	for _, ccas := range ccasr.CloudCostSets {
+		items := make([]ViewGraphDataSetItem, 0)
+
+		for key, cc := range ccas.CloudCosts {
+			costMetric, err := cc.GetCostMetric(request.CostMetricName)
+			if err != nil {
+				return nil, fmt.Errorf("QueryViewGraph: failed to get cost metric: %w", err)
+			}
+			items = append(items, ViewGraphDataSetItem{
+				Name:  key,
+				Value: costMetric.Cost,
+			})
+		}
+		sort.SliceStable(items, func(i, j int) bool {
+			return items[i].Value > items[j].Value
+		})
+
+		if len(items) > request.ChartItemsLength {
+			otherItems := items[request.ChartItemsLength:]
+			newItems := items[:request.ChartItemsLength]
+			// Rename last item other and add all other values into it
+			newItems[request.ChartItemsLength-1].Name = "Other"
+			for _, item := range otherItems {
+				newItems[request.ChartItemsLength-1].Value += item.Value
+			}
+			items = newItems
+		}
+
+		sets = append(sets, &ViewGraphDataSet{
+			Start: *ccas.Window.Start(),
+			End:   *ccas.Window.End(),
+			Items: items,
+		})
+	}
+	return sets, nil
+}
+
+func (rq *RepositoryQuerier) QueryViewTotals(request ViewQueryRequest, ctx context.Context) (*ViewTableRow, int, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: query failed: %w", err)
+	}
+	acc, err := ccasr.AccumulateAll()
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: accumulate failed: %w", err)
+	}
+	if acc.IsEmpty() {
+		return nil, 0, nil
+	}
+	count := len(acc.CloudCosts)
+
+	total, err := acc.Aggregate([]string{})
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: aggregate total failed: %w", err)
+	}
+
+	if total.IsEmpty() {
+		return nil, -1, fmt.Errorf("QueryViewTotals: aggregation produced an empty total")
+	}
+
+	if len(total.CloudCosts) != 1 {
+		return nil, -1, fmt.Errorf("QueryViewTotals: expected exactly 1 aggregated total, got %d", len(total.CloudCosts))
+	}
+
+	cm, err := total.CloudCosts[""].GetCostMetric(request.CostMetricName)
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: failed to retrieve cost metric: %w", err)
+	}
+	return &ViewTableRow{
+		Name:              "Totals",
+		KubernetesPercent: cm.KubernetesPercent,
+		Cost:              cm.Cost,
+	}, count, nil
+}
+
+func (rq *RepositoryQuerier) QueryViewTable(request ViewQueryRequest, ctx context.Context) (ViewTableRows, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewTable: query failed: %w", err)
+	}
+	acc, err := ccasr.AccumulateAll()
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewTable: accumulate failed: %w", err)
+	}
+
+	var rows ViewTableRows
+	for key, cloudCost := range acc.CloudCosts {
+		costMetric, err2 := cloudCost.GetCostMetric(request.CostMetricName)
+		if err2 != nil {
+			return nil, fmt.Errorf("QueryViewTable: failed to retrieve cost metric: %w", err2)
+		}
+		vtr := &ViewTableRow{
+			Name:              key,
+			KubernetesPercent: costMetric.KubernetesPercent,
+			Cost:              costMetric.Cost,
+		}
+		rows = append(rows, vtr)
+	}
+	// Sort Results
+
+	// Sort by Name to ensure consistent return
+	sort.SliceStable(rows, func(i, j int) bool {
+		return rows[i].Name > rows[j].Name
+	})
+
+	switch request.SortColumn {
+	case SortFieldName:
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Name < rows[j].Name
+			})
+		}
+
+	case SortFieldCost:
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Cost < rows[j].Cost
+			})
+		} else {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Cost > rows[j].Cost
+			})
+		}
+	case SortFieldKubernetesPercent:
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].KubernetesPercent < rows[j].KubernetesPercent
+			})
+		} else {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].KubernetesPercent > rows[j].KubernetesPercent
+			})
+		}
+
+	default:
+		return nil, fmt.Errorf("invalid sort field '%s'", string(request.SortColumn))
+	}
+
+	// paginate sorted results
+	if request.Offset > len(rows) {
+		return make([]*ViewTableRow, 0), nil
+	}
+
+	limit := request.Offset + request.Limit
+	if request.Limit == 0 || limit > len(rows) {
+		return rows[request.Offset:], nil
+	}
+
+	return rows[request.Offset:limit], nil
+}

+ 24 - 0
pkg/cloudcost/status.go

@@ -0,0 +1,24 @@
+package cloudcost
+
+import (
+	"time"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+)
+
+// Status gives the details and metadata of a CloudCost integration
+type Status struct {
+	Key              string             `json:"key"`
+	Source           string             `json:"source"`
+	Provider         string             `json:"provider"`
+	Active           bool               `json:"active"`
+	Valid            bool               `json:"valid"`
+	LastRun          time.Time          `json:"lastRun"`
+	NextRun          time.Time          `json:"nextRun"`
+	RefreshRate      string             `json:"refreshRate"`
+	Created          time.Time          `json:"created"`
+	Runs             int                `json:"runs"`
+	Coverage         string             `json:"coverage"`
+	ConnectionStatus string             `json:"connectionStatus"`
+	Config           cloudconfig.Config `json:"config"`
+}

+ 107 - 0
pkg/cloudcost/view.go

@@ -0,0 +1,107 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/util/mathutil"
+)
+
+// View serves data to the Cloud Cost front end, in the
+// structure it requires (i.e. a graph and a table).
+type View struct {
+	GraphData  ViewGraphData `json:"graphData"`
+	TableTotal *ViewTableRow `json:"tableTotal"`
+	TableRows  ViewTableRows `json:"tableRows"`
+}
+
+type ViewTableRows []*ViewTableRow
+
+func (vtrs ViewTableRows) Equal(that ViewTableRows) bool {
+	if len(vtrs) != len(that) {
+		return false
+	}
+
+	for i := 0; i < len(vtrs); i++ {
+		if !vtrs[i].Equal(that[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type ViewTableRow struct {
+	Name              string  `json:"name"`
+	KubernetesPercent float64 `json:"kubernetesPercent"`
+	Cost              float64 `json:"cost"`
+}
+
+func (vtr *ViewTableRow) Equal(that *ViewTableRow) bool {
+	if vtr.Name != that.Name {
+		return false
+	}
+
+	if !mathutil.Approximately(vtr.KubernetesPercent, that.KubernetesPercent) {
+		return false
+	}
+
+	if !mathutil.Approximately(vtr.Cost, that.Cost) {
+		return false
+	}
+
+	return true
+}
+
+type ViewGraphData []*ViewGraphDataSet
+
+func (vgd ViewGraphData) Equal(that ViewGraphData) bool {
+	if len(vgd) != len(that) {
+		return false
+	}
+
+	for i := 0; i < len(vgd); i++ {
+		if !vgd[i].Equal(that[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type ViewGraphDataSet struct {
+	Start time.Time              `json:"start"`
+	End   time.Time              `json:"end"`
+	Items []ViewGraphDataSetItem `json:"items"`
+}
+
+// NOTE: does not compare start and end times, just that the items are equal
+func (vgds *ViewGraphDataSet) Equal(that *ViewGraphDataSet) bool {
+	if len(vgds.Items) != len(that.Items) {
+		return false
+	}
+
+	for i := 0; i < len(vgds.Items); i++ {
+		if !vgds.Items[i].Equal(that.Items[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type ViewGraphDataSetItem struct {
+	Name  string  `json:"name"`
+	Value float64 `json:"value"`
+}
+
+func (vgdsi ViewGraphDataSetItem) Equal(that ViewGraphDataSetItem) bool {
+	if vgdsi.Name != that.Name {
+		return false
+	}
+
+	if !mathutil.Approximately(vgdsi.Value, that.Value) {
+		return false
+	}
+
+	return true
+}

+ 18 - 0
pkg/cmd/costmodel/costmodel.go

@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/cloudcost"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"
 
@@ -39,11 +40,28 @@ func Execute(opts *CostModelOpts) error {
 		log.Errorf("couldn't start CSV export worker: %v", err)
 	}
 
+	if env.IsCloudCostEnabled() {
+		repo := cloudcost.NewMemoryRepository()
+		a.CloudCostPipelineService = cloudcost.NewPipelineService(repo, a.CloudConfigController, cloudcost.DefaultIngestorConfiguration())
+		repoQuerier := cloudcost.NewRepositoryQuerier(repo)
+		a.CloudCostQueryService = cloudcost.NewQueryService(repoQuerier, repoQuerier)
+	}
+
 	rootMux := http.NewServeMux()
 	a.Router.GET("/healthz", Healthz)
 	a.Router.GET("/allocation", a.ComputeAllocationHandler)
 	a.Router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
 	a.Router.GET("/assets", a.ComputeAssetsHandler)
+
+	a.Router.GET("/cloudCost", a.CloudCostQueryService.GetCloudCostHandler())
+	a.Router.GET("/cloudCost/view/graph", a.CloudCostQueryService.GetCloudCostViewGraphHandler())
+	a.Router.GET("/cloudCost/view/totals", a.CloudCostQueryService.GetCloudCostViewTotalsHandler())
+	a.Router.GET("/cloudCost/view/table", a.CloudCostQueryService.GetCloudCostViewTableHandler())
+
+	a.Router.GET("/cloudCost/status", a.CloudCostPipelineService.GetCloudCostStatusHandler())
+	a.Router.GET("/cloudCost/rebuild", a.CloudCostPipelineService.GetCloudCostRebuildHandler())
+	a.Router.GET("/cloudCost/repair", a.CloudCostPipelineService.GetCloudCostRepairHandler())
+
 	rootMux.Handle("/", a.Router)
 	rootMux.Handle("/metrics", promhttp.Handler())
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)

+ 47 - 35
pkg/costmodel/router.go

@@ -17,8 +17,10 @@ import (
 
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/pkg/cloud/aws"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/cloudcost"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
@@ -82,23 +84,26 @@ var (
 // Accesses defines a singleton application instance, providing access to
 // Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	Router              *httprouter.Router
-	PrometheusClient    prometheus.Client
-	ThanosClient        prometheus.Client
-	KubeClientSet       kubernetes.Interface
-	ClusterCache        clustercache.ClusterCache
-	ClusterMap          clusters.ClusterMap
-	CloudProvider       models.Provider
-	ConfigFileManager   *config.ConfigFileManager
-	ClusterInfoProvider clusters.ClusterInfoProvider
-	Model               *CostModel
-	MetricsEmitter      *CostModelMetricsEmitter
-	OutOfClusterCache   *cache.Cache
-	AggregateCache      *cache.Cache
-	CostDataCache       *cache.Cache
-	ClusterCostsCache   *cache.Cache
-	CacheExpiration     map[time.Duration]time.Duration
-	AggAPI              Aggregator
+	Router                   *httprouter.Router
+	PrometheusClient         prometheus.Client
+	ThanosClient             prometheus.Client
+	KubeClientSet            kubernetes.Interface
+	ClusterCache             clustercache.ClusterCache
+	ClusterMap               clusters.ClusterMap
+	CloudProvider            models.Provider
+	ConfigFileManager        *config.ConfigFileManager
+	CloudConfigController    *cloudconfig.Controller
+	CloudCostPipelineService *cloudcost.PipelineService
+	CloudCostQueryService    *cloudcost.QueryService
+	ClusterInfoProvider      clusters.ClusterInfoProvider
+	Model                    *CostModel
+	MetricsEmitter           *CostModelMetricsEmitter
+	OutOfClusterCache        *cache.Cache
+	AggregateCache           *cache.Cache
+	CostDataCache            *cache.Cache
+	ClusterCostsCache        *cache.Cache
+	CacheExpiration          map[time.Duration]time.Duration
+	AggAPI                   Aggregator
 	// SettingsCache stores current state of app settings
 	SettingsCache *cache.Cache
 	// settingsSubscribers tracks channels through which changes to different
@@ -1714,25 +1719,27 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{
-		Router:              httprouter.New(),
-		PrometheusClient:    promCli,
-		ThanosClient:        thanosClient,
-		KubeClientSet:       kubeClientset,
-		ClusterCache:        k8sCache,
-		ClusterMap:          clusterMap,
-		CloudProvider:       cloudProvider,
-		ConfigFileManager:   confManager,
-		ClusterInfoProvider: clusterInfoProvider,
-		Model:               costModel,
-		MetricsEmitter:      metricsEmitter,
-		AggregateCache:      aggregateCache,
-		CostDataCache:       costDataCache,
-		ClusterCostsCache:   clusterCostsCache,
-		OutOfClusterCache:   outOfClusterCache,
-		SettingsCache:       settingsCache,
-		CacheExpiration:     cacheExpiration,
-		httpServices:        services.NewCostModelServices(),
+		Router:                httprouter.New(),
+		PrometheusClient:      promCli,
+		ThanosClient:          thanosClient,
+		KubeClientSet:         kubeClientset,
+		ClusterCache:          k8sCache,
+		ClusterMap:            clusterMap,
+		CloudProvider:         cloudProvider,
+		CloudConfigController: cloudconfig.NewController(cloudProvider),
+		ConfigFileManager:     confManager,
+		ClusterInfoProvider:   clusterInfoProvider,
+		Model:                 costModel,
+		MetricsEmitter:        metricsEmitter,
+		AggregateCache:        aggregateCache,
+		CostDataCache:         costDataCache,
+		ClusterCostsCache:     clusterCostsCache,
+		OutOfClusterCache:     outOfClusterCache,
+		SettingsCache:         settingsCache,
+		CacheExpiration:       cacheExpiration,
+		httpServices:          services.NewCostModelServices(),
 	}
+
 	// Use the Accesses instance, itself, as the CostModelAggregator. This is
 	// confusing and unconventional, but necessary so that we can swap it
 	// out for the ETL-adapted version elsewhere.
@@ -1811,6 +1818,11 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	a.Router.GET("/logs/level", a.GetLogLevel)
 	a.Router.POST("/logs/level", a.SetLogLevel)
 
+	a.Router.GET("/cloud/config/export", a.CloudConfigController.GetExportConfigHandler())
+	a.Router.GET("/cloud/config/enable", a.CloudConfigController.GetEnableConfigHandler())
+	a.Router.GET("/cloud/config/disable", a.CloudConfigController.GetDisableConfigHandler())
+	a.Router.GET("/cloud/config/delete", a.CloudConfigController.GetDeleteConfigHandler())
+
 	a.httpServices.RegisterAll(a.Router)
 
 	return a

+ 32 - 0
pkg/env/costmodelenv.go

@@ -107,6 +107,14 @@ const (
 	ExportCSVLabelsList = "EXPORT_CSV_LABELS_LIST"
 	ExportCSVLabelsAll  = "EXPORT_CSV_LABELS_ALL"
 	ExportCSVMaxDays    = "EXPORT_CSV_MAX_DAYS"
+
+	DataRetentionDailyResolutionDaysEnvVar = "DATA_RETENTION_DAILY_RESOLUTION_DAYS"
+
+	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
+	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
+	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
+	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
+	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
 )
 
 const DefaultConfigMountPath = "/var/configs"
@@ -608,3 +616,27 @@ func GetRegionOverrideList() []string {
 
 	return regionList
 }
+
+func GetDataRetentionDailyResolutionDays() int64 {
+	return GetInt64(DataRetentionDailyResolutionDaysEnvVar, 15)
+}
+
+func IsCloudCostEnabled() bool {
+	return GetBool(CloudCostEnabledEnvVar, false)
+}
+
+func GetCloudCostMonthToDateInterval() int {
+	return GetInt(CloudCostMonthToDateIntervalVar, 6)
+}
+
+func GetCloudCostRefreshRateHours() int64 {
+	return GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
+}
+
+func GetCloudCostQueryWindowDays() int64 {
+	return GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCloudCostRunWindowDays() int64 {
+	return GetInt64(CloudCostRunWindowDaysEnvVar, 3)
+}

+ 190 - 63
pkg/kubecost/cloudcost.go

@@ -9,6 +9,7 @@ import (
 	filter21 "github.com/opencost/opencost/pkg/filter21"
 	"github.com/opencost/opencost/pkg/filter21/ast"
 	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 // CloudCost represents a CUR line item, identifying a cloud resource and
@@ -131,17 +132,17 @@ func (cc *CloudCost) StringMapProperty(property string) (map[string]string, erro
 	}
 }
 
-func (cc *CloudCost) GetCostMetric(costMetricName string) (CostMetric, error) {
+func (cc *CloudCost) GetCostMetric(costMetricName CostMetricName) (CostMetric, error) {
 	switch costMetricName {
-	case ListCostMetric:
+	case CostMetricListCost:
 		return cc.ListCost, nil
-	case NetCostMetric:
+	case CostMetricNetCost:
 		return cc.NetCost, nil
-	case AmortizedNetCostMetric:
+	case CostMetricAmortizedNetCost:
 		return cc.AmortizedNetCost, nil
-	case InvoicedCostMetric:
+	case CostMetricInvoicedCost:
 		return cc.InvoicedCost, nil
-	case AmortizedCostMetric:
+	case CostMetricAmortizedCost:
 		return cc.AmortizedCost, nil
 	}
 	return CostMetric{}, fmt.Errorf("invalid Cost Metric: %s", costMetricName)
@@ -371,9 +372,12 @@ func (ccs *CloudCostSet) Clone() *CloudCostSet {
 
 // cloneSet creates a copy of the receiver without any of its CloudCosts
 func (ccs *CloudCostSet) cloneSet() *CloudCostSet {
-	aggProps := make([]string, len(ccs.AggregationProperties))
-	for i, v := range ccs.AggregationProperties {
-		aggProps[i] = v
+	var aggProps []string
+	if ccs.AggregationProperties != nil {
+		aggProps = make([]string, len(ccs.AggregationProperties))
+		for i, v := range ccs.AggregationProperties {
+			aggProps[i] = v
+		}
 	}
 	return &CloudCostSet{
 		CloudCosts:            make(map[string]*CloudCost),
@@ -443,8 +447,8 @@ type CloudCostSetRange struct {
 
 // NewCloudCostSetRange create a CloudCostSetRange containing CloudCostSets with windows of equal duration
 // the duration between start and end must be divisible by the window duration argument
-func NewCloudCostSetRange(start time.Time, end time.Time, window time.Duration, integration string) (*CloudCostSetRange, error) {
-	windows, err := GetWindows(start, end, window)
+func NewCloudCostSetRange(start time.Time, end time.Time, accumOpt AccumulateOption, integration string) (*CloudCostSetRange, error) {
+	windows, err := NewClosedWindow(start.UTC(), end.UTC()).GetAccumulateWindows(accumOpt)
 	if err != nil {
 		return nil, err
 	}
@@ -457,7 +461,6 @@ func NewCloudCostSetRange(start time.Time, end time.Time, window time.Duration,
 		cloudCostItemSets[i] = ccs
 	}
 	return &CloudCostSetRange{
-		Window:        NewWindow(&start, &end),
 		CloudCostSets: cloudCostItemSets,
 	}, nil
 }
@@ -468,7 +471,6 @@ func (ccsr *CloudCostSetRange) Clone() *CloudCostSetRange {
 		ccsSlice[i] = ccs.Clone()
 	}
 	return &CloudCostSetRange{
-		Window:        ccsr.Window.Clone(),
 		CloudCostSets: ccsSlice,
 	}
 }
@@ -482,12 +484,20 @@ func (ccsr *CloudCostSetRange) IsEmpty() bool {
 	return true
 }
 
-// Accumulate sums each CloudCostSet in the given range, returning a single cumulative
+// accumulate sums each CloudCostSet in the given range, returning a single cumulative
 // CloudCostSet for the entire range.
-func (ccsr *CloudCostSetRange) Accumulate() (*CloudCostSet, error) {
+func (ccsr *CloudCostSetRange) AccumulateAll() (*CloudCostSet, error) {
 	var cloudCostSet *CloudCostSet
 	var err error
 
+	if ccsr == nil {
+		return nil, fmt.Errorf("nil CloudCostSetRange in accumulation")
+	}
+
+	if len(ccsr.CloudCostSets) == 0 {
+		return nil, fmt.Errorf("CloudCostSetRange has empty CloudCostSet in accumulation")
+	}
+
 	for _, ccs := range ccsr.CloudCostSets {
 		if cloudCostSet == nil {
 			cloudCostSet = ccs.Clone()
@@ -502,6 +512,171 @@ func (ccsr *CloudCostSetRange) Accumulate() (*CloudCostSet, error) {
 	return cloudCostSet, nil
 }
 
+// Accumulate sums CloudCostSets based on the AccumulateOption (calendar week or calendar month).
+// The accumulated set is determined by the start of the window of the allocation set.
+func (ccsr *CloudCostSetRange) Accumulate(accumulateBy AccumulateOption) (*CloudCostSetRange, error) {
+	switch accumulateBy {
+	case AccumulateOptionNone:
+		return ccsr.accumulateByNone()
+	case AccumulateOptionAll:
+		return ccsr.accumulateByAll()
+	case AccumulateOptionHour:
+		return ccsr.accumulateByHour()
+	case AccumulateOptionDay:
+		return ccsr.accumulateByDay()
+	case AccumulateOptionWeek:
+		return ccsr.accumulateByWeek()
+	case AccumulateOptionMonth:
+		return ccsr.accumulateByMonth()
+	default:
+		// ideally, this should never happen
+		return nil, fmt.Errorf("unexpected error, invalid accumulateByType: %s", accumulateBy)
+	}
+}
+
+func (ccsr *CloudCostSetRange) accumulateByAll() (*CloudCostSetRange, error) {
+
+	ccs, err := ccsr.AccumulateAll()
+	if err != nil {
+		return nil, fmt.Errorf("error accumulating all: %w", err)
+	}
+
+	accumulated := &CloudCostSetRange{
+		CloudCostSets: []*CloudCostSet{ccs},
+	}
+	return accumulated, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByNone() (*CloudCostSetRange, error) {
+	return ccsr.Clone(), nil
+}
+func (ccsr *CloudCostSetRange) accumulateByHour() (*CloudCostSetRange, error) {
+	// ensure that the summary allocation sets have a 1-hour window, if a set exists
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() != time.Hour {
+		return nil, fmt.Errorf("window duration must equal 1 hour; got:%s", ccsr.CloudCostSets[0].Window.Duration())
+	}
+
+	return ccsr.Clone(), nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByDay() (*CloudCostSetRange, error) {
+	// if the allocation set window is 1-day, just return the existing allocation set range
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() == time.Hour*24 {
+		return ccsr, nil
+	}
+
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, ccs := range ccsr.CloudCostSets {
+
+		if ccs.Window.Duration() != time.Hour {
+			return nil, fmt.Errorf("window duration must equal 1 hour; got:%s", ccs.Window.Duration())
+		}
+
+		hour := ccs.Window.Start().Hour()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			ccs = ccs.Clone()
+		}
+
+		toAccumulate.Append(ccs)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		if hour == 23 || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByWeek() (*CloudCostSetRange, error) {
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() == timeutil.Week {
+		return ccsr, nil
+	}
+
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, css := range ccsr.CloudCostSets {
+		if css.Window.Duration() != time.Hour*24 {
+			return nil, fmt.Errorf("window duration must equal 24 hours; got:%s", css.Window.Duration())
+		}
+
+		dayOfWeek := css.Window.Start().Weekday()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			css = css.Clone()
+		}
+
+		toAccumulate.Append(css)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		// current assumption is the week always ends on Saturday, or there are no more allocation sets
+		if dayOfWeek == time.Saturday || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByMonth() (*CloudCostSetRange, error) {
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, css := range ccsr.CloudCostSets {
+		if css.Window.Duration() != time.Hour*24 {
+			return nil, fmt.Errorf("window duration must equal 24 hours; got:%s", css.Window.Duration())
+		}
+
+		_, month, _ := css.Window.Start().Date()
+		_, nextDayMonth, _ := css.Window.Start().Add(time.Hour * 24).Date()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			css = css.Clone()
+		}
+
+		toAccumulate.Append(css)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		// either the month has ended, or there are no more allocation sets
+		if month != nextDayMonth || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+// Append appends the given CloudCostSet to the end of the range. It does not
+// validate whether or not that violates window continuity.
+func (ccsr *CloudCostSetRange) Append(that *CloudCostSet) {
+	ccsr.CloudCostSets = append(ccsr.CloudCostSets, that)
+}
+
 // LoadCloudCost loads CloudCosts into existing CloudCostSets of the CloudCostSetRange.
 // This function service to aggregate and distribute costs over predefined windows
 // are accumulated here so that the resulting CloudCost with the 1d window has the correct price for the entire day.
@@ -554,51 +729,3 @@ func (ccsr *CloudCostSetRange) LoadCloudCost(cloudCost *CloudCost) {
 		}
 	}
 }
-
-const (
-	ListCostMetric         string = "ListCost"
-	NetCostMetric          string = "NetCost"
-	AmortizedNetCostMetric string = "AmortizedNetCost"
-	InvoicedCostMetric     string = "InvoicedCost"
-	AmortizedCostMetric    string = "AmortizedCost"
-)
-
-type CostMetric struct {
-	Cost              float64 `json:"cost"`
-	KubernetesPercent float64 `json:"kubernetesPercent"`
-}
-
-func (cm CostMetric) Equal(that CostMetric) bool {
-	return cm.Cost == that.Cost && cm.KubernetesPercent == that.KubernetesPercent
-}
-
-func (cm CostMetric) Clone() CostMetric {
-	return CostMetric{
-		Cost:              cm.Cost,
-		KubernetesPercent: cm.KubernetesPercent,
-	}
-}
-
-func (cm CostMetric) add(that CostMetric) CostMetric {
-	// Compute KubernetesPercent for sum
-	k8sPct := 0.0
-	sumCost := cm.Cost + that.Cost
-	if sumCost > 0.0 {
-		thisK8sCost := cm.Cost * cm.KubernetesPercent
-		thatK8sCost := that.Cost * that.KubernetesPercent
-		k8sPct = (thisK8sCost + thatK8sCost) / sumCost
-	}
-
-	return CostMetric{
-		Cost:              sumCost,
-		KubernetesPercent: k8sPct,
-	}
-}
-
-// percent returns the product of the given percent and the cost, KubernetesPercent remains the same
-func (cm CostMetric) percent(pct float64) CostMetric {
-	return CostMetric{
-		Cost:              cm.Cost * pct,
-		KubernetesPercent: cm.KubernetesPercent,
-	}
-}

+ 1 - 1
pkg/kubecost/cloudcost_test.go

@@ -28,7 +28,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 	end := RoundBack(time.Now().UTC(), timeutil.Day)
 	start := end.Add(-3 * timeutil.Day)
 	dayWindows, _ := GetWindows(start, end, timeutil.Day)
-	emtpyCCSR, _ := NewCloudCostSetRange(start, end, timeutil.Day, "integration")
+	emtpyCCSR, _ := NewCloudCostSetRange(start, end, AccumulateOptionDay, "integration")
 	testCases := map[string]struct {
 		cc       []*CloudCost
 		ccsr     *CloudCostSetRange

+ 14 - 3
pkg/kubecost/cloudcostprops.go

@@ -1,7 +1,6 @@
 package kubecost
 
 import (
-	"fmt"
 	"strings"
 
 	"github.com/opencost/opencost/pkg/log"
@@ -156,10 +155,22 @@ func (ccp *CloudCostProperties) Intersection(that *CloudCostProperties) *CloudCo
 	return intersectionCCP
 }
 
+var cloudCostDefaultKeyProperties = []string{
+	CloudCostProviderProp,
+	CloudCostInvoiceEntityIDProp,
+	CloudCostAccountIDProp,
+	CloudCostCategoryProp,
+	CloudCostServiceProp,
+	CloudCostProviderIDProp,
+}
+
+// GenerateKey takes a list of properties and creates a "/" separated key based on the values of the requested properties.
+// Invalid values are ignored with a warning. A nil input returns the default key, while an empty slice returns the empty string.
 func (ccp *CloudCostProperties) GenerateKey(props []string) string {
 
-	if len(props) == 0 {
-		return fmt.Sprintf("%s/%s/%s/%s/%s/%s", ccp.Provider, ccp.InvoiceEntityID, ccp.AccountID, ccp.Category, ccp.Service, ccp.ProviderID)
+	// nil props replaced with default property list
+	if props == nil {
+		props = cloudCostDefaultKeyProperties
 	}
 
 	values := make([]string, len(props))

+ 77 - 0
pkg/kubecost/costmetric.go

@@ -0,0 +1,77 @@
+package kubecost
+
+import (
+	"fmt"
+	"strings"
+)
+
+// CostMetricName a string type that acts as an enumeration of possible CostMetric options
+type CostMetricName string
+
+const (
+	// CostMetricNone is the zero value, indicating no/unknown cost metric.
+	CostMetricNone             CostMetricName = ""
+	CostMetricListCost         CostMetricName = "listCost"
+	CostMetricNetCost          CostMetricName = "netCost"
+	CostMetricAmortizedNetCost CostMetricName = "amortizedNetCost"
+	CostMetricInvoicedCost     CostMetricName = "invoicedCost"
+	CostMetricAmortizedCost    CostMetricName = "amortizedCost"
+)
+
+// ParseCostMetricName provides a resilient way to parse one of the enumerated CostMetricName types from a string
+// or throws an error if it is not able to. Matching is case-insensitive. Note that the empty string is not
+// accepted: it returns CostMetricNone together with an error.
+func ParseCostMetricName(costMetric string) (CostMetricName, error) {
+	switch strings.ToLower(costMetric) {
+	case strings.ToLower(string(CostMetricListCost)):
+		return CostMetricListCost, nil
+	case strings.ToLower(string(CostMetricAmortizedCost)):
+		return CostMetricAmortizedCost, nil
+	case strings.ToLower(string(CostMetricAmortizedNetCost)):
+		return CostMetricAmortizedNetCost, nil
+	case strings.ToLower(string(CostMetricNetCost)):
+		return CostMetricNetCost, nil
+	case strings.ToLower(string(CostMetricInvoicedCost)):
+		return CostMetricInvoicedCost, nil
+	}
+	return CostMetricNone, fmt.Errorf("failed to parse a valid CostMetricName from '%s'", costMetric)
+}
+
+// CostMetric is a container for values associated with a specific accounting method
+type CostMetric struct {
+	// Cost is the monetary value under this accounting method.
+	Cost float64 `json:"cost"`
+	// KubernetesPercent is presumably the fraction of Cost attributed to Kubernetes
+	// (the add() weighting treats it as a fraction in [0,1]) — confirm with callers.
+	KubernetesPercent float64 `json:"kubernetesPercent"`
+}
+
+// Equal returns true when both Cost and KubernetesPercent match exactly.
+func (cm CostMetric) Equal(that CostMetric) bool {
+	return cm.Cost == that.Cost && cm.KubernetesPercent == that.KubernetesPercent
+}
+
+// Clone returns a copy of the CostMetric.
+func (cm CostMetric) Clone() CostMetric {
+	return CostMetric{
+		Cost:              cm.Cost,
+		KubernetesPercent: cm.KubernetesPercent,
+	}
+}
+
+// add returns the sum of the two CostMetrics; the resulting KubernetesPercent is the
+// cost-weighted average of the inputs' percents (0 when the summed cost is not positive).
+func (cm CostMetric) add(that CostMetric) CostMetric {
+	// Compute KubernetesPercent for sum
+	k8sPct := 0.0
+	sumCost := cm.Cost + that.Cost
+	if sumCost > 0.0 {
+		thisK8sCost := cm.Cost * cm.KubernetesPercent
+		thatK8sCost := that.Cost * that.KubernetesPercent
+		k8sPct = (thisK8sCost + thatK8sCost) / sumCost
+	}
+
+	return CostMetric{
+		Cost:              sumCost,
+		KubernetesPercent: k8sPct,
+	}
+}
+
+// percent returns the product of the given percent and the cost, KubernetesPercent remains the same.
+// NOTE(review): pct appears to be a fraction (e.g. 0.25 for 25%) — inferred from the multiplication; confirm with callers.
+func (cm CostMetric) percent(pct float64) CostMetric {
+	return CostMetric{
+		Cost:              cm.Cost * pct,
+		KubernetesPercent: cm.KubernetesPercent,
+	}
+}

+ 23 - 0
pkg/kubecost/query.go

@@ -1,6 +1,7 @@
 package kubecost
 
 import (
+	"strings"
 	"time"
 
 	filter21 "github.com/opencost/opencost/pkg/filter21"
@@ -69,6 +70,28 @@ const (
 	AccumulateOptionQuarter AccumulateOption = "quarter"
 )
 
+// ParseAccumulate converts a string to an AccumulateOption. Matching is case-insensitive;
+// any unrecognized value (including "false" and the empty string) yields AccumulateOptionNone.
+func ParseAccumulate(acc string) AccumulateOption {
+	var opt AccumulateOption
+	switch strings.ToLower(acc) {
+	case "quarter":
+		opt = AccumulateOptionQuarter
+	case "month":
+		opt = AccumulateOptionMonth
+	case "week":
+		opt = AccumulateOptionWeek
+	case "day":
+		opt = AccumulateOptionDay
+	case "hour":
+		opt = AccumulateOptionHour
+	case "true":
+		// legacy boolean-style flag: "true" means accumulate the whole range
+		opt = AccumulateOptionAll
+	default:
+		opt = AccumulateOptionNone
+	}
+	return opt
+}
+
 // AssetQueryOptions defines optional parameters for querying an Asset Store
 type AssetQueryOptions struct {
 	Accumulate              bool

+ 229 - 0
pkg/kubecost/window.go

@@ -795,8 +795,237 @@ func (w Window) GetPercentInWindow(that Window) float64 {
 	return pct
 }
 
+// GetAccumulateWindow rounds the start and end of the window to the given accumulation option
+func (w Window) GetAccumulateWindow(accumOpt AccumulateOption) (Window, error) {
+	if w.IsOpen() {
+		return w, fmt.Errorf("could not get accumulate window for open window")
+	}
+	switch accumOpt {
+	case AccumulateOptionAll:
+		// just return the entire window
+		return w.Clone(), nil
+	case AccumulateOptionHour:
+		return w.getHourlyWindow(), nil
+	case AccumulateOptionDay:
+		return w.getDailyWindow(), nil
+	case AccumulateOptionWeek:
+		return w.getWeeklyWindow(), nil
+	case AccumulateOptionMonth:
+		return w.getMonthlyWindow(), nil
+	case AccumulateOptionQuarter:
+		return w.getQuarterlyWindow(), nil
+	case AccumulateOptionNone:
+		// the default behavior of the app currently is to return the highest resolution steps
+		// possible
+		fallthrough
+	default:
+		// AccumulateOptionNone and any unrecognized option have no calendar boundary to round to
+		return w, fmt.Errorf("cannot round window to given accumulation option %s", string(accumOpt))
+	}
+}
+
+// GetAccumulateWindows breaks provided window into a []Window with each window having the resolution of the provided AccumulateOption
+func (w Window) GetAccumulateWindows(accumOpt AccumulateOption) ([]Window, error) {
+	if w.IsOpen() {
+		return nil, fmt.Errorf("could not get accumulate windows for open window")
+	}
+	switch accumOpt {
+	case AccumulateOptionAll:
+		// just return the entire window
+		return []Window{w.Clone()}, nil
+	case AccumulateOptionHour:
+		// our maximum resolution is hourly
+		return w.getHourlyWindows(), nil
+	case AccumulateOptionDay:
+		return w.getDailyWindows(), nil
+	case AccumulateOptionWeek:
+		return w.getWeeklyWindows(), nil
+	case AccumulateOptionMonth:
+		return w.getMonthlyWindows(), nil
+	case AccumulateOptionQuarter:
+		return w.getQuarterlyWindows(), nil
+	case AccumulateOptionNone:
+		// the default behavior of the app currently is to return the highest resolution steps
+		// possible
+		fallthrough
+	default:
+		// AccumulateOptionNone and any unrecognized option cannot be mapped to a fixed step size
+		return nil, fmt.Errorf("cannot break window into given accumulation option %s", string(accumOpt))
+	}
+}
+
+// getHourlyWindow expands the window outward so that it starts and ends on calendar-hour boundaries.
+func (w Window) getHourlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar hour start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), origStart.Day(), origStart.Hour(), 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month(), origEnd.Day(), origEnd.Hour()+1, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new hour, does not need rounding
+	// NOTE(review): sub-second precision is ignored; an end with nonzero nanoseconds on the hour counts as rounded
+	if origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getHourlyWindows breaks up a window into hours
+func (w Window) getHourlyWindows() []Window {
+	wins := []Window{}
+	// expand to hour boundaries first so every step is a full calendar hour
+	roundedWindow := w.getHourlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	// march one hour at a time; the inclusive comparison keeps the final step
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day(), currStart.Hour()+1, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day(), currEnd.Hour()+1, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+// getDailyWindow expands the window outward so that it starts and ends on calendar-day boundaries.
+func (w Window) getDailyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar day start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), origStart.Day(), 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month(), origEnd.Day()+1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new day, does not need rounding
+	if origEnd.Minute() == 0 && origEnd.Second() == 0 && origEnd.Hour() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getDailyWindows breaks up a window into days
+func (w Window) getDailyWindows() []Window {
+	wins := []Window{}
+	// expand to day boundaries first so every step is a full calendar day
+	roundedWindow := w.getDailyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	// march one day at a time; the inclusive comparison keeps the final step
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day()+1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day()+1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+// getWeeklyWindow expands the window outward so that it starts and ends on calendar-week
+// (Sunday) boundaries.
+func (w Window) getWeeklyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar week start and ends, respectively
+	roundedStart := origStart.Add(-1 * time.Duration(origStart.Weekday()) * time.Hour * 24)
+	roundedStart = time.Date(roundedStart.Year(), roundedStart.Month(), roundedStart.Day(), 0, 0, 0, 0, origStart.Location())
+	roundedEnd := origEnd.Add(time.Duration(6-origEnd.Weekday()) * time.Hour * 24)
+	roundedEnd = time.Date(roundedEnd.Year(), roundedEnd.Month(), roundedEnd.Day()+1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of a new week, does not need rounding
+	// (the minute check was previously missing, so e.g. Sunday 00:30 escaped rounding)
+	if origEnd.Weekday() == time.Sunday && origEnd.Hour() == 0 && origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getWeeklyWindows breaks up a window into weeks, with weeks starting on Sunday
+func (w Window) getWeeklyWindows() []Window {
+	wins := []Window{}
+	// round to week boundaries (was incorrectly rounding to day boundaries, which left the
+	// steps unaligned to Sundays and could drop the trailing partial week entirely)
+	roundedWindow := w.getWeeklyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	// march one week (7 days) at a time; the inclusive comparison keeps the final step
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day()+7, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day()+7, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+// getMonthlyWindow expands the window outward so that it starts and ends on calendar-month boundaries.
+func (w Window) getMonthlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar month start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), 1, 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month()+1, 1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new month, does not need rounding
+	if origEnd.Day() == 1 && origEnd.Hour() == 0 && origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getMonthlyWindows breaks up a window into calendar months
+func (w Window) getMonthlyWindows() []Window {
+	wins := []Window{}
+	// expand to month boundaries first so every step is a full calendar month
+	roundedWindow := w.getMonthlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+	// march one month at a time; time.Date normalizes Month()+1 across year boundaries
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month()+1, 1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month()+1, 1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+// getQuarterlyWindow expands the window outward so that it starts and ends on calendar-quarter boundaries.
+func (w Window) getQuarterlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar quarter start and ends, respectively
+	// get quarter fraction from month
+	startQuarterNum := int(math.Ceil(float64(origStart.Month()) / 3.0))
+	endQuarterNum := int(math.Ceil(float64(origEnd.Month()) / 3.0))
+
+	// quarter N starts at month (N*3)-2, i.e. Jan/Apr/Jul/Oct
+	roundedStart := time.Date(origStart.Year(), time.Month((startQuarterNum*3)-2), 1, 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), time.Month(((endQuarterNum+1)*3)-2), 1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new quarter, does not need rounding
+	if origEnd.Month() == time.Month(((endQuarterNum)*3)-2) && origEnd.Day() == 1 && origEnd.Hour() == 0 && origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getQuarterlyWindows breaks up a window into calendar quarters
+func (w Window) getQuarterlyWindows() []Window {
+	wins := []Window{}
+	// expand to quarter boundaries first so every step is a full calendar quarter
+	roundedWindow := w.getQuarterlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	// march three months at a time; time.Date normalizes Month()+3 across year boundaries
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month()+3, 1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month()+3, 1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
 // GetWindows returns a slice of Window with equal size between the given start and end. If windowSize does not evenly
 // divide the period between start and end, the last window is not added
+// Deprecated: in v1.107 use Window.GetWindows() instead
 func GetWindows(start time.Time, end time.Time, windowSize time.Duration) ([]Window, error) {
 	// Ensure the range is evenly divisible into windows of the given duration
 	dur := end.Sub(start)

+ 209 - 0
pkg/proto/http.go

@@ -0,0 +1,209 @@
+package proto
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/opencost/opencost/pkg/util/json"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+)
+
+// HTTPProtocol is a struct used as a selector for request/response protocol utility methods
+type HTTPProtocol struct{}
+
+// HTTPError represents an http error response
+type HTTPError struct {
+	// StatusCode is the HTTP status code to send; WriteError treats 0 as 500.
+	StatusCode int
+	// Body is the error message / response body.
+	Body string
+}
+
+// Error returns the error string, implementing the error interface
+func (he HTTPError) Error() string {
+	// Body is already a string; the previous string(he.Body) conversion was redundant
+	return he.Body
+}
+
+// BadRequest creates a BadRequest HTTPError (status 400) with the given message as the body
+func (hp HTTPProtocol) BadRequest(message string) HTTPError {
+	return HTTPError{
+		StatusCode: http.StatusBadRequest,
+		Body:       message,
+	}
+}
+
+// InternalServerError creates an InternalServerError HTTPError (status 500); an empty
+// message defaults to "Internal Server Error"
+func (hp HTTPProtocol) InternalServerError(message string) HTTPError {
+	if message == "" {
+		message = "Internal Server Error"
+	}
+	return HTTPError{
+		StatusCode: http.StatusInternalServerError,
+		Body:       message,
+	}
+}
+
+// NotFound creates a NotFound HTTPError (status 404) with a fixed "Not Found" body
+func (hp HTTPProtocol) NotFound() HTTPError {
+	return HTTPError{
+		StatusCode: http.StatusNotFound,
+		Body:       "Not Found",
+	}
+}
+
+// HTTPResponse represents a data envelope for our HTTP messaging
+type HTTPResponse struct {
+	// Code mirrors the HTTP status code inside the JSON body.
+	Code int `json:"code"`
+	// Data is the payload; its concrete type is caller-defined.
+	Data interface{} `json:"data"`
+	// Message carries error or informational text; omitted from JSON when empty.
+	Message string `json:"message,omitempty"`
+	// Warning carries a non-fatal warning; omitted from JSON when empty.
+	Warning string `json:"warning,omitempty"`
+}
+
+// ToResponse accepts a data payload and/or error to encode into a new HTTPResponse instance. Responses
+// which should not contain an error should pass nil for err.
+func (hp HTTPProtocol) ToResponse(data interface{}, err error) *HTTPResponse {
+	if err != nil {
+		// a non-nil error yields a 500 envelope; data (if any) is still attached
+		return &HTTPResponse{
+			Code:    http.StatusInternalServerError,
+			Data:    data,
+			Message: err.Error(),
+		}
+	}
+
+	return &HTTPResponse{
+		Code: http.StatusOK,
+		Data: data,
+	}
+}
+
+// WriteData wraps the data payload in an HTTPResponse and writes the resulting response using the
+// http.ResponseWriter
+func (hp HTTPProtocol) WriteData(w http.ResponseWriter, data interface{}) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code: status,
+		Data: data,
+	})
+	if err != nil {
+		// marshaling failures downgrade the response to a 500 carrying the error text
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithWarning writes the data payload similar to WriteData except it provides an additional warning message.
+func (hp HTTPProtocol) WriteDataWithWarning(w http.ResponseWriter, data interface{}, warning string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Warning: warning,
+	})
+	if err != nil {
+		// marshaling failures downgrade the response to a 500 carrying the error text
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithMessage writes the data payload similar to WriteData except it provides an additional string message.
+func (hp HTTPProtocol) WriteDataWithMessage(w http.ResponseWriter, data interface{}, message string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Message: message,
+	})
+	if err != nil {
+		// marshaling failures downgrade the response to a 500 carrying the error text
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteProtoWithMessage uses the protojson package to convert proto3 response to json response and
+// return it to the requester. Proto3 drops messages with default values but overriding the param
+// EmitUnpopulated to true it returns default values in the Json response payload. If an error is
+// encountered it sends InternalServerError and the error why the json conversion failed.
+func (hp HTTPProtocol) WriteProtoWithMessage(w http.ResponseWriter, data proto.Message) {
+	m := protojson.MarshalOptions{
+		EmitUnpopulated: true,
+	}
+	status := http.StatusOK
+	resp, err := m.Marshal(data)
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			// include the status code in the envelope, consistent with the other Write* helpers
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithMessageAndWarning writes the data payload similar to WriteData except it provides a warning and additional message string.
+func (hp HTTPProtocol) WriteDataWithMessageAndWarning(w http.ResponseWriter, data interface{}, message string, warning string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Message: message,
+		Warning: warning,
+	})
+	if err != nil {
+		// marshaling failures downgrade the response to a 500 carrying the error text
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteError wraps the HTTPError in a HTTPResponse and writes it via http.ResponseWriter
+func (hp HTTPProtocol) WriteError(w http.ResponseWriter, err HTTPError) {
+	status := err.StatusCode
+	if status == 0 {
+		// a zero-value StatusCode defaults to 500
+		status = http.StatusInternalServerError
+	}
+	w.WriteHeader(status)
+
+	// marshal error intentionally ignored: the header is already written at this point
+	resp, _ := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Message: err.Body,
+	})
+	w.Write(resp)
+}
+
+// WriteResponse writes the provided HTTPResponse instance via http.ResponseWriter
+func (hp HTTPProtocol) WriteResponse(w http.ResponseWriter, r *HTTPResponse) {
+	status := r.Code
+	resp, err := json.Marshal(r)
+	if err != nil {
+		// marshaling failures downgrade the response to a 500 carrying the error text
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}

+ 22 - 0
pkg/proto/proto.go

@@ -0,0 +1,22 @@
+package proto
+
+////////////////////////////////////////////////////////////////////////////////
+//
+//  The purpose of this package is to provide a general set of utilities for
+//  writing responses in networked communication. Since go often uses the basic
+//  protocol names (ie: "net/http") for their packages, keeping protocol utilities
+//  in their own packages can be a bit annoying with respect to building an API.
+//  To provide a "static" set of utilities, we can utilize method selectors on
+//  structs allowing callers to use proto.<protocol>() to access the utility methods
+//  with package-like syntax. We can also expand on the supported protocols as needed.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+var (
+	// httpProtocol is the shared zero-value HTTPProtocol selector returned by HTTP().
+	httpProtocol HTTPProtocol
+)
+
+// HTTP returns the HTTPProtocol utilities.
+func HTTP() HTTPProtocol {
+	return httpProtocol
+}