Просмотр исходного кода

Merge branch 'develop' into develop

Cliff Colvin 2 лет назад
Родитель
Commit
f4d841fae4
100 измененных файлов с 7533 добавлено и 1262 удалено
  1. 4 1
      .gitignore
  2. 1 1
      CONTRIBUTING.md
  3. 3 3
      README.md
  4. 13 9
      go.mod
  5. 23 13
      go.sum
  6. 2 2
      kubernetes/opencost.yaml
  7. 6 6
      pkg/cloud/alibaba/authorizer.go
  8. 12 7
      pkg/cloud/alibaba/boaconfiguration.go
  9. 2 2
      pkg/cloud/alibaba/boaconfiguration_test.go
  10. 6 4
      pkg/cloud/alibaba/boaquerier.go
  11. 1 1
      pkg/cloud/authorizer.go
  12. 31 24
      pkg/cloud/aws/athenaconfiguration.go
  13. 2 2
      pkg/cloud/aws/athenaconfiguration_test.go
  14. 1 11
      pkg/cloud/aws/athenaintegration.go
  15. 6 4
      pkg/cloud/aws/athenaquerier.go
  16. 14 14
      pkg/cloud/aws/authorizer.go
  17. 3 3
      pkg/cloud/aws/authorizer_test.go
  18. 43 2
      pkg/cloud/aws/provider.go
  19. 67 0
      pkg/cloud/aws/provider_test.go
  20. 12 7
      pkg/cloud/aws/s3configuration.go
  21. 5 2
      pkg/cloud/aws/s3connection.go
  22. 2 2
      pkg/cloud/aws/s3connection_test.go
  23. 1 2
      pkg/cloud/aws/s3selectintegration.go
  24. 1 2
      pkg/cloud/aws/s3selectquerier.go
  25. 6 6
      pkg/cloud/azure/authorizer.go
  26. 2 5
      pkg/cloud/azure/azurestorageintegration.go
  27. 20 9
      pkg/cloud/azure/storagebillingparser.go
  28. 16 11
      pkg/cloud/azure/storageconfiguration.go
  29. 2 2
      pkg/cloud/azure/storageconfiguration_test.go
  30. 5 2
      pkg/cloud/azure/storageconnection.go
  31. 0 12
      pkg/cloud/cloudcostintegration.go
  32. 2 1
      pkg/cloud/config.go
  33. 291 0
      pkg/cloud/config/configurations.go
  34. 290 0
      pkg/cloud/config/configurations_test.go
  35. 305 0
      pkg/cloud/config/controller.go
  36. 160 0
      pkg/cloud/config/controller_handlers.go
  37. 871 0
      pkg/cloud/config/controller_test.go
  38. 95 0
      pkg/cloud/config/mock.go
  39. 14 0
      pkg/cloud/config/observer.go
  40. 351 0
      pkg/cloud/config/watcher.go
  41. 9 9
      pkg/cloud/gcp/authorizer.go
  42. 13 8
      pkg/cloud/gcp/bigqueryconfiguration.go
  43. 2 2
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  44. 2 1
      pkg/cloud/gcp/bigqueryintegration.go
  45. 19 3
      pkg/cloud/gcp/bigqueryquerier.go
  46. 2 0
      pkg/cloud/gcp/provider.go
  47. 28 0
      pkg/cloud/provider/providerconfig.go
  48. 1 1
      pkg/cloud/scaleway/provider.go
  49. 207 0
      pkg/cloudcost/ingestionmanager.go
  50. 342 0
      pkg/cloudcost/ingestor.go
  51. 96 0
      pkg/cloudcost/integration.go
  52. 103 0
      pkg/cloudcost/memoryrepository.go
  53. 194 0
      pkg/cloudcost/pipelineservice.go
  54. 89 0
      pkg/cloudcost/querier.go
  55. 370 0
      pkg/cloudcost/queryservice.go
  56. 16 0
      pkg/cloudcost/repository.go
  57. 229 0
      pkg/cloudcost/repositoryquerier.go
  58. 24 0
      pkg/cloudcost/status.go
  59. 107 0
      pkg/cloudcost/view.go
  60. 18 0
      pkg/cmd/costmodel/costmodel.go
  61. 3 2
      pkg/costmodel/allocation.go
  62. 7 3
      pkg/costmodel/cluster.go
  63. 6 0
      pkg/costmodel/costmodel.go
  64. 47 35
      pkg/costmodel/router.go
  65. 37 0
      pkg/env/costmodelenv.go
  66. 44 0
      pkg/env/costmodelenv_test.go
  67. 56 7
      pkg/kubecost/asset.go
  68. 74 3
      pkg/kubecost/asset_test.go
  69. 5 1
      pkg/kubecost/assetprops.go
  70. 190 63
      pkg/kubecost/cloudcost.go
  71. 1 1
      pkg/kubecost/cloudcost_test.go
  72. 14 3
      pkg/kubecost/cloudcostprops.go
  73. 77 0
      pkg/kubecost/costmetric.go
  74. 23 0
      pkg/kubecost/query.go
  75. 238 0
      pkg/kubecost/window.go
  76. 15 7
      pkg/kubecost/window_test.go
  77. 1 1
      pkg/metrics/kubemetrics.go
  78. 209 0
      pkg/proto/http.go
  79. 22 0
      pkg/proto/proto.go
  80. 8 1
      pkg/util/formatutil/formatutil.go
  81. 7 5
      ui/README.md
  82. 3 1
      ui/default.nginx.conf
  83. 265 488
      ui/package-lock.json
  84. 10 9
      ui/package.json
  85. 204 175
      ui/src/Reports.js
  86. 4 17
      ui/src/app.js
  87. 217 0
      ui/src/cloudCost/cloudCost.js
  88. 14 0
      ui/src/cloudCost/cloudCostChart/index.js
  89. 275 0
      ui/src/cloudCost/cloudCostChart/rangeChart.js
  90. 178 0
      ui/src/cloudCost/cloudCostDetails.js
  91. 48 0
      ui/src/cloudCost/cloudCostRow.js
  92. 91 0
      ui/src/cloudCost/controls/cloudCostEditControls.js
  93. 53 0
      ui/src/cloudCost/tokens.js
  94. 305 0
      ui/src/cloudCostReports.js
  95. 0 200
      ui/src/components/AllocationReport.js
  96. 37 25
      ui/src/components/Header.js
  97. 78 0
      ui/src/components/Nav/NavItem.js
  98. 70 0
      ui/src/components/Nav/SidebarNav.js
  99. 3 0
      ui/src/components/Nav/index.js
  100. 32 19
      ui/src/components/Page.js

+ 4 - 1
.gitignore

@@ -12,4 +12,7 @@ cmd/costmodel/costmodel-arm64
 pkg/cloud/azureorphan_test.go
 
 # VS Code
-.vscode
+.vscode
+
+#Apple
+*.DS_Store

+ 1 - 1
CONTRIBUTING.md

@@ -30,7 +30,7 @@ will run on both AMD64 and ARM64 clusters).
 
 Dependencies:
 1. Docker (with `buildx`)
-2. [just](https://github.com/casey/just) (if you don't want to install Just, read the `justfile` and run the commands manually)
+2. [just](https://github.com/casey/just) (if you don't want to install it , Just read the `justfile` and run the commands manually)
 3. Multi-arch `buildx` builders set up via https://github.com/tonistiigi/binfmt
 4. `npm` (if you want to build the UI)
 

+ 3 - 3
README.md

@@ -11,11 +11,11 @@ OpenCost was originally developed and open sourced by [Kubecost](https://kubecos
 To see the full functionality of OpenCost you can view [OpenCost features](https://opencost.io). Here is a summary of features enabled:
 
 - Real-time cost allocation by Kubernetes cluster, node, namespace, controller kind, controller, service, or pod
-- Dynamic onDemand asset pricing enabled by integrations with AWS, Azure, and GCP billing APIs
+- Dynamic on-demand asset pricing enabled by integrations with AWS, Azure, and GCP billing APIs
 - Supports on-prem k8s clusters with custom CSV pricing
 - Allocation for in-cluster resources like CPU, GPU, memory, and persistent volumes.
-- Easily export pricing data to Prometheus with /metrics endpoint ([learn more](PROMETHEUS.md))
-- Free and open source distribution (Apache2 license)
+- Easily export pricing data to Prometheus with /metrics endpoint ([learn more](https://www.opencost.io/docs/installation/prometheus))
+- Free and open source distribution ([Apache2 license](LICENSE))
 
 ## Getting Started
 

+ 13 - 9
go.mod

@@ -25,6 +25,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.29.0
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0
 	github.com/aws/aws-sdk-go-v2/service/sts v1.14.0
+	github.com/aws/smithy-go v1.13.5
 	github.com/davecgh/go-spew v1.1.1
 	github.com/getsentry/sentry-go v0.6.1
 	github.com/goccy/go-json v0.9.11
@@ -48,13 +49,15 @@ require (
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/viper v1.8.1
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.4
 	go.etcd.io/bbolt v1.3.5
+	go.opentelemetry.io/otel v1.19.0
 	golang.org/x/exp v0.0.0-20221031165847-c99f073a8326
 	golang.org/x/oauth2 v0.6.0
 	golang.org/x/sync v0.1.0
-	golang.org/x/text v0.8.0
+	golang.org/x/text v0.13.0
 	google.golang.org/api v0.114.0
+	google.golang.org/protobuf v1.29.1
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.25.3
 	k8s.io/apimachinery v0.25.3
@@ -91,7 +94,6 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sso v1.9.0 // indirect
-	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -99,7 +101,8 @@ require (
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.6 // indirect
 	github.com/go-openapi/swag v0.21.1 // indirect
@@ -154,19 +157,20 @@ require (
 	github.com/subosito/gotenv v1.2.0 // indirect
 	github.com/zeebo/xxh3 v1.0.2 // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	golang.org/x/crypto v0.6.0 // indirect
+	golang.org/x/crypto v0.14.0 // indirect
 	golang.org/x/mod v0.8.0 // indirect
-	golang.org/x/net v0.8.0 // indirect
-	golang.org/x/sys v0.6.0 // indirect
-	golang.org/x/term v0.6.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/time v0.1.0 // indirect
 	golang.org/x/tools v0.6.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 // indirect
 	google.golang.org/grpc v1.53.0 // indirect
-	google.golang.org/protobuf v1.29.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect

+ 23 - 13
go.sum

@@ -261,8 +261,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
@@ -660,8 +663,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -709,6 +713,12 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
@@ -731,8 +741,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -826,8 +836,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -927,14 +937,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -946,8 +956,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

+ 2 - 2
kubernetes/opencost.yaml

@@ -142,7 +142,7 @@ spec:
       restartPolicy: Always
       serviceAccountName: opencost
       containers:
-        - image: quay.io/kubecost1/kubecost-cost-model:latest
+        - image: gcr.io/kubecost1/opencost
           name: opencost
           resources:
             requests:
@@ -167,7 +167,7 @@ spec:
             privileged: false
             readOnlyRootFilesystem: true
             runAsUser: 1001
-        - image: quay.io/kubecost1/opencost-ui:latest
+        - image: gcr.io/kubecost1/opencost-ui
           name: opencost-ui
           resources:
             requests:

+ 6 - 6
pkg/cloud/alibaba/authorizer.go

@@ -5,7 +5,7 @@ import (
 
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -13,7 +13,7 @@ const AccessKeyAuthorizerType = "AlibabaAccessKey"
 
 // Authorizer provide *bssopenapi.Client for Alibaba cloud BOS for Billing related SDK calls
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	GetCredentials() (auth.Credential, error)
 }
 
@@ -36,7 +36,7 @@ type AccessKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["accessKeyID"] = ak.AccessKeyID
 	fmap["accessKeySecret"] = ak.AccessKeySecret
 	return json.Marshal(fmap)
@@ -52,7 +52,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -70,10 +70,10 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
 		AccessKeyID:     ak.AccessKeyID,
-		AccessKeySecret: config.Redacted,
+		AccessKeySecret: cloud.Redacted,
 	}
 }
 

+ 12 - 7
pkg/cloud/alibaba/boaconfiguration.go

@@ -3,7 +3,8 @@ package alibaba
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -36,7 +37,7 @@ func (bc *BOAConfiguration) Validate() error {
 	return nil
 }
 
-func (bc *BOAConfiguration) Equals(config config.Config) bool {
+func (bc *BOAConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -65,7 +66,7 @@ func (bc *BOAConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (bc *BOAConfiguration) Sanitize() config.Config {
+func (bc *BOAConfiguration) Sanitize() cloud.Config {
 	return &BOAConfiguration{
 		Account:    bc.Account,
 		Region:     bc.Region,
@@ -77,6 +78,10 @@ func (bc *BOAConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
 }
 
+func (bc *BOAConfiguration) Provider() string {
+	return kubecost.AlibabaProvider
+}
+
 func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -86,13 +91,13 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	bc.Account = account
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -102,7 +107,7 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -111,7 +116,7 @@ func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) cloud.KeyedConfig {
 	if acc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -97,7 +97,7 @@ func TestBoaConfiguration_Validate(t *testing.T) {
 func TestBOAConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     BOAConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 6 - 4
pkg/cloud/alibaba/boaquerier.go

@@ -4,11 +4,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
-
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
 	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
 )
@@ -25,10 +23,14 @@ type BoaQuerier struct {
 }
 
 func (bq *BoaQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if bq.ConnectionStatus.String() == "" {
+		bq.ConnectionStatus = cloud.InitialStatus
+	}
 	return bq.ConnectionStatus
 }
 
-func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+func (bq *BoaQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*BoaQuerier)
 	if !ok {
 		return false

+ 1 - 1
pkg/cloud/config/authorizer.go → pkg/cloud/authorizer.go

@@ -1,4 +1,4 @@
-package config
+package cloud
 
 import (
 	"fmt"

+ 31 - 24
pkg/cloud/aws/athenaconfiguration.go

@@ -3,7 +3,8 @@ package aws
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -12,7 +13,7 @@ type AthenaConfiguration struct {
 	Bucket     string     `json:"bucket"`
 	Region     string     `json:"region"`
 	Database   string     `json:"database"`
-	Catalog    string     `json:"catalog""`
+	Catalog    string     `json:"catalog"`
 	Table      string     `json:"table"`
 	Workgroup  string     `json:"workgroup"`
 	Account    string     `json:"account"`
@@ -55,7 +56,7 @@ func (ac *AthenaConfiguration) Validate() error {
 	return nil
 }
 
-func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+func (ac *AthenaConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -105,7 +106,7 @@ func (ac *AthenaConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (ac *AthenaConfiguration) Sanitize() config.Config {
+func (ac *AthenaConfiguration) Sanitize() cloud.Config {
 	return &AthenaConfiguration{
 		Bucket:     ac.Bucket,
 		Region:     ac.Region,
@@ -122,6 +123,10 @@ func (ac *AthenaConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
 }
 
+func (ac *AthenaConfiguration) Provider() string {
+	return kubecost.AWSProvider
+}
+
 func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -131,45 +136,47 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	bucket, err := cloud.GetInterfaceValue[string](fmap, "bucket")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Bucket = bucket
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Region = region
 
-	database, err := config.GetInterfaceValue[string](fmap, "database")
+	database, err := cloud.GetInterfaceValue[string](fmap, "database")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Database = database
 
-	catalog, err := config.GetInterfaceValue[string](fmap, "catalog")
-	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	if _, ok := fmap["catalog"]; ok {
+		catalog, err := cloud.GetInterfaceValue[string](fmap, "catalog")
+		if err != nil {
+			return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
+		}
+		ac.Catalog = catalog
 	}
-	ac.Catalog = catalog
 
-	table, err := config.GetInterfaceValue[string](fmap, "table")
+	table, err := cloud.GetInterfaceValue[string](fmap, "table")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Table = table
 
-	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
+	workgroup, err := cloud.GetInterfaceValue[string](fmap, "workgroup")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Workgroup = workgroup
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Account = account
 
@@ -177,9 +184,9 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
-		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %w", err)
 	}
 	ac.Authorizer = authorizer
 
@@ -188,7 +195,7 @@ func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
 
 // ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
 // legacy behavior
-func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) cloud.KeyedConfig {
 	if aai.IsEmpty() {
 		return nil
 	}
@@ -211,7 +218,7 @@ func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
 		}
 	}
 
-	var config config.KeyedConfig
+	var config cloud.KeyedConfig
 	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
 		config = &AthenaConfiguration{
 			Bucket:     aai.AthenaBucketName,

+ 2 - 2
pkg/cloud/aws/athenaconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -184,7 +184,7 @@ func TestAthenaConfiguration_Validate(t *testing.T) {
 func TestAthenaConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     AthenaConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 1 - 11
pkg/cloud/aws/athenaintegration.go

@@ -11,7 +11,6 @@ import (
 	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
-	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 const LabelColumnPrefix = "resource_tags_user_"
@@ -154,7 +153,7 @@ func (ai *AthenaIntegration) GetCloudCost(start, end time.Time) (*kubecost.Cloud
 	`
 	aqi.Query = fmt.Sprintf(queryStr, columnStr, ai.Table, whereClause, groupByStr)
 
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, ai.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, ai.Key())
 	if err != nil {
 		return nil, err
 	}
@@ -442,12 +441,3 @@ func (ai *AthenaIntegration) GetConnectionStatusFromResult(result cloud.EmptyChe
 	}
 	return cloud.SuccessfulConnection
 }
-
-func (ai *AthenaIntegration) GetConnectionStatus() string {
-	// initialize status if it has not done so; this can happen if the integration is inactive
-	if ai.ConnectionStatus.String() == "" {
-		ai.ConnectionStatus = cloud.InitialStatus
-	}
-
-	return ai.ConnectionStatus.String()
-}

+ 6 - 4
pkg/cloud/aws/athenaquerier.go

@@ -8,12 +8,10 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
-
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/athena"
 	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/stringutil"
@@ -25,10 +23,14 @@ type AthenaQuerier struct {
 }
 
 func (aq *AthenaQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if aq.ConnectionStatus.String() == "" {
+		aq.ConnectionStatus = cloud.InitialStatus
+	}
 	return aq.ConnectionStatus
 }
 
-func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+func (aq *AthenaQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*AthenaQuerier)
 	if !ok {
 		return false

+ 14 - 14
pkg/cloud/aws/authorizer.go

@@ -8,7 +8,7 @@ import (
 	awsconfig "github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -18,7 +18,7 @@ const AssumeRoleAuthorizerType = "AWSAssumeRole"
 
 // Authorizer implementations provide aws.Config for AWS SDK calls
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	CreateAWSConfig(string) (aws.Config, error)
 }
 
@@ -45,7 +45,7 @@ type AccessKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["id"] = ak.ID
 	fmap["secret"] = ak.Secret
 	return json.Marshal(fmap)
@@ -70,7 +70,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -88,10 +88,10 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
 		ID:     ak.ID,
-		Secret: config.Redacted,
+		Secret: cloud.Redacted,
 	}
 }
 
@@ -115,7 +115,7 @@ type ServiceAccount struct{}
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 1)
-	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
 	return json.Marshal(fmap)
 }
 
@@ -124,7 +124,7 @@ func (sa *ServiceAccount) Validate() error {
 	return nil
 }
 
-func (sa *ServiceAccount) Equals(config config.Config) bool {
+func (sa *ServiceAccount) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -136,7 +136,7 @@ func (sa *ServiceAccount) Equals(config config.Config) bool {
 	return true
 }
 
-func (sa *ServiceAccount) Sanitize() config.Config {
+func (sa *ServiceAccount) Sanitize() cloud.Config {
 	return &ServiceAccount{}
 }
 
@@ -157,7 +157,7 @@ type AssumeRole struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
 	fmap["roleARN"] = ara.RoleARN
 	fmap["authorizer"] = ara.Authorizer
 	return json.Marshal(fmap)
@@ -173,7 +173,7 @@ func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	roleARN, err := cloud.GetInterfaceValue[string](fmap, "roleARN")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -183,7 +183,7 @@ func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
 	}
@@ -218,7 +218,7 @@ func (ara *AssumeRole) Validate() error {
 	return nil
 }
 
-func (ara *AssumeRole) Equals(config config.Config) bool {
+func (ara *AssumeRole) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -243,7 +243,7 @@ func (ara *AssumeRole) Equals(config config.Config) bool {
 	return true
 }
 
-func (ara *AssumeRole) Sanitize() config.Config {
+func (ara *AssumeRole) Sanitize() cloud.Config {
 	return &AssumeRole{
 		Authorizer: ara.Authorizer.Sanitize().(Authorizer),
 		RoleARN:    ara.RoleARN,

+ 3 - 3
pkg/cloud/aws/authorizer_test.go

@@ -3,7 +3,7 @@ package aws
 import (
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 )
 
 func TestAuthorizerJSON_Sanitize(t *testing.T) {
@@ -19,7 +19,7 @@ func TestAuthorizerJSON_Sanitize(t *testing.T) {
 			},
 			expected: &AccessKey{
 				ID:     "ID",
-				Secret: config.Redacted,
+				Secret: cloud.Redacted,
 			},
 		},
 		"Service Account": {
@@ -37,7 +37,7 @@ func TestAuthorizerJSON_Sanitize(t *testing.T) {
 			expected: &AssumeRole{
 				Authorizer: &AccessKey{
 					ID:     "ID",
-					Secret: config.Redacted,
+					Secret: cloud.Redacted,
 				},
 				RoleARN: "role arn",
 			},

+ 43 - 2
pkg/cloud/aws/provider.go

@@ -70,6 +70,13 @@ var (
 	usageTypeRegx = regexp.MustCompile(".*(-|^)(EBS.+)")
 	versionRx     = regexp.MustCompile(`^#Version: (\\d+)\\.\\d+$`)
 	regionRx      = regexp.MustCompile("([a-z]+-[a-z]+-[0-9])")
+
+	// StorageClassProvisionerDefaults specifies the default storage class types depending upon the provisioner
+	StorageClassProvisionerDefaults = map[string]string{
+		"kubernetes.io/aws-ebs": "gp2",
+		"ebs.csi.aws.com":       "gp3",
+		// TODO: add efs provisioner
+	}
 )
 
 func (aws *AWS) PricingSourceStatus() map[string]*models.PricingSource {
@@ -666,6 +673,9 @@ func (k *awsKey) Features() string {
 // If the instance is a spot instance, it will return PreemptibleType
 // Otherwise returns an empty string
 func (k *awsKey) getUsageType(labels map[string]string) string {
+	if kLabel, ok := labels[k.SpotLabelName]; ok && kLabel == k.SpotLabelValue {
+		return PreemptibleType
+	}
 	if eksLabel, ok := labels[EKSCapacityTypeLabel]; ok && eksLabel == EKSCapacitySpotTypeValue {
 		// We currently write out spot instances as "preemptible" in the pricing data, so these need to match
 		return PreemptibleType
@@ -720,7 +730,12 @@ func (key *awsPVKey) GetStorageClass() string {
 }
 
 func (key *awsPVKey) Features() string {
-	storageClass := key.StorageClassParameters["type"]
+	storageClass, ok := key.StorageClassParameters["type"]
+	if !ok {
+		log.Debugf("storage class %s doesn't have a 'type' parameter", key.Name)
+		storageClass = getStorageClassTypeFrom(key.StorageClassParameters["provisioner"])
+	}
+
 	if storageClass == "standard" {
 		storageClass = "gp2"
 	}
@@ -738,6 +753,22 @@ func (key *awsPVKey) Features() string {
 	return region + "," + class
 }
 
+// getStorageClassTypeFrom returns the default ebs volume type for a provider provisioner
+func getStorageClassTypeFrom(provisioner string) string {
+	// if there isn't any provided provisioner, return empty volume type
+	if provisioner == "" {
+		return ""
+	}
+
+	scType, ok := StorageClassProvisionerDefaults[provisioner]
+	if ok {
+		log.Debugf("using default voltype %s for provisioner %s", scType, provisioner)
+		return scType
+	}
+
+	return ""
+}
+
 // GetKey maps node labels to information needed to retrieve pricing data
 func (aws *AWS) GetKey(labels map[string]string, n *v1.Node) models.Key {
 	return &awsKey{
@@ -862,6 +893,9 @@ func (aws *AWS) DownloadPricingData() error {
 	storageClassMap := make(map[string]map[string]string)
 	for _, storageClass := range storageClasses {
 		params := storageClass.Parameters
+		if params != nil {
+			params["provisioner"] = storageClass.Provisioner
+		}
 		storageClassMap[storageClass.ObjectMeta.Name] = params
 		if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
 			storageClassMap["default"] = params
@@ -1814,6 +1848,12 @@ func (aws *AWS) GetOrphanedResources() ([]models.OrphanedResource, error) {
 				url = "https://console.aws.amazon.com/ec2/home?#Volumes:sort=desc:createTime"
 			}
 
+			// output tags as desc
+			tags := map[string]string{}
+			for _, tag := range volume.Tags {
+				tags[*tag.Key] = *tag.Value
+			}
+
 			or := models.OrphanedResource{
 				Kind:        "disk",
 				Region:      zone,
@@ -1821,6 +1861,7 @@ func (aws *AWS) GetOrphanedResources() ([]models.OrphanedResource, error) {
 				DiskName:    *volume.VolumeId,
 				Url:         url,
 				MonthlyCost: cost,
+				Description: tags,
 			}
 
 			orphanedResources = append(orphanedResources, or)
@@ -1868,7 +1909,7 @@ func (aws *AWS) findCostForDisk(disk *ec2Types.Volume) (*float64, error) {
 
 	class := volTypes[string(disk.VolumeType)]
 
-	key := "us-east-2" + "," + class
+	key := aws.ClusterRegion + "," + class
 
 	pricing, ok := aws.Pricing[key]
 	if !ok {

+ 67 - 0
pkg/cloud/aws/provider_test.go

@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/pkg/cloud/models"
+	v1 "k8s.io/api/core/v1"
 )
 
 func Test_awsKey_getUsageType(t *testing.T) {
@@ -492,5 +493,71 @@ func Test_populate_pricing(t *testing.T) {
 	if !reflect.DeepEqual(expectedPricing, awsTest.Pricing) {
 		t.Fatalf("expected parsed pricing did not match actual parsed result (cn)")
 	}
+}
+
+func TestFeatures(t *testing.T) {
+	testCases := map[string]struct {
+		aws      awsKey
+		expected string
+	}{
+		"Spot from custom labels": {
+			aws: awsKey{
+				SpotLabelName:  "node-type",
+				SpotLabelValue: "node-spot",
+				Labels: map[string]string{
+					"node-type":                "node-spot",
+					v1.LabelOSStable:           "linux",
+					v1.LabelHostname:           "my-hostname",
+					v1.LabelTopologyRegion:     "us-west-2",
+					v1.LabelTopologyZone:       "us-west-2b",
+					v1.LabelInstanceTypeStable: "m5.large",
+				},
+			},
+			expected: "us-west-2,m5.large,linux,preemptible",
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			features := tc.aws.Features()
+			if features != tc.expected {
+				t.Errorf("expected %s, got %s", tc.expected, features)
+			}
+		})
+	}
+}
 
+func Test_getStorageClassTypeFrom(t *testing.T) {
+	tests := []struct {
+		name        string
+		provisioner string
+		want        string
+	}{
+		{
+			name:        "empty-provisioner",
+			provisioner: "",
+			want:        "",
+		},
+		{
+			name:        "ebs-default-provisioner",
+			provisioner: "kubernetes.io/aws-ebs",
+			want:        "gp2",
+		},
+		{
+			name:        "ebs-csi-provisioner",
+			provisioner: "ebs.csi.aws.com",
+			want:        "gp3",
+		},
+		{
+			name:        "unknown-provisioner",
+			provisioner: "unknown",
+			want:        "",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := getStorageClassTypeFrom(tt.provisioner); got != tt.want {
+				t.Errorf("getStorageClassTypeFrom() = %v, want %v", got, tt.want)
+			}
+		})
+	}
 }

+ 12 - 7
pkg/cloud/aws/s3configuration.go

@@ -4,7 +4,8 @@ import (
 	"fmt"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -42,7 +43,7 @@ func (s3c *S3Configuration) Validate() error {
 	return nil
 }
 
-func (s3c *S3Configuration) Equals(config config.Config) bool {
+func (s3c *S3Configuration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -76,7 +77,7 @@ func (s3c *S3Configuration) Equals(config config.Config) bool {
 	return true
 }
 
-func (s3c *S3Configuration) Sanitize() config.Config {
+func (s3c *S3Configuration) Sanitize() cloud.Config {
 	return &S3Configuration{
 		Bucket:     s3c.Bucket,
 		Region:     s3c.Region,
@@ -89,6 +90,10 @@ func (s3c *S3Configuration) Key() string {
 	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
 }
 
+func (s3c *S3Configuration) Provider() string {
+	return kubecost.AWSProvider
+}
+
 func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -98,19 +103,19 @@ func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	bucket, err := cloud.GetInterfaceValue[string](fmap, "bucket")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
 	s3c.Bucket = bucket
 
-	region, err := config.GetInterfaceValue[string](fmap, "region")
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
 	s3c.Region = region
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}
@@ -120,7 +125,7 @@ func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
 	}

+ 5 - 2
pkg/cloud/aws/s3connection.go

@@ -6,7 +6,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/opencost/opencost/pkg/cloud"
-	"github.com/opencost/opencost/pkg/cloud/config"
 )
 
 type S3Connection struct {
@@ -15,10 +14,14 @@ type S3Connection struct {
 }
 
 func (s3c *S3Connection) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if s3c.ConnectionStatus.String() == "" {
+		s3c.ConnectionStatus = cloud.InitialStatus
+	}
 	return s3c.ConnectionStatus
 }
 
-func (s3c *S3Connection) Equals(config config.Config) bool {
+func (s3c *S3Connection) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*S3Connection)
 	if !ok {
 		return false

+ 2 - 2
pkg/cloud/aws/s3connection_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -105,7 +105,7 @@ func TestS3Configuration_Validate(t *testing.T) {
 func TestS3Configuration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     S3Configuration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 1 - 2
pkg/cloud/aws/s3selectintegration.go

@@ -10,7 +10,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
-	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 const S3SelectDateLayout = "2006-01-02T15:04:05Z"
@@ -58,7 +57,7 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 	ccsr, err := kubecost.NewCloudCostSetRange(
 		start,
 		end,
-		timeutil.Day,
+		kubecost.AccumulateOptionDay,
 		s3si.Key(),
 	)
 	if err != nil {

+ 1 - 2
pkg/cloud/aws/s3selectquerier.go

@@ -13,7 +13,6 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/opencost/opencost/pkg/cloud"
-	"github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/util/stringutil"
 )
 
@@ -22,7 +21,7 @@ type S3SelectQuerier struct {
 	connectionStatus cloud.ConnectionStatus
 }
 
-func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+func (s3sq *S3SelectQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*S3SelectQuerier)
 	if !ok {
 		return false

+ 6 - 6
pkg/cloud/azure/authorizer.go

@@ -5,13 +5,13 @@ import (
 	"fmt"
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 )
 
 const AccessKeyAuthorizerType = "AzureAccessKey"
 
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	GetBlobCredentials() (azblob.Credential, error)
 }
 
@@ -32,7 +32,7 @@ type AccessKey struct {
 
 func (ak *AccessKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 3)
-	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = AccessKeyAuthorizerType
 	fmap["accessKey"] = ak.AccessKey
 	fmap["account"] = ak.Account
 	return json.Marshal(fmap)
@@ -48,7 +48,7 @@ func (ak *AccessKey) Validate() error {
 	return nil
 }
 
-func (ak *AccessKey) Equals(config config.Config) bool {
+func (ak *AccessKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -67,9 +67,9 @@ func (ak *AccessKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (ak *AccessKey) Sanitize() config.Config {
+func (ak *AccessKey) Sanitize() cloud.Config {
 	return &AccessKey{
-		AccessKey: config.Redacted,
+		AccessKey: cloud.Redacted,
 		Account:   ak.Account,
 	}
 }

+ 2 - 5
pkg/cloud/azure/azurestorageintegration.go

@@ -4,23 +4,21 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 type AzureStorageIntegration struct {
 	AzureStorageBillingParser
-	ConnectionStatus cloud.ConnectionStatus
 }
 
 func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*kubecost.CloudCostSetRange, error) {
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, asi.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, asi.Key())
 	if err != nil {
 		return nil, err
 	}
 
-	status, err := asi.ParseBillingData(start, end, func(abv *BillingRowValues) error {
+	err = asi.ParseBillingData(start, end, func(abv *BillingRowValues) error {
 		s := abv.Date
 		e := abv.Date.Add(timeutil.Day)
 		window := kubecost.NewWindow(&s, &e)
@@ -77,7 +75,6 @@ func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*kubecos
 		return nil
 	})
 	if err != nil {
-		asi.ConnectionStatus = status
 		return nil, err
 	}
 	return ccsr, nil

+ 20 - 9
pkg/cloud/azure/storagebillingparser.go

@@ -11,7 +11,6 @@ import (
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/log"
 )
 
@@ -20,7 +19,7 @@ type AzureStorageBillingParser struct {
 	StorageConnection
 }
 
-func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+func (asbp *AzureStorageBillingParser) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*AzureStorageBillingParser)
 	if !ok {
 		return false
@@ -30,33 +29,45 @@ func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
 
 type AzureBillingResultFunc func(*BillingRowValues) error
 
-func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) error {
 	err := asbp.Validate()
 	if err != nil {
-		return cloud.InvalidConfiguration, err
+		asbp.ConnectionStatus = cloud.InvalidConfiguration
+		return err
 	}
 
 	containerURL, err := asbp.getContainer()
 	if err != nil {
-		return cloud.FailedConnection, err
+		asbp.ConnectionStatus = cloud.FailedConnection
+		return err
 	}
 	ctx := context.Background()
 	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
 	if err != nil {
-		return cloud.FailedConnection, err
+		asbp.ConnectionStatus = cloud.FailedConnection
+		return err
 	}
+
+	if len(blobNames) == 0 && asbp.ConnectionStatus != cloud.SuccessfulConnection {
+		asbp.ConnectionStatus = cloud.MissingData
+		return nil
+	}
+
 	for _, blobName := range blobNames {
 		blobBytes, err2 := asbp.DownloadBlob(blobName, containerURL, ctx)
 		if err2 != nil {
-			return cloud.FailedConnection, err2
+			asbp.ConnectionStatus = cloud.FailedConnection
+			return err2
 		}
 		err2 = asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn)
 		if err2 != nil {
-			return cloud.ParseError, err2
+			asbp.ConnectionStatus = cloud.ParseError
+			return err2
 		}
 
 	}
-	return cloud.SuccessfulConnection, nil
+	asbp.ConnectionStatus = cloud.SuccessfulConnection
+	return nil
 }
 
 func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {

+ 16 - 11
pkg/cloud/azure/storageconfiguration.go

@@ -3,7 +3,8 @@ package azure
 import (
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -43,7 +44,7 @@ func (sc *StorageConfiguration) Validate() error {
 	return nil
 }
 
-func (sc *StorageConfiguration) Equals(config config.Config) bool {
+func (sc *StorageConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -85,7 +86,7 @@ func (sc *StorageConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (sc *StorageConfiguration) Sanitize() config.Config {
+func (sc *StorageConfiguration) Sanitize() cloud.Config {
 	return &StorageConfiguration{
 		SubscriptionID: sc.SubscriptionID,
 		Account:        sc.Account,
@@ -105,6 +106,10 @@ func (sc *StorageConfiguration) Key() string {
 	return key
 }
 
+func (sc *StorageConfiguration) Provider() string {
+	return kubecost.AzureProvider
+}
+
 func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 	var f interface{}
 	err := json.Unmarshal(b, &f)
@@ -114,41 +119,41 @@ func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	subscriptionID, err := cloud.GetInterfaceValue[string](fmap, "subscriptionID")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.SubscriptionID = subscriptionID
 
-	account, err := config.GetInterfaceValue[string](fmap, "account")
+	account, err := cloud.GetInterfaceValue[string](fmap, "account")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Account = account
 
-	container, err := config.GetInterfaceValue[string](fmap, "container")
+	container, err := cloud.GetInterfaceValue[string](fmap, "container")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Container = container
 
-	path, err := config.GetInterfaceValue[string](fmap, "path")
+	path, err := cloud.GetInterfaceValue[string](fmap, "path")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
 	sc.Path = path
 
-	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	cloudValue, err := cloud.GetInterfaceValue[string](fmap, "cloud")
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
-	sc.Cloud = cloud
+	sc.Cloud = cloudValue
 
 	authAny, ok := fmap["authorizer"]
 	if !ok {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -157,7 +162,7 @@ func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) cloud.KeyedConfig {
 	if asc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/azure/storageconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -145,7 +145,7 @@ func TestStorageConfiguration_Validate(t *testing.T) {
 func TestStorageConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     StorageConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 5 - 2
pkg/cloud/azure/storageconnection.go

@@ -9,7 +9,6 @@ import (
 
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/log"
 )
 
@@ -20,10 +19,14 @@ type StorageConnection struct {
 }
 
 func (sc *StorageConnection) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if sc.ConnectionStatus.String() == "" {
+		sc.ConnectionStatus = cloud.InitialStatus
+	}
 	return sc.ConnectionStatus
 }
 
-func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+func (sc *StorageConnection) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*StorageConnection)
 	if !ok {
 		return false

+ 0 - 12
pkg/cloud/cloudcostintegration.go

@@ -1,12 +0,0 @@
-package cloud
-
-import (
-	"time"
-
-	"github.com/opencost/opencost/pkg/kubecost"
-)
-
-// CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
-type CloudCostIntegration interface {
-	GetCloudCost(time.Time, time.Time) (*kubecost.CloudCostSetRange, error)
-}

+ 2 - 1
pkg/cloud/config/config.go → pkg/cloud/config.go

@@ -1,4 +1,4 @@
-package config
+package cloud
 
 import (
 	"fmt"
@@ -17,6 +17,7 @@ type Config interface {
 type KeyedConfig interface {
 	Config
 	Key() string
+	Provider() string
 }
 
 type KeyedConfigWatcher interface {

+ 291 - 0
pkg/cloud/config/configurations.go

@@ -0,0 +1,291 @@
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// MultiCloudConfig struct is used to unmarshal cloud configs for each provider out of cloud-integration file
+// Deprecated: v1.104 use Configurations
+type MultiCloudConfig struct {
+	AzureConfigs   []azure.AzureStorageConfig `json:"azure"`
+	GCPConfigs     []gcp.BigQueryConfig       `json:"gcp"`
+	AWSConfigs     []aws.AwsAthenaInfo        `json:"aws"`
+	AlibabaConfigs []alibaba.AlibabaInfo      `json:"alibaba"`
+}
+
+func (mcc MultiCloudConfig) loadConfigurations(configs *Configurations) {
+	// Load AWS configs
+	for _, awsConfig := range mcc.AWSConfigs {
+		kc := aws.ConvertAwsAthenaInfoToConfig(awsConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting AWS config %s", err.Error())
+		}
+
+	}
+
+	// Load GCP configs
+	for _, gcpConfig := range mcc.GCPConfigs {
+		kc := gcp.ConvertBigQueryConfigToConfig(gcpConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting GCP config %s", err.Error())
+		}
+	}
+
+	// Load Azure configs
+	for _, azureConfig := range mcc.AzureConfigs {
+		kc := azure.ConvertAzureStorageConfigToConfig(azureConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting Azure config %s", err.Error())
+		}
+	}
+
+	// Load Alibaba Cloud Configs
+	for _, aliCloudConfig := range mcc.AlibabaConfigs {
+		kc := alibaba.ConvertAlibabaInfoToConfig(aliCloudConfig)
+		err := configs.Insert(kc)
+		if err != nil {
+			log.Errorf("MultiCloudConfig: error converting Alibaba config %s", err.Error())
+		}
+	}
+}
+
+// Configurations is a general use container for all configuration types
+type Configurations struct {
+	AWS     *AWSConfigs     `json:"aws,omitempty"`
+	GCP     *GCPConfigs     `json:"gcp,omitempty"`
+	Azure   *AzureConfigs   `json:"azure,omitempty"`
+	Alibaba *AlibabaConfigs `json:"alibaba,omitempty"`
+}
+
+// UnmarshalJSON custom json unmarshalling to maintain support for MultiCloudConfig format
+func (c *Configurations) UnmarshalJSON(bytes []byte) error {
+	// Attempt to unmarshal into old config object
+	multiConfig := &MultiCloudConfig{}
+	err := json.Unmarshal(bytes, multiConfig)
+	// If unmarshal is successful, move values into config and return
+	if err == nil {
+		multiConfig.loadConfigurations(c)
+		return nil
+	}
+	// Create inline type to gain access to default Unmarshalling
+	type ConfUnmarshaller *Configurations
+	var conf ConfUnmarshaller = c
+	return json.Unmarshal(bytes, conf)
+}
+
+func (c *Configurations) Equals(that *Configurations) bool {
+	if c == nil && that == nil {
+		return true
+	}
+	if c == nil || that == nil {
+		return false
+	}
+
+	if !c.AWS.Equals(that.AWS) {
+		return false
+	}
+
+	if !c.GCP.Equals(that.GCP) {
+		return false
+	}
+
+	if !c.Azure.Equals(that.Azure) {
+		return false
+	}
+
+	if !c.Alibaba.Equals(that.Alibaba) {
+		return false
+	}
+
+	return true
+}
+
+func (c *Configurations) Insert(keyedConfig cloud.Config) error {
+	switch keyedConfig.(type) {
+	case *aws.AthenaConfiguration:
+		if c.AWS == nil {
+			c.AWS = &AWSConfigs{}
+		}
+		c.AWS.Athena = append(c.AWS.Athena, keyedConfig.(*aws.AthenaConfiguration))
+	case *aws.S3Configuration:
+		if c.AWS == nil {
+			c.AWS = &AWSConfigs{}
+		}
+		c.AWS.S3 = append(c.AWS.S3, keyedConfig.(*aws.S3Configuration))
+	case *gcp.BigQueryConfiguration:
+		if c.GCP == nil {
+			c.GCP = &GCPConfigs{}
+		}
+		c.GCP.BigQuery = append(c.GCP.BigQuery, keyedConfig.(*gcp.BigQueryConfiguration))
+	case *azure.StorageConfiguration:
+		if c.Azure == nil {
+			c.Azure = &AzureConfigs{}
+		}
+		c.Azure.Storage = append(c.Azure.Storage, keyedConfig.(*azure.StorageConfiguration))
+	case *alibaba.BOAConfiguration:
+		if c.Alibaba == nil {
+			c.Alibaba = &AlibabaConfigs{}
+		}
+		c.Alibaba.BOA = append(c.Alibaba.BOA, keyedConfig.(*alibaba.BOAConfiguration))
+	default:
+		return fmt.Errorf("Configurations: Insert: failed to insert config of type: %T", keyedConfig)
+	}
+	return nil
+}
+
+func (c *Configurations) ToSlice() []cloud.KeyedConfig {
+	var keyedConfigs []cloud.KeyedConfig
+	if c.AWS != nil {
+		for _, athenaConfig := range c.AWS.Athena {
+			keyedConfigs = append(keyedConfigs, athenaConfig)
+		}
+
+		for _, s3Config := range c.AWS.S3 {
+			keyedConfigs = append(keyedConfigs, s3Config)
+		}
+	}
+
+	if c.GCP != nil {
+		for _, bigQueryConfig := range c.GCP.BigQuery {
+			keyedConfigs = append(keyedConfigs, bigQueryConfig)
+		}
+	}
+
+	if c.Azure != nil {
+		for _, azureStorageConfig := range c.Azure.Storage {
+			keyedConfigs = append(keyedConfigs, azureStorageConfig)
+		}
+	}
+
+	if c.Alibaba != nil {
+		for _, boaConfig := range c.Alibaba.BOA {
+			keyedConfigs = append(keyedConfigs, boaConfig)
+		}
+	}
+
+	return keyedConfigs
+
+}
+
+type AWSConfigs struct {
+	Athena []*aws.AthenaConfiguration `json:"athena,omitempty"`
+	S3     []*aws.S3Configuration     `json:"s3,omitempty"`
+}
+
+func (ac *AWSConfigs) Equals(that *AWSConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check Athena
+	if len(ac.Athena) != len(that.Athena) {
+		return false
+	}
+	for i, thisAthena := range ac.Athena {
+		thatAthena := that.Athena[i]
+		if !thisAthena.Equals(thatAthena) {
+			return false
+		}
+	}
+
+	// Check S3
+	if len(ac.S3) != len(that.S3) {
+		return false
+	}
+	for i, thisS3 := range ac.S3 {
+		thatS3 := that.S3[i]
+		if !thisS3.Equals(thatS3) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type GCPConfigs struct {
+	BigQuery []*gcp.BigQueryConfiguration `json:"bigQuery,omitempty"`
+}
+
+func (gc *GCPConfigs) Equals(that *GCPConfigs) bool {
+	if gc == nil && that == nil {
+		return true
+	}
+	if gc == nil || that == nil {
+		return false
+	}
+	// Check BigQuery
+	if len(gc.BigQuery) != len(that.BigQuery) {
+		return false
+	}
+	for i, thisBigQuery := range gc.BigQuery {
+		thatBigQuery := that.BigQuery[i]
+		if !thisBigQuery.Equals(thatBigQuery) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type AzureConfigs struct {
+	Storage []*azure.StorageConfiguration `json:"storage,omitempty"`
+}
+
+func (ac *AzureConfigs) Equals(that *AzureConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check Storage
+	if len(ac.Storage) != len(that.Storage) {
+		return false
+	}
+	for i, thisStorage := range ac.Storage {
+		thatStorage := that.Storage[i]
+		if !thisStorage.Equals(thatStorage) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type AlibabaConfigs struct {
+	BOA []*alibaba.BOAConfiguration `json:"boa,omitempty"`
+}
+
+func (ac *AlibabaConfigs) Equals(that *AlibabaConfigs) bool {
+	if ac == nil && that == nil {
+		return true
+	}
+	if ac == nil || that == nil {
+		return false
+	}
+	// Check BOA
+	if len(ac.BOA) != len(that.BOA) {
+		return false
+	}
+	for i, thisBOA := range ac.BOA {
+		thatBOA := that.BOA[i]
+		if !thisBOA.Equals(thatBOA) {
+			return false
+		}
+	}
+
+	return true
+}

+ 290 - 0
pkg/cloud/config/configurations_test.go

@@ -0,0 +1,290 @@
+package config
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+)
+
+// Test fixtures: for each provider, a legacy MultiCloudConfig value and the
+// Configurations value it is expected to convert to when unmarshaled.
+var (
+	// Azure storage with access-key auth.
+	azureMultiCloudConf = MultiCloudConfig{
+		AzureConfigs: []azure.AzureStorageConfig{
+			{
+				SubscriptionId: "subscriptionID",
+				AccountName:    "accountName",
+				AccessKey:      "accessKey",
+				ContainerName:  "containerName",
+				ContainerPath:  "containerPath",
+				AzureCloud:     "azureCloud",
+			},
+		},
+	}
+	azureConfiguration = &Configurations{
+		Azure: &AzureConfigs{
+			Storage: []*azure.StorageConfiguration{
+				{
+					SubscriptionID: "subscriptionID",
+					Account:        "accountName",
+					Container:      "containerName",
+					Path:           "containerPath",
+					Cloud:          "azureCloud",
+					Authorizer: &azure.AccessKey{
+						AccessKey: "accessKey",
+						Account:   "accountName",
+					},
+				},
+			},
+		},
+	}
+
+	// GCP BigQuery with a service-account key; BillingDataDataset splits into
+	// Dataset and Table on conversion.
+	GCPKeyMultiCloudConf = MultiCloudConfig{
+		GCPConfigs: []gcp.BigQueryConfig{
+			{
+				ProjectID:          "projectID",
+				BillingDataDataset: "dataset.table",
+				Key: map[string]string{
+					"key": "value",
+				},
+			},
+		},
+	}
+
+	GCPKeyConfigurations = Configurations{
+		GCP: &GCPConfigs{BigQuery: []*gcp.BigQueryConfiguration{{
+			ProjectID: "projectID",
+			Dataset:   "dataset",
+			Table:     "table",
+			Authorizer: &gcp.ServiceAccountKey{
+				Key: map[string]string{
+					"key": "value",
+				},
+			},
+		},
+		}},
+	}
+
+	// GCP BigQuery with a nil key, which converts to workload identity auth.
+	GCPWIMultiCloudConf = MultiCloudConfig{
+		GCPConfigs: []gcp.BigQueryConfig{
+			{
+				ProjectID:          "projectID",
+				BillingDataDataset: "dataset.table",
+				Key:                nil,
+			},
+		},
+	}
+
+	GCPWIConfigurations = Configurations{
+		GCP: &GCPConfigs{BigQuery: []*gcp.BigQueryConfiguration{{
+			ProjectID:  "projectID",
+			Dataset:    "dataset",
+			Table:      "table",
+			Authorizer: &gcp.WorkloadIdentity{},
+		},
+		}},
+	}
+
+	// AWS Athena with a service key (access key) and no master payer ARN.
+	AWSAthenaKeyMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AthenaDatabase:   "database",
+				AthenaTable:      "table",
+				AthenaWorkgroup:  "workgroup",
+				ServiceKeyName:   "id",
+				ServiceKeySecret: "secret",
+				AccountID:        "account",
+				MasterPayerARN:   "",
+			},
+		},
+	}
+
+	AWSAthenaKeyConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			Athena: []*aws.AthenaConfiguration{
+				{
+					Bucket:    "bucket",
+					Region:    "region",
+					Database:  "database",
+					Table:     "table",
+					Workgroup: "workgroup",
+					Account:   "account",
+					Authorizer: &aws.AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+				},
+			},
+		},
+	}
+
+	// AWS Athena with a master payer ARN and no service key: converts to an
+	// AssumeRole wrapping a ServiceAccount authorizer.
+	AWSAthenaAssumeRoleServiceAccountMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AthenaDatabase:   "database",
+				AthenaTable:      "table",
+				AthenaWorkgroup:  "workgroup",
+				AccountID:        "account",
+				MasterPayerARN:   "roleArn",
+			},
+		},
+	}
+
+	AWSAthenaAssumeRoleServiceAccountConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			Athena: []*aws.AthenaConfiguration{
+				{
+					Bucket:    "bucket",
+					Region:    "region",
+					Database:  "database",
+					Table:     "table",
+					Workgroup: "workgroup",
+					Account:   "account",
+					Authorizer: &aws.AssumeRole{
+						Authorizer: &aws.ServiceAccount{},
+						RoleARN:    "roleArn",
+					},
+				},
+			},
+		},
+	}
+	// AWS with only bucket/region (no Athena database): converts to an S3
+	// configuration with service-account auth.
+	AWSS3ServiceAccountMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AccountID:        "account",
+				MasterPayerARN:   "",
+			},
+		},
+	}
+
+	AWSS3ServiceAccountConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			S3: []*aws.S3Configuration{
+				{
+					Bucket:     "bucket",
+					Region:     "region",
+					Account:    "account",
+					Authorizer: &aws.ServiceAccount{},
+				},
+			},
+		},
+	}
+
+	// AWS S3 with both a service key and a master payer ARN: converts to an
+	// AssumeRole wrapping an AccessKey authorizer.
+	AWSS3AssumeRoleAccessKeyMultiCloudConfig = MultiCloudConfig{
+		AWSConfigs: []aws.AwsAthenaInfo{
+			{
+				AthenaBucketName: "bucket",
+				AthenaRegion:     "region",
+				AccountID:        "account",
+				ServiceKeyName:   "id",
+				ServiceKeySecret: "secret",
+				MasterPayerARN:   "roleARN",
+			},
+		},
+	}
+	AWSS3AssumeRoleAccessKeyConfigurations = &Configurations{
+		AWS: &AWSConfigs{
+			S3: []*aws.S3Configuration{
+				{
+					Bucket:  "bucket",
+					Region:  "region",
+					Account: "account",
+					Authorizer: &aws.AssumeRole{
+						Authorizer: &aws.AccessKey{
+							ID:     "id",
+							Secret: "secret",
+						},
+						RoleARN: "roleARN",
+					},
+				},
+			},
+		},
+	}
+)
+
+// TestConfigurations_UnmarshalJSON verifies that both native Configurations
+// payloads and legacy MultiCloudConfig payloads unmarshal into the expected
+// Configurations value (round-trips input through json.Marshal/Unmarshal).
+func TestConfigurations_UnmarshalJSON(t *testing.T) {
+	tests := map[string]struct {
+		input    any
+		expected *Configurations
+	}{
+		"Azure Storage AccessKey": {
+			input:    azureConfiguration,
+			expected: azureConfiguration,
+		},
+		"Azure Storage AccessKey Conversion": {
+			input:    azureMultiCloudConf,
+			expected: azureConfiguration,
+		},
+		"GCP BigQuery ServiceAccountKey": {
+			input:    GCPKeyConfigurations,
+			expected: &GCPKeyConfigurations,
+		},
+		"GCP BigQuery ServiceAccountKey Conversion": {
+			input:    GCPKeyMultiCloudConf,
+			expected: &GCPKeyConfigurations,
+		},
+		"GCP BigQuery Workload Identity ": {
+			input:    &GCPWIConfigurations,
+			expected: &GCPWIConfigurations,
+		},
+		"GCP BigQuery Workload Identity Conversion": {
+			input:    GCPWIMultiCloudConf,
+			expected: &GCPWIConfigurations,
+		},
+		"AWS Athena Access Key": {
+			input:    AWSAthenaKeyConfigurations,
+			expected: AWSAthenaKeyConfigurations,
+		},
+		"AWS Athena Access Key Conversion": {
+			input:    AWSAthenaKeyMultiCloudConfig,
+			expected: AWSAthenaKeyConfigurations,
+		},
+		"AWS Athena Assume Role Service Account": {
+			input:    AWSAthenaAssumeRoleServiceAccountConfigurations,
+			expected: AWSAthenaAssumeRoleServiceAccountConfigurations,
+		},
+		"AWS Athena Assume Role Service Account Conversion": {
+			input:    AWSAthenaAssumeRoleServiceAccountMultiCloudConfig,
+			expected: AWSAthenaAssumeRoleServiceAccountConfigurations,
+		},
+		"AWS S3 Service Account": {
+			input:    AWSS3ServiceAccountConfigurations,
+			expected: AWSS3ServiceAccountConfigurations,
+		},
+		"AWS S3 Service Account Conversion": {
+			input:    AWSS3ServiceAccountMultiCloudConfig,
+			expected: AWSS3ServiceAccountConfigurations,
+		},
+		"AWS S3 Assume Role Access Key": {
+			input:    AWSS3AssumeRoleAccessKeyConfigurations,
+			expected: AWSS3AssumeRoleAccessKeyConfigurations,
+		},
+		"AWS S3 Assume Role Service Access Key": {
+			input:    AWSS3AssumeRoleAccessKeyMultiCloudConfig,
+			expected: AWSS3AssumeRoleAccessKeyConfigurations,
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			b, err := json.Marshal(tt.input)
+			if err != nil {
+				// include the marshal error so failures are diagnosable
+				t.Fatalf("failed to marshal input: %s", err)
+			}
+			actual := &Configurations{}
+			err = json.Unmarshal(b, actual)
+			if err != nil && tt.expected != nil {
+				t.Fatalf("Unmarshal failed with error %s", err.Error())
+			}
+			if !tt.expected.Equals(actual) {
+				t.Fatalf("actual Configuration did not match expected")
+			}
+		})
+	}
+}

+ 305 - 0
pkg/cloud/config/controller.go

@@ -0,0 +1,305 @@
+package config
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+// configID identifies the source and the ID of a configuration to handle duplicate configs from multiple sources
+type configID struct {
+	source ConfigSource // which watcher/source produced the config
+	key    string       // the config's own key (from KeyedConfig.Key())
+}
+
+// Equals reports whether two configIDs refer to the same (source, key) pair.
+// configID contains only comparable fields, so direct comparison is exact.
+func (cid configID) Equals(that configID) bool {
+	return cid == that
+}
+
+// newConfigID builds a configID from a raw source string and a config key,
+// normalizing the source via GetConfigSource.
+func newConfigID(source, key string) configID {
+	return configID{
+		source: GetConfigSource(source),
+		key:    key,
+	}
+}
+
+// Status describes a tracked configuration: its source, its key, whether it
+// is currently active (pushed to observers), whether it passed validation,
+// and the config value itself.
+type Status struct {
+	Source ConfigSource
+	Key    string
+	Active bool
+	Valid  bool
+	Config cloud.KeyedConfig
+}
+
+// Controller manages cloud.Config values using config Watcher(s) to track the
+// various configuration methods. It keeps a map of config watchers keyed by
+// configuration source and a list of Observers that it updates upon any
+// change detected from the config watchers.
+type Controller struct {
+	statuses  map[configID]*Status
+	observers []Observer
+	watchers  map[ConfigSource]cloud.KeyedConfigWatcher
+}
+
+// NewController initializes a Config Controller: it builds billing-config
+// watchers from the provider, loads persisted state, performs an initial
+// watcher pull, and starts a goroutine that re-pulls the watchers every
+// 10 seconds.
+// NOTE(review): the refresh goroutine has no stop channel, so it runs for the
+// lifetime of the process — confirm this is intended.
+func NewController(cp models.Provider) *Controller {
+	providerConfig := provider.ExtractConfigFromProviders(cp)
+	watchers := GetCloudBillingWatchers(providerConfig)
+	ic := &Controller{
+		statuses: make(map[configID]*Status),
+		watchers: watchers,
+	}
+
+	ic.load()
+	ic.pullWatchers()
+
+	go func() {
+		ticker := timeutil.NewJobTicker()
+		defer ticker.Close()
+
+		for {
+			// re-arm the ticker each iteration, then block until it fires
+			ticker.TickIn(10 * time.Second)
+
+			<-ticker.Ch
+
+			ic.pullWatchers()
+		}
+	}()
+
+	return ic
+}
+
+// EnableConfig activates the config identified by key and source, pushes it
+// to observers, and deactivates any config sharing the same key from another
+// source so that at most one config per key is active. Returns an error if
+// the config does not exist or is already active.
+func (c *Controller) EnableConfig(key, source string) error {
+	cID := newConfigID(source, key)
+	cs, ok := c.statuses[cID]
+	if !ok {
+		return fmt.Errorf("Controller: EnableConfig: config with key %s from source %s does not exist", key, source)
+	}
+	if cs.Active {
+		return fmt.Errorf("Controller: EnableConfig: config with key %s from source %s is already active", key, source)
+	}
+
+	// Deactivate configurations with the same configuration key from other
+	// sources. (Setting Active to false is a no-op for already-inactive ones,
+	// so no extra check is needed.)
+	for confID, confStat := range c.statuses {
+		if confID.key != key || confID.source == cID.source {
+			continue
+		}
+		confStat.Active = false
+	}
+
+	cs.Active = true
+	c.putConfig(cs.Config)
+	c.save()
+	return nil
+}
+
+// DisableConfig deactivates an active config and removes it from observers.
+// Returns an error if the config does not exist or is already disabled.
+func (c *Controller) DisableConfig(key, source string) error {
+	iID := newConfigID(source, key)
+	is, ok := c.statuses[iID]
+	if !ok {
+		return fmt.Errorf("Controller: DisableConfig: config with key %s from source %s does not exist", key, source)
+	}
+	if !is.Active {
+		return fmt.Errorf("Controller: DisableConfig: config with key %s from source %s is already disabled", key, source)
+	}
+
+	is.Active = false
+	c.deleteConfig(iID.key)
+	c.save()
+	return nil
+}
+
+// DeleteConfig removes a config from the statuses, and deletes the config on
+// all observers if it was active. Returns an error if no config with the
+// given key and source exists.
+func (c *Controller) DeleteConfig(key, source string) error {
+	id := newConfigID(source, key)
+	is, ok := c.statuses[id]
+	if !ok {
+		// fixed: error text previously said "DisableConfig" (copy-paste)
+		return fmt.Errorf("Controller: DeleteConfig: config with key %s from source %s does not exist", key, source)
+	}
+
+	// delete config on observers if active
+	if is.Active {
+		c.deleteConfig(id.key)
+	}
+	delete(c.statuses, id)
+	c.save()
+	return nil
+}
+
+// pullWatchers retrieves configs from each watcher and reconciles them into
+// the status map, activating/deactivating configs according to the source of
+// the change. Observers are notified of every config that is put or deleted.
+func (c *Controller) pullWatchers() {
+
+	for source, watcher := range c.watchers {
+		for _, conf := range watcher.GetConfigs() {
+			key := conf.Key()
+			cID := configID{
+				source: source,
+				key:    key,
+			}
+
+			err := conf.Validate()
+			valid := err == nil
+
+			status := Status{
+				Key:    key,
+				Source: source,
+				Active: valid, // active if valid, for now
+				Valid:  valid,
+				Config: conf,
+			}
+
+			// Check existing configs for matching key and source
+			if existingStatus, ok := c.statuses[cID]; ok {
+				// if config has not changed continue
+				if existingStatus.Config.Equals(conf) {
+					continue
+				}
+				// if existing status is active then it should be replaced by the updated config
+				if existingStatus.Active {
+					if status.Valid {
+						c.putConfig(conf)
+					} else {
+						// if active config is being overwritten by an invalid one, delete the config, as it will not be active
+						c.deleteConfig(key)
+					}
+					c.statuses[cID] = &status
+					continue
+				}
+			}
+
+			// At this point we know that the config from this watcher has changed
+
+			// handle a config with a new unique key for a source, or an updated config from a source which was inactive before
+			if valid {
+				for matchID, matchCS := range c.statuses {
+					// skip the config being processed itself
+					if matchID.Equals(cID) {
+						continue
+					}
+
+					if matchCS.Active {
+						// if source is non-multi-cloud, disable all other non-multi-cloud sourced configs
+						if cID.source == HelmSource || cID.source == ConfigFileSource {
+							if matchID.source == HelmSource || matchID.source == ConfigFileSource {
+								matchCS.Active = false
+								c.deleteConfig(matchID.key)
+							}
+						}
+
+						// disable active configs with the same key
+						// NOTE(review): this deactivates the other config regardless
+						// of relative source priority — confirm that is intended
+						if matchID.key == key {
+							matchCS.Active = false
+							c.deleteConfig(matchID.key)
+						}
+					}
+				}
+			}
+
+			// update config and put to observers if active
+			c.statuses[cID] = &status
+			if status.Active {
+				c.putConfig(conf)
+			}
+		}
+	}
+}
+
+// load restores persisted controller state; currently a no-op.
+// todo implement when building config api and persistence is necessary
+func (c *Controller) load() {}
+
+// save persists controller state; currently a no-op.
+// todo implement when building config api and persistence is necessary
+func (c *Controller) save() {}
+
+// ExportConfigs returns the sanitized set of active configurations. When key
+// is non-empty, only the matching active config is exported; an error is
+// returned if no active config has that key.
+func (c *Controller) ExportConfigs(key string) (*Configurations, error) {
+	configs := new(Configurations)
+
+	// collect the currently active configs by key
+	activeConfigs := make(map[string]cloud.Config)
+	for iID, cs := range c.statuses {
+		if cs.Active {
+			activeConfigs[iID.key] = cs.Config
+		}
+	}
+
+	// narrow down to the single requested config when a key was supplied
+	if key != "" {
+		conf, ok := activeConfigs[key]
+		if !ok {
+			return nil, fmt.Errorf("Config with key %s does not exist or is inactive", key)
+		}
+		activeConfigs = map[string]cloud.Config{key: conf}
+	}
+
+	// sanitize (strip secrets) before inserting into the export payload
+	for _, conf := range activeConfigs {
+		if err := configs.Insert(conf.Sanitize()); err != nil {
+			return nil, fmt.Errorf("failed to insert config: %w", err)
+		}
+	}
+	return configs, nil
+}
+
+// getActiveConfigs collects the currently active configs, keyed by config key.
+func (c *Controller) getActiveConfigs() map[string]cloud.KeyedConfig {
+	active := make(map[string]cloud.KeyedConfig)
+	for id, status := range c.statuses {
+		if !status.Active {
+			continue
+		}
+		active[id.key] = status.Config
+	}
+	return active
+}
+
+// deleteConfig fans the delete out to every observer concurrently and blocks
+// until all observers have removed state for the configuration with the key.
+func (c *Controller) deleteConfig(key string) {
+	var wg sync.WaitGroup
+	wg.Add(len(c.observers))
+	for _, obs := range c.observers {
+		// pass the observer as an argument to pin the loop value
+		go func(o Observer) {
+			defer wg.Done()
+			o.DeleteConfig(key)
+		}(obs)
+	}
+	wg.Wait()
+}
+
+// RegisterObserver hands the observer the current set of active configs and
+// adds it to the push list for future updates.
+func (c *Controller) RegisterObserver(obs Observer) {
+	obs.SetConfigs(c.getActiveConfigs())
+	c.observers = append(c.observers, obs)
+}
+
+// GetStatus returns a snapshot copy of the status of every known config.
+func (c *Controller) GetStatus() []Status {
+	var statuses []Status
+	for _, s := range c.statuses {
+		// dereference so callers get copies, not shared pointers
+		statuses = append(statuses, *s)
+	}
+	return statuses
+}
+
+// putConfig fans a new or updated config out to every observer concurrently
+// and blocks until all observers have processed it.
+func (c *Controller) putConfig(conf cloud.KeyedConfig) {
+	var wg sync.WaitGroup
+	wg.Add(len(c.observers))
+	for _, obs := range c.observers {
+		// pass the observer as an argument to pin the loop value
+		go func(o Observer) {
+			defer wg.Done()
+			o.PutConfig(conf)
+		}(obs)
+	}
+	wg.Wait()
+}
+ 160 - 0
pkg/cloud/config/controller_handlers.go

@@ -0,0 +1,160 @@
+package config
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/proto"
+)
+
+// protocol provides helpers for writing standard-format HTTP responses.
+var protocol = proto.HTTP()
+
+// cloudCostChecks returns a 503 error handler when the controller cannot
+// serve requests (nil controller, or cloud cost disabled via env); a nil
+// return means the caller may build its real handler.
+func (c *Controller) cloudCostChecks() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If the Controller is nil, always return 503
+	if c == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "ConfigController: is nil", http.StatusServiceUnavailable)
+		}
+	}
+
+	if !env.IsCloudCostEnabled() {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "ConfigController: is not enabled", http.StatusServiceUnavailable)
+		}
+	}
+
+	return nil
+}
+
+// GetExportConfigHandler creates a handler which exports the sanitized active
+// configurations; the optional 'integrationKey' query parameter selects a
+// single config. (Previous comment was copied from the enable handler.)
+func (c *Controller) GetExportConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// perform basic checks to ensure that the pipeline can be accessed
+	fn := c.cloudCostChecks()
+	if fn != nil {
+		return fn
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		configs, err := c.ExportConfigs(integrationKey)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteDataWithMessage(w, configs, "Configurations have been sanitized to protect secrets")
+	}
+}
+
+// GetEnableConfigHandler creates a handler from a http request which enables
+// an integration via the Controller; requires 'integrationKey' and 'source'
+// query parameters.
+func (c *Controller) GetEnableConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// perform basic checks to ensure that the pipeline can be accessed
+	fn := c.cloudCostChecks()
+	if fn != nil {
+		return fn
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := r.URL.Query().Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		err := c.EnableConfig(integrationKey, source)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully enabled integration with key %s from source %s", integrationKey, source))
+	}
+}
+
+// GetDisableConfigHandler creates a handler from a http request which disables
+// an integration via the Controller; requires 'integrationKey' and 'source'
+// query parameters.
+func (c *Controller) GetDisableConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// bail out with an error handler when the controller cannot serve requests
+	if checkFn := c.cloudCostChecks(); checkFn != nil {
+		return checkFn
+	}
+
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		query := r.URL.Query()
+		integrationKey := query.Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := query.Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		if err := c.DisableConfig(integrationKey, source); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully disabled integration with key %s from source %s", integrationKey, source))
+	}
+}
+
+// GetDeleteConfigHandler creates a handler from a http request which deletes an integration via the Controller;
+// if there are no other integrations with the given integration key, it also clears the data.
+// Requires 'integrationKey' and 'source' query parameters.
+func (c *Controller) GetDeleteConfigHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// perform basic checks to ensure that the pipeline can be accessed
+	fn := c.cloudCostChecks()
+	if fn != nil {
+		return fn
+	}
+
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+		if integrationKey == "" {
+			http.Error(w, "required parameter 'integrationKey' is missing", http.StatusBadRequest)
+			return
+		}
+
+		source := r.URL.Query().Get("source")
+		if source == "" {
+			http.Error(w, "required parameter 'source' is missing", http.StatusBadRequest)
+			return
+		}
+
+		err := c.DeleteConfig(integrationKey, source)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		// NOTE(review): this handler writes multiple data payloads to one
+		// response — confirm protocol.WriteData supports that.
+		protocol.WriteData(w, fmt.Sprintf("Successfully deleted integration with key %s from source %s", integrationKey, source))
+
+		for _, intStat := range c.GetStatus() {
+			if intStat.Key == integrationKey {
+				// fixed typo: "addition" -> "additional"
+				protocol.WriteData(w, fmt.Sprintf("Found additional integration with integration key %s from source %s. If you wish to delete this data do so manually or delete all integrations with matching keys", integrationKey, intStat.Source))
+				return
+			}
+		}
+		protocol.WriteData(w, fmt.Sprintf("Successfully deleted cloud cost data with key %s", integrationKey))
+	}
+}

+ 871 - 0
pkg/cloud/config/controller_test.go

@@ -0,0 +1,871 @@
+package config
+
+import (
+	"testing"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+)
+
+// Baseline valid config used by most test cases.
+var validAthenaConf = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup",
+	Account:    "account",
+	Authorizer: &aws.ServiceAccount{},
+}
+
+// Config with the same key as the baseline, but not equal to it because of
+// the change in the non-keyed property Workgroup.
+var validAthenaConfModifiedProperty = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup1",
+	Account:    "account",
+	Authorizer: &aws.ServiceAccount{},
+}
+
+// Config with the same key as the baseline, but invalid due to the missing
+// Authorizer.
+var invalidAthenaConf = &aws.AthenaConfiguration{
+	Bucket:     "bucket",
+	Region:     "region",
+	Database:   "database",
+	Table:      "table",
+	Workgroup:  "workgroup",
+	Account:    "account",
+	Authorizer: nil,
+}
+
+// A valid config with a different key from the baseline.
+var validBigQueryConf = &gcp.BigQueryConfiguration{
+	ProjectID:  "projectID",
+	Dataset:    "dataset",
+	Table:      "table",
+	Authorizer: &gcp.WorkloadIdentity{},
+}
+
+func TestIntegrationController_pullWatchers(t *testing.T) {
+	testCases := map[string]struct {
+		initialStatuses  []*Status
+		configWatchers   map[ConfigSource]cloudconfig.KeyedConfigWatcher
+		expectedStatuses []*Status
+	}{
+		// Helm Source
+		"Helm Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Helm Source Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Helm Source New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false, // this value changed
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Config File
+		"Config File Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Config File Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Config File New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false, // this value changed
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Multi Cloud
+		"Multi Cloud Source init": {
+			initialStatuses: []*Status{},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud No Change": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud Update Config": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+			},
+		},
+		"Multi Cloud Update Config Invalid": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Multi Cloud New Config": {
+			initialStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				MultiCloudSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: MultiCloudSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: MultiCloudSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		// Watch Interaction
+		"New Helm, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"Update Helm, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"New Helm Invalid, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Update Helm Invalid, Existing Config File": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"New Config File, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"Update Config File, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validAthenaConfModifiedProperty,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConfModifiedProperty.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validAthenaConfModifiedProperty,
+				},
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+		},
+		"New Config File Invalid, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+		"Update Config File Invalid, Existing Helm": {
+			initialStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    validAthenaConf.Key(),
+					Active: false,
+					Valid:  true,
+					Config: validAthenaConf,
+				},
+			},
+			configWatchers: map[ConfigSource]cloudconfig.KeyedConfigWatcher{
+				HelmSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						validBigQueryConf,
+					},
+				},
+				ConfigFileSource: &MockKeyedConfigWatcher{
+					Integrations: []cloudconfig.KeyedConfig{
+						invalidAthenaConf,
+					},
+				},
+			},
+			expectedStatuses: []*Status{
+				{
+					Source: HelmSource,
+					Key:    validBigQueryConf.Key(),
+					Active: true,
+					Valid:  true,
+					Config: validBigQueryConf,
+				},
+				{
+					Source: ConfigFileSource,
+					Key:    invalidAthenaConf.Key(),
+					Active: false,
+					Valid:  false,
+					Config: invalidAthenaConf,
+				},
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Test set up and validation
+			initialStatuses := make(map[configID]*Status)
+			for _, status := range tc.initialStatuses {
+				iID := configID{
+					source: status.Source,
+					key:    status.Key,
+				}
+				if _, ok := initialStatuses[iID]; ok {
+					t.Errorf("invalid test, duplicate initial status with key: %s source: %s", iID.key, iID.source.String())
+				}
+				initialStatuses[iID] = status
+			}
+
+			expectedStatuses := make(map[configID]*Status)
+			for _, status := range tc.expectedStatuses {
+				iID := configID{
+					source: status.Source,
+					key:    status.Key,
+				}
+				if _, ok := expectedStatuses[iID]; ok {
+					t.Errorf("invalid test, duplicate expected status with key: %s source: %s", iID.key, iID.source.String())
+				}
+				expectedStatuses[iID] = status
+			}
+
+			// Initialize controller
+			icd := &Controller{
+				statuses: initialStatuses,
+				watchers: tc.configWatchers,
+			}
+			icd.pullWatchers()
+			if len(icd.statuses) != len(tc.expectedStatuses) {
+				t.Errorf("integration statueses did not have the correct length actaul: %d, expected: %d", len(icd.statuses), len(tc.expectedStatuses))
+			}
+
+			for iID, actualStatus := range icd.statuses {
+				expectedStatus, ok := expectedStatuses[iID]
+				if !ok {
+					t.Errorf("expected integration statuses is missing with integration ID: %v", iID)
+				}
+
+				// failure here indicates an issue with the configID
+				if actualStatus.Key != expectedStatus.Key {
+					t.Errorf("integration status does not have the correct Key values actual: %s, expected: %s", actualStatus.Key, expectedStatus.Key)
+				}
+
+				// failure here indicates an issue with the configID
+				if actualStatus.Key != expectedStatus.Key {
+					t.Errorf("integration status does not have the correct Source values actual: %s, expected: %s", actualStatus.Source, expectedStatus.Source)
+				}
+
+				if actualStatus.Active != expectedStatus.Active {
+					t.Errorf("integration status does not have the correct Active values actual: %v, expected: %v", actualStatus.Active, expectedStatus.Active)
+				}
+
+				if actualStatus.Valid != expectedStatus.Valid {
+					t.Errorf("integration status does not have the correct Valid values actual: %v, expected: %v", actualStatus.Valid, expectedStatus.Valid)
+				}
+
+				if !actualStatus.Config.Equals(expectedStatus.Config) {
+					t.Errorf("integration status does not have the correct config values actual: %v, expected: %v", actualStatus.Config, expectedStatus.Config)
+				}
+			}
+		})
+	}
+}

+ 95 - 0
pkg/cloud/config/mock.go

@@ -0,0 +1,95 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// MockConfig is a minimal cloud.Config implementation for tests: it carries no
+// state, always validates, and equals any other *MockConfig.
+type MockConfig struct {
+}
+
+// Validate always succeeds; a MockConfig has no fields to check.
+func (mc *MockConfig) Validate() error {
+	return nil
+}
+
+// Equals reports whether config is also a *MockConfig; with no fields to
+// compare, type identity is the entire comparison.
+func (mc *MockConfig) Equals(config cloud.Config) bool {
+	if _, ok := config.(*MockConfig); ok {
+		return true
+	}
+	return false
+}
+
+// Sanitize returns a fresh MockConfig; there are no secrets to redact.
+func (mc *MockConfig) Sanitize() cloud.Config {
+	return &MockConfig{}
+}
+
+// MockKeyedConfig is a cloud.KeyedConfig test double. Validity only requires a
+// non-empty key (and the valid flag); the extra property field lets two configs
+// share a key while still comparing unequal.
+type MockKeyedConfig struct {
+	key      string
+	property string
+	valid    bool
+}
+
+// NewMockKeyedConfig builds a MockKeyedConfig with the given key, property and
+// validity flag.
+func NewMockKeyedConfig(key, property string, valid bool) cloud.KeyedConfig {
+	mkc := MockKeyedConfig{
+		key:      key,
+		property: property,
+		valid:    valid,
+	}
+	return &mkc
+}
+
+// Validate fails when the config was marked invalid or when the key is empty.
+func (mkc *MockKeyedConfig) Validate() error {
+	switch {
+	case !mkc.valid:
+		return fmt.Errorf("MockKeyedConfig: set to invalid")
+	case mkc.key == "":
+		return fmt.Errorf("MockKeyedConfig: missing key")
+	default:
+		return nil
+	}
+}
+
+// Equals reports whether config is a *MockKeyedConfig with identical key,
+// property and validity values.
+func (mkc *MockKeyedConfig) Equals(config cloud.Config) bool {
+	that, ok := config.(*MockKeyedConfig)
+	if !ok {
+		return false
+	}
+	return mkc.key == that.key &&
+		mkc.property == that.property &&
+		mkc.valid == that.valid
+}
+
+// Sanitize returns a copy; mock configs hold no secrets to redact.
+func (mkc *MockKeyedConfig) Sanitize() cloud.Config {
+	clone := *mkc
+	return &clone
+}
+
+// Key returns the configured key.
+func (mkc *MockKeyedConfig) Key() string {
+	return mkc.key
+}
+
+// Provider identifies mock configs as the custom provider.
+func (mkc *MockKeyedConfig) Provider() string {
+	return kubecost.CustomProvider
+}
+
+// MockKeyedConfigWatcher is a cloud.KeyedConfigWatcher test double that serves
+// a fixed, caller-supplied slice of configs.
+type MockKeyedConfigWatcher struct {
+	Integrations []cloud.KeyedConfig
+}
+
+// GetConfigs returns the canned Integrations slice unchanged.
+func (m *MockKeyedConfigWatcher) GetConfigs() []cloud.KeyedConfig {
+	return m.Integrations
+}

+ 14 - 0
pkg/cloud/config/observer.go

@@ -0,0 +1,14 @@
+package config
+
+import (
+	"github.com/opencost/opencost/pkg/cloud"
+)
+
+// Observer should be implemented by any struct which needs access to the up-to-date list of active configs
+// that the Config.Controller provides. Any cloud billing Integration used in the application
+// should pass through this interface, and be revoked if it is not included in a Delete call.
+type Observer interface {
+	// PutConfig adds or updates a single active config.
+	PutConfig(cloud.KeyedConfig)
+	// DeleteConfig revokes the config identified by the given key string.
+	DeleteConfig(string)
+	// SetConfigs replaces the full set of active configs at once.
+	// NOTE(review): map keys are presumably each config's Key() — confirm against the Controller implementation.
+	SetConfigs(map[string]cloud.KeyedConfig)
+}

+ 351 - 0
pkg/cloud/config/watcher.go

@@ -0,0 +1,351 @@
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/models"
+
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// Paths at which billing secrets are read from disk; presumably mounted by the
+// helm chart (see HelmWatcher/MultiCloudWatcher usage) — confirm against chart templates.
+const authSecretPath = "/var/secrets/service-key.json"
+const storageConfigSecretPath = "/var/azure-storage-config/azure-storage-config.json"
+const cloudIntegrationSecretPath = "/cloud-integration/cloud-integration.json"
+
+// HelmWatcher surfaces cloud billing configurations provided through the helm
+// chart: mounted secret files plus values from the custom pricing data.
+type HelmWatcher struct {
+	providerConfig models.ProviderConfig
+}
+
+// GetConfigs checks secret files and config map set via the helm chart for Cloud Billing integrations. Returns
+// only one billing integration due to values being shared by different configuration types.
+// Detection order: Azure storage secret first, then (only when the auth secret
+// exists) BigQuery, then Athena.
+func (hw *HelmWatcher) GetConfigs() []cloud.KeyedConfig {
+	var configs []cloud.KeyedConfig
+
+	customPricing, _ := hw.providerConfig.GetCustomPricingData()
+
+	// check for Azure Storage config in secret file
+	exists, err := fileutil.FileExists(storageConfigSecretPath)
+	if err != nil {
+		log.Errorf("HelmWatcher: AzureStorage: error checking file at '%s': %s", storageConfigSecretPath, err.Error())
+	}
+
+	// If file does not exist implies that this configuration method was not used
+	if exists {
+		result, err2 := ioutil.ReadFile(storageConfigSecretPath)
+		if err2 != nil {
+			log.Errorf("HelmWatcher: AzureStorage: Error reading file: %s", err2.Error())
+			return nil
+		}
+
+		asc := &azure.AzureStorageConfig{}
+		err2 = json.Unmarshal(result, asc)
+		if err2 != nil {
+			log.Errorf("HelmWatcher: AzureStorage: Error reading json: %s", err2.Error())
+			return nil
+		}
+		// asc was allocated just above and cannot be nil; only emptiness matters.
+		if !asc.IsEmpty() {
+			// If subscription id is not set it may be present in the rate card API
+			if asc.SubscriptionId == "" {
+				ask := &azure.AzureServiceKey{}
+				err3 := loadFile(authSecretPath, ask)
+				if err3 != nil {
+					log.Errorf("HelmWatcher: AzureStorage: AzureRateCard: %s", err3)
+				}
+				// ask is non-nil by construction; it stays zero-valued when the file was absent
+				asc.SubscriptionId = ask.SubscriptionID
+			}
+			// If SubscriptionID is still empty check the customPricing
+			if asc.SubscriptionId == "" {
+				asc.SubscriptionId = customPricing.AzureSubscriptionID
+			}
+			kc := azure.ConvertAzureStorageConfigToConfig(*asc)
+			configs = append(configs, kc)
+			return configs
+		}
+	}
+
+	exists, err = fileutil.FileExists(authSecretPath)
+	if err != nil {
+		log.Errorf("HelmWatcher:  error checking file at '%s': %s", authSecretPath, err.Error())
+	}
+
+	// If the Auth Secret is not set then the config file watch will be responsible for providing the configurer for the
+	// config values present in the CustomPricing object
+	if exists {
+		if customPricing.BillingDataDataset != "" {
+			// Big Query Configuration
+			bqc := gcp.BigQueryConfig{
+				ProjectID:          customPricing.ProjectID,
+				BillingDataDataset: customPricing.BillingDataDataset,
+			}
+
+			key := make(map[string]string)
+			err2 := loadFile(authSecretPath, &key)
+			if err2 != nil {
+				log.Errorf("HelmWatcher: GCP: %s", err2)
+			}
+			// len() of a nil map is 0, so a single length check is sufficient
+			if len(key) != 0 {
+				bqc.Key = key
+			}
+
+			kc := gcp.ConvertBigQueryConfigToConfig(bqc)
+			configs = append(configs, kc)
+			return configs
+		}
+
+		if customPricing.AthenaBucketName != "" {
+			aai := aws.AwsAthenaInfo{
+				AthenaBucketName: customPricing.AthenaBucketName,
+				AthenaRegion:     customPricing.AthenaRegion,
+				AthenaDatabase:   customPricing.AthenaDatabase,
+				AthenaTable:      customPricing.AthenaTable,
+				AthenaWorkgroup:  customPricing.AthenaWorkgroup,
+				AccountID:        customPricing.AthenaProjectID,
+				MasterPayerARN:   customPricing.MasterPayerARN,
+			}
+
+			// If Account ID is blank check ProjectID
+			if aai.AccountID == "" {
+				aai.AccountID = customPricing.ProjectID
+			}
+
+			var accessKey aws.AWSAccessKey
+			err2 := loadFile(authSecretPath, &accessKey)
+			if err2 != nil {
+				log.Errorf("HelmWatcher: AWS: %s", err2)
+			}
+
+			aai.ServiceKeyName = accessKey.AccessKeyID
+			aai.ServiceKeySecret = accessKey.SecretAccessKey
+
+			kc := aws.ConvertAwsAthenaInfoToConfig(aai)
+			configs = append(configs, kc)
+			return configs
+		}
+	}
+
+	return configs
+}
+
+// ConfigFileWatcher surfaces cloud billing configurations from the values
+// present in the CustomPricing object (the on-disk config file).
+type ConfigFileWatcher struct {
+	providerConfig models.ProviderConfig
+}
+
+// GetConfigs checks the CustomPricing configuration for Cloud Billing integrations. Returns
+// only one billing integration due to values being shared by different configuration types.
+// Detection order: Azure storage, BigQuery, Athena, then Alibaba.
+func (cfw *ConfigFileWatcher) GetConfigs() []cloud.KeyedConfig {
+	var configs []cloud.KeyedConfig
+
+	customPricing, _ := cfw.providerConfig.GetCustomPricingData()
+
+	// Detect Azure Storage configuration
+	if customPricing.AzureSubscriptionID != "" {
+		asc := azure.AzureStorageConfig{
+			SubscriptionId: customPricing.AzureSubscriptionID,
+			AccountName:    customPricing.AzureStorageAccount,
+			AccessKey:      customPricing.AzureStorageAccessKey,
+			ContainerName:  customPricing.AzureStorageContainer,
+			ContainerPath:  customPricing.AzureContainerPath,
+			AzureCloud:     customPricing.AzureCloud,
+		}
+		kc := azure.ConvertAzureStorageConfigToConfig(asc)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	// Detect Big Query Configuration
+	if customPricing.BillingDataDataset != "" {
+		bqc := gcp.BigQueryConfig{
+			ProjectID:          customPricing.ProjectID,
+			BillingDataDataset: customPricing.BillingDataDataset,
+		}
+
+		var key map[string]string
+		err2 := loadFile(env.GetConfigPathWithDefault("/models/")+"key.json", &key)
+		if err2 != nil {
+			log.Errorf("ConfigFileWatcher: GCP: %s", err2)
+		}
+		// len() of a nil map is 0, so a single length check is sufficient
+		if len(key) != 0 {
+			bqc.Key = key
+		}
+
+		kc := gcp.ConvertBigQueryConfigToConfig(bqc)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	// Detect AWS configuration
+	if customPricing.AthenaBucketName != "" {
+		aai := aws.AwsAthenaInfo{
+			AthenaBucketName: customPricing.AthenaBucketName,
+			AthenaRegion:     customPricing.AthenaRegion,
+			AthenaDatabase:   customPricing.AthenaDatabase,
+			AthenaTable:      customPricing.AthenaTable,
+			AthenaWorkgroup:  customPricing.AthenaWorkgroup,
+			ServiceKeyName:   customPricing.ServiceKeyName,
+			ServiceKeySecret: customPricing.ServiceKeySecret,
+			AccountID:        customPricing.AthenaProjectID,
+			MasterPayerARN:   customPricing.MasterPayerARN,
+		}
+
+		// If Account ID is blank check ProjectID
+		if aai.AccountID == "" {
+			aai.AccountID = customPricing.ProjectID
+		}
+
+		// If the sample nil service key name is set, zero it out so that it is not
+		// misinterpreted as a real service key.
+		if aai.ServiceKeyName == "AKIXXX" {
+			aai.ServiceKeyName = ""
+		}
+
+		kc := aws.ConvertAwsAthenaInfoToConfig(aai)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	// Detect Alibaba Configuration
+	if customPricing.AlibabaClusterRegion != "" {
+		aliCloudInfo := alibaba.AlibabaInfo{
+			AlibabaClusterRegion:    customPricing.AlibabaClusterRegion,
+			AlibabaServiceKeyName:   customPricing.AlibabaServiceKeyName,
+			AlibabaServiceKeySecret: customPricing.AlibabaServiceKeySecret,
+			AlibabaAccountID:        customPricing.ProjectID,
+		}
+		kc := alibaba.ConvertAlibabaInfoToConfig(aliCloudInfo)
+		configs = append(configs, kc)
+		return configs
+	}
+
+	return configs
+}
+
+// MultiCloudWatcher ingests values a MultiCloudConfig from the file pulled in from the secret by the helm chart
+type MultiCloudWatcher struct {
+}
+
+// GetConfigs loads the multi-cloud integration file, checking the configured
+// config path first and then the original secret mount location; a missing
+// file at both locations means this configuration method was not used.
+func (mcw *MultiCloudWatcher) GetConfigs() []cloud.KeyedConfig {
+	candidatePaths := []string{
+		path.Join(env.GetConfigPathWithDefault("/var/configs"), cloudIntegrationSecretPath),
+		// original location of secret mount
+		path.Join("/var", cloudIntegrationSecretPath),
+	}
+
+	multiConfigPath := ""
+	for _, candidate := range candidatePaths {
+		exists, err := fileutil.FileExists(candidate)
+		if err != nil {
+			log.Errorf("MultiCloudWatcher:  error checking file at '%s': %s", candidate, err.Error())
+		}
+		if exists {
+			multiConfigPath = candidate
+			break
+		}
+	}
+
+	// Neither location had the file, so this configuration method was not used
+	if multiConfigPath == "" {
+		return nil
+	}
+
+	configurations := &Configurations{}
+	if err := loadFile(multiConfigPath, configurations); err != nil {
+		log.Errorf("MultiCloudWatcher: Error getting file '%s': %s", multiConfigPath, err.Error())
+		return nil
+	}
+
+	return configurations.ToSlice()
+}
+
+// GetCloudBillingWatchers assembles the set of config watchers keyed by
+// source. The multi-cloud watcher is always present; the helm and config-file
+// watchers are added only when a provider config is available.
+func GetCloudBillingWatchers(providerConfig models.ProviderConfig) map[ConfigSource]cloud.KeyedConfigWatcher {
+	watchers := map[ConfigSource]cloud.KeyedConfigWatcher{
+		MultiCloudSource: &MultiCloudWatcher{},
+	}
+	if providerConfig == nil {
+		return watchers
+	}
+	watchers[HelmSource] = &HelmWatcher{providerConfig: providerConfig}
+	watchers[ConfigFileSource] = &ConfigFileWatcher{providerConfig: providerConfig}
+	return watchers
+}
+
+// loadFile unmarshals the json content of a file into the provided object;
+// an empty return with no error indicates that the file did not exist.
+// Errors are wrapped with %w so callers can inspect the underlying cause.
+func loadFile[T any](path string, content T) error {
+	exists, err := fileutil.FileExists(path)
+	if err != nil {
+		return fmt.Errorf("loadFile: error checking file at '%s': %w", path, err)
+	}
+
+	// If file does not exist implies that this configuration method was not used
+	if !exists {
+		return nil
+	}
+
+	// NOTE: ioutil.ReadFile is deprecated since Go 1.16 in favor of os.ReadFile
+	result, err := ioutil.ReadFile(path)
+	if err != nil {
+		return fmt.Errorf("loadFile: Error reading file: %w", err)
+	}
+
+	err = json.Unmarshal(result, content)
+	if err != nil {
+		return fmt.Errorf("loadFile: Error reading json: %w", err)
+	}
+
+	return nil
+}
+
+// ConfigSource is an Enum of the sources int value of the Source determines its priority
+type ConfigSource int
+
+const (
+	// UnknownSource is the zero value, used for unrecognized source strings.
+	UnknownSource ConfigSource = iota
+	ConfigControllerSource
+	MultiCloudSource
+	ConfigFileSource
+	HelmSource
+)
+
+// GetConfigSource parses the string form of a ConfigSource; strings that do
+// not name a known source map to UnknownSource.
+func GetConfigSource(str string) ConfigSource {
+	byName := map[string]ConfigSource{
+		"configController": ConfigControllerSource,
+		"configfile":       ConfigFileSource,
+		"helm":             HelmSource,
+		"multicloud":       MultiCloudSource,
+	}
+	if cs, ok := byName[str]; ok {
+		return cs
+	}
+	return UnknownSource
+}
+
+// String renders the ConfigSource name; UnknownSource and any out-of-range
+// value render as "unknown".
+func (cs ConfigSource) String() string {
+	switch cs {
+	case ConfigControllerSource:
+		return "configController"
+	case ConfigFileSource:
+		return "configfile"
+	case HelmSource:
+		return "helm"
+	case MultiCloudSource:
+		return "multicloud"
+	default:
+		// UnknownSource and unrecognized values share this branch
+		return "unknown"
+	}
+}

+ 9 - 9
pkg/cloud/gcp/authorizer.go

@@ -4,7 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"google.golang.org/api/option"
 )
 
@@ -13,7 +13,7 @@ const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"
 
 // Authorizer provide a []option.ClientOption which is used in when creating clients in the GCP SDK
 type Authorizer interface {
-	config.Authorizer
+	cloud.Authorizer
 	CreateGCPClientOptions() ([]option.ClientOption, error)
 }
 
@@ -36,7 +36,7 @@ type ServiceAccountKey struct {
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 2)
-	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
 	fmap["key"] = gkc.Key
 	return json.Marshal(fmap)
 }
@@ -49,7 +49,7 @@ func (gkc *ServiceAccountKey) Validate() error {
 	return nil
 }
 
-func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+func (gkc *ServiceAccountKey) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -71,10 +71,10 @@ func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
 	return true
 }
 
-func (gkc *ServiceAccountKey) Sanitize() config.Config {
+func (gkc *ServiceAccountKey) Sanitize() cloud.Config {
 	redactedMap := make(map[string]string, len(gkc.Key))
 	for key, _ := range gkc.Key {
-		redactedMap[key] = config.Redacted
+		redactedMap[key] = cloud.Redacted
 	}
 	return &ServiceAccountKey{
 		Key: redactedMap,
@@ -103,7 +103,7 @@ type WorkloadIdentity struct{}
 // MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
 func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
 	fmap := make(map[string]any, 1)
-	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
+	fmap[cloud.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
 	return json.Marshal(fmap)
 }
 
@@ -111,7 +111,7 @@ func (wi *WorkloadIdentity) Validate() error {
 	return nil
 }
 
-func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+func (wi *WorkloadIdentity) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -123,7 +123,7 @@ func (wi *WorkloadIdentity) Equals(config config.Config) bool {
 	return true
 }
 
-func (wi *WorkloadIdentity) Sanitize() config.Config {
+func (wi *WorkloadIdentity) Sanitize() cloud.Config {
 	return &WorkloadIdentity{}
 }
 

+ 13 - 8
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -6,7 +6,8 @@ import (
 	"strings"
 
 	"cloud.google.com/go/bigquery"
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/util/json"
 )
 
@@ -43,7 +44,7 @@ func (bqc *BigQueryConfiguration) Validate() error {
 	return nil
 }
 
-func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+func (bqc *BigQueryConfiguration) Equals(config cloud.Config) bool {
 	if config == nil {
 		return false
 	}
@@ -77,7 +78,7 @@ func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
 	return true
 }
 
-func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+func (bqc *BigQueryConfiguration) Sanitize() cloud.Config {
 	return &BigQueryConfiguration{
 		ProjectID:  bqc.ProjectID,
 		Dataset:    bqc.Dataset,
@@ -91,6 +92,10 @@ func (bqc *BigQueryConfiguration) Key() string {
 	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
 }
 
+func (bqc *BigQueryConfiguration) Provider() string {
+	return kubecost.GCPProvider
+}
+
 func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
 	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
 }
@@ -113,19 +118,19 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 
 	fmap := f.(map[string]interface{})
 
-	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	projectID, err := cloud.GetInterfaceValue[string](fmap, "projectID")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
 	bqc.ProjectID = projectID
 
-	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	dataset, err := cloud.GetInterfaceValue[string](fmap, "dataset")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
 	bqc.Dataset = dataset
 
-	table, err := config.GetInterfaceValue[string](fmap, "table")
+	table, err := cloud.GetInterfaceValue[string](fmap, "table")
 	if err != nil {
 		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
 	}
@@ -135,7 +140,7 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 	if !ok {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
 	}
-	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
 	if err != nil {
 		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
 	}
@@ -143,7 +148,7 @@ func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) cloud.KeyedConfig {
 	if bqc.IsEmpty() {
 		return nil
 	}

+ 2 - 2
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util/json"
 )
@@ -122,7 +122,7 @@ func TestBigQueryConfiguration_Validate(t *testing.T) {
 func TestBigQueryConfiguration_Equals(t *testing.T) {
 	testCases := map[string]struct {
 		left     BigQueryConfiguration
-		right    config.Config
+		right    cloud.Config
 		expected bool
 	}{
 		"matching config": {

+ 2 - 1
pkg/cloud/gcp/bigqueryintegration.go

@@ -84,7 +84,7 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*k
 
 	// Perform Query and parse values
 
-	ccsr, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, bqi.Key())
+	ccsr, err := kubecost.NewCloudCostSetRange(start, end, kubecost.AccumulateOptionDay, bqi.Key())
 	if err != nil {
 		return ccsr, fmt.Errorf("error creating new CloudCostSetRange: %s", err)
 	}
@@ -110,6 +110,7 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*k
 		ccsr.LoadCloudCost(ccl.CloudCost)
 
 	}
+
 	return ccsr, nil
 
 }

+ 19 - 3
pkg/cloud/gcp/bigqueryquerier.go

@@ -2,10 +2,10 @@ package gcp
 
 import (
 	"context"
+	"fmt"
 
 	"cloud.google.com/go/bigquery"
 	"github.com/opencost/opencost/pkg/cloud"
-	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 )
 
 type BigQueryQuerier struct {
@@ -14,10 +14,14 @@ type BigQueryQuerier struct {
 }
 
 func (bqq *BigQueryQuerier) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if bqq.ConnectionStatus.String() == "" {
+		bqq.ConnectionStatus = cloud.InitialStatus
+	}
 	return bqq.ConnectionStatus
 }
 
-func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
+func (bqq *BigQueryQuerier) Equals(config cloud.Config) bool {
 	thatConfig, ok := config.(*BigQueryQuerier)
 	if !ok {
 		return false
@@ -41,5 +45,17 @@ func (bqq *BigQueryQuerier) Query(ctx context.Context, queryStr string) (*bigque
 	}
 
 	query := client.Query(queryStr)
-	return query.Read(ctx)
+	iter, err := query.Read(ctx)
+
+	// If result is empty and connection status is not already successful update status to missing data
+	if iter == nil && bqq.ConnectionStatus != cloud.SuccessfulConnection {
+		bqq.ConnectionStatus = cloud.MissingData
+	} else {
+		bqq.ConnectionStatus = cloud.SuccessfulConnection
+	}
+
+	if err != nil {
+		return iter, fmt.Errorf("BigQueryQuerier: Query: error reading query results: %w", err)
+	}
+	return iter, nil
 }

+ 2 - 0
pkg/cloud/gcp/provider.go

@@ -72,6 +72,7 @@ var gcpRegions = []string{
 	"europe-west3",
 	"europe-west4",
 	"europe-west6",
+	"europe-west9",
 	"northamerica-northeast1",
 	"northamerica-northeast2",
 	"southamerica-east1",
@@ -794,6 +795,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]models.Key, pvKeys m
 					case "a2":
 						candidateKeys = append(candidateKeys, region+","+"a2highgpu"+","+usageType)
 						candidateKeys = append(candidateKeys, region+","+"a2megagpu"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"a2ultragpu"+","+usageType)
 					default:
 						candidateKey := region + "," + instanceType + "," + usageType
 						candidateKeys = append(candidateKeys, candidateKey)

+ 28 - 0
pkg/cloud/provider/providerconfig.go

@@ -7,6 +7,10 @@ import (
 	"strconv"
 	"sync"
 
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/config"
@@ -294,3 +298,27 @@ func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
 	}
 	return defaultPricing, nil
 }
+
+func ExtractConfigFromProviders(prov models.Provider) models.ProviderConfig {
+	if prov == nil {
+		log.Errorf("cannot extract config from nil provider")
+		return nil
+	}
+	switch p := prov.(type) {
+	case *CSVProvider:
+		return ExtractConfigFromProviders(p.CustomProvider)
+	case *CustomProvider:
+		return p.Config
+	case *gcp.GCP:
+		return p.Config
+	case *aws.AWS:
+		return p.Config
+	case *azure.Azure:
+		return p.Config
+	case *alibaba.Alibaba:
+		return p.Config
+	default:
+		log.Errorf("failed to extract config from provider")
+		return nil
+	}
+}

+ 1 - 1
pkg/cloud/scaleway/provider.go

@@ -149,7 +149,7 @@ func (c *Scaleway) NodePricing(key models.Key) (*models.Node, models.PricingMeta
 				RAM:         fmt.Sprintf("%d", info.RAM),
 				// This is tricky, as instances can have local volumes or not
 				Storage:      fmt.Sprintf("%d", info.PerVolumeConstraint.LSSD.MinSize),
-				GPU:          fmt.Sprintf("%d", info.Gpu),
+				GPU:          fmt.Sprintf("%d", *info.Gpu),
 				InstanceType: split[1],
 				Region:       split[0],
 				GPUName:      key.GPUType(),

+ 207 - 0
pkg/cloudcost/ingestionmanager.go

@@ -0,0 +1,207 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// IngestionManager is a config.Observer which creates Ingestor instances based on the signals that it receives from the
+// config.Controller
+type IngestionManager struct {
+	lock      sync.Mutex
+	ingestors map[string]*ingestor
+	config    IngestorConfig
+	repo      Repository
+}
+
+// NewIngestionManager creates a new IngestionManager and registers it with the provided integration controller
+// so that PutConfig/DeleteConfig/SetConfigs are invoked as integration configurations change.
+func NewIngestionManager(controller *config.Controller, repo Repository, ingConf IngestorConfig) *IngestionManager {
+	// return empty ingestion manager if store or integration controller are nil;
+	// the empty manager is safe to use because it is never registered as an
+	// observer and therefore can never create ingestors
+	if controller == nil || repo == nil {
+		return &IngestionManager{
+			ingestors: map[string]*ingestor{},
+		}
+	}
+
+	im := &IngestionManager{
+		ingestors: map[string]*ingestor{},
+		repo:      repo,
+		config:    ingConf,
+	}
+	// subscribe to config change signals from the controller
+	controller.RegisterObserver(im)
+
+	return im
+}
+
+// PutConfig is an imperative function which puts an ingestor for the provided Integration
+func (im *IngestionManager) PutConfig(kc cloud.KeyedConfig) {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	if err := im.createIngestor(kc); err != nil {
+		log.Errorf("IngestionManager: PutConfig failed to create billing integration: %s", err.Error())
+	}
+}
+
+// DeleteConfig is an imperative function which removes an ingestor with a matching key.
+// deleteIngestor stops the ingestor's background processes first, so this call can
+// block until that shutdown completes. No-op if the key is unknown.
+func (im *IngestionManager) DeleteConfig(key string) {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	im.deleteIngestor(key)
+}
+
+// SetConfigs is a declarative function for setting which BillingIntegrations IngestionManager should have ingestors for.
+// All current ingestors are stopped and removed, then one ingestor is created per provided config.
+func (im *IngestionManager) SetConfigs(configs map[string]cloud.KeyedConfig) {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	// delete any existing ingestors
+	for key := range im.ingestors {
+		im.deleteIngestor(key)
+	}
+	// create ingestors for the provided configs
+	for _, conf := range configs {
+		err := im.createIngestor(conf)
+		if err != nil {
+			log.Errorf("IngestionManager: error creating ingestor: %s", err.Error())
+		}
+	}
+}
+
+// StartAll starts every managed ingestor concurrently and blocks until all of
+// the Start calls have returned.
+func (im *IngestionManager) StartAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	for _, ing := range im.ingestors {
+		wg.Add(1)
+		go func(i *ingestor) {
+			defer wg.Done()
+			i.Start(false)
+		}(ing)
+	}
+	wg.Wait()
+}
+
+// StopAll stops every managed ingestor concurrently and blocks until all of
+// the Stop calls have returned.
+func (im *IngestionManager) StopAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	for _, ing := range im.ingestors {
+		wg.Add(1)
+		go func(i *ingestor) {
+			defer wg.Done()
+			i.Stop()
+		}(ing)
+	}
+	wg.Wait()
+}
+
+// RebuildAll stops and restarts every managed ingestor with rebuild=true, which
+// forces each one to re-query and overwrite its entire retention window. Blocks
+// until every stop/start pair has been issued.
+func (im *IngestionManager) RebuildAll() {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	var wg sync.WaitGroup
+	wg.Add(len(im.ingestors))
+	for key := range im.ingestors {
+		go func(ing *ingestor) {
+			defer wg.Done()
+			ing.Stop()
+			ing.Start(true)
+
+		}(im.ingestors[key])
+	}
+	wg.Wait()
+}
+
+// Rebuild stops and restarts the ingestor for the given integration key with
+// rebuild=true, forcing it to re-query its entire retention window. Returns an
+// error when no ingestor exists for that key.
+func (im *IngestionManager) Rebuild(integrationKey string) error {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	ing, ok := im.ingestors[integrationKey]
+	if !ok {
+		return fmt.Errorf("CloudCost: IngestionManager: Rebuild: failed to rebuild, integration with key does not exist: %s", integrationKey)
+	}
+	ing.Stop()
+	ing.Start(true)
+	return nil
+}
+
+// RepairAll re-ingests the [start, end] range for every managed ingestor. The
+// range is rounded forward to the configured resolution and split into query
+// windows before being rebuilt.
+// NOTE(review): the per-ingestor rebuild goroutines are not tracked, so this
+// returns before (and regardless of whether) the repairs finish — confirm
+// fire-and-forget is intended here.
+func (im *IngestionManager) RepairAll(start, end time.Time) error {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	s := kubecost.RoundForward(start, im.config.Resolution)
+	e := kubecost.RoundForward(end, im.config.Resolution)
+	windows, err := kubecost.GetWindowsForQueryWindow(s, e, im.config.QueryWindow)
+	if err != nil {
+		return fmt.Errorf("CloudCost: IngestionManager: Repair could not retrieve windows: %s", err.Error())
+	}
+
+	// rebuild each window asynchronously, one goroutine per ingestor
+	for key := range im.ingestors {
+		go func(ing *ingestor) {
+			for _, window := range windows {
+				ing.BuildWindow(*window.Start(), *window.End())
+			}
+		}(im.ingestors[key])
+	}
+
+	return nil
+}
+
+// Repair re-ingests the [start, end] range for a single integration. The range
+// is rounded forward to the configured resolution and split into query windows.
+// Returns an error when no ingestor exists for the key; otherwise the rebuild
+// runs asynchronously in an untracked goroutine.
+func (im *IngestionManager) Repair(integrationKey string, start, end time.Time) error {
+	im.lock.Lock()
+	defer im.lock.Unlock()
+	s := kubecost.RoundForward(start, im.config.Resolution)
+	e := kubecost.RoundForward(end, im.config.Resolution)
+	windows, err := kubecost.GetWindowsForQueryWindow(s, e, im.config.QueryWindow)
+	if err != nil {
+		return fmt.Errorf("CloudCost: IngestionManager: Repair could not retrieve windows: %s", err.Error())
+	}
+	ing, ok := im.ingestors[integrationKey]
+	if !ok {
+		return fmt.Errorf("CloudCost: IngestionManager: Repair: failed to rebuild, integration with key does not exist: %s", integrationKey)
+	}
+	go func(ing *ingestor) {
+		for _, window := range windows {
+			ing.BuildWindow(*window.Start(), *window.End())
+		}
+	}(ing)
+	return nil
+}
+
+// deleteIngestor stops then removes an ingestor from the map of ingestors.
+// It is a no-op when no ingestor exists for the given key. Caller must hold im.lock.
+func (im *IngestionManager) deleteIngestor(integrationKey string) {
+	if ing, ok := im.ingestors[integrationKey]; ok {
+		log.Infof("CloudCost: IngestionManager: deleting integration with key: %s", integrationKey)
+		ing.Stop()
+		delete(im.ingestors, integrationKey)
+	}
+}
+
+// createIngestor stops any existing ingestor with a matching key, then creates and starts a new ingestor.
+// Caller must hold im.lock.
+// NOTE: the parameter is named kc (not "config") to avoid shadowing the imported
+// pkg/cloud/config package, and for consistency with PutConfig.
+func (im *IngestionManager) createIngestor(kc cloud.KeyedConfig) error {
+	if kc == nil {
+		return fmt.Errorf("cannot create ingestor from nil integration")
+	}
+	// delete ingestor with matching key if it exists
+	im.deleteIngestor(kc.Key())
+	log.Infof("CloudCost: IngestionManager: creating integration with key: %s", kc.Key())
+	ing, err := NewIngestor(im.config, im.repo, kc)
+	if err != nil {
+		return fmt.Errorf("IngestionManager: createIngestor: %w", err)
+	}
+
+	ing.Start(false)
+
+	im.ingestors[kc.Key()] = ing
+
+	return nil
+}

+ 342 - 0
pkg/cloudcost/ingestor.go

@@ -0,0 +1,342 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+// IngestorStatus includes diagnostic values for a given Ingestor
+type IngestorStatus struct {
+	Created          time.Time
+	LastRun          time.Time
+	NextRun          time.Time
+	Runs             int
+	Coverage         kubecost.Window
+	ConnectionStatus cloud.ConnectionStatus
+}
+
+// IngestorConfig is a configuration struct for an Ingestor
+type IngestorConfig struct {
+	MonthToDateRunInterval int
+	RefreshRate            time.Duration
+	Resolution             time.Duration
+	Duration               time.Duration
+	QueryWindow            time.Duration
+	RunWindow              time.Duration
+}
+
+// DefaultIngestorConfiguration retrieves an IngestorConfig from env variables.
+// Resolution is fixed at one day; all other values are scaled from their
+// respective env settings (retention days, refresh hours, window days).
+func DefaultIngestorConfiguration() IngestorConfig {
+	return IngestorConfig{
+		Resolution:             timeutil.Day,
+		Duration:               timeutil.Day * time.Duration(env.GetDataRetentionDailyResolutionDays()),
+		MonthToDateRunInterval: env.GetCloudCostMonthToDateInterval(),
+		RefreshRate:            time.Hour * time.Duration(env.GetCloudCostRefreshRateHours()),
+		QueryWindow:            timeutil.Day * time.Duration(env.GetCloudCostQueryWindowDays()),
+		RunWindow:              timeutil.Day * time.Duration(env.GetCloudCostRunWindowDays()),
+	}
+}
+
+// ingestor runs the process for ingesting CloudCost from its CloudCostIntegration and store it in a Repository
+type ingestor struct {
+	key          string
+	integration  CloudCostIntegration
+	config       IngestorConfig
+	repo         Repository
+	runID        string
+	lastRun      time.Time
+	runs         int
+	creationTime time.Time
+	coverage     kubecost.Window
+	coverageLock sync.Mutex
+	isRunning    atomic.Bool
+	isStopping   atomic.Bool
+	exitBuildCh  chan string
+	exitRunCh    chan string
+}
+
+// NewIngestor is an initializer for ingestor. It validates its inputs, resolves
+// the KeyedConfig into a CloudCostIntegration, and returns an ingestor whose
+// coverage starts as an empty window at the next midnight (UTC).
+// Fixes "connot" -> "cannot" typos in the two validation error messages.
+func NewIngestor(ingestorConfig IngestorConfig, repo Repository, config cloud.KeyedConfig) (*ingestor, error) {
+	if repo == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: repository cannot be nil")
+	}
+	if config == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: integration cannot be nil")
+	}
+	cci := GetIntegrationFromConfig(config)
+	if cci == nil {
+		return nil, fmt.Errorf("CloudCost: NewIngestor: provider integration config was not a valid type: %T", config)
+	}
+	now := time.Now().UTC()
+	midnight := kubecost.RoundForward(now, timeutil.Day)
+	return &ingestor{
+		config:       ingestorConfig,
+		repo:         repo,
+		key:          config.Key(),
+		integration:  cci,
+		creationTime: now,
+		lastRun:      now,
+		coverage:     kubecost.NewClosedWindow(midnight, midnight),
+	}, nil
+}
+
+// LoadWindow checks repository coverage for each day in [start, end) and only
+// queries the cloud provider when data is missing.
+// NOTE(review): as soon as ANY single day is missing, the ENTIRE [start, end)
+// range is rebuilt (including days already present) and the remaining days are
+// not individually checked — confirm this coarse-grained rebuild is intended.
+func (ing *ingestor) LoadWindow(start, end time.Time) {
+	windows, err := kubecost.GetWindows(start, end, timeutil.Day)
+	if err != nil {
+		log.Errorf("CloudCost[%s]: ingestor: invalid window %s", ing.key, kubecost.NewWindow(&start, &end))
+		return
+	}
+
+	for _, window := range windows {
+		has, err2 := ing.repo.Has(*window.Start(), ing.key)
+		if err2 != nil {
+			// repository error: fall through and treat the day as missing
+			log.Errorf("CloudCost[%s]: ingestor: error when loading window: %s", ing.key, err2.Error())
+		}
+		if !has {
+			ing.BuildWindow(start, end)
+			return
+		}
+		// day already present: mark it as covered and skip the build
+		ing.expandCoverage(window)
+		log.Debugf("CloudCost[%s]: ingestor: skipping build for window %s, coverage already exists", ing.key, window.String())
+	}
+
+}
+
+// BuildWindow queries the integration for CloudCost data in [start, end),
+// persists each resulting CloudCostSet to the repository, and expands the
+// ingestor's coverage to include each saved set's window. Errors are logged,
+// not returned; a failed save of one set does not stop the others.
+func (ing *ingestor) BuildWindow(start, end time.Time) {
+	log.Infof("CloudCost[%s]: ingestor: building window %s", ing.key, kubecost.NewWindow(&start, &end))
+	ccsr, err := ing.integration.GetCloudCost(start, end)
+	if err != nil {
+		log.Errorf("CloudCost[%s]: ingestor: build failed for window %s: %s", ing.key, kubecost.NewWindow(&start, &end), err.Error())
+		return
+	}
+	for _, ccs := range ccsr.CloudCostSets {
+		log.Debugf("BuildWindow[%s]: GetCloudCost: writing cloud costs for window %s: %d", ccs.Integration, ccs.Window, len(ccs.CloudCosts))
+		err2 := ing.repo.Put(ccs)
+		if err2 != nil {
+			log.Errorf("CloudCost[%s]: ingestor: failed to save Cloud Cost Set with window %s: %s", ing.key, ccs.GetWindow().String(), err2.Error())
+		}
+		ing.expandCoverage(ccs.Window)
+	}
+}
+
+// Start launches the ingestor's two background goroutines: build (one-shot
+// backfill of the retention window) and run (periodic refresh). When rebuild is
+// true the backfill overwrites existing coverage. Idempotent while running:
+// a second Start is a logged no-op until Stop completes.
+func (ing *ingestor) Start(rebuild bool) {
+
+	// If already running, log that and return.
+	if !ing.isRunning.CompareAndSwap(false, true) {
+		log.Infof("CloudCost: ingestor: is already running")
+		return
+	}
+
+	// short random ID used to correlate log lines from this start cycle
+	ing.runID = stringutil.RandSeq(5)
+
+	// unbuffered channels: Stop blocks until each goroutine receives its exit message
+	ing.exitBuildCh = make(chan string)
+	ing.exitRunCh = make(chan string)
+
+	// Build the store once, advancing backward in time from the earliest
+	// point of coverage.
+	go ing.build(rebuild)
+
+	go ing.run()
+}
+
+// Stop signals the build and run goroutines to exit and blocks until both exit
+// messages have been delivered, then clears the running/stopping flags so that
+// Start may be called again. Idempotent while stopping: a concurrent second
+// Stop is a logged no-op.
+func (ing *ingestor) Stop() {
+	// If already stopping, log that and return.
+	if !ing.isStopping.CompareAndSwap(false, true) {
+		log.Infof("CloudCost: ingestor: is already stopping")
+		return
+	}
+
+	msg := "Stopping"
+
+	// If the processes are running (and thus there are channels available for
+	// stopping them) then stop all sub-processes (i.e. build and run)
+	var wg sync.WaitGroup
+
+	if ing.exitBuildCh != nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			ing.exitBuildCh <- msg
+		}()
+	}
+
+	if ing.exitRunCh != nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			ing.exitRunCh <- msg
+		}()
+	}
+
+	wg.Wait()
+
+	// Declare that the store is officially no longer running. This allows
+	// Start to be called again, restarting the store from scratch.
+	ing.isRunning.Store(false)
+	ing.isStopping.Store(false)
+}
+
+// Status returns an IngestorStatus that describes the current state of the ingestor.
+// NOTE(review): lastRun, runs and coverage are read here without synchronization
+// while the run goroutine updates them; the values are advisory snapshots —
+// confirm that is acceptable for the diagnostics endpoint.
+func (ing *ingestor) Status() IngestorStatus {
+	return IngestorStatus{
+		Created:          ing.creationTime,
+		LastRun:          ing.lastRun,
+		NextRun:          ing.lastRun.Add(ing.config.RefreshRate).UTC(),
+		Runs:             ing.runs,
+		Coverage:         ing.coverage,
+		ConnectionStatus: ing.integration.GetStatus(),
+	}
+}
+
+// build backfills CloudCost data from now back to the retention limit, one
+// QueryWindow at a time, moving backward in time. With rebuild=true every
+// window is re-queried; otherwise LoadWindow skips days already in the
+// repository. After finishing, it parks on exitBuildCh so that Stop's exit
+// message can always be delivered.
+func (ing *ingestor) build(rebuild bool) {
+	defer errors.HandlePanic()
+
+	// Profile the full Duration of the build time
+	buildStart := time.Now()
+
+	// Build as far back as the configured build Duration
+	limit := kubecost.RoundBack(time.Now().UTC().Add(-ing.config.Duration), ing.config.Resolution)
+
+	queryWindowStr := timeutil.FormatStoreResolution(ing.config.QueryWindow)
+	log.Infof("CloudCost[%s]: ingestor: build[%s]: Starting build back to %s in blocks of %s", ing.key, ing.runID, limit.String(), queryWindowStr)
+
+	// Start with a window of the configured Duration and ending on the given
+	// start time. Build windows repeating until the window reaches the
+	// given limit time
+
+	// Round end times back to nearest Resolution points in the past,
+	// querying for exactly one interval
+	e := kubecost.RoundBack(time.Now().UTC(), ing.config.Resolution)
+	s := e.Add(-ing.config.QueryWindow)
+
+	// Continue until limit is reached
+	for limit.Before(e) {
+		// If exit instruction is received, log and return
+		select {
+		case <-ing.exitBuildCh:
+			log.Debugf("CloudCost[%s]: ingestor: build[%s]: exiting", ing.key, ing.runID)
+			return
+		default:
+		}
+
+		// Profile the current build step
+		stepStart := time.Now()
+
+		// if rebuild is not specified then check for existing coverage on window
+		if rebuild {
+			ing.BuildWindow(s, e)
+		} else {
+			ing.LoadWindow(s, e)
+		}
+
+		log.Infof("CloudCost[%s]: ingestor: build[%s]:  %s in %v", ing.key, ing.runID, kubecost.NewClosedWindow(s, e), time.Since(stepStart))
+
+		// Shift to next QueryWindow (clamp the start so it never precedes limit)
+		s = s.Add(-ing.config.QueryWindow)
+		if s.Before(limit) {
+			s = limit
+		}
+		e = e.Add(-ing.config.QueryWindow)
+	}
+
+	log.Infof(fmt.Sprintf("CloudCost[%s]: ingestor: build[%s]: completed in %v", ing.key, ing.runID, time.Since(buildStart)))
+
+	// In order to be able to Stop, we have to wait on an exit message
+	// here
+	<-ing.exitBuildCh
+
+}
+
+// run is the steady-state ingestion loop: on each tick it re-ingests the recent
+// RunWindow (periodically extended to the whole month to date), expires data
+// older than the retention limit, contracts coverage accordingly, and schedules
+// the next run at RefreshRate. Exits when a message arrives on exitRunCh.
+func (ing *ingestor) run() {
+	defer errors.HandlePanic()
+
+	ticker := timeutil.NewJobTicker()
+	defer ticker.Close()
+	ticker.TickIn(0)
+
+	for {
+		// If an exit instruction is received, break the run loop
+		select {
+		case <-ing.exitRunCh:
+			log.Debugf("CloudCost[%s]: ingestor: Run[%s] exiting", ing.key, ing.runID)
+			return
+		case <-ticker.Ch:
+			// Wait for next tick
+		}
+
+		// Start from the last covered time, minus the RunWindow
+		start := ing.lastRun
+		start = start.Add(-ing.config.RunWindow)
+
+		// Every Nth (determined by the MonthToDateRunInterval) run should be a month to date run. Where the start is
+		// truncated to the beginning of its current month this can mean that early in a new month we will build all of
+		// last month and the first few days of the current month.
+		// BUGFIX: guard against MonthToDateRunInterval <= 0, which would
+		// previously panic with an integer division by zero.
+		if ing.config.MonthToDateRunInterval > 0 && ing.runs%ing.config.MonthToDateRunInterval == 0 {
+			start = time.Date(start.Year(), start.Month(), 1, 0, 0, 0, 0, time.UTC)
+			log.Infof("CloudCost[%s]: ingestor: Run[%s]: running month-to-date update starting at %s", ing.key, ing.runID, start.String())
+		}
+
+		// Round start time back to the nearest Resolution point in the past from the
+		// last update to the QueryWindow
+		s := kubecost.RoundBack(start.UTC(), ing.config.Resolution)
+		e := s.Add(ing.config.QueryWindow)
+
+		// Start with a window of the configured Duration and starting on the given
+		// start time. Do the following, repeating until the window reaches the
+		// current time:
+		// 1. Instruct builder to build window
+		// 2. Move window forward one Resolution
+		for time.Now().After(s) {
+			profStart := time.Now()
+			ing.BuildWindow(s, e)
+
+			log.Debugf("CloudCost[%s]: ingestor: Run[%s]: completed %s in %v", ing.key, ing.runID, kubecost.NewWindow(&s, &e), time.Since(profStart))
+
+			s = s.Add(ing.config.QueryWindow)
+			e = e.Add(ing.config.QueryWindow)
+			// prevent builds into the future
+			if e.After(time.Now().UTC()) {
+				e = kubecost.RoundForward(time.Now().UTC(), ing.config.Resolution)
+			}
+
+		}
+		ing.lastRun = time.Now().UTC()
+
+		// expire data beyond the retention limit and shrink coverage to match
+		limit := kubecost.RoundBack(time.Now().UTC(), ing.config.Resolution).Add(-ing.config.Duration)
+		err := ing.repo.Expire(limit)
+		if err != nil {
+			log.Errorf("CloudCost: Ingestor: failed to expire Data: %s", err)
+		}
+
+		ing.coverageLock.Lock()
+		ing.coverage = ing.coverage.ContractStart(limit)
+		ing.coverageLock.Unlock()
+
+		ing.runs++
+
+		ticker.TickIn(ing.config.RefreshRate)
+	}
+}
+
+// expandCoverage grows the ingestor's coverage window so that it contains the
+// given closed window; open windows are ignored.
+func (ing *ingestor) expandCoverage(window kubecost.Window) {
+	if window.IsOpen() {
+		return
+	}
+	ing.coverageLock.Lock()
+	defer ing.coverageLock.Unlock()
+	ing.coverage = ing.coverage.ExpandStart(*window.Start()).ExpandEnd(*window.End())
+}

+ 96 - 0
pkg/cloudcost/integration.go

@@ -0,0 +1,96 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
+type CloudCostIntegration interface {
+	GetCloudCost(time.Time, time.Time) (*kubecost.CloudCostSetRange, error)
+	GetStatus() cloud.ConnectionStatus
+}
+
+// GetIntegrationFromConfig converts any valid KeyedConfig into the appropriate BillingIntegration if possible.
+// Each provider's config/querier/integration types all map to that provider's integration,
+// wrapping the more specific value as needed. Returns nil for unrecognized types and for
+// Alibaba's BOAConfiguration, which has no CloudCost integration yet.
+func GetIntegrationFromConfig(kc cloud.KeyedConfig) CloudCostIntegration {
+	switch keyedConfig := kc.(type) {
+	// AthenaIntegration
+	case *aws.AthenaConfiguration:
+		return &aws.AthenaIntegration{
+			AthenaQuerier: aws.AthenaQuerier{
+				AthenaConfiguration: *keyedConfig,
+			},
+		}
+	case *aws.AthenaQuerier:
+		return &aws.AthenaIntegration{
+			AthenaQuerier: *keyedConfig,
+		}
+	case *aws.AthenaIntegration:
+		return keyedConfig
+	// BigQueryIntegration
+	case *gcp.BigQueryConfiguration:
+		return &gcp.BigQueryIntegration{
+			BigQueryQuerier: gcp.BigQueryQuerier{
+				BigQueryConfiguration: *keyedConfig,
+			},
+		}
+	case *gcp.BigQueryQuerier:
+		return &gcp.BigQueryIntegration{
+			BigQueryQuerier: *keyedConfig,
+		}
+	case *gcp.BigQueryIntegration:
+		return keyedConfig
+	// AzureStorageIntegration
+	case *azure.StorageConfiguration:
+		return &azure.AzureStorageIntegration{
+			AzureStorageBillingParser: azure.AzureStorageBillingParser{
+				StorageConnection: azure.StorageConnection{
+					StorageConfiguration: *keyedConfig},
+			},
+		}
+	case *azure.StorageConnection:
+		return &azure.AzureStorageIntegration{
+			AzureStorageBillingParser: azure.AzureStorageBillingParser{
+				StorageConnection: *keyedConfig,
+			},
+		}
+	case *azure.AzureStorageBillingParser:
+		return &azure.AzureStorageIntegration{
+			AzureStorageBillingParser: *keyedConfig,
+		}
+	case *azure.AzureStorageIntegration:
+		return keyedConfig
+	// S3SelectIntegration
+	case *aws.S3Configuration:
+		return &aws.S3SelectIntegration{
+			S3SelectQuerier: aws.S3SelectQuerier{
+				S3Connection: aws.S3Connection{
+					S3Configuration: *keyedConfig,
+				},
+			},
+		}
+	case *aws.S3Connection:
+		return &aws.S3SelectIntegration{
+			S3SelectQuerier: aws.S3SelectQuerier{
+				S3Connection: *keyedConfig,
+			},
+		}
+	case *aws.S3SelectQuerier:
+		return &aws.S3SelectIntegration{
+			S3SelectQuerier: *keyedConfig,
+		}
+	case *aws.S3SelectIntegration:
+		return keyedConfig
+	// Alibaba BOA Integration: recognized, but no CloudCost integration exists yet
+	case *alibaba.BOAConfiguration:
+		return nil
+	default:
+		return nil
+	}
+}

+ 103 - 0
pkg/cloudcost/memoryrepository.go

@@ -0,0 +1,103 @@
+package cloudcost
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"golang.org/x/exp/maps"
+)
+
+// MemoryRepository is an implementation of Repository that uses a map keyed on config key and window start along with a
+// RWMutex to make it threadsafe
+type MemoryRepository struct {
+	rwLock sync.RWMutex
+	data   map[string]map[time.Time]*kubecost.CloudCostSet
+}
+
+// NewMemoryRepository constructs an empty, ready-to-use MemoryRepository.
+func NewMemoryRepository() *MemoryRepository {
+	return &MemoryRepository{
+		data: make(map[string]map[time.Time]*kubecost.CloudCostSet),
+	}
+}
+
+// Has reports whether a CloudCostSet exists for the given integration and
+// window start time. The error return is always nil for this implementation.
+func (m *MemoryRepository) Has(startTime time.Time, billingIntegration string) (bool, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	if sets, found := m.data[billingIntegration]; found {
+		_, present := sets[startTime.UTC()]
+		return present, nil
+	}
+	return false, nil
+}
+
+// Get returns a clone of the stored CloudCostSet for the given integration and
+// window start time, or nil when none exists. The error return is always nil.
+func (m *MemoryRepository) Get(startTime time.Time, billingIntegration string) (*kubecost.CloudCostSet, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	sets, found := m.data[billingIntegration]
+	if !found {
+		return nil, nil
+	}
+	if ccs, present := sets[startTime.UTC()]; present {
+		return ccs.Clone(), nil
+	}
+	return nil, nil
+}
+
+// Keys lists the integration keys that currently have stored data.
+// The error return is always nil for this implementation.
+func (m *MemoryRepository) Keys() ([]string, error) {
+	m.rwLock.RLock()
+	defer m.rwLock.RUnlock()
+
+	return maps.Keys(m.data), nil
+}
+
+// Put stores the given CloudCostSet, keyed by its integration and UTC window
+// start, replacing any previous set for the same key/start. Rejects nil sets,
+// sets with an open window, and sets without an integration value.
+// NOTE(review): the set is stored without cloning even though Get returns a
+// clone; callers must not mutate a set after Put — confirm that asymmetry is
+// intended.
+func (m *MemoryRepository) Put(ccs *kubecost.CloudCostSet) error {
+	m.rwLock.Lock()
+	defer m.rwLock.Unlock()
+
+	if ccs == nil {
+		return fmt.Errorf("MemoryRepository: Put: cannot save nil")
+	}
+
+	if ccs.Window.IsOpen() {
+		return fmt.Errorf("MemoryRepository: Put: cloud cost set has invalid window %s", ccs.Window.String())
+	}
+
+	if ccs.Integration == "" {
+		return fmt.Errorf("MemoryRepository: Put: cloud cost set does not have an integration value")
+	}
+
+	if _, ok := m.data[ccs.Integration]; !ok {
+		m.data[ccs.Integration] = make(map[time.Time]*kubecost.CloudCostSet)
+	}
+
+	m.data[ccs.Integration][ccs.Window.Start().UTC()] = ccs
+	return nil
+}
+
+// Expire deletes all items in the map with a start time before the given limit,
+// dropping any integration whose data becomes empty. The error return is always
+// nil for this implementation.
+func (m *MemoryRepository) Expire(limit time.Time) error {
+	m.rwLock.Lock()
+	defer m.rwLock.Unlock()
+
+	for key, sets := range m.data {
+		for start := range sets {
+			if start.Before(limit) {
+				delete(sets, start)
+			}
+		}
+		if len(sets) == 0 {
+			delete(m.data, key)
+		}
+	}
+	return nil
+}

+ 194 - 0
pkg/cloudcost/pipelineservice.go

@@ -0,0 +1,194 @@
+package cloudcost
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/proto"
+)
+
+var protocol = proto.HTTP()
+
+// PipelineService exposes CloudCost pipeline controls and diagnostics endpoints
+type PipelineService struct {
+	ingestionManager *IngestionManager
+	store            Repository
+	configController *config.Controller
+}
+
+// NewPipelineService is a constructor for a PipelineService. It builds an
+// IngestionManager (registered as an observer on the given config.Controller)
+// and wires it to the repository and the controller.
+func NewPipelineService(repo Repository, ic *config.Controller, ingConf IngestorConfig) *PipelineService {
+	im := NewIngestionManager(ic, repo, ingConf)
+	return &PipelineService{
+		ingestionManager: im,
+		store:            repo,
+		configController: ic,
+	}
+}
+
+// Status merges status values from the config.Controller and the IngestionManager
+// to give a combined view of the state of configs and their ingestion status.
+// NOTE(review): dp.ingestionManager.ingestors is read here without holding the
+// manager's lock while config observers may mutate the map concurrently —
+// confirm thread-safety of this diagnostics path.
+func (dp *PipelineService) Status() []Status {
+	var statuses []Status
+	// Pull config status from the config controller
+	confStatuses := dp.configController.GetStatus()
+	refreshRate := time.Hour * time.Duration(env.GetCloudCostRefreshRateHours())
+	for _, confStat := range confStatuses {
+		var conf cloudconfig.Config
+		var provider string
+		if confStat.Config != nil {
+			conf = confStat.Config.Sanitize()
+			provider = confStat.Config.Provider()
+		}
+
+		// ingestorStatus stays zero-valued when no ingestor exists for the key
+		var ingestorStatus IngestorStatus
+		if ing, ok := dp.ingestionManager.ingestors[confStat.Key]; ok {
+			ingestorStatus = ing.Status()
+		}
+
+		// These are the statuses
+		status := Status{
+			Key:              confStat.Key,
+			Source:           confStat.Source.String(),
+			Active:           confStat.Active,
+			Valid:            confStat.Valid,
+			Config:           conf,
+			Provider:         provider,
+			ConnectionStatus: ingestorStatus.ConnectionStatus.String(),
+			LastRun:          ingestorStatus.LastRun,
+			NextRun:          ingestorStatus.NextRun,
+			Runs:             ingestorStatus.Runs,
+			Created:          ingestorStatus.Created,
+			Coverage:         ingestorStatus.Coverage.String(),
+			RefreshRate:      refreshRate.String(),
+		}
+		statuses = append(statuses, status)
+	}
+
+	return statuses
+}
+
+// GetCloudCostRebuildHandler creates a handler from a http request which initiates a rebuild of cloud cost pipeline, if an
+// integrationKey is provided then it only rebuilds the specified billing integration.
+// The rebuild is destructive, so it requires the confirmation parameter commit=true (or commit=1).
+func (s *PipelineService) GetCloudCostRebuildHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If Reporting Service is nil, always return 501
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		commit := r.URL.Query().Get("commit") == "true" || r.URL.Query().Get("commit") == "1"
+
+		if !commit {
+			protocol.WriteData(w, "Pass parameter 'commit=true' to confirm Cloud Cost rebuild")
+			return
+		}
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		// If no integrationKey argument was provided, rebuild all Cloud Cost pipelines
+		if integrationKey == "" {
+			s.ingestionManager.RebuildAll()
+			protocol.WriteData(w, "Rebuilding Cloud Usage For All Providers")
+			return
+		} else {
+			err := s.ingestionManager.Rebuild(integrationKey)
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, fmt.Sprintf("Rebuilding Cloud Usage For Provider %s", integrationKey))
+			return
+		}
+	}
+}
+
+// GetCloudCostRepairHandler creates a handler from a http request which initiates a repair of cloud cost for a given window, if an
+// integrationKey is provided then it only repairs the specified integration
+func (s *PipelineService) GetCloudCostRepairHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// If Reporting Service is nil, always return 501
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Reporting Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+
+		windowStr := r.URL.Query().Get("window")
+
+		var window kubecost.Window
+		if windowStr != "" {
+			win, err := kubecost.ParseWindowWithOffset(windowStr, env.GetParsedUTCOffset())
+			if err != nil {
+				http.Error(w, fmt.Sprintf("Invalid parameter: %s", err), http.StatusBadRequest)
+				return
+			}
+			window = win
+		}
+
+		integrationKey := r.URL.Query().Get("integrationKey")
+
+		// If no providerKey argument was provider, restart all Cloud Asset Pipelines
+		if integrationKey == "" {
+			err := s.ingestionManager.RepairAll(*window.Start(), *window.End())
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, "Rebuilding Cloud Usage For All Providers")
+			return
+		} else {
+			err := s.ingestionManager.Repair(integrationKey, *window.Start(), *window.End())
+			if err != nil {
+				http.Error(w, err.Error(), http.StatusBadRequest)
+				return
+			}
+			protocol.WriteData(w, fmt.Sprintf("Rebuilding Cloud Usage For Provider %s", integrationKey))
+			return
+		}
+	}
+}
+
+// GetCloudCostStatusHandler creates a handler from a http request which returns a list of the billing integration status
+func (s *PipelineService) GetCloudCostStatusHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Degrade to 501 when the service or its ingestion manager is absent
+	if s == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Reporting Service is nil", http.StatusNotImplemented)
+		}
+	}
+	if s.ingestionManager == nil {
+		return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+			http.Error(w, "Cloud Cost Pipeline Service Ingestion Manager is nil", http.StatusNotImplemented)
+		}
+	}
+
+	// Serve the merged config/ingestion status as JSON
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, s.Status())
+	}
+}

+ 89 - 0
pkg/cloudcost/querier.go

@@ -0,0 +1,89 @@
+package cloudcost
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	filter "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// Querier allows for querying ranges of CloudCost data
+type Querier interface {
+	Query(QueryRequest, context.Context) (*kubecost.CloudCostSetRange, error)
+}
+
+// QueryRequest describes a single CloudCost query: the [Start, End) time
+// range, the properties to aggregate by, how results are accumulated over
+// time, and an optional filter applied to each CloudCost.
+type QueryRequest struct {
+	Start       time.Time
+	End         time.Time
+	AggregateBy []string
+	Accumulate  kubecost.AccumulateOption
+	Filter      filter.Filter
+}
+
+// DefaultChartItemsLength the default max number of items for a ViewGraphDataSet
+const DefaultChartItemsLength int = 10
+
+// ViewQuerier defines a contract for return View types to the QueryService to service the View Api
+type ViewQuerier interface {
+	QueryViewGraph(ViewQueryRequest, context.Context) (ViewGraphData, error)
+	QueryViewTotals(ViewQueryRequest, context.Context) (*ViewTableRow, int, error)
+	QueryViewTable(ViewQueryRequest, context.Context) (ViewTableRows, error)
+}
+
+// ViewQueryRequest extends QueryRequest with view-specific options:
+// cost metric selection, chart size limit, pagination, and sorting.
+type ViewQueryRequest struct {
+	QueryRequest
+	CostMetricName   kubecost.CostMetricName
+	ChartItemsLength int
+	Offset           int
+	Limit            int
+	SortDirection    SortDirection
+	SortColumn       SortField
+}
+
+// SortDirection a string type that acts as an enumeration of possible request options
+type SortDirection string
+
+const (
+	SortDirectionNone       SortDirection = ""
+	SortDirectionAscending  SortDirection = "asc"
+	SortDirectionDescending SortDirection = "desc"
+)
+
+// ParseSortDirection provides a resilient way to parse one of the enumerated SortDirection types from a string
+// or throws an error if it is not able to.
+func ParseSortDirection(sortDirection string) (SortDirection, error) {
+	switch strings.ToLower(sortDirection) {
+	case strings.ToLower(string(SortDirectionAscending)):
+		return SortDirectionAscending, nil
+	case strings.ToLower(string(SortDirectionDescending)):
+		return SortDirectionDescending, nil
+	}
+	// Fixed copy-paste in the error text: this parses a SortDirection, not a CostMetricName.
+	return SortDirectionNone, fmt.Errorf("failed to parse a valid SortDirection from '%s'", sortDirection)
+}
+
+// SortField a string type that acts as an enumeration of possible request options
+type SortField string
+
+const (
+	SortFieldNone              SortField = ""
+	SortFieldName              SortField = "name"
+	SortFieldCost              SortField = "cost"
+	SortFieldKubernetesPercent SortField = "kubernetesPercent"
+)
+
+// ParseSortField provides a resilient way to parse one of the enumerated SortField types from a string
+// or throws an error if it is not able to.
+func ParseSortField(sortColumn string) (SortField, error) {
+	switch strings.ToLower(sortColumn) {
+	case strings.ToLower(string(SortFieldName)):
+		return SortFieldName, nil
+	case strings.ToLower(string(SortFieldCost)):
+		return SortFieldCost, nil
+	case strings.ToLower(string(SortFieldKubernetesPercent)):
+		return SortFieldKubernetesPercent, nil
+	}
+	// Fixed copy-paste in the error text: this parses a SortField, not a CostMetricName.
+	return SortFieldNone, fmt.Errorf("failed to parse a valid SortField from '%s'", sortColumn)
+}

+ 370 - 0
pkg/cloudcost/queryservice.go

@@ -0,0 +1,370 @@
+package cloudcost
+
+import (
+	"encoding/csv"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/julienschmidt/httprouter"
+	filter21 "github.com/opencost/opencost/pkg/filter21"
+	"github.com/opencost/opencost/pkg/filter21/cloudcost"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/prom"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"go.opentelemetry.io/otel"
+)
+
+// NOTE(review): handlers below call protocol.WriteData/WriteError, but no
+// "protocol" package appears in this import block — confirm the import is
+// present in the full file.
+
+// tracerName identifies this package to OpenTelemetry.
+// Fixed typo in the module path: "ooencost" -> "opencost", so spans
+// attribute to the correct instrumentation scope.
+const tracerName = "github.com/opencost/opencost/pkg/cloudcost"
+
+const (
+	// csvFormat is the "format" query-param value that selects CSV output.
+	csvFormat = "csv"
+)
+
+// QueryService surfaces endpoints for accessing CloudCost data in raw form or for display in views
+type QueryService struct {
+	Querier     Querier
+	ViewQuerier ViewQuerier
+}
+
+// NewQueryService constructs a QueryService over the given raw and view queriers.
+func NewQueryService(querier Querier, viewQuerier ViewQuerier) *QueryService {
+	return &QueryService{
+		Querier:     querier,
+		ViewQuerier: viewQuerier,
+	}
+}
+
+// GetCloudCostHandler returns an http handler that serves raw CloudCost
+// query results as JSON. Returns 501 when the service or its Querier is
+// nil, 400 on bad request params, and 500 on query failure.
+func (s *QueryService) GetCloudCostHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.Querier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		resp, err := s.Querier.Query(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		// Wrap response serialization in its own span for tracing.
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
+// GetCloudCostViewGraphHandler returns an http handler that serves the
+// front-end graph data set for the CloudCost view as JSON.
+func (s *QueryService) GetCloudCostViewGraphHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewGraphHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		resp, err := s.ViewQuerier.QueryViewGraph(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
+// CloudCostViewTotalsResponse pairs the aggregated totals row with the
+// number of un-aggregated results it summarizes.
+type CloudCostViewTotalsResponse struct {
+	NumResults int           `json:"numResults"`
+	Combined   *ViewTableRow `json:"combined"`
+}
+
+// GetCloudCostViewTotalsHandler returns an http handler that serves the
+// combined totals row for the CloudCost view as JSON.
+func (s *QueryService) GetCloudCostViewTotalsHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewTotalsHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		totals, count, err := s.ViewQuerier.QueryViewTotals(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		resp := CloudCostViewTotalsResponse{
+			NumResults: count,
+			Combined:   totals,
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		w.Header().Set("Content-Type", "application/json")
+		protocol.WriteData(w, resp)
+		spanResp.End()
+	}
+}
+
+// GetCloudCostViewTableHandler returns an http handler that serves the
+// CloudCost view table rows, as JSON by default or CSV when the "format"
+// query param selects it.
+func (s *QueryService) GetCloudCostViewTableHandler() func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	// Return valid handler func
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		tracer := otel.Tracer(tracerName)
+		ctx, span := tracer.Start(r.Context(), "Service.GetCloudCostViewTableHandler")
+		defer span.End()
+
+		// If Query Service is nil, always return 501
+		if s == nil {
+			http.Error(w, "Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		if s.ViewQuerier == nil {
+			http.Error(w, "CloudCost Query Service is nil", http.StatusNotImplemented)
+			return
+		}
+
+		request, err := parseCloudCostViewRequest(r)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		qp := httputil.NewQueryParams(r.URL.Query())
+		// Decide the output format once. Previously the header check used
+		// strings.HasPrefix while the body check used ==, so a value like
+		// "csvx" got CSV headers with a JSON body.
+		format := qp.Get("format", "json")
+		isCSV := strings.HasPrefix(format, csvFormat)
+		if isCSV {
+			w.Header().Set("Content-Type", "text/csv")
+			w.Header().Set("Transfer-Encoding", "chunked")
+		} else {
+			// By default, send JSON
+			w.Header().Set("Content-Type", "application/json")
+		}
+
+		resp, err := s.ViewQuerier.QueryViewTable(*request, ctx)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Internal server error: %s", err), http.StatusInternalServerError)
+			return
+		}
+
+		_, spanResp := tracer.Start(ctx, "write response")
+		defer spanResp.End()
+		if isCSV {
+			window := kubecost.NewClosedWindow(request.Start, request.End)
+			writeCloudCostViewTableRowsAsCSV(w, resp, window.String())
+			return
+		}
+		// Content-Type was already set to application/json above; the
+		// redundant second Set has been removed.
+		protocol.WriteData(w, resp)
+	}
+}
+
+// parseCloudCostRequest builds a QueryRequest from the request's query
+// params: a required closed "window", optional "aggregate" properties
+// (defaulting to the full property set), "accumulate", and an optional
+// "filter" expression. Returns an error suitable for a 400 response.
+func parseCloudCostRequest(r *http.Request) (*QueryRequest, error) {
+	qp := httputil.NewQueryParams(r.URL.Query())
+
+	windowStr := qp.Get("window", "")
+	if windowStr == "" {
+		// Fixed typo: "require" -> "required".
+		return nil, fmt.Errorf("missing required window param")
+	}
+
+	window, err := kubecost.ParseWindowUTC(windowStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid window parameter: %w", err)
+	}
+	if window.IsOpen() {
+		return nil, fmt.Errorf("invalid window parameter: %s", window.String())
+	}
+
+	aggregateByRaw := qp.GetList("aggregate", ",")
+	aggregateBy := []string{}
+	for _, aggBy := range aggregateByRaw {
+		prop, err := ParseCloudCostProperty(aggBy)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing aggregate by %v", err)
+		}
+		aggregateBy = append(aggregateBy, prop)
+	}
+	// Default: aggregate by every CloudCost property, i.e. no collapsing.
+	if len(aggregateBy) == 0 {
+		aggregateBy = []string{
+			kubecost.CloudCostInvoiceEntityIDProp,
+			kubecost.CloudCostAccountIDProp,
+			kubecost.CloudCostProviderProp,
+			kubecost.CloudCostProviderIDProp,
+			kubecost.CloudCostCategoryProp,
+			kubecost.CloudCostServiceProp,
+		}
+	}
+
+	accumulate := kubecost.ParseAccumulate(qp.Get("accumulate", ""))
+
+	var filter filter21.Filter
+	filterString := qp.Get("filter", "")
+	if filterString != "" {
+		parser := cloudcost.NewCloudCostFilterParser()
+		filter, err = parser.Parse(filterString)
+		if err != nil {
+			// Lowercased per Go error-string convention and switched %s -> %w
+			// so callers can unwrap the parse error.
+			return nil, fmt.Errorf("parsing 'filter' parameter: %w", err)
+		}
+	}
+
+	opts := &QueryRequest{
+		Start:       *window.Start(),
+		End:         *window.End(),
+		AggregateBy: aggregateBy,
+		Accumulate:  accumulate,
+		Filter:      filter,
+	}
+
+	return opts, nil
+}
+
+// ParseCloudCostProperty canonicalizes a user-supplied aggregation property
+// name (case-insensitively) to one of the kubecost CloudCost property
+// constants, or to a sanitized "label:<name>" form. Returns an error for
+// any other value.
+func ParseCloudCostProperty(text string) (string, error) {
+	switch strings.TrimSpace(strings.ToLower(text)) {
+	case strings.ToLower(kubecost.CloudCostInvoiceEntityIDProp):
+		return kubecost.CloudCostInvoiceEntityIDProp, nil
+	case strings.ToLower(kubecost.CloudCostAccountIDProp):
+		return kubecost.CloudCostAccountIDProp, nil
+	case strings.ToLower(kubecost.CloudCostProviderProp):
+		return kubecost.CloudCostProviderProp, nil
+	case strings.ToLower(kubecost.CloudCostProviderIDProp):
+		return kubecost.CloudCostProviderIDProp, nil
+	case strings.ToLower(kubecost.CloudCostCategoryProp):
+		return kubecost.CloudCostCategoryProp, nil
+	case strings.ToLower(kubecost.CloudCostServiceProp):
+		return kubecost.CloudCostServiceProp, nil
+	}
+
+	// Note: the "label:" prefix match is case-sensitive, unlike the cases above.
+	if strings.HasPrefix(text, "label:") {
+		label := prom.SanitizeLabelName(strings.TrimSpace(strings.TrimPrefix(text, "label:")))
+		return fmt.Sprintf("label:%s", label), nil
+	}
+
+	return "", fmt.Errorf("invalid cloud cost property: %s", text)
+}
+
+// parseCloudCostViewRequest builds a ViewQueryRequest by parsing the base
+// QueryRequest plus the view-specific params: "costMetric" (default
+// amortized net cost), "limit"/"offset" pagination, and "sortBy"/
+// "sortByOrder" (defaults: cost, descending).
+func parseCloudCostViewRequest(r *http.Request) (*ViewQueryRequest, error) {
+	qr, err := parseCloudCostRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	qp := httputil.NewQueryParams(r.URL.Query())
+
+	// parse cost metric
+	costMetricName, err := kubecost.ParseCostMetricName(qp.Get("costMetric", string(kubecost.CostMetricAmortizedNetCost)))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing 'costMetric': %w", err)
+	}
+
+	limit := qp.GetInt("limit", 0)
+	offset := qp.GetInt("offset", 0)
+
+	// parse order
+	order, err := ParseSortDirection(qp.Get("sortByOrder", "desc"))
+	if err != nil {
+		// Fixed unbalanced quote in the error message ("'sortByOrder:").
+		return nil, fmt.Errorf("error parsing 'sortByOrder': %w", err)
+	}
+
+	sortColumn, err := ParseSortField(qp.Get("sortBy", "cost"))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing 'sortBy': %w", err)
+	}
+
+	return &ViewQueryRequest{
+		QueryRequest:     *qr,
+		CostMetricName:   costMetricName,
+		ChartItemsLength: DefaultChartItemsLength,
+		Limit:            limit,
+		Offset:           offset,
+		SortDirection:    order,
+		SortColumn:       sortColumn,
+	}, nil
+}
+
+// CloudCostViewTableRowsToCSV takes the csv writer and writes the ViewTableRows into the writer.
+func CloudCostViewTableRowsToCSV(writer *csv.Writer, ctr ViewTableRows, window string) error {
+	defer writer.Flush()
+	// Write the column headers
+	headers := []string{
+		"Name",
+		"K8s Utilization",
+		"Total",
+		"Window",
+	}
+	err := writer.Write(headers)
+	if err != nil {
+		return fmt.Errorf("CloudCostViewTableRowsToCSV: failed to convert ViewTableRows to csv with error: %w", err)
+	}
+
+	// Write one row per entry in the ViewTableRows
+	for _, row := range ctr {
+		err = writer.Write([]string{
+			row.Name,
+			fmt.Sprintf("%.3f", row.KubernetesPercent),
+			fmt.Sprintf("%.3f", row.Cost),
+			window,
+		})
+		if err != nil {
+			return fmt.Errorf("CloudCostViewTableRowsToCSV: failed to convert ViewTableRows to csv with error: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func writeCloudCostViewTableRowsAsCSV(w http.ResponseWriter, ctr ViewTableRows, window string) {
+	writer := csv.NewWriter(w)
+
+	err := CloudCostViewTableRowsToCSV(writer, ctr, window)
+	if err != nil {
+		protocol.WriteError(w, protocol.InternalServerError(err.Error()))
+		return
+	}
+}

+ 16 - 0
pkg/cloudcost/repository.go

@@ -0,0 +1,16 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// Repository is an interface for storing and retrieving CloudCost data,
+// keyed by a time (presumably the UTC start of a day-resolution window —
+// see RepositoryQuerier.Query) and an integration key string.
+type Repository interface {
+	// Has reports whether a CloudCostSet exists for the given time and key.
+	Has(time.Time, string) (bool, error)
+	// Get returns the stored set for the time and key; implementations may
+	// return (nil, nil) when absent (RepositoryQuerier.Query checks for nil).
+	Get(time.Time, string) (*kubecost.CloudCostSet, error)
+	// Keys lists the integration keys present in the repository.
+	Keys() ([]string, error)
+	// Put stores a CloudCostSet.
+	Put(*kubecost.CloudCostSet) error
+	// Expire removes stored data — presumably everything older than the
+	// given time; confirm against implementations.
+	Expire(time.Time) error
+}

+ 229 - 0
pkg/cloudcost/repositoryquerier.go

@@ -0,0 +1,229 @@
+package cloudcost
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// RepositoryQuerier is an implementation of Querier and ViewQuerier which pulls directly from a Repository
+type RepositoryQuerier struct {
+	repo Repository
+}
+
+// NewRepositoryQuerier constructs a RepositoryQuerier backed by the given Repository.
+func NewRepositoryQuerier(repo Repository) *RepositoryQuerier {
+	return &RepositoryQuerier{repo: repo}
+}
+
+// Query implements Querier: for each day in the requested window it reads
+// every integration key's stored CloudCostSet, filters the CloudCosts with
+// the request filter, aggregates them by the requested properties, and
+// finally accumulates the day-resolution range to the requested resolution.
+// Per-key read errors are logged and skipped rather than failing the query.
+func (rq *RepositoryQuerier) Query(request QueryRequest, ctx context.Context) (*kubecost.CloudCostSetRange, error) {
+	repoKeys, err := rq.repo.Keys()
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to get list of keys from repository: %w", err)
+	}
+
+	// create filter
+	compiler := kubecost.NewCloudCostMatchCompiler()
+	matcher, err := compiler.Compile(request.Filter)
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to compile filters: %w", err)
+	}
+
+	// Create a Cloud Cost Set Range in the resolution of the repository
+	ccsr, err := kubecost.NewCloudCostSetRange(request.Start, request.End, kubecost.AccumulateOptionDay, "")
+	if err != nil {
+		return nil, fmt.Errorf("RepositoryQuerier: Query: failed to create Cloud Cost Set Range: %w", err)
+	}
+	for _, cloudCostSet := range ccsr.CloudCostSets {
+		// Set the aggregation properties so inserted CloudCosts are grouped
+		// by the requested keys (presumably via Insert — confirm
+		// kubecost.CloudCostSet.Insert semantics).
+		cloudCostSet.AggregationProperties = request.AggregateBy
+		for _, key := range repoKeys {
+			ccs, err := rq.repo.Get(cloudCostSet.Window.Start().UTC(), key)
+			if err != nil {
+				log.Errorf("RepositoryQuerier: Query: %s", err.Error())
+				continue
+			}
+			if ccs == nil {
+				continue
+			}
+
+			for _, cc := range ccs.CloudCosts {
+				if matcher.Matches(cc) {
+					cloudCostSet.Insert(cc)
+				}
+			}
+		}
+	}
+
+	if request.Accumulate != kubecost.AccumulateOptionNone {
+		ccsr, err = ccsr.Accumulate(request.Accumulate)
+		if err != nil {
+			return nil, fmt.Errorf("RepositoryQuerier: Query: error accumulating: %w", err)
+		}
+	}
+
+	return ccsr, nil
+}
+
+// QueryViewGraph implements ViewQuerier: runs the base query, then for each
+// time slice produces items sorted by descending cost, collapsing everything
+// beyond ChartItemsLength into a single "Other" bucket.
+func (rq *RepositoryQuerier) QueryViewGraph(request ViewQueryRequest, ctx context.Context) (ViewGraphData, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewGraph: query failed: %w", err)
+	}
+
+	if ccasr.IsEmpty() {
+		return make([]*ViewGraphDataSet, 0), nil
+	}
+	var sets ViewGraphData
+	for _, ccas := range ccasr.CloudCostSets {
+		items := make([]ViewGraphDataSetItem, 0)
+
+		for key, cc := range ccas.CloudCosts {
+			costMetric, err := cc.GetCostMetric(request.CostMetricName)
+			if err != nil {
+				return nil, fmt.Errorf("QueryViewGraph: failed to get cost metric: %w", err)
+			}
+			items = append(items, ViewGraphDataSetItem{
+				Name:  key,
+				Value: costMetric.Cost,
+			})
+		}
+		// Descending by value so the largest contributors come first.
+		sort.SliceStable(items, func(i, j int) bool {
+			return items[i].Value > items[j].Value
+		})
+
+		if len(items) > request.ChartItemsLength {
+			otherItems := items[request.ChartItemsLength:]
+			newItems := items[:request.ChartItemsLength]
+			// Rename last item other and add all other values into it.
+			// Note the Nth item's own value is kept and the overflow is
+			// summed on top of it.
+			newItems[request.ChartItemsLength-1].Name = "Other"
+			for _, item := range otherItems {
+				newItems[request.ChartItemsLength-1].Value += item.Value
+			}
+			items = newItems
+		}
+
+		sets = append(sets, &ViewGraphDataSet{
+			Start: *ccas.Window.Start(),
+			End:   *ccas.Window.End(),
+			Items: items,
+		})
+	}
+	return sets, nil
+}
+
+// QueryViewTotals implements ViewQuerier: accumulates the full query result,
+// aggregates it down to a single combined CloudCost, and returns that as a
+// "Totals" row along with the pre-aggregation result count.
+func (rq *RepositoryQuerier) QueryViewTotals(request ViewQueryRequest, ctx context.Context) (*ViewTableRow, int, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: query failed: %w", err)
+	}
+	acc, err := ccasr.AccumulateAll()
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: accumulate failed: %w", err)
+	}
+	if acc.IsEmpty() {
+		return nil, 0, nil
+	}
+	count := len(acc.CloudCosts)
+
+	// Aggregating by no properties collapses everything into one CloudCost.
+	total, err := acc.Aggregate([]string{})
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: aggregate total failed: %w", err)
+	}
+
+	// err is nil on the next two paths; the previous "%w", err wrapping
+	// appended a misleading "<nil>" to the messages, so it was removed.
+	if total.IsEmpty() {
+		return nil, -1, fmt.Errorf("QueryViewTotals: missing total")
+	}
+
+	if len(total.CloudCosts) != 1 {
+		return nil, -1, fmt.Errorf("QueryViewTotals: total did not aggregate")
+	}
+
+	// The empty-property aggregation keys its single entry under "".
+	cm, err := total.CloudCosts[""].GetCostMetric(request.CostMetricName)
+	if err != nil {
+		return nil, -1, fmt.Errorf("QueryViewTotals: failed to retrieve cost metric: %w", err)
+	}
+	return &ViewTableRow{
+		Name:              "Totals",
+		KubernetesPercent: cm.KubernetesPercent,
+		Cost:              cm.Cost,
+	}, count, nil
+}
+
+// QueryViewTable implements ViewQuerier: accumulates the query result into
+// rows, sorts by the requested column/direction, and applies offset/limit
+// pagination (a non-positive limit means no limit).
+func (rq *RepositoryQuerier) QueryViewTable(request ViewQueryRequest, ctx context.Context) (ViewTableRows, error) {
+	ccasr, err := rq.Query(request.QueryRequest, ctx)
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewTable: query failed: %w", err)
+	}
+	acc, err := ccasr.AccumulateAll()
+	if err != nil {
+		return nil, fmt.Errorf("QueryViewTable: accumulate failed: %w", err)
+	}
+
+	var rows ViewTableRows
+	for key, cloudCost := range acc.CloudCosts {
+		costMetric, err2 := cloudCost.GetCostMetric(request.CostMetricName)
+		if err2 != nil {
+			// Fixed: previously wrapped the outer err (nil here) instead of err2.
+			return nil, fmt.Errorf("QueryViewTable: failed to retrieve cost metric: %w", err2)
+		}
+		vtr := &ViewTableRow{
+			Name:              key,
+			KubernetesPercent: costMetric.KubernetesPercent,
+			Cost:              costMetric.Cost,
+		}
+		rows = append(rows, vtr)
+	}
+	// Sort Results
+
+	// Sort by Name to ensure consistent return
+	sort.SliceStable(rows, func(i, j int) bool {
+		return rows[i].Name > rows[j].Name
+	})
+
+	switch request.SortColumn {
+	case SortFieldName:
+		// Descending by name is already the pre-sort above.
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Name < rows[j].Name
+			})
+		}
+
+	case SortFieldCost:
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Cost < rows[j].Cost
+			})
+		} else {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].Cost > rows[j].Cost
+			})
+		}
+	case SortFieldKubernetesPercent:
+		if request.SortDirection == SortDirectionAscending {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].KubernetesPercent < rows[j].KubernetesPercent
+			})
+		} else {
+			sort.SliceStable(rows, func(i, j int) bool {
+				return rows[i].KubernetesPercent > rows[j].KubernetesPercent
+			})
+		}
+
+	default:
+		return nil, fmt.Errorf("invalid sort field '%s'", string(request.SortColumn))
+	}
+
+	// paginate sorted results
+	if request.Offset > len(rows) {
+		return make([]*ViewTableRow, 0), nil
+	}
+
+	// Fixed: Limit defaults to 0 (see parseCloudCostViewRequest), and the
+	// previous rows[Offset : Offset+0] slice made the default page always
+	// empty. Treat a non-positive limit as "return everything from Offset".
+	if request.Limit <= 0 {
+		return rows[request.Offset:], nil
+	}
+
+	limit := request.Offset + request.Limit
+	if limit > len(rows) {
+		return rows[request.Offset:], nil
+	}
+
+	return rows[request.Offset:limit], nil
+}

+ 24 - 0
pkg/cloudcost/status.go

@@ -0,0 +1,24 @@
+package cloudcost
+
+import (
+	"time"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud"
+)
+
+// Status gives the details and metadata of a CloudCost integration
+type Status struct {
+	Key              string             `json:"key"`
+	Source           string             `json:"source"`
+	Provider         string             `json:"provider"`
+	Active           bool               `json:"active"`
+	Valid            bool               `json:"valid"`
+	LastRun          time.Time          `json:"lastRun"`
+	NextRun          time.Time          `json:"nextRun"`
+	// NOTE(review): this JSON tag is PascalCase while every other tag is
+	// camelCase — likely unintentional, but changing it would break API
+	// consumers; confirm before renaming.
+	RefreshRate      string             `json:"RefreshRate"`
+	Created          time.Time          `json:"created"`
+	Runs             int                `json:"runs"`
+	Coverage         string             `json:"coverage"`
+	ConnectionStatus string             `json:"connectionStatus"`
+	Config           cloudconfig.Config `json:"config"`
+}

+ 107 - 0
pkg/cloudcost/view.go

@@ -0,0 +1,107 @@
+package cloudcost
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/util/mathutil"
+)
+
+// View serves data to the Cloud Cost front end, in the
+// structure it requires (i.e. a graph and a table).
+type View struct {
+	GraphData  ViewGraphData `json:"graphData"`
+	TableTotal *ViewTableRow `json:"tableTotal"`
+	TableRows  ViewTableRows `json:"tableRows"`
+}
+
+// ViewTableRows is an ordered list of table rows.
+type ViewTableRows []*ViewTableRow
+
+// Equal reports element-wise equality; order matters.
+func (vtrs ViewTableRows) Equal(that ViewTableRows) bool {
+	if len(vtrs) != len(that) {
+		return false
+	}
+
+	for i := 0; i < len(vtrs); i++ {
+		if !vtrs[i].Equal(that[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ViewTableRow is a single named table entry with its Kubernetes
+// utilization percentage and cost.
+type ViewTableRow struct {
+	Name              string  `json:"name"`
+	KubernetesPercent float64 `json:"kubernetesPercent"`
+	Cost              float64 `json:"cost"`
+}
+
+// Equal compares name exactly and the float fields approximately.
+// NOTE(review): a nil receiver or nil argument would panic — callers
+// presumably only pass non-nil rows; confirm.
+func (vtr *ViewTableRow) Equal(that *ViewTableRow) bool {
+	if vtr.Name != that.Name {
+		return false
+	}
+
+	if !mathutil.Approximately(vtr.KubernetesPercent, that.KubernetesPercent) {
+		return false
+	}
+
+	if !mathutil.Approximately(vtr.Cost, that.Cost) {
+		return false
+	}
+
+	return true
+}
+
+// ViewGraphData is an ordered list of per-window graph data sets.
+type ViewGraphData []*ViewGraphDataSet
+
+// Equal reports element-wise equality; order matters.
+func (vgd ViewGraphData) Equal(that ViewGraphData) bool {
+	if len(vgd) != len(that) {
+		return false
+	}
+
+	for i := 0; i < len(vgd); i++ {
+		if !vgd[i].Equal(that[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ViewGraphDataSet is the graph data for one [Start, End) window.
+type ViewGraphDataSet struct {
+	Start time.Time              `json:"start"`
+	End   time.Time              `json:"end"`
+	Items []ViewGraphDataSetItem `json:"items"`
+}
+
+// NOTE: does not compare start and end times, just that the items are equal
+func (vgds *ViewGraphDataSet) Equal(that *ViewGraphDataSet) bool {
+	if len(vgds.Items) != len(that.Items) {
+		return false
+	}
+
+	for i := 0; i < len(vgds.Items); i++ {
+		if !vgds.Items[i].Equal(that.Items[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ViewGraphDataSetItem is one named value within a graph data set.
+type ViewGraphDataSetItem struct {
+	Name  string  `json:"name"`
+	Value float64 `json:"value"`
+}
+
+// Equal compares the name exactly and the value approximately.
+func (vgdsi ViewGraphDataSetItem) Equal(that ViewGraphDataSetItem) bool {
+	if vgdsi.Name != that.Name {
+		return false
+	}
+
+	if !mathutil.Approximately(vgdsi.Value, that.Value) {
+		return false
+	}
+
+	return true
+}

+ 18 - 0
pkg/cmd/costmodel/costmodel.go

@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/cloudcost"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"
 
@@ -39,11 +40,28 @@ func Execute(opts *CostModelOpts) error {
 		log.Errorf("couldn't start CSV export worker: %v", err)
 	}
 
+	if env.IsCloudCostEnabled() {
+		repo := cloudcost.NewMemoryRepository()
+		a.CloudCostPipelineService = cloudcost.NewPipelineService(repo, a.CloudConfigController, cloudcost.DefaultIngestorConfiguration())
+		repoQuerier := cloudcost.NewRepositoryQuerier(repo)
+		a.CloudCostQueryService = cloudcost.NewQueryService(repoQuerier, repoQuerier)
+	}
+
 	rootMux := http.NewServeMux()
 	a.Router.GET("/healthz", Healthz)
 	a.Router.GET("/allocation", a.ComputeAllocationHandler)
 	a.Router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
 	a.Router.GET("/assets", a.ComputeAssetsHandler)
+
+	a.Router.GET("/cloudCost", a.CloudCostQueryService.GetCloudCostHandler())
+	a.Router.GET("/cloudCost/view/graph", a.CloudCostQueryService.GetCloudCostViewGraphHandler())
+	a.Router.GET("/cloudCost/view/totals", a.CloudCostQueryService.GetCloudCostViewTotalsHandler())
+	a.Router.GET("/cloudCost/view/table", a.CloudCostQueryService.GetCloudCostViewTableHandler())
+
+	a.Router.GET("/cloudCost/status", a.CloudCostPipelineService.GetCloudCostStatusHandler())
+	a.Router.GET("/cloudCost/rebuild", a.CloudCostPipelineService.GetCloudCostRebuildHandler())
+	a.Router.GET("/cloudCost/repair", a.CloudCostPipelineService.GetCloudCostRepairHandler())
+
 	rootMux.Handle("/", a.Router)
 	rootMux.Handle("/metrics", promhttp.Handler())
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)

+ 3 - 2
pkg/costmodel/allocation.go

@@ -295,8 +295,9 @@ func (cm *CostModel) ComputeAllocation(start, end time.Time, resolution time.Dur
 // it supposed to be a good indicator of available allocation data
 func (cm *CostModel) DateRange() (time.Time, time.Time, error) {
 	ctx := prom.NewNamedContext(cm.PrometheusClient, prom.AllocationContextName)
+	exportCsvDaysFmt := fmt.Sprintf("%dd", env.GetExportCSVMaxDays())
 
-	resOldest, _, err := ctx.QuerySync(fmt.Sprintf(queryFmtOldestSample, env.GetPromClusterFilter(), "90d", "1h"))
+	resOldest, _, err := ctx.QuerySync(fmt.Sprintf(queryFmtOldestSample, env.GetPromClusterFilter(), exportCsvDaysFmt, "1h"))
 	if err != nil {
 		return time.Time{}, time.Time{}, fmt.Errorf("querying oldest sample: %w", err)
 	}
@@ -305,7 +306,7 @@ func (cm *CostModel) DateRange() (time.Time, time.Time, error) {
 	}
 	oldest := time.Unix(int64(resOldest[0].Values[0].Value), 0)
 
-	resNewest, _, err := ctx.QuerySync(fmt.Sprintf(queryFmtNewestSample, env.GetPromClusterFilter(), "90d", "1h"))
+	resNewest, _, err := ctx.QuerySync(fmt.Sprintf(queryFmtNewestSample, env.GetPromClusterFilter(), exportCsvDaysFmt, "1h"))
 	if err != nil {
 		return time.Time{}, time.Time{}, fmt.Errorf("querying newest sample: %w", err)
 	}

+ 7 - 3
pkg/costmodel/cluster.go

@@ -848,10 +848,14 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 
 			// interpolate any missing data
 			resultMins := lb.Minutes
-			scaleFactor := (resultMins + resolution.Minutes()) / resultMins
+			if resultMins > 0 {
+				scaleFactor := (resultMins + resolution.Minutes()) / resultMins
 
-			hrs := (lb.Minutes * scaleFactor) / 60.0
-			lb.Cost += lbPricePerHr * hrs
+				hrs := (lb.Minutes * scaleFactor) / 60.0
+				lb.Cost += lbPricePerHr * hrs
+			} else {
+				log.DedupedWarningf(20, "ClusterLoadBalancers: found zero minutes for key: %v", key)
+			}
 
 			if lb.Ip != "" && lb.Ip != providerID {
 				log.DedupedWarningf(5, "ClusterLoadBalancers: multiple IPs per load balancer not supported, using most recent IP")

+ 6 - 0
pkg/costmodel/costmodel.go

@@ -333,6 +333,9 @@ func (cm *CostModel) ComputeCostData(cli prometheusClient.Client, cp costAnalyze
 	// Determine if there are vgpus configured and if so get the total allocatable number
 	// If there are no vgpus, the coefficient is set to 1.0
 	vgpuCount, err := getAllocatableVGPUs(cm.Cache)
+	if err != nil {
+		log.Warnf("getAllocatableVGCPUs error: %s", err.Error())
+	}
 	vgpuCoeff := 10.0
 	if vgpuCount > 0.0 {
 		vgpuCoeff = vgpuCount
@@ -1019,6 +1022,9 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 	nodes := make(map[string]*costAnalyzerCloud.Node)
 
 	vgpuCount, err := getAllocatableVGPUs(cm.Cache)
+	if err != nil {
+		return nil, err
+	}
 	vgpuCoeff := 10.0
 	if vgpuCount > 0.0 {
 		vgpuCoeff = vgpuCount

+ 47 - 35
pkg/costmodel/router.go

@@ -17,8 +17,10 @@ import (
 
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/pkg/cloud/aws"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/cloudcost"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
@@ -82,23 +84,26 @@ var (
 // Accesses defines a singleton application instance, providing access to
 // Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	Router              *httprouter.Router
-	PrometheusClient    prometheus.Client
-	ThanosClient        prometheus.Client
-	KubeClientSet       kubernetes.Interface
-	ClusterCache        clustercache.ClusterCache
-	ClusterMap          clusters.ClusterMap
-	CloudProvider       models.Provider
-	ConfigFileManager   *config.ConfigFileManager
-	ClusterInfoProvider clusters.ClusterInfoProvider
-	Model               *CostModel
-	MetricsEmitter      *CostModelMetricsEmitter
-	OutOfClusterCache   *cache.Cache
-	AggregateCache      *cache.Cache
-	CostDataCache       *cache.Cache
-	ClusterCostsCache   *cache.Cache
-	CacheExpiration     map[time.Duration]time.Duration
-	AggAPI              Aggregator
+	Router                   *httprouter.Router
+	PrometheusClient         prometheus.Client
+	ThanosClient             prometheus.Client
+	KubeClientSet            kubernetes.Interface
+	ClusterCache             clustercache.ClusterCache
+	ClusterMap               clusters.ClusterMap
+	CloudProvider            models.Provider
+	ConfigFileManager        *config.ConfigFileManager
+	CloudConfigController    *cloudconfig.Controller
+	CloudCostPipelineService *cloudcost.PipelineService
+	CloudCostQueryService    *cloudcost.QueryService
+	ClusterInfoProvider      clusters.ClusterInfoProvider
+	Model                    *CostModel
+	MetricsEmitter           *CostModelMetricsEmitter
+	OutOfClusterCache        *cache.Cache
+	AggregateCache           *cache.Cache
+	CostDataCache            *cache.Cache
+	ClusterCostsCache        *cache.Cache
+	CacheExpiration          map[time.Duration]time.Duration
+	AggAPI                   Aggregator
 	// SettingsCache stores current state of app settings
 	SettingsCache *cache.Cache
 	// settingsSubscribers tracks channels through which changes to different
@@ -1714,25 +1719,27 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{
-		Router:              httprouter.New(),
-		PrometheusClient:    promCli,
-		ThanosClient:        thanosClient,
-		KubeClientSet:       kubeClientset,
-		ClusterCache:        k8sCache,
-		ClusterMap:          clusterMap,
-		CloudProvider:       cloudProvider,
-		ConfigFileManager:   confManager,
-		ClusterInfoProvider: clusterInfoProvider,
-		Model:               costModel,
-		MetricsEmitter:      metricsEmitter,
-		AggregateCache:      aggregateCache,
-		CostDataCache:       costDataCache,
-		ClusterCostsCache:   clusterCostsCache,
-		OutOfClusterCache:   outOfClusterCache,
-		SettingsCache:       settingsCache,
-		CacheExpiration:     cacheExpiration,
-		httpServices:        services.NewCostModelServices(),
+		Router:                httprouter.New(),
+		PrometheusClient:      promCli,
+		ThanosClient:          thanosClient,
+		KubeClientSet:         kubeClientset,
+		ClusterCache:          k8sCache,
+		ClusterMap:            clusterMap,
+		CloudProvider:         cloudProvider,
+		CloudConfigController: cloudconfig.NewController(cloudProvider),
+		ConfigFileManager:     confManager,
+		ClusterInfoProvider:   clusterInfoProvider,
+		Model:                 costModel,
+		MetricsEmitter:        metricsEmitter,
+		AggregateCache:        aggregateCache,
+		CostDataCache:         costDataCache,
+		ClusterCostsCache:     clusterCostsCache,
+		OutOfClusterCache:     outOfClusterCache,
+		SettingsCache:         settingsCache,
+		CacheExpiration:       cacheExpiration,
+		httpServices:          services.NewCostModelServices(),
 	}
+
 	// Use the Accesses instance, itself, as the CostModelAggregator. This is
 	// confusing and unconventional, but necessary so that we can swap it
 	// out for the ETL-adapted version elsewhere.
@@ -1811,6 +1818,11 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	a.Router.GET("/logs/level", a.GetLogLevel)
 	a.Router.POST("/logs/level", a.SetLogLevel)
 
+	a.Router.GET("/cloud/config/export", a.CloudConfigController.GetExportConfigHandler())
+	a.Router.GET("/cloud/config/enable", a.CloudConfigController.GetEnableConfigHandler())
+	a.Router.GET("/cloud/config/disable", a.CloudConfigController.GetDisableConfigHandler())
+	a.Router.GET("/cloud/config/delete", a.CloudConfigController.GetDeleteConfigHandler())
+
 	a.httpServices.RegisterAll(a.Router)
 
 	return a

+ 37 - 0
pkg/env/costmodelenv.go

@@ -106,6 +106,15 @@ const (
 	ExportCSVFile       = "EXPORT_CSV_FILE"
 	ExportCSVLabelsList = "EXPORT_CSV_LABELS_LIST"
 	ExportCSVLabelsAll  = "EXPORT_CSV_LABELS_ALL"
+	ExportCSVMaxDays    = "EXPORT_CSV_MAX_DAYS"
+
+	DataRetentionDailyResolutionDaysEnvVar = "DATA_RETENTION_DAILY_RESOLUTION_DAYS"
+
+	CloudCostEnabledEnvVar          = "CLOUD_COST_ENABLED"
+	CloudCostMonthToDateIntervalVar = "CLOUD_COST_MONTH_TO_DATE_INTERVAL"
+	CloudCostRefreshRateHoursEnvVar = "CLOUD_COST_REFRESH_RATE_HOURS"
+	CloudCostQueryWindowDaysEnvVar  = "CLOUD_COST_QUERY_WINDOW_DAYS"
+	CloudCostRunWindowDaysEnvVar    = "CLOUD_COST_RUN_WINDOW_DAYS"
 )
 
 const DefaultConfigMountPath = "/var/configs"
@@ -128,6 +137,10 @@ func GetExportCSVLabelsList() []string {
 	return GetList(ExportCSVLabelsList, ",")
 }
 
+func GetExportCSVMaxDays() int {
+	return GetInt(ExportCSVMaxDays, 90)
+}
+
 // GetKubecostConfigBucket returns a file location for a mounted bucket configuration which is used to store
 // a subset of kubecost configurations that require sharing via remote storage.
 func GetKubecostConfigBucket() string {
@@ -603,3 +616,27 @@ func GetRegionOverrideList() []string {
 
 	return regionList
 }
+
+func GetDataRetentionDailyResolutionDays() int64 {
+	return GetInt64(DataRetentionDailyResolutionDaysEnvVar, 15)
+}
+
+func IsCloudCostEnabled() bool {
+	return GetBool(CloudCostEnabledEnvVar, false)
+}
+
+func GetCloudCostMonthToDateInterval() int {
+	return GetInt(CloudCostMonthToDateIntervalVar, 6)
+}
+
+func GetCloudCostRefreshRateHours() int64 {
+	return GetInt64(CloudCostRefreshRateHoursEnvVar, 6)
+}
+
+func GetCloudCostQueryWindowDays() int64 {
+	return GetInt64(CloudCostQueryWindowDaysEnvVar, 7)
+}
+
+func GetCloudCostRunWindowDays() int64 {
+	return GetInt64(CloudCostRunWindowDaysEnvVar, 3)
+}

+ 44 - 0
pkg/env/costmodelenv_test.go

@@ -41,3 +41,47 @@ func TestIsCacheDisabled(t *testing.T) {
 		})
 	}
 }
+
+func TestGetExportCSVMaxDays(t *testing.T) {
+	tests := []struct {
+		name string
+		want int
+		pre  func()
+	}{
+		{
+			name: "Ensure the default value is 90d",
+			want: 90,
+		},
+		{
+			name: "Ensure the value is 30 when EXPORT_CSV_MAX_DAYS is set to 30",
+			want: 30,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "30")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to empty string",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "")
+			},
+		},
+		{
+			name: "Ensure the value is 90 when EXPORT_CSV_MAX_DAYS is set to invalid value",
+			want: 90,
+			pre: func() {
+				os.Setenv("EXPORT_CSV_MAX_DAYS", "foo")
+			},
+		},
+	}
+	for _, tt := range tests {
+		if tt.pre != nil {
+			tt.pre()
+		}
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetExportCSVMaxDays(); got != tt.want {
+				t.Errorf("GetExportCSVMaxDays() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 56 - 7
pkg/kubecost/asset.go

@@ -1589,6 +1589,10 @@ func (b *Breakdown) Clone() *Breakdown {
 
 // Equal returns true if the two Breakdowns are exact matches
 func (b *Breakdown) Equal(that *Breakdown) bool {
+	if b == nil && that == nil {
+		return true
+	}
+
 	if b == nil || that == nil {
 		return false
 	}
@@ -1889,6 +1893,32 @@ func (n *NodeOverhead) SanitizeNaN() {
 	}
 }
 
+func (n *NodeOverhead) Equal(other *NodeOverhead) bool {
+	if n == nil && other != nil {
+		return false
+	}
+	if n != nil && other == nil {
+		return false
+	}
+	if n == nil && other == nil {
+		return true
+	}
+
+	// This is okay because everything in NodeOverhead is a value type.
+	return *n == *other
+}
+
+func (n *NodeOverhead) Clone() *NodeOverhead {
+	if n == nil {
+		return nil
+	}
+	return &NodeOverhead{
+		CpuOverheadFraction:  n.CpuOverheadFraction,
+		RamOverheadFraction:  n.RamOverheadFraction,
+		OverheadCostFraction: n.OverheadCostFraction,
+	}
+}
+
 // Node is an Asset representing a single node in a cluster
 type Node struct {
 	Properties   *AssetProperties
@@ -2130,6 +2160,15 @@ func (n *Node) add(that *Node) {
 		n.RAMBreakdown.User = (n.RAMBreakdown.User*n.RAMCost + that.RAMBreakdown.User*that.RAMCost) / totalRAMCost
 	}
 
+	// These calculations have to happen before the mutable fields of n they
+	// depend on (cpu cost, ram cost) are mutated with post-add totals.
+	if n.Overhead != nil && that.Overhead != nil {
+		n.Overhead.RamOverheadFraction = (n.Overhead.RamOverheadFraction*n.RAMCost + that.Overhead.RamOverheadFraction*that.RAMCost) / totalRAMCost
+		n.Overhead.CpuOverheadFraction = (n.Overhead.CpuOverheadFraction*n.CPUCost + that.Overhead.CpuOverheadFraction*that.CPUCost) / totalCPUCost
+	} else {
+		n.Overhead = nil
+	}
+
 	n.CPUCoreHours += that.CPUCoreHours
 	n.RAMByteHours += that.RAMByteHours
 	n.GPUHours += that.GPUHours
@@ -2139,10 +2178,9 @@ func (n *Node) add(that *Node) {
 	n.RAMCost += that.RAMCost
 	n.Adjustment += that.Adjustment
 
-	if n.Overhead != nil && that.Overhead != nil {
-
-		n.Overhead.RamOverheadFraction = (n.Overhead.RamOverheadFraction*n.RAMCost + that.Overhead.RamOverheadFraction*that.RAMCost) / totalRAMCost
-		n.Overhead.CpuOverheadFraction = (n.Overhead.CpuOverheadFraction*n.CPUCost + that.Overhead.CpuOverheadFraction*that.CPUCost) / totalCPUCost
+	// The cost-weighted overhead is calculated after the node is totaled
+	// because the cost-weighted overhead is based on post-add data.
+	if n.Overhead != nil {
 		n.Overhead.OverheadCostFraction = ((n.Overhead.CpuOverheadFraction * n.CPUCost) + (n.Overhead.RamOverheadFraction * n.RAMCost)) / n.TotalCost()
 	}
 }
@@ -2171,6 +2209,7 @@ func (n *Node) Clone() Asset {
 		GPUCount:     n.GPUCount,
 		RAMCost:      n.RAMCost,
 		Preemptible:  n.Preemptible,
+		Overhead:     n.Overhead.Clone(),
 		Discount:     n.Discount,
 	}
 }
@@ -2233,6 +2272,9 @@ func (n *Node) Equal(a Asset) bool {
 	if n.Preemptible != that.Preemptible {
 		return false
 	}
+	if !n.Overhead.Equal(that.Overhead) {
+		return false
+	}
 
 	return true
 }
@@ -2345,9 +2387,16 @@ func (n *Node) SanitizeNaN() {
 		n.Preemptible = 0
 	}
 
-	n.CPUBreakdown.SanitizeNaN()
-	n.RAMBreakdown.SanitizeNaN()
-	n.Overhead.SanitizeNaN()
+	if n.CPUBreakdown != nil {
+		n.CPUBreakdown.SanitizeNaN()
+	}
+	if n.RAMBreakdown != nil {
+		n.RAMBreakdown.SanitizeNaN()
+	}
+
+	if n.Overhead != nil {
+		n.Overhead.SanitizeNaN()
+	}
 }
 
 // LoadBalancer is an Asset representing a single load balancer in a cluster

+ 74 - 3
pkg/kubecost/asset_test.go

@@ -384,6 +384,11 @@ func TestNode_Add(t *testing.T) {
 		Other:  0.0,
 	}
 	node1.SetAdjustment(1.6)
+	node1.Overhead = &NodeOverhead{
+		CpuOverheadFraction:  1,
+		RamOverheadFraction:  1,
+		OverheadCostFraction: 1,
+	}
 
 	node2 := NewNode("node2", "cluster1", "node2", *windows[0].start, *windows[0].end, windows[0])
 	node2.CPUCoreHours = 1.0 * hours
@@ -406,6 +411,11 @@ func TestNode_Add(t *testing.T) {
 		Other:  0.05,
 	}
 	node2.SetAdjustment(1.0)
+	node2.Overhead = &NodeOverhead{
+		CpuOverheadFraction:  0.6,
+		RamOverheadFraction:  0.75,
+		OverheadCostFraction: 0.7,
+	}
 
 	nodeT := node1.Add(node2).(*Node)
 
@@ -434,6 +444,19 @@ func TestNode_Add(t *testing.T) {
 	if nodeT.RAMBytes() != 4.0*gb {
 		t.Fatalf("Node.Add: expected %f; got %f", 4.0*gb, nodeT.RAMBytes())
 	}
+	if o := nodeT.Overhead; o == nil {
+		t.Errorf("Node.Add (1 + 2): expected overhead to be non-nil")
+	} else {
+		if o.CpuOverheadFraction < 0 || o.CpuOverheadFraction > 1 {
+			t.Errorf("CPU overhead must be within [0, 1], is: %f", o.CpuOverheadFraction)
+		}
+		if o.RamOverheadFraction < 0 || o.RamOverheadFraction > 1 {
+			t.Errorf("RAM overhead must be within [0, 1], is: %f", o.RamOverheadFraction)
+		}
+		if o.OverheadCostFraction < 0 || o.OverheadCostFraction > 1 {
+			t.Errorf("Cost-weighted overhead must be within [0, 1], is: %f", o.OverheadCostFraction)
+		}
+	}
 
 	// Check that the original assets are unchanged
 	if !util.IsApproximately(node1.TotalCost(), 10.0) {
@@ -459,6 +482,11 @@ func TestNode_Add(t *testing.T) {
 	node3.RAMCost = 0.0
 	node3.Discount = 0.3
 	node3.SetAdjustment(0.0)
+	node3.Overhead = &NodeOverhead{
+		CpuOverheadFraction:  0.6,
+		RamOverheadFraction:  0.75,
+		OverheadCostFraction: 0.7,
+	}
 
 	node4 := NewNode("node4", "cluster1", "node4", *windows[0].start, *windows[0].end, windows[0])
 	node4.CPUCoreHours = 0 * hours
@@ -469,6 +497,7 @@ func TestNode_Add(t *testing.T) {
 	node4.RAMCost = 0.0
 	node4.Discount = 0.1
 	node4.SetAdjustment(0.0)
+	node4.Overhead = nil
 
 	nodeT = node3.Add(node4).(*Node)
 
@@ -479,6 +508,9 @@ func TestNode_Add(t *testing.T) {
 	if nodeT.Discount != 0.2 {
 		t.Fatalf("Node.Add: expected %f; got %f", 0.2, nodeT.Discount)
 	}
+	if nodeT.Overhead != nil {
+		t.Errorf("Node.Add: adding a node with nil overhead should nil the resulting overhead")
+	}
 
 	// Accumulate: one nodes, two window
 	nodeA1 := NewNode("nodeA1", "cluster1", "nodeA1", *windows[0].start, *windows[0].end, windows[0])
@@ -548,7 +580,37 @@ func TestNode_Add(t *testing.T) {
 }
 
 func TestNode_Clone(t *testing.T) {
-	// TODO
+	cases := []struct {
+		name string
+
+		input *Node
+	}{
+		{
+			name: "overhead nil",
+			input: &Node{
+				Overhead: nil,
+			},
+		},
+		{
+			name: "overhead non-nil",
+			input: &Node{
+				Overhead: &NodeOverhead{
+					CpuOverheadFraction:  3,
+					RamOverheadFraction:  7,
+					OverheadCostFraction: 6,
+				},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			result := c.input.Clone()
+			if !result.Equal(c.input) {
+				t.Errorf("clone result doesn't equal input")
+			}
+		})
+	}
 }
 
 func TestNode_MarshalJSON(t *testing.T) {
@@ -897,10 +959,13 @@ func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 		GenerateMockAssetSet(startD2, day),
 	)
 	err = asr.AggregateBy(nil, nil)
-	as, err = asr.AccumulateToAssetSet()
 	if err != nil {
 		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
 	}
+	as, err = asr.AccumulateToAssetSet()
+	if err != nil {
+		t.Fatalf("AssetSetRange.AccumulateToAssetSet: unexpected error: %s", err)
+	}
 	assertAssetSet(t, as, "1a", window, map[string]float64{
 		"__undefined__/__undefined__/__undefined__/Compute/cluster1/Node/Kubernetes/gcp-node1/node1":                   21.00,
 		"__undefined__/__undefined__/__undefined__/Compute/cluster1/Node/Kubernetes/gcp-node2/node2":                   16.50,
@@ -921,10 +986,13 @@ func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 		GenerateMockAssetSet(startD2, day),
 	)
 	err = asr.AggregateBy([]string{}, nil)
-	as, err = asr.AccumulateToAssetSet()
 	if err != nil {
 		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
 	}
+	as, err = asr.AccumulateToAssetSet()
+	if err != nil {
+		t.Fatalf("AssetSetRange.AccumulateToAssetSet: unexpected error: %s", err)
+	}
 	assertAssetSet(t, as, "1b", window, map[string]float64{
 		"": 180.00,
 	}, nil)
@@ -976,6 +1044,9 @@ func TestAssetSetRange_AccumulateToAssetSet(t *testing.T) {
 	)
 
 	err = asr.AggregateBy([]string{string(AssetTypeProp)}, nil)
+	if err != nil {
+		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)
+	}
 	as, err = asr.AccumulateToAssetSet()
 	if err != nil {
 		t.Fatalf("AssetSetRange.AggregateBy: unexpected error: %s", err)

+ 5 - 1
pkg/kubecost/assetprops.go

@@ -199,8 +199,12 @@ func (ap *AssetProperties) Clone() *AssetProperties {
 	return clone
 }
 
-// Equal returns true only if both AssetProperties are non-nil exact matches
+// Equal returns true only if both AssetProperties are matches
 func (ap *AssetProperties) Equal(that *AssetProperties) bool {
+	if ap == nil && that == nil {
+		return true
+	}
+
 	if ap == nil || that == nil {
 		return false
 	}

+ 190 - 63
pkg/kubecost/cloudcost.go

@@ -9,6 +9,7 @@ import (
 	filter21 "github.com/opencost/opencost/pkg/filter21"
 	"github.com/opencost/opencost/pkg/filter21/ast"
 	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 // CloudCost represents a CUR line item, identifying a cloud resource and
@@ -131,17 +132,17 @@ func (cc *CloudCost) StringMapProperty(property string) (map[string]string, erro
 	}
 }
 
-func (cc *CloudCost) GetCostMetric(costMetricName string) (CostMetric, error) {
+func (cc *CloudCost) GetCostMetric(costMetricName CostMetricName) (CostMetric, error) {
 	switch costMetricName {
-	case ListCostMetric:
+	case CostMetricListCost:
 		return cc.ListCost, nil
-	case NetCostMetric:
+	case CostMetricNetCost:
 		return cc.NetCost, nil
-	case AmortizedNetCostMetric:
+	case CostMetricAmortizedNetCost:
 		return cc.AmortizedNetCost, nil
-	case InvoicedCostMetric:
+	case CostMetricInvoicedCost:
 		return cc.InvoicedCost, nil
-	case AmortizedCostMetric:
+	case CostMetricAmortizedCost:
 		return cc.AmortizedCost, nil
 	}
 	return CostMetric{}, fmt.Errorf("invalid Cost Metric: %s", costMetricName)
@@ -371,9 +372,12 @@ func (ccs *CloudCostSet) Clone() *CloudCostSet {
 
 // cloneSet creates a copy of the receiver without any of its CloudCosts
 func (ccs *CloudCostSet) cloneSet() *CloudCostSet {
-	aggProps := make([]string, len(ccs.AggregationProperties))
-	for i, v := range ccs.AggregationProperties {
-		aggProps[i] = v
+	var aggProps []string
+	if ccs.AggregationProperties != nil {
+		aggProps = make([]string, len(ccs.AggregationProperties))
+		for i, v := range ccs.AggregationProperties {
+			aggProps[i] = v
+		}
 	}
 	return &CloudCostSet{
 		CloudCosts:            make(map[string]*CloudCost),
@@ -443,8 +447,8 @@ type CloudCostSetRange struct {
 
 // NewCloudCostSetRange create a CloudCostSetRange containing CloudCostSets with windows of equal duration
 // the duration between start and end must be divisible by the window duration argument
-func NewCloudCostSetRange(start time.Time, end time.Time, window time.Duration, integration string) (*CloudCostSetRange, error) {
-	windows, err := GetWindows(start, end, window)
+func NewCloudCostSetRange(start time.Time, end time.Time, accumOpt AccumulateOption, integration string) (*CloudCostSetRange, error) {
+	windows, err := NewClosedWindow(start.UTC(), end.UTC()).GetAccumulateWindows(accumOpt)
 	if err != nil {
 		return nil, err
 	}
@@ -457,7 +461,6 @@ func NewCloudCostSetRange(start time.Time, end time.Time, window time.Duration,
 		cloudCostItemSets[i] = ccs
 	}
 	return &CloudCostSetRange{
-		Window:        NewWindow(&start, &end),
 		CloudCostSets: cloudCostItemSets,
 	}, nil
 }
@@ -468,7 +471,6 @@ func (ccsr *CloudCostSetRange) Clone() *CloudCostSetRange {
 		ccsSlice[i] = ccs.Clone()
 	}
 	return &CloudCostSetRange{
-		Window:        ccsr.Window.Clone(),
 		CloudCostSets: ccsSlice,
 	}
 }
@@ -482,12 +484,20 @@ func (ccsr *CloudCostSetRange) IsEmpty() bool {
 	return true
 }
 
-// Accumulate sums each CloudCostSet in the given range, returning a single cumulative
+// accumulate sums each CloudCostSet in the given range, returning a single cumulative
 // CloudCostSet for the entire range.
-func (ccsr *CloudCostSetRange) Accumulate() (*CloudCostSet, error) {
+func (ccsr *CloudCostSetRange) AccumulateAll() (*CloudCostSet, error) {
 	var cloudCostSet *CloudCostSet
 	var err error
 
+	if ccsr == nil {
+		return nil, fmt.Errorf("nil CloudCostSetRange in accumulation")
+	}
+
+	if len(ccsr.CloudCostSets) == 0 {
+		return nil, fmt.Errorf("CloudCostSetRange has empty CloudCostSet in accumulation")
+	}
+
 	for _, ccs := range ccsr.CloudCostSets {
 		if cloudCostSet == nil {
 			cloudCostSet = ccs.Clone()
@@ -502,6 +512,171 @@ func (ccsr *CloudCostSetRange) Accumulate() (*CloudCostSet, error) {
 	return cloudCostSet, nil
 }
 
+// Accumulate sums CloudCostSets based on the AccumulateOption (calendar week or calendar month).
+// The accumulated set is determined by the start of the window of the allocation set.
+func (ccsr *CloudCostSetRange) Accumulate(accumulateBy AccumulateOption) (*CloudCostSetRange, error) {
+	switch accumulateBy {
+	case AccumulateOptionNone:
+		return ccsr.accumulateByNone()
+	case AccumulateOptionAll:
+		return ccsr.accumulateByAll()
+	case AccumulateOptionHour:
+		return ccsr.accumulateByHour()
+	case AccumulateOptionDay:
+		return ccsr.accumulateByDay()
+	case AccumulateOptionWeek:
+		return ccsr.accumulateByWeek()
+	case AccumulateOptionMonth:
+		return ccsr.accumulateByMonth()
+	default:
+		// ideally, this should never happen
+		return nil, fmt.Errorf("unexpected error, invalid accumulateByType: %s", accumulateBy)
+	}
+}
+
+func (ccsr *CloudCostSetRange) accumulateByAll() (*CloudCostSetRange, error) {
+
+	ccs, err := ccsr.AccumulateAll()
+	if err != nil {
+		return nil, fmt.Errorf("error accumulating all:%s", err)
+	}
+
+	accumulated := &CloudCostSetRange{
+		CloudCostSets: []*CloudCostSet{ccs},
+	}
+	return accumulated, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByNone() (*CloudCostSetRange, error) {
+	return ccsr.Clone(), nil
+}
+func (ccsr *CloudCostSetRange) accumulateByHour() (*CloudCostSetRange, error) {
+	// ensure that the summary allocation sets have a 1-hour window, if a set exists
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() != time.Hour {
+		return nil, fmt.Errorf("window duration must equal 1 hour; got:%s", ccsr.CloudCostSets[0].Window.Duration())
+	}
+
+	return ccsr.Clone(), nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByDay() (*CloudCostSetRange, error) {
+	// if the allocation set window is 1-day, just return the existing allocation set range
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() == time.Hour*24 {
+		return ccsr, nil
+	}
+
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, ccs := range ccsr.CloudCostSets {
+
+		if ccs.Window.Duration() != time.Hour {
+			return nil, fmt.Errorf("window duration must equal 1 hour; got:%s", ccs.Window.Duration())
+		}
+
+		hour := ccs.Window.Start().Hour()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			ccs = ccs.Clone()
+		}
+
+		toAccumulate.Append(ccs)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		if hour == 23 || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByWeek() (*CloudCostSetRange, error) {
+	if len(ccsr.CloudCostSets) > 0 && ccsr.CloudCostSets[0].Window.Duration() == timeutil.Week {
+		return ccsr, nil
+	}
+
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, css := range ccsr.CloudCostSets {
+		if css.Window.Duration() != time.Hour*24 {
+			return nil, fmt.Errorf("window duration must equal 24 hours; got:%s", css.Window.Duration())
+		}
+
+		dayOfWeek := css.Window.Start().Weekday()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			css = css.Clone()
+		}
+
+		toAccumulate.Append(css)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		// current assumption is the week always ends on Saturday, or there are no more allocation sets
+		if dayOfWeek == time.Saturday || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+func (ccsr *CloudCostSetRange) accumulateByMonth() (*CloudCostSetRange, error) {
+	var toAccumulate *CloudCostSetRange
+	result := &CloudCostSetRange{}
+	for i, css := range ccsr.CloudCostSets {
+		if css.Window.Duration() != time.Hour*24 {
+			return nil, fmt.Errorf("window duration must equal 24 hours; got:%s", css.Window.Duration())
+		}
+
+		_, month, _ := css.Window.Start().Date()
+		_, nextDayMonth, _ := css.Window.Start().Add(time.Hour * 24).Date()
+
+		if toAccumulate == nil {
+			toAccumulate = &CloudCostSetRange{}
+			css = css.Clone()
+		}
+
+		toAccumulate.Append(css)
+		accumulated, err := toAccumulate.accumulateByAll()
+		if err != nil {
+			return nil, fmt.Errorf("error accumulating result: %s", err)
+		}
+		toAccumulate = accumulated
+
+		// either the month has ended, or there are no more allocation sets
+		if month != nextDayMonth || i == len(ccsr.CloudCostSets)-1 {
+			if length := len(toAccumulate.CloudCostSets); length != 1 {
+				return nil, fmt.Errorf("failed accumulation, detected %d sets instead of 1", length)
+			}
+			result.Append(toAccumulate.CloudCostSets[0])
+			toAccumulate = nil
+		}
+	}
+	return result, nil
+}
+
+// Append appends the given CloudCostSet to the end of the range. It does not
+// validate whether or not that violates window continuity.
+func (ccsr *CloudCostSetRange) Append(that *CloudCostSet) {
+	ccsr.CloudCostSets = append(ccsr.CloudCostSets, that)
+}
+
 // LoadCloudCost loads CloudCosts into existing CloudCostSets of the CloudCostSetRange.
 // This function service to aggregate and distribute costs over predefined windows
 // are accumulated here so that the resulting CloudCost with the 1d window has the correct price for the entire day.
@@ -554,51 +729,3 @@ func (ccsr *CloudCostSetRange) LoadCloudCost(cloudCost *CloudCost) {
 		}
 	}
 }
-
-const (
-	ListCostMetric         string = "ListCost"
-	NetCostMetric          string = "NetCost"
-	AmortizedNetCostMetric string = "AmortizedNetCost"
-	InvoicedCostMetric     string = "InvoicedCost"
-	AmortizedCostMetric    string = "AmortizedCost"
-)
-
-type CostMetric struct {
-	Cost              float64 `json:"cost"`
-	KubernetesPercent float64 `json:"kubernetesPercent"`
-}
-
-func (cm CostMetric) Equal(that CostMetric) bool {
-	return cm.Cost == that.Cost && cm.KubernetesPercent == that.KubernetesPercent
-}
-
-func (cm CostMetric) Clone() CostMetric {
-	return CostMetric{
-		Cost:              cm.Cost,
-		KubernetesPercent: cm.KubernetesPercent,
-	}
-}
-
-func (cm CostMetric) add(that CostMetric) CostMetric {
-	// Compute KubernetesPercent for sum
-	k8sPct := 0.0
-	sumCost := cm.Cost + that.Cost
-	if sumCost > 0.0 {
-		thisK8sCost := cm.Cost * cm.KubernetesPercent
-		thatK8sCost := that.Cost * that.KubernetesPercent
-		k8sPct = (thisK8sCost + thatK8sCost) / sumCost
-	}
-
-	return CostMetric{
-		Cost:              sumCost,
-		KubernetesPercent: k8sPct,
-	}
-}
-
-// percent returns the product of the given percent and the cost, KubernetesPercent remains the same
-func (cm CostMetric) percent(pct float64) CostMetric {
-	return CostMetric{
-		Cost:              cm.Cost * pct,
-		KubernetesPercent: cm.KubernetesPercent,
-	}
-}

+ 1 - 1
pkg/kubecost/cloudcost_test.go

@@ -28,7 +28,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 	end := RoundBack(time.Now().UTC(), timeutil.Day)
 	start := end.Add(-3 * timeutil.Day)
 	dayWindows, _ := GetWindows(start, end, timeutil.Day)
-	emtpyCCSR, _ := NewCloudCostSetRange(start, end, timeutil.Day, "integration")
+	emtpyCCSR, _ := NewCloudCostSetRange(start, end, AccumulateOptionDay, "integration")
 	testCases := map[string]struct {
 		cc       []*CloudCost
 		ccsr     *CloudCostSetRange

+ 14 - 3
pkg/kubecost/cloudcostprops.go

@@ -1,7 +1,6 @@
 package kubecost
 
 import (
-	"fmt"
 	"strings"
 
 	"github.com/opencost/opencost/pkg/log"
@@ -156,10 +155,22 @@ func (ccp *CloudCostProperties) Intersection(that *CloudCostProperties) *CloudCo
 	return intersectionCCP
 }
 
+var cloudCostDefaultKeyProperties = []string{
+	CloudCostProviderProp,
+	CloudCostInvoiceEntityIDProp,
+	CloudCostAccountIDProp,
+	CloudCostCategoryProp,
+	CloudCostServiceProp,
+	CloudCostProviderIDProp,
+}
+
+// GenerateKey takes a list of properties and creates a "/" seperated key based on the values of the requested properties.
+// Invalid values are ignored with a warning. A nil input returns the default key, while an empty slice  returns the empty string
 func (ccp *CloudCostProperties) GenerateKey(props []string) string {
 
-	if len(props) == 0 {
-		return fmt.Sprintf("%s/%s/%s/%s/%s/%s", ccp.Provider, ccp.InvoiceEntityID, ccp.AccountID, ccp.Category, ccp.Service, ccp.ProviderID)
+	// nil props replaced with default property list
+	if props == nil {
+		props = cloudCostDefaultKeyProperties
 	}
 
 	values := make([]string, len(props))

+ 77 - 0
pkg/kubecost/costmetric.go

@@ -0,0 +1,77 @@
+package kubecost
+
+import (
+	"fmt"
+	"strings"
+)
+
+// CostMetricName a string type that acts as an enumeration of possible CostMetric options
+type CostMetricName string
+
+const (
+	CostMetricNone             CostMetricName = ""
+	CostMetricListCost         CostMetricName = "listCost"
+	CostMetricNetCost          CostMetricName = "netCost"
+	CostMetricAmortizedNetCost CostMetricName = "amortizedNetCost"
+	CostMetricInvoicedCost     CostMetricName = "invoicedCost"
+	CostMetricAmortizedCost    CostMetricName = "amortizedCost"
+)
+
+// ParseCostMetricName provides a resilient way to parse one of the enumerated CostMetricName types from a string
+// or throws an error if it is not able to.
+func ParseCostMetricName(costMetric string) (CostMetricName, error) {
+	switch strings.ToLower(costMetric) {
+	case strings.ToLower(string(CostMetricListCost)):
+		return CostMetricListCost, nil
+	case strings.ToLower(string(CostMetricAmortizedCost)):
+		return CostMetricAmortizedCost, nil
+	case strings.ToLower(string(CostMetricAmortizedNetCost)):
+		return CostMetricAmortizedNetCost, nil
+	case strings.ToLower(string(CostMetricNetCost)):
+		return CostMetricNetCost, nil
+	case strings.ToLower(string(CostMetricInvoicedCost)):
+		return CostMetricInvoicedCost, nil
+	}
+	return CostMetricNone, fmt.Errorf("failed to parse a valid CostMetricName from '%s'", costMetric)
+}
+
+// CostMetric is a container for values associated with a specific accounting method
+type CostMetric struct {
+	Cost              float64 `json:"cost"`
+	KubernetesPercent float64 `json:"kubernetesPercent"`
+}
+
+func (cm CostMetric) Equal(that CostMetric) bool {
+	return cm.Cost == that.Cost && cm.KubernetesPercent == that.KubernetesPercent
+}
+
+func (cm CostMetric) Clone() CostMetric {
+	return CostMetric{
+		Cost:              cm.Cost,
+		KubernetesPercent: cm.KubernetesPercent,
+	}
+}
+
+func (cm CostMetric) add(that CostMetric) CostMetric {
+	// Compute KubernetesPercent for sum
+	k8sPct := 0.0
+	sumCost := cm.Cost + that.Cost
+	if sumCost > 0.0 {
+		thisK8sCost := cm.Cost * cm.KubernetesPercent
+		thatK8sCost := that.Cost * that.KubernetesPercent
+		k8sPct = (thisK8sCost + thatK8sCost) / sumCost
+	}
+
+	return CostMetric{
+		Cost:              sumCost,
+		KubernetesPercent: k8sPct,
+	}
+}
+
+// percent returns the product of the given percent and the cost, KubernetesPercent remains the same
+func (cm CostMetric) percent(pct float64) CostMetric {
+	return CostMetric{
+		Cost:              cm.Cost * pct,
+		KubernetesPercent: cm.KubernetesPercent,
+	}
+}

+ 23 - 0
pkg/kubecost/query.go

@@ -1,6 +1,7 @@
 package kubecost
 
 import (
+	"strings"
 	"time"
 
 	filter21 "github.com/opencost/opencost/pkg/filter21"
@@ -69,6 +70,28 @@ const (
 	AccumulateOptionQuarter AccumulateOption = "quarter"
 )
 
+// ParseAccumulate converts a string to an AccumulateOption
+func ParseAccumulate(acc string) AccumulateOption {
+	var opt AccumulateOption
+	switch strings.ToLower(acc) {
+	case "quarter":
+		opt = AccumulateOptionQuarter
+	case "month":
+		opt = AccumulateOptionMonth
+	case "week":
+		opt = AccumulateOptionWeek
+	case "day":
+		opt = AccumulateOptionDay
+	case "hour":
+		opt = AccumulateOptionHour
+	case "true":
+		opt = AccumulateOptionAll
+	default:
+		opt = AccumulateOptionNone
+	}
+	return opt
+}
+
 // AssetQueryOptions defines optional parameters for querying an Asset Store
 type AssetQueryOptions struct {
 	Accumulate              bool

+ 238 - 0
pkg/kubecost/window.go

@@ -246,6 +246,15 @@ func parseWindow(window string, now time.Time) (Window, error) {
 		end := now
 		start := end.Add(-time.Duration(num) * dur)
 
+		// when using windows such as "7d" and "1w", we have to have a definition for what "the past X days" means.
+		// let "the past X days" be defined as the entirety of today plus the entirety of the past X-1 days, where
+		// "entirety" is defined as midnight to midnight, UTC. given this definition, we round forward the calculated
+		// start and end times to the nearest day to align with midnight boundaries
+		if match[2] == "d" || match[2] == "w" {
+			end = end.Truncate(timeutil.Day).Add(timeutil.Day)
+			start = start.Truncate(timeutil.Day).Add(timeutil.Day)
+		}
+
 		return NewWindow(&start, &end), nil
 	}
 
@@ -786,8 +795,237 @@ func (w Window) GetPercentInWindow(that Window) float64 {
 	return pct
 }
 
+// GetAccumulateWindow rounds the start and end of the window to the given accumulation option
+func (w Window) GetAccumulateWindow(accumOpt AccumulateOption) (Window, error) {
+	if w.IsOpen() {
+		return w, fmt.Errorf("could not get accumlate window for open window")
+	}
+	switch accumOpt {
+	case AccumulateOptionAll:
+		// just return the entire window
+		return w.Clone(), nil
+	case AccumulateOptionHour:
+		return w.getHourlyWindow(), nil
+	case AccumulateOptionDay:
+		return w.getDailyWindow(), nil
+	case AccumulateOptionWeek:
+		return w.getWeeklyWindow(), nil
+	case AccumulateOptionMonth:
+		return w.getMonthlyWindow(), nil
+	case AccumulateOptionQuarter:
+		return w.getQuarterlyWindow(), nil
+	case AccumulateOptionNone:
+		// the default behavior of the app currently is to return the highest resolution steps
+		// possible
+		fallthrough
+	default:
+
+		// if we are here, the accumulation option was not recognized
+		return w, fmt.Errorf("cannot round window to given accumulation option %s", string(accumOpt))
+
+	}
+}
+
+// GetAccumulateWindows breaks provided window into a []Window with each window having the resolution of the provided AccumulateOption
+func (w Window) GetAccumulateWindows(accumOpt AccumulateOption) ([]Window, error) {
+	if w.IsOpen() {
+		return nil, fmt.Errorf("could not get accumlate window for open window")
+	}
+	switch accumOpt {
+	case AccumulateOptionAll:
+		// just return the entire window
+		return []Window{w.Clone()}, nil
+	case AccumulateOptionDay:
+		wins := w.getDailyWindows()
+		return wins, nil
+	case AccumulateOptionWeek:
+		wins := w.getWeeklyWindows()
+		return wins, nil
+	case AccumulateOptionMonth:
+		wins := w.getMonthlyWindows()
+		return wins, nil
+	case AccumulateOptionHour:
+		// our maximum resolution is hourly
+		wins := w.getHourlyWindows()
+		return wins, nil
+	case AccumulateOptionQuarter:
+		wins := w.getQuarterlyWindows()
+		return wins, nil
+	case AccumulateOptionNone:
+		// the default behavior of the app currently is to return the highest resolution steps
+		// possible
+		fallthrough
+	default:
+
+		// if we are here, the accumulation option was not recognized
+		return nil, fmt.Errorf("store does not have coverage window starting at %v", w.Start())
+
+	}
+}
+
+func (w Window) getHourlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar hour start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), origStart.Day(), origStart.Hour(), 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month(), origEnd.Day(), origEnd.Hour()+1, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new hour, does not need rounding
+	if origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getHourlyWindows breaks up a window into hours
+func (w Window) getHourlyWindows() []Window {
+	wins := []Window{}
+	roundedWindow := w.getHourlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day(), currStart.Hour()+1, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day(), currEnd.Hour()+1, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+func (w Window) getDailyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar day start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), origStart.Day(), 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month(), origEnd.Day()+1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new day, does not need rounding
+	if origEnd.Minute() == 0 && origEnd.Second() == 0 && origEnd.Hour() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getDailyWindows breaks up a window into days
+func (w Window) getDailyWindows() []Window {
+	wins := []Window{}
+	roundedWindow := w.getDailyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day()+1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day()+1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+func (w Window) getWeeklyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar week start and ends, respectively
+	roundedStart := origStart.Add(-1 * time.Duration(origStart.Weekday()) * time.Hour * 24)
+	roundedStart = time.Date(roundedStart.Year(), roundedStart.Month(), roundedStart.Day(), 0, 0, 0, 0, origEnd.Location())
+	roundedEnd := origEnd.Add(time.Duration(6-origEnd.Weekday()) * time.Hour * 24)
+	roundedEnd = time.Date(roundedEnd.Year(), roundedEnd.Month(), roundedEnd.Day()+1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new week, does not need rounding
+	if origEnd.Weekday() == 0 && origEnd.Second() == 0 && origEnd.Hour() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getWeeklyWindows breaks up a window into weeks, with weeks starting on Sunday. NOTE(review): body rounds via getDailyWindow, not getWeeklyWindow — confirm this is intended
+func (w Window) getWeeklyWindows() []Window {
+	wins := []Window{}
+	roundedWindow := w.getDailyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month(), currStart.Day()+7, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month(), currEnd.Day()+7, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+func (w Window) getMonthlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar month start and ends, respectively
+	roundedStart := time.Date(origStart.Year(), origStart.Month(), 1, 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), origEnd.Month()+1, 1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new month, does not need rounding
+	if origEnd.Day() == 1 && origEnd.Hour() == 0 && origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getMonthlyWindows breaks up a window into calendar months
+func (w Window) getMonthlyWindows() []Window {
+	wins := []Window{}
+	roundedWindow := w.getMonthlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month()+1, 1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month()+1, 1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
+func (w Window) getQuarterlyWindow() Window {
+	origStart := w.Start()
+	origEnd := w.End()
+	// round the start and end windows to the calendar quarter start and ends, respectively
+	// get quarter fraction from month
+	startQuarterNum := int(math.Ceil(float64(origStart.Month()) / 3.0))
+	endQuarterNum := int(math.Ceil(float64(origEnd.Month()) / 3.0))
+
+	roundedStart := time.Date(origStart.Year(), time.Month((startQuarterNum*3)-2), 1, 0, 0, 0, 0, origStart.Location())
+	roundedEnd := time.Date(origEnd.Year(), time.Month(((endQuarterNum+1)*3)-2), 1, 0, 0, 0, 0, origEnd.Location())
+	// edge case - if user has exactly specified first instant of new quarter, does not need rounding
+	if origEnd.Month() == time.Month(((endQuarterNum)*3)-2) && origEnd.Day() == 1 && origEnd.Hour() == 0 && origEnd.Minute() == 0 && origEnd.Second() == 0 {
+		roundedEnd = *origEnd
+	}
+	return NewClosedWindow(roundedStart, roundedEnd)
+}
+
+// getQuarterlyWindows breaks up a window into calendar quarters
+func (w Window) getQuarterlyWindows() []Window {
+	wins := []Window{}
+	roundedWindow := w.getQuarterlyWindow()
+
+	roundedStart := *roundedWindow.Start()
+	roundedEnd := *roundedWindow.End()
+
+	currStart := roundedStart
+	currEnd := time.Date(currStart.Year(), currStart.Month()+3, 1, 0, 0, 0, 0, currStart.Location())
+	for currEnd.Before(roundedEnd) || currEnd.Equal(roundedEnd) {
+		wins = append(wins, NewClosedWindow(currStart, currEnd))
+		currStart = currEnd
+		currEnd = time.Date(currEnd.Year(), currEnd.Month()+3, 1, 0, 0, 0, 0, currStart.Location())
+	}
+	return wins
+}
+
 // GetWindows returns a slice of Window with equal size between the given start and end. If windowSize does not evenly
 // divide the period between start and end, the last window is not added
+// Deprecated: in v1.107 use Window.GetWindows() instead
 func GetWindows(start time.Time, end time.Time, windowSize time.Duration) ([]Window, error) {
 	// Ensure the range is evenly divisible into windows of the given duration
 	dur := end.Sub(start)

+ 15 - 7
pkg/kubecost/window_test.go

@@ -265,6 +265,7 @@ func TestParseWindowUTC(t *testing.T) {
 	}
 
 	ago12h := time.Now().UTC().Add(-12 * time.Hour)
+	ago24h := time.Now().UTC().Add(-24 * time.Hour)
 	ago36h := time.Now().UTC().Add(-36 * time.Hour)
 	ago60h := time.Now().UTC().Add(-60 * time.Hour)
 
@@ -291,8 +292,8 @@ func TestParseWindowUTC(t *testing.T) {
 	if dur2d.Duration().Hours() != 48 {
 		t.Fatalf(`expect: window "2d" to have duration 48 hour; actual: %f hours`, dur2d.Duration().Hours())
 	}
-	if !dur2d.Contains(ago36h) {
-		t.Fatalf(`expect: window "2d" to contain 36 hours ago; actual: %s doesn't contain %s`, dur2d, ago36h)
+	if !dur2d.Contains(ago24h) {
+		t.Fatalf(`expect: window "2d" to contain 24 hours ago; actual: %s doesn't contain %s`, dur2d, ago24h)
 	}
 	if dur2d.Contains(ago60h) {
 		t.Fatalf(`expect: window "2d" to not contain 60 hours ago; actual: %s contains %s`, dur2d, ago60h)
@@ -658,16 +659,21 @@ func TestWindow_DurationOffsetForPrometheus(t *testing.T) {
 		t.Fatalf("expected env.IsThanosEnabled() == false")
 	}
 
-	w, err := ParseWindowUTC("1d")
+	now := time.Now().UTC()
+	startOfToday := now.Truncate(timeutil.Day)
+	w, err := parseWindow("1d", now)
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "1d": %s`, err)
 	}
+
 	dur, off, err := w.DurationOffsetForPrometheus()
+	expDur := int(now.Sub(startOfToday).Seconds())
+	expDurStr := fmt.Sprintf("%ds", expDur)
 	if err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
-	if dur != "1d" {
-		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
+	if dur != expDurStr {
+		t.Fatalf(`expect: window to be "%s"; actual: "%s"`, expDurStr, dur)
 	}
 	if off != "" {
 		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
@@ -739,9 +745,11 @@ func TestWindow_DurationOffsetForPrometheus(t *testing.T) {
 		t.Fatalf("expected env.IsThanosEnabled() == true")
 	}
 
-	w, err = ParseWindowUTC("1d")
+	// Note - with the updated logic of 1d, 1w, etc. rounding the start and end times forward to the nearest midnight,
+	// DurationOffsetForPrometheus may fail if not using a window using "Xh" as the string to parse
+	w, err = ParseWindowUTC("24h")
 	if err != nil {
-		t.Fatalf(`unexpected error parsing "1d": %s`, err)
+		t.Fatalf(`unexpected error parsing "24h": %s`, err)
 	}
 	dur, off, err = w.DurationOffsetForPrometheus()
 	if err != nil {

+ 1 - 1
pkg/metrics/kubemetrics.go

@@ -156,7 +156,7 @@ func getPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
 	}
 
 	// Special non-empty string to indicate absence of storage class.
-	return "<none>"
+	return ""
 }
 
 // toResourceUnitValue accepts a resource name and quantity and returns the sanitized resource, the unit, and the value in the units.

+ 209 - 0
pkg/proto/http.go

@@ -0,0 +1,209 @@
+package proto
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/opencost/opencost/pkg/util/json"
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+)
+
+// HTTPProtocol is a struct used as a selector for request/response protocol utility methods
+type HTTPProtocol struct{}
+
+// HTTPError represents an http error response
+type HTTPError struct {
+	StatusCode int
+	Body       string
+}
+
+// Error returns the error string
+func (he HTTPError) Error() string {
+	return string(he.Body)
+}
+
+// BadRequest creates a BadRequest HTTPError
+func (hp HTTPProtocol) BadRequest(message string) HTTPError {
+	return HTTPError{
+		StatusCode: http.StatusBadRequest,
+		Body:       message,
+	}
+}
+
+// InternalServerError creates an InternalServerError HTTPError
+func (hp HTTPProtocol) InternalServerError(message string) HTTPError {
+	if message == "" {
+		message = "Internal Server Error"
+	}
+	return HTTPError{
+		StatusCode: http.StatusInternalServerError,
+		Body:       message,
+	}
+}
+
+// NotFound creates a NotFound HTTPError
+func (hp HTTPProtocol) NotFound() HTTPError {
+	return HTTPError{
+		StatusCode: http.StatusNotFound,
+		Body:       "Not Found",
+	}
+}
+
+// HTTPResponse represents a data envelope for our HTTP messaging
+type HTTPResponse struct {
+	Code    int         `json:"code"`
+	Data    interface{} `json:"data"`
+	Message string      `json:"message,omitempty"`
+	Warning string      `json:"warning,omitempty"`
+}
+
+// ToResponse accepts a data payload and/or error to encode into a new HTTPResponse instance. Responses
+// which should not contain an error should pass nil for err.
+func (hp HTTPProtocol) ToResponse(data interface{}, err error) *HTTPResponse {
+	if err != nil {
+		return &HTTPResponse{
+			Code:    http.StatusInternalServerError,
+			Data:    data,
+			Message: err.Error(),
+		}
+	}
+
+	return &HTTPResponse{
+		Code: http.StatusOK,
+		Data: data,
+	}
+}
+
+// WriteData wraps the data payload in an HTTPResponse and writes the resulting response using the
+// http.ResponseWriter
+func (hp HTTPProtocol) WriteData(w http.ResponseWriter, data interface{}) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code: status,
+		Data: data,
+	})
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithWarning writes the data payload similar to WriteData except it provides an additional warning message.
+func (hp HTTPProtocol) WriteDataWithWarning(w http.ResponseWriter, data interface{}, warning string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Warning: warning,
+	})
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithMessage writes the data payload similar to WriteData except it provides an additional string message.
+func (hp HTTPProtocol) WriteDataWithMessage(w http.ResponseWriter, data interface{}, message string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Message: message,
+	})
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteProtoWithMessage uses the protojson package to convert a proto3 response to a JSON response and
+// return it to the requester. Proto3 omits fields with default values, but setting the option
+// EmitUnpopulated to true includes default values in the JSON response payload. If an error is
+// encountered, it sends an InternalServerError along with the reason the JSON conversion failed.
+func (hp HTTPProtocol) WriteProtoWithMessage(w http.ResponseWriter, data proto.Message) {
+	m := protojson.MarshalOptions{
+		EmitUnpopulated: true,
+	}
+	status := http.StatusOK
+	resp, err := m.Marshal(data)
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteDataWithMessageAndWarning writes the data payload similar to WriteData except it provides a warning and additional message string.
+func (hp HTTPProtocol) WriteDataWithMessageAndWarning(w http.ResponseWriter, data interface{}, message string, warning string) {
+	status := http.StatusOK
+	resp, err := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Data:    data,
+		Message: message,
+		Warning: warning,
+	})
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}
+
+// WriteError wraps the HTTPError in a HTTPResponse and writes it via http.ResponseWriter
+func (hp HTTPProtocol) WriteError(w http.ResponseWriter, err HTTPError) {
+	status := err.StatusCode
+	if status == 0 {
+		status = http.StatusInternalServerError
+	}
+	w.WriteHeader(status)
+
+	resp, _ := json.Marshal(&HTTPResponse{
+		Code:    status,
+		Message: err.Body,
+	})
+	w.Write(resp)
+}
+
+// WriteResponse writes the provided HTTPResponse instance via http.ResponseWriter
+func (hp HTTPProtocol) WriteResponse(w http.ResponseWriter, r *HTTPResponse) {
+	status := r.Code
+	resp, err := json.Marshal(r)
+	if err != nil {
+		status = http.StatusInternalServerError
+		resp, _ = json.Marshal(&HTTPResponse{
+			Code:    status,
+			Message: fmt.Sprintf("Error: %s", err),
+		})
+	}
+
+	w.WriteHeader(status)
+	w.Write(resp)
+}

+ 22 - 0
pkg/proto/proto.go

@@ -0,0 +1,22 @@
+package proto
+
+////////////////////////////////////////////////////////////////////////////////
+//
+//  The purpose of this package is to provide a general set of utilities for
+//  writing responses in networked communication. Since go often uses the basic
+//  protocol names (ie: "net/http") for their packages, keeping protocol utilities
+//  in their own packages can be a bit annoying with respect to building an API.
+//  To provide a "static" set of utilities, we can utilize method selectors on
+//  structs allowing callers to use proto.<protocol>() to access the utility methods
+//  with package-like syntax. We can also expand on the supported protocols as needed.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+var (
+	httpProtocol HTTPProtocol
+)
+
+// HTTP returns the HTTPProtocol utilities.
+func HTTP() HTTPProtocol {
+	return httpProtocol
+}

+ 8 - 1
pkg/util/formatutil/formatutil.go

@@ -1,6 +1,9 @@
 package formatutil
 
-import "math"
+import (
+	"math"
+	"strings"
+)
 
 func Float64ToResponse(f float64) *float64 {
 	if math.IsNaN(f) || math.IsInf(f, 0) {
@@ -9,3 +12,7 @@ func Float64ToResponse(f float64) *float64 {
 
 	return &f
 }
+
+func StripWhitespace(s string) string {
+	return strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(s, " ", ""), "\t", ""), "\n", "")
+}

+ 7 - 5
ui/README.md

@@ -26,8 +26,10 @@ kubectl port-forward --namespace opencost service/opencost 9090
 The UI can be run locally using the `npm run serve` command.
 
 ```sh
+$ npm install
+...
 $ npm run serve
-> kubecost-ui-open@0.0.1 serve
+> opencost-ui@0.1.0 serve
 > npx parcel serve src/index.html
 
 Server running at http://localhost:1234
@@ -39,7 +41,7 @@ And can have a custom URL backend prefix.
 ```sh
 BASE_URL=http://localhost:9090/test npm run serve
 
-> kubecost-ui-open@0.0.1 serve
+> opencost-ui@0.1.0 serve
 > npx parcel serve src/index.html
 
 Server running at http://localhost:1234
@@ -51,13 +53,13 @@ In addition, similar behavior can be replicated with the docker container:
 ```sh
 $ docker run -e BASE_URL_OVERRIDE=test -p 9091:9090 -d opencost-ui:latest
 $ curl localhost:9091
-<html gibberish> 
+<html gibberish>
 ```
 
 ## Overriding the Base API URL
 
-For some use cases such as the case of [Opencost deployed behind an ingress controller](https://github.com/opencost/opencost/issues/1677), it is useful to override the `BASE_URL` variable responsible for requests sent from the UI to the API.  This means that instead of sending requests to `<domain>/model/allocation/compute/etc`, requests can be sent to `<domain>/{BASE_URL_OVERRIDE}/allocation/compute/etc`.  To do this, supply the environment variable `BASE_URL_OVERRIDE` to the docker image.
+For some use cases such as the case of [OpenCost deployed behind an ingress controller](https://github.com/opencost/opencost/issues/1677), it is useful to override the `BASE_URL` variable responsible for requests sent from the UI to the API.  This means that instead of sending requests to `<domain>/model/allocation/compute/etc`, requests can be sent to `<domain>/{BASE_URL_OVERRIDE}/allocation/compute/etc`.  To do this, supply the environment variable `BASE_URL_OVERRIDE` to the docker image.
 
 ```sh
 $ docker run -p 9091:9090 -e BASE_URL_OVERRIDE=anything -d opencost-ui:latest
-```
+```

+ 3 - 1
ui/default.nginx.conf

@@ -52,7 +52,9 @@ server {
 
     add_header Cache-Control "max-age=300";
     location / {
-        try_files $uri $uri/ /index.html;
+        root /var/www;
+        index index.html index.htm;
+        try_files $uri /index.html;
     }
 
     add_header ETag "1.96.0";

Разница между файлами не показана из-за своего большого размера
+ 265 - 488
ui/package-lock.json


+ 10 - 9
ui/package.json

@@ -1,16 +1,15 @@
 {
-  "name": "kubecost-ui-open",
-  "version": "0.0.1",
-  "description": "Open source UI for Kubecost",
+  "name": "opencost-ui",
+  "description": "Open source UI for OpenCost",
+  "version": "0.1.0",
+  "license": "Apache-2.0",
   "scripts": {
     "build": "npx parcel build src/index.html",
-    "serve": "npx parcel serve src/index.html",
+    "serve": "npx parcel serve src/index.html --no-cache",
     "clean": "rm -rf dist/*",
     "test": "echo \"Error: no test specified\" && exit 1",
     "preinstall": "npx npm-force-resolutions"
   },
-  "author": "",
-  "license": "Apache-2.0",
   "browserslist": [
     "defaults"
   ],
@@ -21,9 +20,9 @@
     "@material-ui/core": "^4.11.3",
     "@material-ui/icons": "^4.11.2",
     "@material-ui/pickers": "^3.3.10",
-    "@material-ui/styles": "^4.11.3",
-    "axios": "^0.21.2",
-    "date-fns": "^2.19.0",
+    "@material-ui/styles": "^4.11.5",
+    "axios": "^1.4.0",
+    "date-fns": "^2.30.0",
     "material-design-icons-iconfont": "^6.1.0",
     "prop-types": "^15.7.2",
     "react": "^17.0.1",
@@ -36,7 +35,9 @@
     "@babel/plugin-proposal-class-properties": "^7.13.0",
     "@babel/plugin-transform-runtime": "^7.13.10",
     "@babel/preset-react": "^7.12.13",
+    "buffer": "^6.0.3",
     "parcel": "^2.2.1",
+    "process": "^0.11.10",
     "set-value": "4.0.1"
   },
   "resolutions": {

+ 204 - 175
ui/src/Reports.js

@@ -1,201 +1,230 @@
-import CircularProgress from '@material-ui/core/CircularProgress'
-import IconButton from '@material-ui/core/IconButton'
-import Paper from '@material-ui/core/Paper'
-import Typography from '@material-ui/core/Typography'
-import RefreshIcon from '@material-ui/icons/Refresh'
-import { makeStyles } from '@material-ui/styles'
-import { filter, find, forEach, get, isArray, sortBy, toArray, trim } from 'lodash'
-import React, { useEffect, useState } from 'react'
-import ReactDOM from 'react-dom'
-import { useLocation, useHistory } from 'react-router';
+import CircularProgress from "@material-ui/core/CircularProgress";
+import IconButton from "@material-ui/core/IconButton";
+import Paper from "@material-ui/core/Paper";
+import Typography from "@material-ui/core/Typography";
+import RefreshIcon from "@material-ui/icons/Refresh";
+import { makeStyles } from "@material-ui/styles";
+import {
+  filter,
+  find,
+  forEach,
+  get,
+  isArray,
+  sortBy,
+  toArray,
+  trim,
+} from "lodash";
+import React, { useEffect, useState } from "react";
+import ReactDOM from "react-dom";
+import { useLocation, useHistory } from "react-router";
 
-import AllocationReport from './components/AllocationReport';
-import Controls from './components/Controls';
-import Header from './components/Header';
-import Page from './components/Page';
-import Subtitle from './components/Subtitle';
-import Warnings from './components/Warnings';
-import AllocationService from './services/allocation';
-import { checkCustomWindow, cumulativeToTotals, rangeToCumulative, toVerboseTimeRange } from './util';
-import { currencyCodes } from './constants/currencyCodes'
+import AllocationReport from "./components/allocationReport";
+import Controls from "./components/Controls";
+import Header from "./components/Header";
+import Page from "./components/Page";
+import Subtitle from "./components/Subtitle";
+import Warnings from "./components/Warnings";
+import AllocationService from "./services/allocation";
+import {
+  checkCustomWindow,
+  cumulativeToTotals,
+  rangeToCumulative,
+  toVerboseTimeRange,
+} from "./util";
+import { currencyCodes } from "./constants/currencyCodes";
 
 const windowOptions = [
-  { name: 'Today', value: 'today' },
-  { name: 'Yesterday', value: 'yesterday' },
-  { name: 'Week-to-date', value: 'week' },
-  { name: 'Month-to-date', value: 'month' },
-  { name: 'Last week', value: 'lastweek' },
-  { name: 'Last month', value: 'lastmonth' },
-  { name: 'Last 7 days', value: '6d' },
-  { name: 'Last 30 days', value: '29d' },
-  { name: 'Last 60 days', value: '59d' },
-  { name: 'Last 90 days', value: '89d' },
-]
+  { name: "Today", value: "today" },
+  { name: "Yesterday", value: "yesterday" },
+  { name: "Week-to-date", value: "week" },
+  { name: "Month-to-date", value: "month" },
+  { name: "Last week", value: "lastweek" },
+  { name: "Last month", value: "lastmonth" },
+  { name: "Last 7 days", value: "6d" },
+  { name: "Last 30 days", value: "29d" },
+  { name: "Last 60 days", value: "59d" },
+  { name: "Last 90 days", value: "89d" },
+];
 
 const aggregationOptions = [
-  { name: 'Cluster', value: 'cluster' },
-  { name: 'Node', value: 'node' },
-  { name: 'Namespace', value: 'namespace' },
-  { name: 'Controller kind', value: 'controllerKind' },
-  { name: 'Controller', value: 'controller' },
-  { name: 'Service', value: 'service' },
-  { name: 'Pod', value: 'pod' },
-  { name: 'Container', value: 'container' },
-]
+  { name: "Cluster", value: "cluster" },
+  { name: "Node", value: "node" },
+  { name: "Namespace", value: "namespace" },
+  { name: "Controller kind", value: "controllerKind" },
+  { name: "Controller", value: "controller" },
+  { name: "Service", value: "service" },
+  { name: "Pod", value: "pod" },
+  { name: "Container", value: "container" },
+];
 
 const accumulateOptions = [
-  { name: 'Entire window', value: true },
-  { name: 'Daily', value: false },
-]
+  { name: "Entire window", value: true },
+  { name: "Daily", value: false },
+];
 
 const useStyles = makeStyles({
   reportHeader: {
-    display: 'flex',
-    flexFlow: 'row',
+    display: "flex",
+    flexFlow: "row",
     padding: 24,
   },
   titles: {
     flexGrow: 1,
   },
-})
+});
 
 // generateTitle generates a string title from a report object
 function generateTitle({ window, aggregateBy, accumulate }) {
-  let windowName = get(find(windowOptions, { value: window }), 'name', '')
-  if (windowName === '') {
+  let windowName = get(find(windowOptions, { value: window }), "name", "");
+  if (windowName === "") {
     if (checkCustomWindow(window)) {
-      windowName = toVerboseTimeRange(window)
+      windowName = toVerboseTimeRange(window);
     } else {
-      console.warn(`unknown window: ${window}`)
+      console.warn(`unknown window: ${window}`);
     }
   }
 
-  let aggregationName = get(find(aggregationOptions, { value: aggregateBy }), 'name', '').toLowerCase()
-  if (aggregationName === '') {
-    console.warn(`unknown aggregation: ${aggregateBy}`)
+  let aggregationName = get(
+    find(aggregationOptions, { value: aggregateBy }),
+    "name",
+    ""
+  ).toLowerCase();
+  if (aggregationName === "") {
+    console.warn(`unknown aggregation: ${aggregateBy}`);
   }
 
-  let str = `${windowName} by ${aggregationName}`
+  let str = `${windowName} by ${aggregationName}`;
 
   if (!accumulate) {
-    str = `${str} daily`
+    str = `${str} daily`;
   }
 
-  return str
+  return str;
 }
 
-
 const ReportsPage = () => {
-  const classes = useStyles()
+  const classes = useStyles();
 
   // Allocation data state
-  const [allocationData, setAllocationData] = useState([])
-  const [cumulativeData, setCumulativeData] = useState({})
-  const [totalData, setTotalData] = useState({})
+  const [allocationData, setAllocationData] = useState([]);
+  const [cumulativeData, setCumulativeData] = useState({});
+  const [totalData, setTotalData] = useState({});
 
   // When allocation data changes, create a cumulative version of it
   useEffect(() => {
-    const cumulative = rangeToCumulative(allocationData, aggregateBy)
-    setCumulativeData(toArray(cumulative))
-    setTotalData(cumulativeToTotals(cumulative))
-  }, [allocationData])
+    const cumulative = rangeToCumulative(allocationData, aggregateBy);
+    setCumulativeData(toArray(cumulative));
+    setTotalData(cumulativeToTotals(cumulative));
+  }, [allocationData]);
 
   // Form state, which controls form elements, but not the report itself. On
   // certain actions, the form state may flow into the report state.
-  const [window, setWindow] = useState(windowOptions[0].value)
-  const [aggregateBy, setAggregateBy] = useState(aggregationOptions[0].value)
-  const [accumulate, setAccumulate] = useState(accumulateOptions[0].value)
-  const [currency, setCurrency] = useState('USD')
+  const [window, setWindow] = useState(windowOptions[0].value);
+  const [aggregateBy, setAggregateBy] = useState(aggregationOptions[0].value);
+  const [accumulate, setAccumulate] = useState(accumulateOptions[0].value);
+  const [currency, setCurrency] = useState("USD");
 
   // Report state, including current report and saved options
-  const [title, setTitle] = useState('Last 7 days by namespace daily')
+  const [title, setTitle] = useState("Last 7 days by namespace daily");
 
   // When parameters changes, fetch data. This should be the
   // only mechanism used to fetch data. Also generate a sensible title from the paramters.
   useEffect(() => {
-    setFetch(true)
-    setTitle(generateTitle({ window, aggregateBy, accumulate }))
-  }, [window, aggregateBy, accumulate])
+    setFetch(true);
+    setTitle(generateTitle({ window, aggregateBy, accumulate }));
+  }, [window, aggregateBy, accumulate]);
 
   // page and settings state
-  const [init, setInit] = useState(false)
-  const [fetch, setFetch] = useState(false)
-  const [loading, setLoading] = useState(true)
-  const [errors, setErrors] = useState([])
+  const [init, setInit] = useState(false);
+  const [fetch, setFetch] = useState(false);
+  const [loading, setLoading] = useState(true);
+  const [errors, setErrors] = useState([]);
 
   // Initialize once, then fetch report each time setFetch(true) is called
   useEffect(() => {
     if (!init) {
-      initialize()
+      initialize();
     }
-    if (init && fetch) {
-      fetchData()
+    if (init || fetch) {
+      fetchData();
     }
-  }, [init, fetch])
+  }, [init, fetch]);
 
   // parse any context information from the URL
   const routerLocation = useLocation();
   const searchParams = new URLSearchParams(routerLocation.search);
   const routerHistory = useHistory();
   useEffect(() => {
-    setWindow(searchParams.get('window') || '6d');
-    setAggregateBy(searchParams.get('agg') || 'namespace');
-    setAccumulate((searchParams.get('acc') === 'true') || false);
-    setCurrency(searchParams.get('currency') || 'USD');
+    setWindow(searchParams.get("window") || "6d");
+    setAggregateBy(searchParams.get("agg") || "namespace");
+    setAccumulate(searchParams.get("acc") === "true" || false);
+    setCurrency(searchParams.get("currency") || "USD");
   }, [routerLocation]);
 
   async function initialize() {
-    setInit(true)
+    setInit(true);
   }
 
   async function fetchData() {
-    setLoading(true)
-    setErrors([])
+    setLoading(true);
+    setErrors([]);
 
     try {
-      const resp = await AllocationService.fetchAllocation(window, aggregateBy, { accumulate })
+      const resp = await AllocationService.fetchAllocation(
+        window,
+        aggregateBy,
+        { accumulate }
+      );
       if (resp.data && resp.data.length > 0) {
-        const allocationRange = resp.data
+        const allocationRange = resp.data;
         for (const i in allocationRange) {
           // update cluster aggregations to use clusterName/clusterId names
-          allocationRange[i] = sortBy(allocationRange[i], a => a.totalCost)
+          allocationRange[i] = sortBy(allocationRange[i], (a) => a.totalCost);
         }
-        setAllocationData(allocationRange)
+        setAllocationData(allocationRange);
       } else {
-        if (resp.message && resp.message.indexOf('boundary error') >= 0) {
-          let match = resp.message.match(/(ETL is \d+\.\d+% complete)/)
-          let secondary = 'Try again after ETL build is complete'
+        if (resp.message && resp.message.indexOf("boundary error") >= 0) {
+          let match = resp.message.match(/(ETL is \d+\.\d+% complete)/);
+          let secondary = "Try again after ETL build is complete";
           if (match.length > 0) {
-            secondary = `${match[1]}. ${secondary}`
+            secondary = `${match[1]}. ${secondary}`;
           }
-          setErrors([{
-            primary: 'Data unavailable while ETL is building',
-            secondary: secondary,
-          }])
+          setErrors([
+            {
+              primary: "Data unavailable while ETL is building",
+              secondary: secondary,
+            },
+          ]);
         }
-        setAllocationData([])
+        setAllocationData([]);
       }
     } catch (err) {
-      if (err.message.indexOf('404') === 0) {
-        setErrors([{
-          primary: 'Failed to load report data',
-          secondary: 'Please update Kubecost to the latest version, then contact support if problems persist.'
-        }])
+      if (err.message.indexOf("404") === 0) {
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary:
+              "Please update Kubecost to the latest version, then contact support if problems persist.",
+          },
+        ]);
       } else {
-        let secondary = 'Please contact Kubecost support with a bug report if problems persist.'
+        let secondary =
+          "Please contact Kubecost support with a bug report if problems persist.";
         if (err.message.length > 0) {
-          secondary = err.message
+          secondary = err.message;
         }
-        setErrors([{
-          primary: 'Failed to load report data',
-          secondary: secondary,
-        }])
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary: secondary,
+          },
+        ]);
       }
-      setAllocationData([])
+      setAllocationData([]);
     }
 
-    setLoading(false)
-    setFetch(false)
+    setLoading(false);
+    setFetch(false);
   }
   return (
     <Page active="reports.html">
@@ -211,71 +240,71 @@ const ReportsPage = () => {
         </div>
       )}
 
-      {init && <Paper id="report">
-        <div className={classes.reportHeader}>
-          <div className={classes.titles}>
-            <Typography variant="h5">{title}</Typography>
-            <Subtitle
-              report={{ window, aggregateBy, accumulate }}
+      {init && (
+        <Paper id="report">
+          <div className={classes.reportHeader}>
+            <div className={classes.titles}>
+              <Typography variant="h5">{title}</Typography>
+              <Subtitle report={{ window, aggregateBy, accumulate }} />
+            </div>
+
+            <Controls
+              windowOptions={windowOptions}
+              window={window}
+              setWindow={(win) => {
+                searchParams.set("window", win);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              aggregationOptions={aggregationOptions}
+              aggregateBy={aggregateBy}
+              setAggregateBy={(agg) => {
+                searchParams.set("agg", agg);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              accumulateOptions={accumulateOptions}
+              accumulate={accumulate}
+              setAccumulate={(acc) => {
+                searchParams.set("acc", acc);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              title={title}
+              cumulativeData={cumulativeData}
+              currency={currency}
+              currencyOptions={currencyCodes}
+              setCurrency={(curr) => {
+                searchParams.set("currency", curr);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
             />
           </div>
 
-          <Controls
-            windowOptions={windowOptions}
-            window={window}
-            setWindow={(win) => {
-              searchParams.set('window', win);
-              routerHistory.push({
-                search: `?${searchParams.toString()}`,
-              });
-            }}
-            aggregationOptions={aggregationOptions}
-            aggregateBy={aggregateBy}
-            setAggregateBy={(agg) => {
-              searchParams.set('agg', agg);
-              routerHistory.push({
-                search: `?${searchParams.toString()}`,
-              });
-            }}
-            accumulateOptions={accumulateOptions}
-            accumulate={accumulate}
-            setAccumulate={(acc) => {
-              searchParams.set('acc', acc);
-              routerHistory.push({
-                search: `?${searchParams.toString()}`
-              });
-            }}
-            title={title}
-            cumulativeData={cumulativeData}
-            currency={currency}
-            currencyOptions={currencyCodes}
-            setCurrency={(curr) => {
-              searchParams.set('currency', curr);
-              routerHistory.push({
-                search: `?${searchParams.toString()}`
-              });
-            }}
-          />
-        </div>
-
-        {loading && (
-          <div style={{ display: 'flex', justifyContent: 'center' }}>
-            <div style={{ paddingTop: 100, paddingBottom: 100 }}>
-              <CircularProgress />
+          {loading && (
+            <div style={{ display: "flex", justifyContent: "center" }}>
+              <div style={{ paddingTop: 100, paddingBottom: 100 }}>
+                <CircularProgress />
+              </div>
             </div>
-          </div>
-        )}
-        {!loading && (
-          <AllocationReport
-            allocationData={allocationData}
-            cumulativeData={cumulativeData}
-            totalData={totalData}
-            currency={currency}
-          />
-        )}
-      </Paper>}
+          )}
+          {!loading && (
+            <AllocationReport
+              allocationData={allocationData}
+              cumulativeData={cumulativeData}
+              totalData={totalData}
+              currency={currency}
+            />
+          )}
+        </Paper>
+      )}
     </Page>
-  )
-}
+  );
+};
 
 export default React.memo(ReportsPage);

+ 4 - 17
ui/src/app.js

@@ -1,18 +1,5 @@
-import React from 'react';
-import ReactDOM from 'react-dom';
-import { BrowserRouter as Router } from 'react-router-dom';
+import * as React from "react";
+import ReactDOM from "react-dom";
+import Routes from "./route";
 
-import Reports from './Reports.js';
-
-function ReportsPage() {
-  return (
-    <Router>
-      <Reports path="/" />
-    </Router>
-  );
-}
-
-ReactDOM.render(
-  <ReportsPage />,
-  document.getElementById('app')
-);
// Application entry point: mount the route-driven UI into the page's #app container.
ReactDOM.render(<Routes />, document.getElementById("app"));

+ 217 - 0
ui/src/cloudCost/cloudCost.js

@@ -0,0 +1,217 @@
+import * as React from "react";
+import { get } from "lodash";
+import { makeStyles } from "@material-ui/styles";
+import {
+  Typography,
+  TableContainer,
+  TableCell,
+  TableHead,
+  TablePagination,
+  TableRow,
+  TableSortLabel,
+  Table,
+  TableBody,
+} from "@material-ui/core";
+
+import { toCurrency } from "../util";
+import CloudCostChart from "./cloudCostChart";
+import { CloudCostRow } from "./cloudCostRow";
+
+const CloudCost = ({
+  cumulativeData = [],
+  totalData: totalsRow = {},
+  graphData = [],
+  currency = "USD",
+  drilldown,
+  sampleData = false,
+}) => {
+  const useStyles = makeStyles({
+    noResults: {
+      padding: 24,
+    },
+  });
+
+ 
+
+  const classes = useStyles();
+
+  function descendingComparator(a, b, orderBy) {
+    if (get(b, orderBy) < get(a, orderBy)) {
+      return -1;
+    }
+    if (get(b, orderBy) > get(a, orderBy)) {
+      return 1;
+    }
+    return 0;
+  }
+
+  function getComparator(order, orderBy) {
+    return order === "desc"
+      ? (a, b) => descendingComparator(a, b, orderBy)
+      : (a, b) => -descendingComparator(a, b, orderBy);
+  }
+
+  function stableSort(array, comparator) {
+    const stabilizedThis = array.map((el, index) => [el, index]);
+    stabilizedThis.sort((a, b) => {
+      const order = comparator(a[0], b[0]);
+      if (order !== 0) return order;
+      return a[1] - b[1];
+    });
+    return stabilizedThis.map((el) => el[0]);
+  }
+
+  const headCells = [
+    {
+      id: "name",
+      numeric: false,
+      label: "Name",
+      width: "auto",
+    },
+    {
+      id: "kubernetesPercent",
+      numeric: true,
+      label: "K8s Utilization",
+      width: 160,
+    },
+    sampleData
+      ? {
+          id: "cost",
+          numeric: true,
+          label: "Sum of Sample Data",
+          width: 200,
+        }
+      : {
+          id: "cost",
+          numeric: true,
+          label: "Total cost",
+          width: 155,
+        },
+  ];
+
+  const [order, setOrder] = React.useState("desc");
+  const [orderBy, setOrderBy] = React.useState("totalCost");
+  const [page, setPage] = React.useState(0);
+  const [rowsPerPage, setRowsPerPage] = React.useState(25);
+  const numData = cumulativeData?.length;
+
+  const lastPage = Math.floor(numData / rowsPerPage);
+
+  const handleChangePage = (event, newPage) => setPage(newPage);
+
+  const handleChangeRowsPerPage = (event) => {
+    setRowsPerPage(parseInt(event.target.value, 10));
+    setPage(0);
+  };
+
+  const orderedRows = stableSort(cumulativeData, getComparator(order, orderBy));
+  const pageRows = orderedRows.slice(
+    page * rowsPerPage,
+    page * rowsPerPage + rowsPerPage
+  );
+
+  React.useEffect(() => {
+    setPage(0);
+  }, [numData]);
+
+  if (cumulativeData.length === 0) {
+    return (
+      <Typography variant="body2" className={classes.noResults}>
+        No results
+      </Typography>
+    );
+  }
+
+  function dataToCloudCostRow(row) {
+    const suffix =
+      { hourly: "/hr", monthly: "/mo", daily: "/day" }["cumulative"] || "";
+    return (
+      <CloudCostRow
+        costSuffix={suffix}
+        cost={row.cost}
+        drilldown={drilldown}
+        key={row.name}
+        kubernetesPercent={row.kubernetesPercent}
+        name={
+          sampleData && row.labelName ? row.labelName ?? "" : row.name ?? ""
+        }
+        row={row}
+        sampleData={sampleData}
+      />
+    );
+  }
+
+  return (
+    <div id="cloud-cost">
+      <div id="cloud-graph-">
+        <CloudCostChart
+          currency={currency}
+          graphData={graphData}
+          height={300}
+          n={10}
+        />
+      </div>
+      <div id="cloud-cost-table">
+        <TableContainer>
+          <Table>
+            <TableHead>
+              <TableRow>
+                {headCells.map((cell) => (
+                  <TableCell
+                    key={cell.id}
+                    colSpan={cell.colspan}
+                    align={cell.numeric ? "right" : "left"}
+                    sortDirection={orderBy === cell.id ? order : false}
+                    style={{ width: cell.width }}
+                  >
+                    <TableSortLabel
+                      active={orderBy === cell.id}
+                      direction={orderBy === cell.id ? order : "asc"}
+                      onClick={() => {
+                        const isDesc = orderBy === cell.id && order === "desc";
+                        setOrder(isDesc ? "asc" : "desc");
+                        setOrderBy(cell.id);
+                      }}
+                    >
+                      {cell.label}
+                    </TableSortLabel>
+                  </TableCell>
+                ))}
+              </TableRow>
+            </TableHead>
+            <TableBody>
+              <TableRow>
+                <TableCell align={"left"} style={{ fontWeight: 500 }}>
+                  {totalsRow?.name || "Totals"}
+                </TableCell>
+
+                <TableCell align={"right"} style={{ fontWeight: 500 }}>
+                  {Math.round(totalsRow?.kubernetesPercent * 100)}%
+                </TableCell>
+
+                <TableCell
+                  align={"right"}
+                  style={{ fontWeight: 500, paddingRight: "2em" }}
+                >
+                  {toCurrency(totalsRow?.cost || 0, currency)}
+                </TableCell>
+              </TableRow>
+              {pageRows.map(dataToCloudCostRow)}
+            </TableBody>
+          </Table>
+        </TableContainer>
+        <TablePagination
+          component="div"
+          count={numData}
+          rowsPerPage={rowsPerPage}
+          rowsPerPageOptions={[10, 25, 50]}
+          page={Math.min(page, lastPage)}
+          onChangePage={handleChangePage}
+          onChangeRowsPerPage={handleChangeRowsPerPage}
+        />
+      </div>
+    </div>
+  );
+};
+
+export default React.memo(CloudCost);

+ 14 - 0
ui/src/cloudCost/cloudCostChart/index.js

@@ -0,0 +1,14 @@
+import * as React from "react";
+
+import Typography from "@material-ui/core/Typography";
+
+import RangeChart from "./rangeChart";
+
+const CloudCostChart = ({ graphData, currency, n, height }) => {
+  if (graphData.length === 0) {
+    return <Typography variant="body2">No data</Typography>;
+  }
+  return <RangeChart data={graphData} currency={currency} height={height} />;
+};
+
+export default React.memo(CloudCostChart);

+ 275 - 0
ui/src/cloudCost/cloudCostChart/rangeChart.js

@@ -0,0 +1,275 @@
+import * as React from "react";
+import { makeStyles } from "@material-ui/styles";
+import {
+  BarChart,
+  Bar,
+  XAxis,
+  YAxis,
+  CartesianGrid,
+  Tooltip,
+  ResponsiveContainer,
+  Cell,
+} from "recharts";
+import { primary, greyscale, browns } from "../../constants/colors";
+import { toCurrency } from "../../util";
+
+const RangeChart = ({ data, currency, height }) => {
+  const useStyles = makeStyles({
+    tooltip: {
+      borderRadius: 2,
+      background: "rgba(255, 255, 255, 0.95)",
+      padding: 12,
+    },
+    tooltipLineItem: {
+      fontSize: "1rem",
+      margin: 0,
+      marginBottom: 4,
+      padding: 0,
+    },
+  });
+
+  const accents = [...primary, ...greyscale, ...browns];
+
+  const _IDLE_ = "__idle__";
+  const _OTHER_ = "others";
+
+  const getItemCost = (item) => {
+    return item.value;
+  };
+
+  function toBar({ end, graph, start }) {
+    const points = graph.map((item) => ({
+      ...item,
+      window: { end, start },
+    }));
+
+    const dateFormatter = Intl.DateTimeFormat(navigator.language, {
+      year: "numeric",
+      month: "numeric",
+      day: "numeric",
+      timeZone: "UTC",
+    });
+
+    const timeFormatter = Intl.DateTimeFormat(navigator.language, {
+      hour: "numeric",
+      minute: "numeric",
+      timeZone: "UTC",
+    });
+
+    const s = new Date(start);
+    const e = new Date(end);
+    const interval = (e.valueOf() - s.valueOf()) / 1000 / 60 / 60;
+
+    const bar = {
+      end: new Date(end),
+      key: interval >= 24 ? dateFormatter.format(s) : timeFormatter.format(s),
+      items: {},
+      start: new Date(start),
+    };
+
+    points.forEach((item) => {
+      const windowStart = new Date(item.window.start);
+      const windowEnd = new Date(item.window.end);
+      const windowHours =
+        (windowEnd.valueOf() - windowStart.valueOf()) / 1000 / 60 / 60;
+
+      if (windowHours >= 24) {
+        bar.key = dateFormatter.format(bar.start);
+      } else {
+        bar.key = timeFormatter.format(bar.start);
+      }
+
+      bar.items[item.name] = getItemCost(item);
+    });
+
+    return bar;
+  }
+
+  const getDataForCloudDay = (dayData) => {
+    const { end, start } = dayData;
+    const copy = [...dayData.items];
+
+    // find items for idle and other
+    const idleIndex = copy.findIndex((item) => item.name === _IDLE_);
+    let idle = undefined;
+    if (idleIndex > -1) {
+      idle = copy[idleIndex];
+      copy.splice(idleIndex, 1);
+    }
+    const otherIndex = copy.findIndex(
+      (i) => i.name === _OTHER_ || i.name === "other"
+    );
+    let other = undefined;
+    if (otherIndex > -1) {
+      other = { ...copy[otherIndex], name: "other" };
+      copy.splice(otherIndex, 1);
+    }
+
+    // sort and remove any items < top 8
+    const sortedItems = copy.slice().sort((a, b) => {
+      return a.value > b.value ? -1 : 1;
+    });
+
+    const top8 = sortedItems.slice(0, 8);
+    // get items that didn't make the cut and shove into other
+    const lefovers = sortedItems.slice(8);
+    if (lefovers.length > 0) {
+      const othersTotal = lefovers.reduce((a, b) => a.value + b.value);
+      if (other) {
+        other.value += othersTotal;
+      } else if (othersTotal) {
+        other = {
+          name: "other",
+          value: othersTotal,
+        };
+      }
+    }
+    // add in idle and other
+    if (idle) {
+      top8.unshift(idle);
+    }
+    if (other) {
+      top8.unshift(other);
+    }
+
+    return { end, start, graph: top8 };
+  };
+
+  const getDataForGraph = (dataPoints) => {
+    // for each day, we want top 8 + Idle and Other
+    const orderedDataPoints = dataPoints.map(getDataForCloudDay);
+    const bars = orderedDataPoints.map(toBar);
+
+    const keyToFill = {};
+    // we want to keep track of the order of fill assignment
+    const assignmentOrder = [];
+    let p = 0;
+
+    orderedDataPoints.forEach(({ graph, start, end }) => {
+      graph.forEach(({ name }) => {
+        const key = name;
+        if (keyToFill[key] === undefined) {
+          assignmentOrder.push(key);
+          if (key === _IDLE_) {
+            keyToFill[key] = browns;
+          } else if (key === _OTHER_ || key === "other") {
+            keyToFill[key] = greyscale;
+          } else {
+            // non-idle/other allocations get the next available color
+            keyToFill[key] = accents[p];
+            p = (p + 1) % accents.length;
+          }
+        }
+      });
+    });
+    // list of dataKeys and fillColors in order of importance (price w/ 'others' last)
+    const labels = assignmentOrder.map((dataKey) => ({
+      dataKey,
+      fill: keyToFill[dataKey],
+    }));
+
+    return { bars, labels, keyToFill };
+  };
+
+  const { bars: barData, labels: barLabels, keyToFill } = getDataForGraph(data);
+
+  const classes = useStyles();
+
+  const CustomTooltip = (params) => {
+    const { active, payload } = params;
+
+    if (!payload || payload.length == 0) {
+      return null;
+    }
+
+    const total = payload.reduce((sum, item) => sum + item.value, 0.0);
+    if (active) {
+      return (
+        <div className={classes.tooltip}>
+          <p
+            className={classes.tooltipLineItem}
+            style={{ color: "#000000" }}
+          >{`Total: ${toCurrency(total, currency)}`}</p>
+
+          {payload
+            .slice()
+            .map((item, i) => (
+              <div
+                key={item.name}
+                style={{
+                  display: "grid",
+                  gridTemplateColumns: "20px 1fr",
+                  gap: ".5em",
+                  margin: ".25em",
+                }}
+              >
+                <div>
+                  <div
+                    style={{
+                      backgroundColor: keyToFill[item.payload.items[i][0]],
+                      width: 18,
+                      height: 18,
+                    }}
+                  />
+                </div>
+                <div>
+                  <p className={classes.tooltipLineItem}>{`${
+                    item.payload.items[i][0]
+                  }: ${toCurrency(item.value, currency)}`}</p>
+                </div>
+              </div>
+            ))
+            .reverse()}
+        </div>
+      );
+    }
+
+    return null;
+  };
+
+  const orderedBars = barData.map((bar) => {
+    return {
+      ...bar,
+      items: Object.entries(bar.items).sort((a, b) => {
+        if (a[0] === "other") {
+          return -1;
+        }
+        if (b[0] === "other") {
+          return 1;
+        }
+        return a[1] > b[1] ? -1 : 1;
+      }),
+    };
+  });
+
+  return (
+    <ResponsiveContainer height={height} width={"100%"}>
+      <BarChart
+        data={orderedBars}
+        margin={{ top: 30, right: 35, left: 30, bottom: 45 }}
+      >
+        <CartesianGrid strokeDasharray={"3 3"} vertical={false} />
+        <XAxis dataKey={"key"} />
+        <YAxis tickFormatter={(val) => toCurrency(val, currency, 2, true)} />
+        <Tooltip content={<CustomTooltip />} wrapperStyle={{ zIndex: 1000 }} />
+
+        {new Array(10).fill(0).map((item, idx) => (
+          <Bar
+            dataKey={(entry) => (entry.items[idx] ? entry.items[idx][1] : null)}
+            stackId="x"
+          >
+            {orderedBars.map((bar) =>
+              bar.items[idx] ? (
+                <Cell fill={keyToFill[bar.items[idx][0]]} />
+              ) : (
+                <Cell />
+              )
+            )}
+          </Bar>
+        ))}
+      </BarChart>
+    </ResponsiveContainer>
+  );
+};
+
+export default RangeChart;

+ 178 - 0
ui/src/cloudCost/cloudCostDetails.js

@@ -0,0 +1,178 @@
+import * as React from "react";
+import { Modal, Paper, Typography } from "@material-ui/core";
+import Warnings from "../components/Warnings";
+import CircularProgress from "@material-ui/core/CircularProgress";
+
+import {
+  ResponsiveContainer,
+  CartesianGrid,
+  Legend,
+  XAxis,
+  YAxis,
+  Tooltip,
+  BarChart,
+  Bar,
+} from "recharts";
+import { toCurrency } from "../util";
+import cloudCostDayTotals from "../services/cloudCostDayTotals";
+
+const CloudCostDetails = ({
+  onClose,
+  selectedProviderId,
+  selectedItem,
+  agg,
+  filters,
+  costMetric,
+  window,
+  currency,
+}) => {
+  const [data, setData] = React.useState([]);
+  const [loading, setLoading] = React.useState(false);
+  const [errors, setErrors] = React.useState([]);
+  const [fetch, setFetch] = React.useState(true);
+
+  const nextFilters = [
+    ...(filters ?? []),
+    { property: "providerIds", value: selectedProviderId },
+  ];
+
+  async function fetchData() {
+    setLoading(true);
+    setErrors([]);
+
+    try {
+      const resp = await cloudCostDayTotals.fetchCloudCostData(
+        window,
+        agg,
+        costMetric,
+        nextFilters
+      );
+
+      if (resp.data) {
+        setData(resp.data);
+      } else {
+        if (resp.message && resp.message.indexOf("boundary error") >= 0) {
+          let match = resp.message.match(/(ETL is \d+\.\d+% complete)/);
+          let secondary = "Try again after ETL build is complete";
+          if (match.length > 0) {
+            secondary = `${match[1]}. ${secondary}`;
+          }
+          setErrors([
+            {
+              primary: "Data unavailable while ETL is building",
+              secondary: secondary,
+            },
+          ]);
+        }
+        setData([]);
+      }
+    } catch (err) {
+      console.log(err);
+      if (err.message.indexOf("404") === 0) {
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary:
+              "Please update Kubecost to the latest version, then contact support if problems persist.",
+          },
+        ]);
+      } else {
+        let secondary =
+          "Please contact Kubecost support with a bug report if problems persist.";
+        if (err.message.length > 0) {
+          secondary = err.message;
+        }
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary: secondary,
+          },
+        ]);
+      }
+      setData([]);
+    }
+    setLoading(false);
+    setFetch(false);
+  }
+
+  React.useEffect(() => {
+    if (fetch) {
+      fetchData();
+    }
+  }, [fetch]);
+
+  const drilldownData = data.sort(
+    (a, b) =>
+      new Date(a.date ?? "").getTime() - new Date(b.date ?? "").getTime()
+  );
+
+  const itemData = drilldownData.map((items) => {
+    const dataPoint = {
+      time: new Date(items.date),
+      cost: items.cost,
+    };
+    return dataPoint;
+  });
+
+  return (
+    <div>
+      <Modal
+        open={true}
+        onClose={onClose}
+        title={`Costs over the last ${window}`}
+        style={{ margin: "10%" }}
+      >
+        <Paper>
+          <Typography style={{ marginTop: "1rem" }} variant="body1">
+            {selectedItem}
+          </Typography>
+
+          {loading && (
+            <div style={{ display: "flex", justifyContent: "center" }}>
+              <div style={{ paddingTop: 100, paddingBottom: 100 }}>
+                <CircularProgress />
+              </div>
+            </div>
+          )}
+          {!loading && errors.length > 0 && (
+            <div style={{ marginBottom: 20 }}>
+              <Warnings warnings={errors} />
+            </div>
+          )}
+          {data && (
+            <div style={{ display: "flex", marginTop: "2.5rem" }}>
+              <ResponsiveContainer
+                height={250}
+                id={"cloud-cost-drilldown"}
+                width={"100%"}
+              >
+                <BarChart
+                  data={itemData}
+                  margin={{
+                    top: 0,
+                    bottom: 10,
+                    left: 20,
+                    right: 0,
+                  }}
+                >
+                  <CartesianGrid vertical={false} />
+                  <Legend verticalAlign={"bottom"} />
+                  <XAxis dataKey={"time"} />
+                  <YAxis tickFormatter={(tick) => `${toCurrency(tick)}`} />
+                  <Bar dataKey={"cost"} fill={"#2196f3"} name={"Item Cost"} />
+                  <Tooltip
+                    formatter={(value) =>
+                      `${toCurrency(value ?? 0, currency, 4, true)}`
+                    }
+                  />
+                </BarChart>
+              </ResponsiveContainer>
+            </div>
+          )}
+        </Paper>
+      </Modal>
+    </div>
+  );
+};
+
+export { CloudCostDetails };

+ 48 - 0
ui/src/cloudCost/cloudCostRow.js

@@ -0,0 +1,48 @@
+import * as React from "react";
+
+import { TableCell, TableRow } from "@material-ui/core";
+
+import { toCurrency } from "../util";
+import { primary } from "../constants/colors";
+
// Formats a cost amount, rendering positive values that would round to zero
// as a "<$0.01"-style label instead.
const displayCurrencyAsLessThanPenny = (amount, currency) => {
  const isSubCent = amount > 0 && amount < 0.01;
  return isSubCent
    ? `<${toCurrency(0.01, currency)}`
    : toCurrency(amount, currency);
};
+
+const CloudCostRow = ({
+  cost,
+  costSuffix,
+  currency,
+  drilldown,
+  kubernetesPercent,
+  name,
+  row,
+  sampleData,
+}) => {
+  function calculatePercent() {
+    const totalPercent = (kubernetesPercent * 100).toFixed();
+    return `${totalPercent}%`;
+  }
+
+  const whichPercent = sampleData
+    ? `${(kubernetesPercent * 100).toFixed(1)}%`
+    : calculatePercent();
+  return (
+    <TableRow onClick={() => drilldown(row)}>
+      <TableCell
+        align={"left"}
+        style={{ cursor: "pointer", color: "#346ef2", padding: "1rem" }}
+      >
+        {name}
+      </TableCell>
+      <TableCell align={"right"}>{whichPercent}</TableCell>
+      {/* total cost */}
+      <TableCell align={"right"} style={{ paddingRight: "2em" }}>
+        {`${displayCurrencyAsLessThanPenny(cost, currency)}${costSuffix}`}
+      </TableCell>
+    </TableRow>
+  );
+};
+
+export { CloudCostRow };

+ 91 - 0
ui/src/cloudCost/controls/cloudCostEditControls.js

@@ -0,0 +1,91 @@
+import { makeStyles } from "@material-ui/styles";
+import FormControl from "@material-ui/core/FormControl";
+import InputLabel from "@material-ui/core/InputLabel";
+import MenuItem from "@material-ui/core/MenuItem";
+import Select from "@material-ui/core/Select";
+
+import * as React from "react";
+
+import SelectWindow from "../../components/SelectWindow";
+
// Layout styles for the inline control bar: the wrapper lays the selects out
// side by side, and formControl gives each select consistent spacing/width.
const useStyles = makeStyles({
  wrapper: {
    display: "inline-flex",
  },
  formControl: {
    margin: 8,
    minWidth: 120,
  },
});
+
+function EditCloudCostControls({
+  windowOptions,
+  window,
+  setWindow,
+  aggregationOptions,
+  aggregateBy,
+  setAggregateBy,
+  costMetricOptions,
+  costMetric,
+  setCostMetric,
+  currencyOptions,
+  currency,
+  setCurrency,
+}) {
+  const classes = useStyles();
+  return (
+    <div className={classes.wrapper}>
+      <SelectWindow
+        windowOptions={windowOptions}
+        window={window}
+        setWindow={setWindow}
+      />
+      <FormControl className={classes.formControl}>
+        <InputLabel id="aggregation-select-label">Breakdown</InputLabel>
+        <Select
+          id="aggregation-select"
+          value={aggregateBy}
+          onChange={(e) => {
+            setAggregateBy(e.target.value);
+          }}
+        >
+          {aggregationOptions.map((opt) => (
+            <MenuItem key={opt.value} value={opt.value}>
+              {opt.name}
+            </MenuItem>
+          ))}
+        </Select>
+      </FormControl>
+      <FormControl className={classes.formControl}>
+        <InputLabel id="costMetric-label">Cost Metric</InputLabel>
+        <Select
+          id="costMetric"
+          value={costMetric}
+          onChange={(e) => setCostMetric(e.target.value)}
+        >
+          {costMetricOptions.map((opt) => (
+            <MenuItem key={opt.value} value={opt.value}>
+              {opt.name}
+            </MenuItem>
+          ))}
+        </Select>
+      </FormControl>
+      <FormControl className={classes.formControl}>
+        <InputLabel id="currency-label">Currency</InputLabel>
+        <Select
+          id="currency"
+          value={currency}
+          onChange={(e) => setCurrency(e.target.value)}
+        >
+          {currencyOptions?.map((currency) => (
+            <MenuItem key={currency} value={currency}>
+              {currency}
+            </MenuItem>
+          ))}
+        </Select>
+      </FormControl>
+    </div>
+  );
+}
+
+export default React.memo(EditCloudCostControls);

+ 53 - 0
ui/src/cloudCost/tokens.js

@@ -0,0 +1,53 @@
// Shared option tokens for the Cloud Cost report UI.

// Selectable time windows. Commented-out entries are windows the API
// supports but that are intentionally hidden for now.
const windowOptions = [
  { name: "Today", value: "today" },
  { name: "Yesterday", value: "yesterday" },
  { name: "Week-to-date", value: "week" },
  // { name: "Month-to-date", value: "month" },
  { name: "Last week", value: "lastweek" },
  // { name: "Last month", value: "lastmonth" },
  { name: "Last 24h", value: "24h" },
  { name: "Last 48h", value: "48h" },
  { name: "Last 7 days", value: "7d" },
  // { name: "Last 30 days", value: "30d" },
  // { name: "Last 60 days", value: "60d" },
  // { name: "Last 90 days", value: "90d" },
];

// Aggregation ("breakdown") dimensions for the report.
const aggregationOptions = [
  { name: "Account", value: "accountID" },
  { name: "Invoice Entity", value: "invoiceEntityID" },
  { name: "Provider", value: "provider" },
  // was "Service " — stray trailing space showed in the dropdown and
  // disagreed with aggMap.service below
  { name: "Service", value: "service" },
  { name: "Category", value: "category" },
  { name: "Item", value: "item" },
];

// Cost metrics the backend can report on.
const costMetricOptions = [
  { name: "Amortized Net Cost", value: "AmortizedNetCost" },
  { name: "List Cost", value: "ListCost" },
  { name: "Invoiced Cost", value: "InvoicedCost" },
  { name: "Amortized Cost", value: "AmortizedCost" },
];

// Display names for aggregation keys, used when building drill-down filters.
const aggMap = {
  invoiceEntityID: "Invoice Entity",
  provider: "Provider",
  service: "Service",
  accountID: "Account",
};

// Maps a cost-metric API value to the row property carrying that cost.
const costMetricToPropName = {
  AmortizedNetCost: "amortizedNetCost",
  AmortizedCost: "amortizedCost",
  ListCost: "listCost",
  NetCost: "netCost",
  InvoicedCost: "invoicedCost",
};

export {
  windowOptions,
  aggregationOptions,
  costMetricOptions,
  aggMap,
  costMetricToPropName,
};

+ 305 - 0
ui/src/cloudCostReports.js

@@ -0,0 +1,305 @@
+import * as React from "react";
+import Page from "./components/Page";
+import Header from "./components/Header";
+import IconButton from "@material-ui/core/IconButton";
+import RefreshIcon from "@material-ui/icons/Refresh";
+import { makeStyles } from "@material-ui/styles";
+import { Paper, Typography } from "@material-ui/core";
+import CircularProgress from "@material-ui/core/CircularProgress";
+import { get, find } from "lodash";
+import { useLocation, useHistory } from "react-router";
+
+import { checkCustomWindow, toVerboseTimeRange } from "./util";
+import CloudCostEditControls from "./cloudCost/controls/cloudCostEditControls";
+import Subtitle from "./components/Subtitle";
+import Warnings from "./components/Warnings";
+import CloudCostTopService from "./services/cloudCostTop";
+
+import {
+  windowOptions,
+  costMetricOptions,
+  aggregationOptions,
+  aggMap,
+} from "./cloudCost/tokens";
+import { currencyCodes } from "./constants/currencyCodes";
+import CloudCost from "./cloudCost/cloudCost";
+import { CloudCostDetails } from "./cloudCost/cloudCostDetails";
+
+const CloudCostReports = () => {
+  const useStyles = makeStyles({
+    reportHeader: {
+      display: "flex",
+      flexFlow: "row",
+      padding: 24,
+    },
+    titles: {
+      flexGrow: 1,
+    },
+  });
+  const classes = useStyles();
+
+  // Form state, which controls form elements, but not the report itself. On
+  // certain actions, the form state may flow into the report state.
+  const [title, setTitle] = React.useState(
+    "Cumulative cost for last 7 days by account"
+  );
+  const [window, setWindow] = React.useState(windowOptions[0].value);
+  const [aggregateBy, setAggregateBy] = React.useState(
+    aggregationOptions[0].value
+  );
+  const [costMetric, setCostMetric] = React.useState(
+    costMetricOptions[0].value
+  );
+  const [filters, setFilters] = React.useState([]);
+  const [currency, setCurrency] = React.useState("USD");
+  const [selectedProviderId, setSelectedProviderId] = React.useState("");
+  const [selectedItemName, setselectedItemName] = React.useState("");
+  const sampleData = aggregateBy.includes("item");
+  // page and settings state
+  const [init, setInit] = React.useState(false);
+  const [fetch, setFetch] = React.useState(false);
+  const [loading, setLoading] = React.useState(true);
+  const [errors, setErrors] = React.useState([]);
+
+  // data
+  const [cloudCostData, setCloudCostData] = React.useState([]);
+
+  function generateTitle({ window, aggregateBy, costMetric }) {
+    let windowName = get(find(windowOptions, { value: window }), "name", "");
+    if (windowName === "") {
+      if (checkCustomWindow(window)) {
+        windowName = toVerboseTimeRange(window);
+      } else {
+        console.warn(`unknown window: ${window}`);
+      }
+    }
+
+    let aggregationName = get(
+      find(aggregationOptions, { value: aggregateBy }),
+      "name",
+      ""
+    ).toLowerCase();
+    if (aggregationName === "") {
+      console.warn(`unknown aggregation: ${aggregateBy}`);
+    }
+
+    let str = `Cumulative cost for ${windowName} by ${aggregationName}`;
+
+    if (!costMetric) {
+      str = `${str} amoritizedNetCost`;
+    }
+
+    return str;
+  }
+
+  // parse any context information from the URL
+  const routerLocation = useLocation();
+  const searchParams = new URLSearchParams(routerLocation.search);
+  const routerHistory = useHistory();
+
+  async function initialize() {
+    setInit(true);
+  }
+
+  async function fetchData() {
+    setLoading(true);
+    setErrors([]);
+    try {
+      const resp = await CloudCostTopService.fetchCloudCostData(
+        window,
+        aggregateBy,
+        costMetric,
+        filters
+      );
+      if (resp) {
+        setCloudCostData(resp);
+      } else {
+        if (resp.message && resp.message.indexOf("boundary error") >= 0) {
+          let match = resp.message.match(/(ETL is \d+\.\d+% complete)/);
+          let secondary = "Try again after ETL build is complete";
+          if (match.length > 0) {
+            secondary = `${match[1]}. ${secondary}`;
+          }
+          setErrors([
+            {
+              primary: "Data unavailable while ETL is building",
+              secondary: secondary,
+            },
+          ]);
+        }
+        setCloudCostData([]);
+      }
+    } catch (err) {
+      if (err.message.indexOf("404") === 0) {
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary:
+              "Please update Kubecost to the latest version, then contact support if problems persist.",
+          },
+        ]);
+      } else {
+        let secondary =
+          "Please contact Kubecost support with a bug report if problems persist.";
+        if (err.message.length > 0) {
+          secondary = err.message;
+        }
+        setErrors([
+          {
+            primary: "Failed to load report data",
+            secondary: secondary,
+          },
+        ]);
+      }
+      setCloudCostData([]);
+    }
+    setLoading(false);
+  }
+
+  function drilldown(row) {
+    if (aggregateBy.includes("item")) {
+      try {
+        setSelectedProviderId(row.providerID);
+        setselectedItemName(row.labelName ?? row.name);
+      } catch (e) {
+        logger.error(e);
+      }
+
+      return;
+    }
+    const nameParts = row.name.split("/");
+    const nextAgg = aggregateBy.includes("service") ? "item" : "service";
+    const aggToString = [aggregateBy];
+    const newFilters = aggToString.map((property, i) => {
+      const value = nameParts[i];
+      return {
+        property,
+        value,
+        name: aggMap[property] || property,
+      };
+    });
+    setFilters(newFilters);
+    setAggregateBy(nextAgg);
+  }
+
+  React.useEffect(() => {
+    setWindow(searchParams.get("window") || "7d");
+    setAggregateBy(searchParams.get("agg") || "provider");
+    setCostMetric(searchParams.get("costMetric") || "AmortizedNetCost");
+    setCurrency(searchParams.get("currency") || "USD");
+  }, [routerLocation]);
+
+  // Initialize once, then fetch report each time setFetch(true) is called
+  React.useEffect(() => {
+    if (!init) {
+      initialize();
+    }
+    if (init || fetch) {
+      fetchData();
+    }
+  }, [init, fetch]);
+
+  React.useEffect(() => {
+    setFetch(!fetch);
+    setTitle(generateTitle({ window, aggregateBy, costMetric }));
+  }, [window, aggregateBy, costMetric, filters]);
+
+  return (
+    <Page active="cloud.html">
+      <Header>
+        <IconButton aria-label="refresh" onClick={() => setFetch(true)}>
+          <RefreshIcon />
+        </IconButton>
+      </Header>
+
+      {!loading && errors.length > 0 && (
+        <div style={{ marginBottom: 20 }}>
+          <Warnings warnings={errors} />
+        </div>
+      )}
+
+      {init && (
+        <Paper id="cloud-cost">
+          <div className={classes.reportHeader}>
+            <div className={classes.titles}>
+              <Typography variant="h5">{title}</Typography>
+              <Subtitle report={{ window, aggregateBy }} />
+            </div>
+            <CloudCostEditControls
+              windowOptions={windowOptions}
+              window={window}
+              setWindow={(win) => {
+                searchParams.set("window", win);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              aggregationOptions={aggregationOptions}
+              aggregateBy={aggregateBy}
+              setAggregateBy={(agg) => {
+                searchParams.set("agg", agg);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              costMetricOptions={costMetricOptions}
+              costMetric={costMetric}
+              setCostMetric={(c) => {
+                searchParams.set("costMetric", c);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+              title={title}
+              // cumulativeData={cumulativeData}
+              currency={currency}
+              currencyOptions={currencyCodes}
+              setCurrency={(curr) => {
+                searchParams.set("currency", curr);
+                routerHistory.push({
+                  search: `?${searchParams.toString()}`,
+                });
+              }}
+            />
+          </div>
+
+          {loading && (
+            <div style={{ display: "flex", justifyContent: "center" }}>
+              <div style={{ paddingTop: 100, paddingBottom: 100 }}>
+                <CircularProgress />
+              </div>
+            </div>
+          )}
+
+          {!loading && (
+            <CloudCost
+              cumulativeData={cloudCostData.tableRows}
+              currency={currency}
+              graphData={cloudCostData.graphData}
+              totalData={cloudCostData.tableTotal}
+              drilldown={drilldown}
+              sampleData={sampleData}
+            />
+          )}
+          {selectedProviderId && selectedItemName && (
+            <CloudCostDetails
+              onClose={() => {
+                setSelectedProviderId("");
+                setselectedItemName("");
+              }}
+              selectedProviderId={selectedProviderId}
+              selectedItem={selectedItemName}
+              agg={aggregateBy}
+              filters={filters}
+              costMetric={costMetric}
+              window={window}
+              currency={currency}
+            />
+          )}
+        </Paper>
+      )}
+    </Page>
+  );
+};
+
+export default React.memo(CloudCostReports);

+ 0 - 200
ui/src/components/AllocationReport.js

@@ -1,200 +0,0 @@
-import React, { useEffect, useState } from 'react'
-import { get, round } from 'lodash'
-import { makeStyles } from '@material-ui/styles'
-import Table from '@material-ui/core/Table'
-import TableBody from '@material-ui/core/TableBody'
-import TableCell from '@material-ui/core/TableCell'
-import TableContainer from '@material-ui/core/TableContainer'
-import TableHead from '@material-ui/core/TableHead'
-import TablePagination from '@material-ui/core/TablePagination'
-import TableRow from '@material-ui/core/TableRow'
-import TableSortLabel from '@material-ui/core/TableSortLabel'
-import Typography from '@material-ui/core/Typography'
-import AllocationChart from './AllocationChart';
-import { toCurrency } from '../util';
-
-const useStyles = makeStyles({
-  noResults: {
-    padding: 24,
-  },
-})
-
-function descendingComparator(a, b, orderBy) {
-  if (get(b, orderBy) < get(a, orderBy)) {
-    return -1
-  }
-  if (get(b, orderBy) > get(a, orderBy)) {
-    return 1
-  }
-  return 0
-}
-
-function getComparator(order, orderBy) {
-  return order === 'desc'
-    ? (a, b) => descendingComparator(a, b, orderBy)
-    : (a, b) => -descendingComparator(a, b, orderBy)
-}
-
-function stableSort(array, comparator) {
-  const stabilizedThis = array.map((el, index) => [el, index])
-  stabilizedThis.sort((a, b) => {
-    const order = comparator(a[0], b[0])
-    if (order !== 0) return order
-    return a[1] - b[1]
-  })
-  return stabilizedThis.map((el) => el[0])
-}
-
-const headCells = [
-  { id: 'name', numeric: false, label: 'Name', width: 'auto' },
-  { id: 'cpuCost', numeric: true, label: 'CPU', width: 90 },
-  { id: 'ramCost', numeric: true, label: "RAM", width: 90 },
-  { id: 'pvCost', numeric: true, label: 'PV', width: 90 },
-  { id: 'totalEfficiency', numeric: true, label: 'Efficiency', width: 90 },
-  { id: 'totalCost', numeric: true, label: 'Total cost', width: 90 },
-]
-
-const AllocationReport = ({ allocationData, cumulativeData, totalData, currency }) => {
-  const classes = useStyles()
-
-  if (allocationData.length === 0) {
-    return <Typography variant="body2" className={classes.noResults}>No results</Typography>
-  }
-
-  const [order, setOrder] = React.useState('desc')
-  const [orderBy, setOrderBy] = React.useState('totalCost')
-  const [page, setPage] = useState(0)
-  const [rowsPerPage, setRowsPerPage] = useState(25)
-  const numData = cumulativeData.length
-
-  useEffect(() => {
-    setPage(0)
-  }, [numData])
-
-  const lastPage = Math.floor(numData / rowsPerPage)
-
-  const handleChangePage = (event, newPage) => setPage(newPage)
-
-  const handleChangeRowsPerPage = event => {
-    setRowsPerPage(parseInt(event.target.value, 10))
-    setPage(0)
-  }
-
-  const createSortHandler = (property) => (event) => handleRequestSort(event, property)
-
-  const handleRequestSort = (event, property) => {
-    const isDesc = orderBy === property && order === 'desc'
-    setOrder(isDesc ? 'asc' : 'desc')
-    setOrderBy(property)
-  }
-
-  const orderedRows = stableSort(cumulativeData, getComparator(order, orderBy))
-  const pageRows = orderedRows.slice(page * rowsPerPage, page * rowsPerPage + rowsPerPage)
-
-  return (
-    <div id="report">
-      <AllocationChart allocationRange={allocationData} currency={currency} n={10} height={300} />
-      <TableContainer>
-        <Table>
-          <TableHead>
-            <TableRow>
-              {headCells.map((cell) => (
-                <TableCell
-                  key={cell.id}
-                  colSpan={cell.colspan}
-                  align={cell.numeric ? 'right' : 'left'}
-                  sortDirection={orderBy === cell.id ? order : false}
-                  style={{ width: cell.width }}
-                >
-                  <TableSortLabel
-                    active={orderBy === cell.id}
-                    direction={orderBy === cell.id ? order : 'asc'}
-                    onClick={createSortHandler(cell.id)}
-                  >
-                    {cell.label}
-                  </TableSortLabel>
-                </TableCell>
-              ))}
-            </TableRow>
-          </TableHead>
-          <TableBody>
-            <TableRow>
-              {headCells.map((cell) => {
-                return (
-                <TableCell
-                  key={cell.id}
-                  colSpan={cell.colspan}
-                  align={cell.numeric ? 'right' : 'left'}
-                  style={{ fontWeight: 500 }}
-                >
-                  {cell.numeric
-                    ? (cell.label === 'Efficiency'
-                      ? (totalData.totalEfficiency == 1.0 && totalData.cpuReqCoreHrs == 0 && totalData.ramReqByteHrs == 0)
-                        ? "Inf%"
-                        : `${round(totalData.totalEfficiency*100, 1)}%`
-                      : toCurrency(totalData[cell.id], currency))
-                    : totalData[cell.id]}
-                </TableCell>
-              )})}
-            </TableRow>
-            {pageRows.map((row, key) => {
-              if (row.name === "__unmounted__") {
-                row.name = "Unmounted PVs"
-              }
-
-              let isIdle = row.name.indexOf("__idle__") >= 0
-              let isUnallocated = row.name.indexOf("__unallocated__") >= 0
-              let isUnmounted = row.name.indexOf("Unmounted PVs") >= 0
-
-              // Replace "efficiency" with Inf if there is usage w/o request
-              let efficiency = round(row.totalEfficiency*100, 1)
-              if (row.totalEfficiency == 1.0 && row.cpuReqCoreHrs == 0 && row.ramReqByteHrs == 0) {
-                efficiency = "Inf"
-              }
-
-              // Do not allow drill-down for idle and unallocated rows
-              if (isIdle || isUnallocated || isUnmounted) {
-                return (
-                  <TableRow key={key}>
-                    <TableCell align="left">{row.name}</TableCell>
-                    <TableCell align="right">{toCurrency(row.cpuCost, currency)}</TableCell>
-                    <TableCell align="right">{toCurrency(row.ramCost, currency)}</TableCell>
-                    <TableCell align="right">{toCurrency(row.pvCost, currency)}</TableCell>
-                    {isIdle ? (
-                      <TableCell align="right">&mdash;</TableCell>
-                    ) : (
-                      <TableCell align="right">{efficiency}%</TableCell>
-                    )}
-                    <TableCell align="right">{toCurrency(row.totalCost, currency)}</TableCell>
-                  </TableRow>
-                )
-              }
-
-              return (
-                <TableRow key={key}>
-                  <TableCell align="left">{row.name}</TableCell>
-                  <TableCell align="right">{toCurrency(row.cpuCost, currency)}</TableCell>
-                  <TableCell align="right">{toCurrency(row.ramCost, currency)}</TableCell>
-                  <TableCell align="right">{toCurrency(row.pvCost, currency)}</TableCell>
-                  <TableCell align="right">{efficiency}%</TableCell>
-                  <TableCell align="right">{toCurrency(row.totalCost, currency)}</TableCell>
-                </TableRow>
-              )
-            })}
-          </TableBody>
-        </Table>
-      </TableContainer>
-      <TablePagination
-        component="div"
-        count={numData}
-        rowsPerPage={rowsPerPage}
-        rowsPerPageOptions={[10, 25, 50]}
-        page={Math.min(page, lastPage)}
-        onChangePage={handleChangePage}
-        onChangeRowsPerPage={handleChangeRowsPerPage}
-      />
-    </div>
-  )
-}
-
-export default React.memo(AllocationReport)

+ 37 - 25
ui/src/components/Header.js

@@ -1,48 +1,60 @@
-import React from 'react'
-import { makeStyles } from '@material-ui/styles'
-import Breadcrumbs from '@material-ui/core/Breadcrumbs';
-import Link from '@material-ui/core/Link';
-import Typography from '@material-ui/core/Typography';
+import * as React from "react";
+import { makeStyles } from "@material-ui/styles";
+import Breadcrumbs from "@material-ui/core/Breadcrumbs";
+import Link from "@material-ui/core/Link";
+import Typography from "@material-ui/core/Typography";
+import { useLocation } from "react-router-dom";
 
 const useStyles = makeStyles({
   root: {
-    alignItems: 'center',
-    display: 'flex',
-    flexFlow: 'row',
-    marginBottom: 20,
-    width: '100%',
+    alignItems: "center",
+    display: "flex",
+    flexFlow: "row",
+    width: "100%",
+    marginTop: "10px",
   },
   context: {
-    flex: '1 0 auto',
+    flex: "1 0 auto",
   },
   actions: {
-    flex: '0 0 auto',
+    flex: "0 0 auto",
   },
 });
 
 const Header = (props) => {
-  const classes = useStyles()
-  const { title, breadcrumbs } = props
+  const classes = useStyles();
+  const { title, breadcrumbs } = props;
+  const { pathname } = useLocation();
+
+  const headerTitle = pathname === "/cloud" ? "Cloud Costs" : "Cost Allocation";
 
   return (
     <div className={classes.root}>
-      <img src={ require('../images/logo.png') } alt="OpenCost" />
+      <Typography variant="h3" style={{ marginBottom: "10px" }}>
+        {headerTitle}
+      </Typography>
       <div className={classes.context}>
-        {title && <Typography variant="h4" className={classes.title}>{props.title}</Typography>}
+        {title && (
+          <Typography variant="h4" className={classes.title}>
+            {props.title}
+          </Typography>
+        )}
         {breadcrumbs && breadcrumbs.length > 0 && (
           <Breadcrumbs aria-label="breadcrumb">
-            {breadcrumbs.slice(0, breadcrumbs.length-1).map(b => (
-              <Link color="inherit" href={b.href} key={b.name}>{b.name}</Link>
+            {breadcrumbs.slice(0, breadcrumbs.length - 1).map((b) => (
+              <Link color="inherit" href={b.href} key={b.name}>
+                {b.name}
+              </Link>
             ))}
-            <Typography color="textPrimary">{breadcrumbs[breadcrumbs.length-1].name}</Typography>
+            <Typography color="textPrimary">
+              {breadcrumbs[breadcrumbs.length - 1].name}
+            </Typography>
           </Breadcrumbs>
         )}
       </div>
-      <div className={classes.actions}>
-        {props.children}
-      </div>
+      <div className={classes.actions}>{props.children}</div>
     </div>
-  )
-}
+  );
+};
 
-export default Header
+export default Header;

+ 78 - 0
ui/src/components/Nav/NavItem.js

@@ -0,0 +1,78 @@
+import * as React from "react";
+import { ListItem, ListItemIcon, ListItemText } from "@material-ui/core";
+import { Link } from "react-router-dom";
+import { makeStyles } from "@material-ui/styles";
+
+const NavItem = ({ active, href, name, onClick, secondary, title, icon }) => {
+  const useStyles = makeStyles({
+    root: {
+      cursor: "pointer",
+      "&:hover": {
+        backgroundColor: "#ebebeb",
+      },
+      "&:selected": {
+        backgroundColor: "#e1e1e1",
+      },
+    },
+    text: {
+      maxWidth: 200,
+      overflow: "hidden",
+      textOverflow: "ellipsis",
+      whiteSpace: "nowrap",
+    },
+    activeIcon: {
+      color: "#346ef2",
+      minWidth: 36,
+    },
+    activeText: {
+      color: "#346ef2",
+    },
+    icon: {
+      color: "#4e4e4e",
+      minWidth: 36,
+    },
+  });
+  const classes = useStyles();
+
+  const listItemIconClasses = { root: classes.icon };
+  const listItemTextClasses = {
+    secondary: classes.text,
+  };
+
+  if (active) {
+    listItemIconClasses.root = classes.activeIcon;
+    listItemTextClasses.primary = classes.activeText;
+  }
+
+  const renderListItemCore = () => (
+    <ListItem
+      className={active ? "active" : ""}
+      classes={{ root: classes.root }}
+      onClick={(e) => {
+        if (onClick) {
+          onClick();
+          e.stopPropagation();
+        }
+      }}
+      selected={active}
+      title={title}
+    >
+      <ListItemIcon classes={listItemIconClasses}>{icon}</ListItemIcon>
+      <ListItemText
+        classes={listItemTextClasses}
+        primary={name}
+        secondary={secondary}
+      />
+    </ListItem>
+  );
+
+  return href && !active ? (
+    <Link style={{ textDecoration: "none", color: "inherit" }} to={`${href}`}>
+      {renderListItemCore()}
+    </Link>
+  ) : (
+    renderListItemCore()
+  );
+};
+
+export { NavItem };

+ 70 - 0
ui/src/components/Nav/SidebarNav.js

@@ -0,0 +1,70 @@
+import * as React from "react";
+import { Drawer, List } from "@material-ui/core";
+
+import { NavItem } from "./NavItem";
+import { BarChart } from "@material-ui/icons";
+import { Cloud } from "@material-ui/icons";
+import { makeStyles } from "@material-ui/styles";
+
+const DRAWER_WIDTH = 200;
+
+const SidebarNav = ({ active }) => {
+  const useStyles = makeStyles({
+    drawer: {
+      width: DRAWER_WIDTH,
+      flexShrink: 0,
+    },
+    drawerPaper: {
+      backgroundColor: "inherit",
+      border: 0,
+      width: DRAWER_WIDTH,
+      paddingTop: "2.5rem",
+    },
+    text: {
+      overflow: "hidden",
+      textOverflow: "ellipsis",
+      whiteSpace: "nowrap",
+    },
+  });
+
+  const classes = useStyles();
+
+  const [init, setInit] = React.useState(false);
+
+  React.useEffect(() => {
+    if (!init) {
+      setInit(true);
+    }
+  }, [init]);
+
+  const top = [
+    {
+      name: "Cost Allocation",
+      href: "allocation",
+      icon: <BarChart />,
+    },
+    { name: "Cloud Costs", href: "cloud", icon: <Cloud /> },
+  ];
+
+  return (
+    <Drawer
+      anchor={"left"}
+      className={classes.drawer}
+      classes={{ paper: classes.drawerPaper }}
+      variant={"permanent"}
+    >
+      <img
+        src={require("../../images/logo.png")}
+        alt="OpenCost"
+        style={{ flexShrink: 1, padding: "1rem" }}
+      />
+      <List style={{ flexGrow: 1 }}>
+        {top.map((l) => (
+          <NavItem active={active === `/${l.href}`} key={l.name} {...l} />
+        ))}
+      </List>
+    </Drawer>
+  );
+};
+
+export { SidebarNav };

+ 3 - 0
ui/src/components/Nav/index.js

@@ -0,0 +1,3 @@
+import { SidebarNav } from "./SidebarNav";
+
+export default SidebarNav;

+ 32 - 19
ui/src/components/Page.js

@@ -1,33 +1,46 @@
-import { makeStyles } from '@material-ui/styles'
-import React from 'react'
+import { makeStyles } from "@material-ui/styles";
+import * as React from "react";
+import { useLocation } from "react-router-dom";
+import { SidebarNav } from "./Nav/SidebarNav";
 
 const useStyles = makeStyles({
   wrapper: {
-    display: 'flex',
-    flexFlow: 'column',
+    position: "relative",
+    height: "100vh",
     flexGrow: 1,
-    margin: '20px 30px 0 30px',
-    minWidth: 800,
+    overflowX: "auto",
+    paddingLeft: "2rem",
+    paddingRight: "rem",
+    paddingTop: "2.5rem",
   },
   flexGrow: {
-    display: 'flex',
-    flexFlow: 'column',
+    display: "flex",
+    flexFlow: "column",
     flexGrow: 1,
-  }
-})
+  },
+  body: {
+    display: "flex",
+    overflowY: "scroll",
+    margin: "0px",
+    backgroundColor: "f3f3f3",
+  },
+});
+
+const Page = (props) => {
+  const classes = useStyles();
 
-const Page = props => {
-  const classes = useStyles()
+  const { pathname } = useLocation();
 
   return (
-    <div className={classes.flexGrow}>
-      <div className={classes.wrapper}>
-        <div className={classes.flexGrow}>
-          {props.children}
+    <div className={classes.body}>
+      <SidebarNav active={pathname} />
+      <div className={classes.flexGrow}>
+        <div className={classes.wrapper}>
+          <div className={classes.flexGrow}>{props.children}</div>
         </div>
       </div>
     </div>
-  )
-}
+  );
+};
 
-export default Page
+export default Page;

Некоторые файлы не были показаны из-за большого количества измененных файлов