Explorar el Código

merge with master

Signed-off-by: r2k1 <yokree@gmail.com>
r2k1 hace 1 año
padre
commit
72569b8e03
Se han modificado 100 ficheros con 5057 adiciones y 1697 borrados
  1. 2 2
      .github/ISSUE_TEMPLATE/opencost-bug-report.md
  2. 0 6
      .github/dependabot.yml
  3. 7 36
      .github/workflows/build-and-publish-release.yml
  4. 6 42
      .github/workflows/build-test.yaml
  5. 94 33
      .github/workflows/sonar.yaml
  6. 2 5
      .gitignore
  7. 5 3
      MAINTAINERS.md
  8. 4 2
      README.md
  9. 1 1
      core/go.mod
  10. 11 7
      core/pkg/filter/cloudcost/fields.go
  11. 4 0
      core/pkg/filter/cloudcost/parser.go
  12. 76 0
      core/pkg/filter/cloudcost/parser_test.go
  13. 6 2
      core/pkg/filter/fieldstrings/fieldstrings.go
  14. 11 3
      core/pkg/log/log.go
  15. 64 0
      core/pkg/log/log_test.go
  16. 74 3
      core/pkg/opencost/allocation.go
  17. 12 0
      core/pkg/opencost/allocation_json.go
  18. 2 2
      core/pkg/opencost/bingen.go
  19. 8 0
      core/pkg/opencost/cloudcost.go
  20. 8 0
      core/pkg/opencost/cloudcostmatcher.go
  21. 78 31
      core/pkg/opencost/cloudcostprops.go
  22. 200 95
      core/pkg/opencost/cloudcostprops_test.go
  23. 154 47
      core/pkg/opencost/opencost_codecs.go
  24. 31 1
      core/pkg/opencost/summaryallocation.go
  25. 21 1
      core/pkg/opencost/summaryallocation_json.go
  26. 7 49
      core/pkg/opencost/window.go
  27. 45 111
      core/pkg/opencost/window_test.go
  28. 4 0
      core/pkg/util/buffer.go
  29. 274 0
      core/pkg/util/buffer_test.go
  30. 11 0
      core/pkg/util/filterutil/filterparams.go
  31. 3 3
      core/pkg/util/filterutil/filterutil.go
  32. 8 9
      core/pkg/util/timeutil/timeutil_test.go
  33. 160 5
      core/pkg/util/worker/worker.go
  34. 137 1
      core/pkg/util/worker/worker_test.go
  35. 50 45
      go.mod
  36. 103 113
      go.sum
  37. 10 1
      justfile
  38. 54 45
      pkg/cloud/alibaba/provider.go
  39. 143 5
      pkg/cloud/alibaba/provider_test.go
  40. 31 9
      pkg/cloud/aws/athenaintegration.go
  41. 4 0
      pkg/cloud/aws/athenaquerier.go
  42. 2 3
      pkg/cloud/aws/authorizer.go
  43. 13 39
      pkg/cloud/aws/provider.go
  44. 259 9
      pkg/cloud/aws/provider_test.go
  45. 178 76
      pkg/cloud/aws/s3selectintegration.go
  46. 12 0
      pkg/cloud/aws/s3selectquerier.go
  47. 10 7
      pkg/cloud/azure/azurestorageintegration.go
  48. 68 32
      pkg/cloud/azure/billingexportparser.go
  49. 6 1
      pkg/cloud/azure/provider.go
  50. 2 2
      pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv
  51. 1 1
      pkg/cloud/azure/resources/billingexports/values/Template.csv
  52. 2 2
      pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv
  53. 99 61
      pkg/cloud/azure/storagebillingparser.go
  54. 70 6
      pkg/cloud/azure/storageconnection.go
  55. 59 9
      pkg/cloud/config/configurations.go
  56. 115 57
      pkg/cloud/config/controller.go
  57. 38 15
      pkg/cloud/config/controller_test.go
  58. 6 0
      pkg/cloud/config/statuses.go
  59. 14 2
      pkg/cloud/gcp/bigqueryintegration.go
  60. 1 1
      pkg/cloud/gcp/bigqueryintegration_test.go
  61. 43 0
      pkg/cloud/gcp/bigqueryintegration_types.go
  62. 75 0
      pkg/cloud/gcp/bigqueryintegration_types_test.go
  63. 23 2
      pkg/cloud/gcp/provider.go
  64. 180 154
      pkg/cloud/gcp/provider_test.go
  65. 17 0
      pkg/cloud/gcp/test/error.json
  66. 130 0
      pkg/cloud/oracle/authorizer.go
  67. 131 0
      pkg/cloud/oracle/usageapiconfiguration.go
  68. 318 0
      pkg/cloud/oracle/usageapiconfiguration_test.go
  69. 171 0
      pkg/cloud/oracle/usageapiintegration.go
  70. 61 0
      pkg/cloud/oracle/usageapiintegration_test.go
  71. 1 1
      pkg/cloud/provider/csvprovider.go
  72. 43 0
      pkg/cloud/provider/provider.go
  73. 44 0
      pkg/cloud/provider/provider_test.go
  74. 1 1
      pkg/cloud/scaleway/provider.go
  75. 17 3
      pkg/cloudcost/integration.go
  76. 1 13
      pkg/clustercache/watchcontroller.go
  77. 84 35
      pkg/cmd/costmodel/costmodel.go
  78. 3 1
      pkg/costmodel/aggregation.go
  79. 6 0
      pkg/costmodel/allocation.go
  80. 49 1
      pkg/costmodel/allocation_helpers.go
  81. 192 90
      pkg/costmodel/cluster.go
  82. 54 0
      pkg/costmodel/cluster_test.go
  83. 103 59
      pkg/costmodel/costmodel.go
  84. 87 0
      pkg/costmodel/costmodel_test.go
  85. 13 12
      pkg/costmodel/promparsers.go
  86. 113 178
      pkg/costmodel/router.go
  87. 7 4
      pkg/customcost/queryservice_helper.go
  88. 53 0
      pkg/customcost/queryservice_helper_test.go
  89. 13 0
      pkg/env/costmodelenv.go
  90. 3 1
      pkg/metrics/metricsconfig.go
  91. 53 1
      pkg/metrics/podlabelmetrics.go
  92. 77 0
      pkg/metrics/podlabelmetrics_test.go
  93. 1 1
      pkg/storage/prefixedbucketstorage.go
  94. 7 7
      pkg/storage/s3storage.go
  95. 3 0
      pkg/storage/storage.go
  96. 0 3
      ui/.babelrc
  97. 0 2
      ui/.dockerignore
  98. 0 1
      ui/.nvmrc
  99. 0 48
      ui/Dockerfile
  100. 0 38
      ui/Dockerfile.cross

+ 2 - 2
.github/ISSUE_TEMPLATE/opencost-bug-report.md

@@ -8,7 +8,7 @@ assignees: ''
 ---
 
 **Describe the bug**
-A clear and concise description of what the OpenCost bug is. Please ensure this is an issue related to the OpenCost cost model, API, UI or specification. Public Kubecost bugs may be opened at https://github.com/kubecost/features-bugs
+A clear and concise description of what the OpenCost bug is. Please ensure this is an issue related to the OpenCost cost model, API or specification. UI issues may be opened in the [OpenCost UI repository](https://github.com/opencost/opencost-ui).
 
 **To Reproduce**
 Steps to reproduce the behavior:
@@ -24,7 +24,7 @@ A clear and concise description of what you expected to happen.
 If applicable, add screenshots to help explain your problem.
 
 **Which version of OpenCost are you using?**
-This may be the Kubecost release.
+You can find the version from the container's startup logging or from the bottom of the page in the UI.
 
 **Additional context**
 Add any other context about the problem here. Kubernetes versions and which public clouds you are working with are especially important.

+ 0 - 6
.github/dependabot.yml

@@ -11,9 +11,3 @@ updates:
     directory: "/"
     schedule:
       interval: "weekly"
-
-  # Dependencies listed in ui/package.json
-  - package-ecosystem: "npm"
-    directory: "/ui"
-    schedule:
-      interval: "weekly"

+ 7 - 36
.github/workflows/build-and-publish-release.yml

@@ -57,12 +57,11 @@ jobs:
         id: branch
         run: |
           VERSION_NUMBER=${{ steps.version_number.outputs.RELEASE_VERSION }}
-          echo "BRANCH_NAME=v${VERSION_NUMBER%.*}" >> $GITHUB_ENV
+          echo "BRANCH_NAME=v${VERSION_NUMBER%.*}" >> $GITHUB_OUTPUT
 
       - name: Checkout Repo
         uses: actions/checkout@v4
         with:
-          repository: 'opencost/opencost'
           ref: '${{ steps.branch.outputs.BRANCH_NAME }}'
           path: ./opencost
 
@@ -76,7 +75,7 @@ jobs:
       # Login against a Docker registry except on PR
       # https://github.com/docker/login-action
       - name: Log into registry ${{ env.REGISTRY }}
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.actor }}
@@ -85,18 +84,9 @@ jobs:
       - name: Set OpenCost Image Tags
         id: tags
         run: |
-          echo "IMAGE_TAG=ghcr.io/opencost/opencost:${{ steps.sha.outputs.OC_SHORTHASH }}" >> $GITHUB_OUTPUT
-          echo "IMAGE_TAG_LATEST=ghcr.io/opencost/opencost:latest" >> $GITHUB_OUTPUT
-          echo "IMAGE_TAG_VERSION=ghcr.io/opencost/opencost:${{ steps.version_number.outputs.RELEASE_VERSION }}" >> $GITHUB_OUTPUT
-          echo "IMAGE_TAG_UI=ghcr.io/opencost/opencost-ui:${{ steps.sha.outputs.OC_SHORTHASH }}" >> $GITHUB_OUTPUT
-          echo "IMAGE_TAG_UI_LATEST=ghcr.io/opencost/opencost-ui:latest" >> $GITHUB_OUTPUT
-          echo "IMAGE_TAG_UI_VERSION=ghcr.io/opencost/opencost-ui:${{ steps.version_number.outputs.RELEASE_VERSION }}" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_QUAY=quay.io/kubecost1/kubecost-cost-model:${{ steps.sha.outputs.OC_SHORTHASH }}" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_LATEST_QUAY=quay.io/kubecost1/kubecost-cost-model:latest" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_VERSION_QUAY=quay.io/kubecost1/kubecost-cost-model:prod-${{ steps.version_number.outputs.RELEASE_VERSION }}" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_UI_QUAY=quay.io/kubecost1/opencost-ui:${{ steps.sha.outputs.OC_SHORTHASH }}" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_UI_LATEST_QUAY=quay.io/kubecost1/opencost-ui:latest" >> $GITHUB_OUTPUT
-        #  echo "IMAGE_TAG_UI_VERSION_QUAY=quay.io/kubecost1/opencost-ui:prod-${{ inputs.release_version }}" >> $GITHUB_OUTPUT
+          echo "IMAGE_TAG=ghcr.io/${{ github.repository_owner }}/opencost:${{ steps.sha.outputs.OC_SHORTHASH }}" >> $GITHUB_OUTPUT
+          echo "IMAGE_TAG_LATEST=ghcr.io/${{ github.repository_owner }}/opencost:latest" >> $GITHUB_OUTPUT
+          echo "IMAGE_TAG_VERSION=ghcr.io/${{ github.repository_owner }}/opencost:${{ steps.version_number.outputs.RELEASE_VERSION }}" >> $GITHUB_OUTPUT
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -109,10 +99,10 @@ jobs:
           go-version: 'stable'
 
       - name: Set up just
-        uses: extractions/setup-just@v1
+        uses: extractions/setup-just@v2
 
       - name: Install crane
-        uses: imjasonh/setup-crane@v0.3
+        uses: imjasonh/setup-crane@v0.4
 
       ## Install manifest-tool, which is required to combine multi-arch images
       ## https://github.com/estesp/manifest-tool
@@ -125,29 +115,10 @@ jobs:
           cp manifest-tool-linux-amd64 manifest-tool
           echo "$(pwd)" >> $GITHUB_PATH
 
-    #  - name: Login to Quay
-    #    uses: docker/login-action@v3
-    #    with:
-    #      registry: quay.io
-    #      username: ${{ secrets.QUAY_USERNAME }}
-    #      password: ${{ secrets.QUAY_PASSWORD }}
-
       - name: Build and push (multiarch) OpenCost
         working-directory: ./opencost
         run: |
           just build '${{ steps.tags.outputs.IMAGE_TAG }}' '${{ steps.version_number.outputs.RELEASE_VERSION }}'
           crane copy '${{ steps.tags.outputs.IMAGE_TAG }}' '${{ steps.tags.outputs.IMAGE_TAG_LATEST }}'
           crane copy '${{ steps.tags.outputs.IMAGE_TAG }}' '${{ steps.tags.outputs.IMAGE_TAG_VERSION }}'
-        #  crane copy '${{ steps.tags.outputs.IMAGE_TAG }}' '${steps.tags.outputs.IMAGE_TAG_QUAY}'
-        #  crane copy '${{ steps.tags.outputs.IMAGE_TAG }}' '${steps.tags.outputs.IMAGE_TAG_LATEST_QUAY}'
-        #  crane copy '${{ steps.tags.outputs.IMAGE_TAG }}' '${steps.tags.outputs.IMAGE_TAG_VERSION_QUAY}'
 
-      - name: Build and push (multiarch) OpenCost UI
-        working-directory: ./opencost/ui
-        run: |
-          just build '${{ steps.tags.outputs.IMAGE_TAG_UI }}' '${{ steps.version_number.outputs.RELEASE_VERSION }}'
-          crane copy '${{ steps.tags.outputs.IMAGE_TAG_UI }}' '${{ steps.tags.outputs.IMAGE_TAG_UI_LATEST }}'
-          crane copy '${{ steps.tags.outputs.IMAGE_TAG_UI }}' '${{ steps.tags.outputs.IMAGE_TAG_UI_VERSION }}'
-        #  crane copy '${steps.tags.outputs.IMAGE_TAG_UI}' '${steps.tags.outputs.IMAGE_TAG_UI_QUAY}'
-        #  crane copy '${steps.tags.outputs.IMAGE_TAG_UI}' '${steps.tags.outputs.IMAGE_TAG_UI_LATEST_QUAY}'
-        #  crane copy '${steps.tags.outputs.IMAGE_TAG_UI}' '${steps.tags.outputs.IMAGE_TAG_UI_VERSION_QUAY}'

+ 6 - 42
.github/workflows/build-test.yaml

@@ -21,7 +21,7 @@ jobs:
         uses: actions/setup-go@v5
         with:
           go-version: 'stable'
-          
+
       -
         name: Install protoc
         uses: arduino/setup-protoc@v3
@@ -29,12 +29,12 @@ jobs:
           version: '25.3'
       -
         name: Install just
-        uses: extractions/setup-just@v1
+        uses: extractions/setup-just@v2
 
       - name: install protobuf-go
         run: |
           go install github.com/golang/protobuf/protoc-gen-go@latest
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0
           which protoc-gen-go-grpc
       -
         name: Validate
@@ -49,7 +49,7 @@ jobs:
 
       -
         name: Install just
-        uses: extractions/setup-just@v1
+        uses: extractions/setup-just@v2
 
       -
         name: Install Go
@@ -87,45 +87,9 @@ jobs:
       - name: Upload code coverage
         uses: actions/upload-artifact@v4
         with:
-          name: oc-code-coverage
+          name: code-coverage
           path: |
            coverage.out
            pr_num.txt
            base.txt
-           head.txt
-
-  frontend:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          path: ./
-
-      -
-        name: Install just
-        uses: extractions/setup-just@v1
-
-      -
-        name: Install node
-        uses: actions/setup-node@v4
-        with:
-          node-version: '18.3.0'
-
-      - name: Get npm cache directory
-        id: npm-cache-dir
-        shell: bash
-        run: echo "dir=$(npm config get cache)" >> ${GITHUB_OUTPUT}
-
-      - uses: actions/cache@v4
-        id: npm-cache # use this to check for `cache-hit` ==> if: steps.npm-cache.outputs.cache-hit != 'true'
-        with:
-          path: ${{ steps.npm-cache-dir.outputs.dir }}
-          key: ${{ runner.os }}-node-${{ hashFiles('./ui/**/package-lock.json') }}
-          restore-keys: |
-            ${{ runner.os }}-node-
-
-      -
-        name: Build
-        working-directory: ./ui
-        run: |
-          just build-local
+           head.txt

+ 94 - 33
.github/workflows/sonar.yaml

@@ -1,4 +1,4 @@
-name: Sonar Code Coverage Upload
+name: Sonar
 on:
   workflow_run:
     workflows: ["Build/Test"]
@@ -8,43 +8,74 @@ jobs:
     name: Sonar
     runs-on: ubuntu-latest
     if: github.event.workflow_run.conclusion == 'success'
+    permissions:
+      checks: write
+      contents: read
+      actions: read
     steps:
+      - uses: LouisBrunner/checks-action@v2.0.0
+        if: always()
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          name: Quality Gate
+          status: in_progress
+          sha: ${{ github.event.workflow_run.head_sha }}
       - uses: actions/checkout@v4
         with:
           repository: ${{ github.event.workflow_run.head_repository.full_name }}
           ref: ${{ github.event.workflow_run.head_branch }}
           fetch-depth: 0
-      - name: 'Download code coverage'
-        uses: actions/github-script@v7
+      - name: Download coverage artifacts
+        uses: actions/download-artifact@v4
         with:
-          script: |
-            let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
-               owner: context.repo.owner,
-               repo: context.repo.repo,
-               run_id: context.payload.workflow_run.id,
-            });
-            let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
-              return artifact.name == "oc-code-coverage"
-            })[0];
-            let download = await github.rest.actions.downloadArtifact({
-               owner: context.repo.owner,
-               repo: context.repo.repo,
-               artifact_id: matchArtifact.id,
-               archive_format: 'zip',
-            });
-            let fs = require('fs');
-            fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/oc-code-coverage.zip`, Buffer.from(download.data));
-      - name: 'Unzip code coverage'
-        run: unzip oc-code-coverage.zip -d coverage
-      - name: set env vars 
+          name: code-coverage
+          run-id: ${{ github.event.workflow_run.id }}
+          github-token: ${{ github.token }}
+          path: pr-artifact
+      - name: Validate Coverage Vars
+        id: validate-vars
+        if: github.event.workflow_run.head_branch != 'develop'
+        shell: bash
+        run: | 
+          # check the PR number
+          pr_content=$(cat pr-artifact/pr_num.txt | tr -d '\n' | tr -d ' ')
+
+          # Check if the content matches a single number
+          if [[ "$pr_content" =~ ^[0-9]+$ ]]; then
+            echo "The file 'pr_num.txt' contains a single number: $pr_content"
+          else
+            echo "The file 'pr_num.txt' does not contain a single number."
+            exit 1
+          fi
+
+          base_content=$(cat pr-artifact/base.txt | tr -d '\n' | tr -d ' ')
+          if git check-ref-format --allow-onelevel "$base_content"; then
+            echo "The file 'base.txt' contains a valid git ref: $base_content"
+          else
+            echo "The file 'base.txt' does not contain a valid git ref: $base_content"
+            exit 1
+          fi
+
+          head_content=$(cat pr-artifact/head.txt | tr -d '\n' | tr -d ' ')
+          if git check-ref-format --allow-onelevel "$head_content"; then
+            echo "The file 'head.txt' contains a valid git ref: $head_content"
+          else
+            echo "The file 'head.txt' does not contain a valid git ref: $head_content"
+            exit 1
+          fi
+
+      - name: set vars 
+        id: set-vars
         run: | 
-          echo "SONAR_PR_NUM=$(cat coverage/pr_num.txt)" >> $GITHUB_ENV
-          echo "SONAR_BASE=$(cat coverage/base.txt)" >> $GITHUB_ENV
-          echo "SONAR_HEAD=$(cat coverage/head.txt)" >> $GITHUB_ENV
+          echo "SONAR_PR_NUM=$(cat pr-artifact/pr_num.txt | tr -d '\n' | tr -d ' ')" >> $GITHUB_OUTPUT
+          echo "SONAR_BASE=$(cat pr-artifact/base.txt | tr -d '\n' | tr -d ' ')" >> $GITHUB_OUTPUT
+          echo "SONAR_HEAD=$(cat pr-artifact/head.txt | tr -d '\n' | tr -d ' ')" >> $GITHUB_OUTPUT
+          # move coverage file to root where sonar properties file is expecting it
+          cp pr-artifact/coverage.out coverage.out
       # on develop branch, only run a baseline scan
       - name: SonarCloud Scan (Baseline)
         uses: sonarsource/sonarcloud-github-action@master
-        if: env.SONAR_HEAD == 'develop'
+        if: github.event.workflow_run.head_branch == 'develop'
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
@@ -54,18 +85,48 @@ jobs:
             -Dsonar.projectKey=opencost_opencost
             -Dsonar.organization=opencost
             -Dsonar.branch.name=develop
-            -Dsonar.branch.target=develop
       - name: SonarCloud Scan (PR)
         uses: sonarsource/sonarcloud-github-action@master
-        if: env.SONAR_HEAD != 'develop'
+        if: github.event.workflow_run.head_branch != 'develop'
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
         with:
           args: >
             -Dsonar.scm.revision=${{ github.event.workflow_run.head_sha }}
-            -Dsonar.pullrequest.key=${{ env.SONAR_PR_NUM }}
-            -Dsonar.pullrequest.branch=${{ env.SONAR_HEAD }}
-            -Dsonar.pullrequest.base=${{ env.SONAR_BASE }}
+            -Dsonar.pullrequest.key=${{ steps.set-vars.outputs.SONAR_PR_NUM }}
+            -Dsonar.pullrequest.branch="${{ steps.set-vars.outputs.SONAR_HEAD }}"
+            -Dsonar.pullrequest.base="${{ steps.set-vars.outputs.SONAR_BASE }}"
             -Dsonar.projectKey=opencost_opencost
-            -Dsonar.organization=opencost
+            -Dsonar.organization=opencost
+      - name: SonarQube Quality Gate check
+        id: sonarqube-quality-gate-check
+        continue-on-error: true
+        uses: sonarsource/sonarqube-quality-gate-action@master
+        # fail step after specific time.
+        timeout-minutes: 5
+        env:
+             SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+             SONAR_HOST_URL: "https://sonarcloud.io"
+      - uses: LouisBrunner/checks-action@v2.0.0
+        id: fail-quality-gate
+        if: steps.sonarqube-quality-gate-check.outputs.quality-gate-status != 'PASSED'
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          name: Quality Gate
+          status: completed
+          conclusion: failure
+          sha: ${{ github.event.workflow_run.head_sha }}
+          output: |
+            {"summary":"Failed - see https://sonarcloud.io/summary/new_code?id=opencost_opencost&pullRequest=${{ steps.set-vars.outputs.SONAR_PR_NUM }}","text_description":"Quality Gate failed. Check the [SonarCloud Dashboard](https://sonarcloud.io/dashboard?id=opencost_opencost&pullRequest=${{ steps.set-vars.outputs.SONAR_PR_NUM }}) for more details."}
+      - uses: LouisBrunner/checks-action@v2.0.0
+        id: pass-quality-gate
+        if: steps.sonarqube-quality-gate-check.outputs.quality-gate-status == 'PASSED'
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          name: Quality Gate
+          status: completed
+          conclusion: success
+          sha: ${{ github.event.workflow_run.head_sha }}
+          output: |
+            {"summary":"Passed","text_description":"Quality Gate passed. Check the [SonarCloud Dashboard](https://sonarcloud.io/dashboard?id=opencost_opencost&pullRequest=${{ steps.set-vars.outputs.SONAR_PR_NUM }}) for more details."} 

+ 2 - 5
.gitignore

@@ -2,11 +2,6 @@
 .idea
 *.iml
 
-ui/.parcel-cache
-ui/.cache
-ui/dist
-ui/.env
-ui/node_modules/
 cmd/costmodel/costmodel
 cmd/costmodel/costmodel-amd64
 cmd/costmodel/costmodel-arm64
@@ -14,6 +9,8 @@ cmd/costmodel/costmodel-tilt
 
 pkg/cloud/azureorphan_test.go
 
+pkg/cloud/oracle/cloud-integration.json
+
 # VS Code
 .vscode
 

+ 5 - 3
MAINTAINERS.md

@@ -7,15 +7,17 @@ Official list of [OpenCost Maintainers](https://github.com/orgs/opencost/teams/o
 | Maintainer | GitHub ID | Affiliation | Email |
 | --------------- | --------- | ----------- | ----------- |
 | Ajay Tripathy | @AjayTripathy | Kubecost | <Ajay@kubecost.com> |
-| Artur Khantimirov | @r2k1 | Microsoft | |
+| Alex Meijer | @ameijer | Kubecost | <ameijer@kubecost.com> |
+| Artur Khantimirov | @r2k1 | Microsoft | <akhantimirov@microsoft.com> |
 | Matt Bolt | @​mbolt35 | Kubecost | <matt@kubecost.com> |
-| Matt Ray | @mattray | Kubecost | <mattray@kubecost.com> |
 | Niko Kovacevic | @nikovacevic | Kubecost | <niko@kubecost.com> |
 | Sean Holcomb | @Sean-Holcomb | Kubecost | <Sean@kubecost.com> |
 | Thomas Evans | @teevans | Kubecost | <thomas@kubecost.com> |
 
 ## Opencost Emeritus Committers
 We would like to acknowledge previous committers and their huge contributions to our collective success:
+
 | Maintainer | GitHub ID | Affiliation | Email |
 | --------------- | --------- | ----------- | ----------- |
-| Michael Dresser | @michaelmdresser | Kubecost | <michaelmdresser@gmail.com> |
+| Michael Dresser | @michaelmdresser | Kubecost (former) | <michaelmdresser@gmail.com> |
+| Matt Ray | @mattray | Kubecost (former) | <mattray@kubecost.com> |

+ 4 - 2
README.md

@@ -1,7 +1,7 @@
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
 [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/6219/badge)](https://www.bestpractices.dev/projects/6219)
 
-<img src="./opencost-header.png"/>
+![](./opencost-header.png)
 
 # OpenCost — your favorite open source cost monitoring tool for Kubernetes and cloud spend
 
@@ -9,7 +9,7 @@ OpenCost give teams visibility into current and historical Kubernetes and cloud
 These models provide cost transparency in Kubernetes environments that support multiple applications, teams, departments, etc.
 It also provides visibility into the cloud costs across multiple providers.
 
-OpenCost was originally developed and open sourced by [Kubecost](https://kubecost.com). This project combines a [specification](/spec/) as well as a Golang implementation of these detailed requirements.
+OpenCost was originally developed and open sourced by [Kubecost](https://kubecost.com). This project combines a [specification](/spec/) as well as a Golang implementation of these detailed requirements. The web UI is available in the [opencost/opencost-ui](https://github.com/opencost/opencost-ui) repository.
 
 [![OpenCost UI Walkthrough](./ui/src/thumbnail.png)](https://youtu.be/lCP4Ci9Kcdg)
 *OpenCost UI Walkthrough*
@@ -22,6 +22,8 @@ To see the full functionality of OpenCost you can view [OpenCost features](https
 - Supports on-prem k8s clusters with custom CSV pricing
 - Allocation for in-cluster K8s resources like CPU, GPU, memory, and persistent volumes
 - Easily export pricing data to Prometheus with /metrics endpoint ([learn more](https://www.opencost.io/docs/installation/prometheus))
+- Carbon costs for cloud resources
+- Support for external costs like Datadog through [OpenCost Plugins](https://github.com/opencost/opencost-plugins)
 - Free and open source distribution ([Apache2 license](LICENSE))
 
 ## Getting Started

+ 1 - 1
core/go.mod

@@ -1,6 +1,6 @@
 module github.com/opencost/opencost/core
 
-go 1.21.0
+go 1.22.0
 
 require (
 	github.com/davecgh/go-spew v1.1.1

+ 11 - 7
core/pkg/filter/cloudcost/fields.go

@@ -8,11 +8,15 @@ import (
 type CloudCostField string
 
 const (
-	FieldInvoiceEntityID CloudCostField = CloudCostField(fieldstrings.FieldInvoiceEntityID)
-	FieldAccountID       CloudCostField = CloudCostField(fieldstrings.FieldAccountID)
-	FieldProvider        CloudCostField = CloudCostField(fieldstrings.FieldProvider)
-	FieldProviderID      CloudCostField = CloudCostField(fieldstrings.FieldProviderID)
-	FieldCategory        CloudCostField = CloudCostField(fieldstrings.FieldCategory)
-	FieldService         CloudCostField = CloudCostField(fieldstrings.FieldService)
-	FieldLabel           CloudCostField = CloudCostField(fieldstrings.FieldLabel)
+	FieldInvoiceEntityID   CloudCostField = CloudCostField(fieldstrings.FieldInvoiceEntityID)
+	FieldInvoiceEntityName CloudCostField = CloudCostField(fieldstrings.FieldInvoiceEntityName)
+	FieldAccountID         CloudCostField = CloudCostField(fieldstrings.FieldAccountID)
+	FieldAccountName       CloudCostField = CloudCostField(fieldstrings.FieldAccountName)
+	FieldRegionID          CloudCostField = CloudCostField(fieldstrings.FieldRegionID)
+	FieldAvailabilityZone  CloudCostField = CloudCostField(fieldstrings.FieldAvailabilityZone)
+	FieldProvider          CloudCostField = CloudCostField(fieldstrings.FieldProvider)
+	FieldProviderID        CloudCostField = CloudCostField(fieldstrings.FieldProviderID)
+	FieldCategory          CloudCostField = CloudCostField(fieldstrings.FieldCategory)
+	FieldService           CloudCostField = CloudCostField(fieldstrings.FieldService)
+	FieldLabel             CloudCostField = CloudCostField(fieldstrings.FieldLabel)
 )

+ 4 - 0
core/pkg/filter/cloudcost/parser.go

@@ -6,7 +6,11 @@ import "github.com/opencost/opencost/core/pkg/filter/ast"
 // valid left-hand comparators
 var cloudCostFilterFields []*ast.Field = []*ast.Field{
 	ast.NewField(FieldInvoiceEntityID),
+	ast.NewField(FieldInvoiceEntityName),
 	ast.NewField(FieldAccountID),
+	ast.NewField(FieldAccountName),
+	ast.NewField(FieldRegionID),
+	ast.NewField(FieldAvailabilityZone),
 	ast.NewField(FieldProvider),
 	ast.NewField(FieldProviderID),
 	ast.NewField(FieldCategory),

+ 76 - 0
core/pkg/filter/cloudcost/parser_test.go

@@ -0,0 +1,76 @@
+package cloudcost
+
+import (
+	"testing"
+)
+
+func TestNewCloudCostFilterParserParse(t *testing.T) {
+	parser := NewCloudCostFilterParser()
+	testCases := map[string]struct {
+		input       string
+		expectError bool
+	}{
+		"Empty": {
+			input:       ``,
+			expectError: false,
+		},
+		"InvoiceEntityID": {
+			input:       `invoiceEntityID: "123"`,
+			expectError: false,
+		},
+		"InvoiceEntityName": {
+			input:       `invoiceEntityName: "foo"`,
+			expectError: false,
+		},
+		"AccountID": {
+			input:       `accountID: "123"`,
+			expectError: false,
+		},
+		"AccountName": {
+			input:       `accountName: "foo"`,
+			expectError: false,
+		},
+		"RegionID": {
+			input:       `regionID: "us-west-1"`,
+			expectError: false,
+		},
+		"AvailabilityZone": {
+			input:       `availabilityZone: "us-west-1a"`,
+			expectError: false,
+		},
+		"Provider": {
+			input:       `provider: "aws"`,
+			expectError: false,
+		},
+		"ProviderID": {
+			input:       `providerID: "i-123"`,
+			expectError: false,
+		},
+		"Category": {
+			input:       `category: "compute"`,
+			expectError: false,
+		},
+		"Service": {
+			input:       `service: "ec2"`,
+			expectError: false,
+		},
+		"Label": {
+			input:       `label[foo]:"bar"`,
+			expectError: false,
+		},
+		"InvalidField": {
+			input:       `foo: "bar"`,
+			expectError: true,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			_, err := parser.Parse(tc.input)
+			if (err != nil) != tc.expectError {
+				t.Errorf("expected error: %v, got: %v", tc.expectError, err)
+			}
+		})
+	}
+
+}

+ 6 - 2
core/pkg/filter/fieldstrings/fieldstrings.go

@@ -24,8 +24,12 @@ const (
 	FieldAccount    string = "account"
 	FieldService    string = "service"
 
-	FieldInvoiceEntityID string = "invoiceEntityID"
-	FieldAccountID       string = "accountID"
+	FieldInvoiceEntityID   string = "invoiceEntityID"
+	FieldInvoiceEntityName string = "invoiceEntityName"
+	FieldAccountID         string = "accountID"
+	FieldAccountName       string = "accountName"
+	FieldRegionID          string = "regionID"
+	FieldAvailabilityZone  string = "availabilityZone"
 
 	AliasDepartment  string = "department"
 	AliasEnvironment string = "environment"

+ 11 - 3
core/pkg/log/log.go

@@ -49,6 +49,14 @@ func InitLogging(showLogLevelSetMessage bool) {
 
 }
 
+func GetLogger() *zerolog.Logger {
+	return &log.Logger
+}
+
+func SetLogger(l *zerolog.Logger) {
+	log.Logger = *l
+}
+
 func GetLogLevel() string {
 	return zerolog.GlobalLevel().String()
 }
@@ -76,7 +84,7 @@ func DedupedErrorf(logTypeLimit int, format string, a ...interface{}) {
 		Errorf(format, a...)
 	} else if timesLogged == logTypeLimit {
 		Errorf(format, a...)
-		Infof("%s logged %d times: suppressing future logs", format, logTypeLimit)
+		Infof("%s logged %d times: suppressing future logs", fmt.Sprintf(format, a...), logTypeLimit)
 	}
 }
 
@@ -91,7 +99,7 @@ func DedupedWarningf(logTypeLimit int, format string, a ...interface{}) {
 		Warnf(format, a...)
 	} else if timesLogged == logTypeLimit {
 		Warnf(format, a...)
-		Infof("%s logged %d times: suppressing future logs", format, logTypeLimit)
+		Infof("%s logged %d times: suppressing future logs", fmt.Sprintf(format, a...), logTypeLimit)
 	}
 }
 
@@ -110,7 +118,7 @@ func DedupedInfof(logTypeLimit int, format string, a ...interface{}) {
 		Infof(format, a...)
 	} else if timesLogged == logTypeLimit {
 		Infof(format, a...)
-		Infof("%s logged %d times: suppressing future logs", format, logTypeLimit)
+		Infof("%s logged %d times: suppressing future logs", fmt.Sprintf(format, a...), logTypeLimit)
 	}
 }
 

+ 64 - 0
core/pkg/log/log_test.go

@@ -0,0 +1,64 @@
+package log
+
+import (
+	"bytes"
+	"encoding/json"
+	"strings"
+	"testing"
+
+	"github.com/rs/zerolog"
+)
+
+func TestGetLogger(t *testing.T) {
+	initialLogger := GetLogger()
+	if initialLogger == nil {
+		t.Error("GetLogger() returned nil")
+	}
+
+	secondLogger := GetLogger()
+	if initialLogger != secondLogger {
+		t.Error("GetLogger() returned different loggers on subsequent calls")
+	}
+}
+
+func TestSetLogger(t *testing.T) {
+	var buf bytes.Buffer
+	newLogger := zerolog.New(&buf).With().Str("test", "value").Logger()
+	SetLogger(&newLogger)
+
+	// Log a message using the global logger
+	Infof("Test message")
+
+	// Parse the logged message
+	loggedData := parseLogMessage(t, buf.String())
+
+	// Check if the "test" field is present in the logged message
+	if value, exists := loggedData["test"]; !exists || value != "value" {
+		t.Error("SetLogger() did not set the logger with expected context")
+	}
+}
+
+func TestLoggerConsistency(t *testing.T) {
+	var buf bytes.Buffer
+	newLogger := zerolog.New(&buf).With().Str("test", "consistency").Logger()
+	SetLogger(&newLogger)
+
+	// Log a message using the global logger
+	Infof("Consistency test message")
+
+	// Parse the logged message
+	loggedData := parseLogMessage(t, buf.String())
+
+	// Check if the "test" field is present in the logged message
+	if value, exists := loggedData["test"]; !exists || value != "consistency" {
+		t.Error("Logger inconsistency: Updated logger does not have expected context")
+	}
+}
+
+func parseLogMessage(t *testing.T, logMessage string) map[string]interface{} {
+	var loggedData map[string]interface{}
+	if err := json.Unmarshal([]byte(strings.TrimSpace(logMessage)), &loggedData); err != nil {
+		t.Fatalf("Failed to parse logged message: %v", err)
+	}
+	return loggedData
+}

+ 74 - 3
core/pkg/opencost/allocation.go

@@ -63,9 +63,11 @@ type Allocation struct {
 	CPUCoreUsageAverage        float64               `json:"cpuCoreUsageAverage"`
 	CPUCost                    float64               `json:"cpuCost"`
 	CPUCostAdjustment          float64               `json:"cpuCostAdjustment"`
+	CPUCostIdle                float64               `json:"cpuCostIdle"` //@bingen:field[ignore]
 	GPUHours                   float64               `json:"gpuHours"`
 	GPUCost                    float64               `json:"gpuCost"`
 	GPUCostAdjustment          float64               `json:"gpuCostAdjustment"`
+	GPUCostIdle                float64               `json:"gpuCostIdle"` //@bingen:field[ignore]
 	NetworkTransferBytes       float64               `json:"networkTransferBytes"`
 	NetworkReceiveBytes        float64               `json:"networkReceiveBytes"`
 	NetworkCost                float64               `json:"networkCost"`
@@ -82,6 +84,7 @@ type Allocation struct {
 	RAMBytesUsageAverage       float64               `json:"ramByteUsageAverage"`
 	RAMCost                    float64               `json:"ramCost"`
 	RAMCostAdjustment          float64               `json:"ramCostAdjustment"`
+	RAMCostIdle                float64               `json:"ramCostIdle"` //@bingen:field[ignore]
 	SharedCost                 float64               `json:"sharedCost"`
 	ExternalCost               float64               `json:"externalCost"`
 	// RawAllocationOnly is a pointer so if it is not present it will be
@@ -97,7 +100,9 @@ type Allocation struct {
 	// UnmountedPVCost is used to track how much of the cost in PVs is for an
 	// unmounted PV. It is not additive of PVCost() and need not be sent in API
 	// responses.
-	UnmountedPVCost float64 `json:"-"` //@bingen:field[ignore]
+	UnmountedPVCost   float64 `json:"-"`                 //@bingen:field[ignore]
+	GPURequestAverage float64 `json:"gpuRequestAverage"` //@bingen:field[version=22]
+	GPUUsageAverage   float64 `json:"gpuUsageAverage"`   //@bingen:field[version=22]
 }
 
 type LbAllocations map[string]*LbAllocation
@@ -667,9 +672,13 @@ func (a *Allocation) Clone() *Allocation {
 		CPUCoreRequestAverage:          a.CPUCoreRequestAverage,
 		CPUCoreUsageAverage:            a.CPUCoreUsageAverage,
 		CPUCost:                        a.CPUCost,
+		CPUCostIdle:                    a.CPUCostIdle,
 		CPUCostAdjustment:              a.CPUCostAdjustment,
 		GPUHours:                       a.GPUHours,
+		GPURequestAverage:              a.GPURequestAverage,
+		GPUUsageAverage:                a.GPUUsageAverage,
 		GPUCost:                        a.GPUCost,
+		GPUCostIdle:                    a.GPUCostIdle,
 		GPUCostAdjustment:              a.GPUCostAdjustment,
 		NetworkTransferBytes:           a.NetworkTransferBytes,
 		NetworkReceiveBytes:            a.NetworkReceiveBytes,
@@ -686,6 +695,7 @@ func (a *Allocation) Clone() *Allocation {
 		RAMBytesRequestAverage:         a.RAMBytesRequestAverage,
 		RAMBytesUsageAverage:           a.RAMBytesUsageAverage,
 		RAMCost:                        a.RAMCost,
+		RAMCostIdle:                    a.RAMCostIdle,
 		RAMCostAdjustment:              a.RAMCostAdjustment,
 		SharedCost:                     a.SharedCost,
 		ExternalCost:                   a.ExternalCost,
@@ -727,6 +737,9 @@ func (a *Allocation) Equal(that *Allocation) bool {
 	if !util.IsApproximately(a.CPUCost, that.CPUCost) {
 		return false
 	}
+	if !util.IsApproximately(a.CPUCostIdle, that.CPUCostIdle) {
+		return false
+	}
 	if !util.IsApproximately(a.CPUCostAdjustment, that.CPUCostAdjustment) {
 		return false
 	}
@@ -736,6 +749,9 @@ func (a *Allocation) Equal(that *Allocation) bool {
 	if !util.IsApproximately(a.GPUCost, that.GPUCost) {
 		return false
 	}
+	if !util.IsApproximately(a.GPUCostIdle, that.GPUCostIdle) {
+		return false
+	}
 	if !util.IsApproximately(a.GPUCostAdjustment, that.GPUCostAdjustment) {
 		return false
 	}
@@ -775,6 +791,9 @@ func (a *Allocation) Equal(that *Allocation) bool {
 	if !util.IsApproximately(a.RAMCost, that.RAMCost) {
 		return false
 	}
+	if !util.IsApproximately(a.RAMCostIdle, that.RAMCostIdle) {
+		return false
+	}
 	if !util.IsApproximately(a.RAMCostAdjustment, that.RAMCostAdjustment) {
 		return false
 	}
@@ -916,7 +935,7 @@ func (a *Allocation) CPUEfficiency() float64 {
 		return a.CPUCoreUsageAverage / a.CPUCoreRequestAverage
 	}
 
-	if a.CPUCoreUsageAverage == 0.0 || a.CPUCost == 0.0 {
+	if a.CPUCoreUsageAverage == 0.0 || a.CPUTotalCost() == 0.0 {
 		return 0.0
 	}
 
@@ -935,7 +954,26 @@ func (a *Allocation) RAMEfficiency() float64 {
 		return a.RAMBytesUsageAverage / a.RAMBytesRequestAverage
 	}
 
-	if a.RAMBytesUsageAverage == 0.0 || a.RAMCost == 0.0 {
+	if a.RAMBytesUsageAverage == 0.0 || a.RAMTotalCost() == 0.0 {
+		return 0.0
+	}
+
+	return 1.0
+}
+
+// GPUEfficiency is the ratio of usage to request. Note that, without the NVIDIA
+// DCGM exporter providing Prometheus with usage metrics, this will always be
+// zero, as GPUUsageAverage will be zero (the default value).
+func (a *Allocation) GPUEfficiency() float64 {
+	if a == nil {
+		return 0.0
+	}
+
+	if a.GPURequestAverage > 0 && a.GPUUsageAverage > 0 {
+		return a.GPUUsageAverage / a.GPURequestAverage
+	}
+
+	if a.GPUUsageAverage == 0.0 || a.GPUTotalCost() == 0.0 {
 		return 0.0
 	}
 
@@ -1183,6 +1221,12 @@ func (a *Allocation) add(that *Allocation) {
 	ramUseByteMins := a.RAMBytesUsageAverage * a.Minutes()
 	ramUseByteMins += that.RAMBytesUsageAverage * that.Minutes()
 
+	gpuReqMins := a.GPURequestAverage * a.Minutes()
+	gpuReqMins += that.GPURequestAverage * that.Minutes()
+
+	gpuUseMins := a.GPUUsageAverage * a.Minutes()
+	gpuUseMins += that.GPUUsageAverage * that.Minutes()
+
 	// Expand Start and End to be the "max" of among the given Allocations
 	if that.Start.Before(a.Start) {
 		a.Start = that.Start
@@ -1198,11 +1242,15 @@ func (a *Allocation) add(that *Allocation) {
 		a.CPUCoreUsageAverage = cpuUseCoreMins / a.Minutes()
 		a.RAMBytesRequestAverage = ramReqByteMins / a.Minutes()
 		a.RAMBytesUsageAverage = ramUseByteMins / a.Minutes()
+		a.GPURequestAverage = gpuReqMins / a.Minutes()
+		a.GPUUsageAverage = gpuUseMins / a.Minutes()
 	} else {
 		a.CPUCoreRequestAverage = 0.0
 		a.CPUCoreUsageAverage = 0.0
 		a.RAMBytesRequestAverage = 0.0
 		a.RAMBytesUsageAverage = 0.0
+		a.GPURequestAverage = 0.0
+		a.GPUUsageAverage = 0.0
 	}
 
 	// Sum all cumulative resource fields
@@ -1216,6 +1264,9 @@ func (a *Allocation) add(that *Allocation) {
 	a.CPUCost += that.CPUCost
 	a.GPUCost += that.GPUCost
 	a.RAMCost += that.RAMCost
+	a.CPUCostIdle += that.CPUCostIdle
+	a.GPUCostIdle += that.GPUCostIdle
+	a.RAMCostIdle += that.RAMCostIdle
 	a.NetworkCost += that.NetworkCost
 	a.NetworkCrossZoneCost += that.NetworkCrossZoneCost
 	a.NetworkCrossRegionCost += that.NetworkCrossRegionCost
@@ -2512,6 +2563,10 @@ func (a *Allocation) SanitizeNaN() {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for CPUCost: name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.CPUCost = 0
 	}
+	if math.IsNaN(a.CPUCostIdle) {
+		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for CPUCostIdle: name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
+		a.CPUCostIdle = 0
+	}
 	if math.IsNaN(a.CPUCoreRequestAverage) {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for CPUCoreRequestAverage: name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.CPUCoreRequestAverage = 0
@@ -2532,10 +2587,22 @@ func (a *Allocation) SanitizeNaN() {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPUHours name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.GPUHours = 0
 	}
+	if math.IsNaN(a.GPURequestAverage) {
+		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPURequestAverage name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
+		a.GPURequestAverage = 0
+	}
+	if math.IsNaN(a.GPUUsageAverage) {
+		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPUUsageAverage name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
+		a.GPUUsageAverage = 0
+	}
 	if math.IsNaN(a.GPUCost) {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPUCost name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.GPUCost = 0
 	}
+	if math.IsNaN(a.GPUCostIdle) {
+		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPUCostIdle name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
+		a.GPUCostIdle = 0
+	}
 	if math.IsNaN(a.GPUCostAdjustment) {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for GPUCostAdjustment name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.GPUCostAdjustment = 0
@@ -2596,6 +2663,10 @@ func (a *Allocation) SanitizeNaN() {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for RAMCost name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.RAMCost = 0
 	}
+	if math.IsNaN(a.RAMCostIdle) {
+		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for RAMCostIdle name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
+		a.RAMCostIdle = 0
+	}
 	if math.IsNaN(a.RAMCostAdjustment) {
 		log.DedupedWarningf(5, "Allocation: Unexpected NaN found for RAMCostAdjustment name:%s, window:%s, properties:%s", a.Name, a.Window.String(), a.Properties.String())
 		a.RAMCostAdjustment = 0

+ 12 - 0
core/pkg/opencost/allocation_json.go

@@ -23,11 +23,16 @@ type AllocationJSON struct {
 	CPUCoreHours                   *float64                        `json:"cpuCoreHours"`
 	CPUCost                        *float64                        `json:"cpuCost"`
 	CPUCostAdjustment              *float64                        `json:"cpuCostAdjustment"`
+	CPUCostIdle                    *float64                        `json:"cpuCostIdle"`
 	CPUEfficiency                  *float64                        `json:"cpuEfficiency"`
 	GPUCount                       *float64                        `json:"gpuCount"`
+	GPURequestAverage              *float64                        `json:"gpuRequestAverage"`
+	GPUUsageAverage                *float64                        `json:"gpuUsageAverage"`
 	GPUHours                       *float64                        `json:"gpuHours"`
 	GPUCost                        *float64                        `json:"gpuCost"`
 	GPUCostAdjustment              *float64                        `json:"gpuCostAdjustment"`
+	GPUCostIdle                    *float64                        `json:"gpuCostIdle"`
+	GPUEfficiency                  *float64                        `json:"gpuEfficiency"`
 	NetworkTransferBytes           *float64                        `json:"networkTransferBytes"`
 	NetworkReceiveBytes            *float64                        `json:"networkReceiveBytes"`
 	NetworkCost                    *float64                        `json:"networkCost"`
@@ -48,6 +53,7 @@ type AllocationJSON struct {
 	RAMByteHours                   *float64                        `json:"ramByteHours"`
 	RAMCost                        *float64                        `json:"ramCost"`
 	RAMCostAdjustment              *float64                        `json:"ramCostAdjustment"`
+	RAMCostIdle                    *float64                        `json:"ramCostIdle"`
 	RAMEfficiency                  *float64                        `json:"ramEfficiency"`
 	ExternalCost                   *float64                        `json:"externalCost"`
 	SharedCost                     *float64                        `json:"sharedCost"`
@@ -75,11 +81,16 @@ func (aj *AllocationJSON) BuildFromAllocation(a *Allocation) {
 	aj.CPUCoreHours = formatFloat64ForResponse(a.CPUCoreHours)
 	aj.CPUCost = formatFloat64ForResponse(a.CPUCost)
 	aj.CPUCostAdjustment = formatFloat64ForResponse(a.CPUCostAdjustment)
+	aj.CPUCostIdle = formatFloat64ForResponse(a.CPUCostIdle)
 	aj.CPUEfficiency = formatFloat64ForResponse(a.CPUEfficiency())
 	aj.GPUCount = formatFloat64ForResponse(a.GPUs())
+	aj.GPURequestAverage = formatFloat64ForResponse(a.GPURequestAverage)
+	aj.GPUUsageAverage = formatFloat64ForResponse(a.GPUUsageAverage)
 	aj.GPUHours = formatFloat64ForResponse(a.GPUHours)
 	aj.GPUCost = formatFloat64ForResponse(a.GPUCost)
 	aj.GPUCostAdjustment = formatFloat64ForResponse(a.GPUCostAdjustment)
+	aj.GPUCostIdle = formatFloat64ForResponse(a.GPUCostIdle)
+	aj.GPUEfficiency = formatFloat64ForResponse(a.GPUEfficiency())
 	aj.NetworkTransferBytes = formatFloat64ForResponse(a.NetworkTransferBytes)
 	aj.NetworkReceiveBytes = formatFloat64ForResponse(a.NetworkReceiveBytes)
 	aj.NetworkCost = formatFloat64ForResponse(a.NetworkCost)
@@ -100,6 +111,7 @@ func (aj *AllocationJSON) BuildFromAllocation(a *Allocation) {
 	aj.RAMByteHours = formatFloat64ForResponse(a.RAMByteHours)
 	aj.RAMCost = formatFloat64ForResponse(a.RAMCost)
 	aj.RAMCostAdjustment = formatFloat64ForResponse(a.RAMCostAdjustment)
+	aj.RAMCostIdle = formatFloat64ForResponse(a.RAMCostIdle)
 	aj.RAMEfficiency = formatFloat64ForResponse(a.RAMEfficiency())
 	aj.SharedCost = formatFloat64ForResponse(a.SharedCost)
 	aj.ExternalCost = formatFloat64ForResponse(a.ExternalCost)

+ 2 - 2
core/pkg/opencost/bingen.go

@@ -46,7 +46,7 @@ package opencost
 // @bingen:end
 
 // Allocation Version Set: Includes Allocation pipeline specific resources
-// @bingen:set[name=Allocation,version=21]
+// @bingen:set[name=Allocation,version=22]
 // @bingen:generate:Allocation
 // @bingen:generate[stringtable]:AllocationSet
 // @bingen:generate:AllocationSetRange
@@ -62,7 +62,7 @@ package opencost
 // @bingen:generate:LbAllocation
 // @bingen:end
 
-// @bingen:set[name=CloudCost,version=2]
+// @bingen:set[name=CloudCost,version=3]
 // @bingen:generate:CloudCost
 // @bingen:generate:CostMetric
 // @bingen:generate[stringtable]:CloudCostSet

+ 8 - 0
core/pkg/opencost/cloudcost.go

@@ -104,8 +104,16 @@ func (cc *CloudCost) StringProperty(prop string) (string, error) {
 	switch prop {
 	case CloudCostInvoiceEntityIDProp:
 		return cc.Properties.InvoiceEntityID, nil
+	case CloudCostInvoiceEntityNameProp:
+		return cc.Properties.InvoiceEntityName, nil
 	case CloudCostAccountIDProp:
 		return cc.Properties.AccountID, nil
+	case CloudCostAccountNameProp:
+		return cc.Properties.AccountName, nil
+	case CloudCostRegionIDProp:
+		return cc.Properties.RegionID, nil
+	case CloudCostAvailabilityZoneProp:
+		return cc.Properties.AvailabilityZone, nil
 	case CloudCostProviderProp:
 		return cc.Properties.Provider, nil
 	case CloudCostProviderIDProp:

+ 8 - 0
core/pkg/opencost/cloudcostmatcher.go

@@ -48,8 +48,16 @@ func cloudCostFieldMap(cc *CloudCost, identifier ast.Identifier) (string, error)
 	switch ccfilter.CloudCostField(identifier.Field.Name) {
 	case ccfilter.FieldInvoiceEntityID:
 		return cc.Properties.InvoiceEntityID, nil
+	case ccfilter.FieldInvoiceEntityName:
+		return cc.Properties.InvoiceEntityName, nil
 	case ccfilter.FieldAccountID:
 		return cc.Properties.AccountID, nil
+	case ccfilter.FieldAccountName:
+		return cc.Properties.AccountName, nil
+	case ccfilter.FieldRegionID:
+		return cc.Properties.RegionID, nil
+	case ccfilter.FieldAvailabilityZone:
+		return cc.Properties.AvailabilityZone, nil
 	case ccfilter.FieldProvider:
 		return cc.Properties.Provider, nil
 	case ccfilter.FieldProviderID:

+ 78 - 31
core/pkg/opencost/cloudcostprops.go

@@ -28,14 +28,18 @@ func (apt *CloudCostProperty) GetLabel() string {
 }
 
 const (
-	CloudCostInvoiceEntityIDProp string = "invoiceEntityID"
-	CloudCostAccountIDProp       string = "accountID"
-	CloudCostProviderProp        string = "provider"
-	CloudCostProviderIDProp      string = "providerID"
-	CloudCostCategoryProp        string = "category"
-	CloudCostServiceProp         string = "service"
-	CloudCostLabelProp           string = "label"
-	CloudCostLabelSetProp        string = "labelSet"
+	CloudCostInvoiceEntityIDProp   string = "invoiceEntityID"
+	CloudCostInvoiceEntityNameProp string = "invoiceEntityName"
+	CloudCostAccountIDProp         string = "accountID"
+	CloudCostAccountNameProp       string = "accountName"
+	CloudCostRegionIDProp          string = "regionID"
+	CloudCostAvailabilityZoneProp  string = "availabilityZone"
+	CloudCostProviderProp          string = "provider"
+	CloudCostProviderIDProp        string = "providerID"
+	CloudCostCategoryProp          string = "category"
+	CloudCostServiceProp           string = "service"
+	CloudCostLabelProp             string = "label"
+	CloudCostLabelSetProp          string = "labelSet"
 )
 
 func ParseCloudProperties(props []string) ([]CloudCostProperty, error) {
@@ -61,8 +65,16 @@ func ParseCloudCostProperty(text string) (CloudCostProperty, error) {
 	switch strings.TrimSpace(strings.ToLower(text)) {
 	case "invoiceentityid":
 		return CloudCostProperty(CloudCostInvoiceEntityIDProp), nil
+	case "invoiceentityname":
+		return CloudCostProperty(CloudCostInvoiceEntityNameProp), nil
 	case "accountid":
 		return CloudCostProperty(CloudCostAccountIDProp), nil
+	case "accountname":
+		return CloudCostProperty(CloudCostAccountNameProp), nil
+	case "regionid":
+		return CloudCostProperty(CloudCostRegionIDProp), nil
+	case "availabilityzone":
+		return CloudCostProperty(CloudCostAvailabilityZoneProp), nil
 	case "provider":
 		return CloudCostProperty(CloudCostProviderProp), nil
 	case "providerid":
@@ -152,20 +164,28 @@ func (ccl CloudCostLabels) Intersection(that CloudCostLabels) CloudCostLabels {
 }
 
 type CloudCostProperties struct {
-	ProviderID      string          `json:"providerID,omitempty"`
-	Provider        string          `json:"provider,omitempty"`
-	AccountID       string          `json:"accountID,omitempty"`
-	InvoiceEntityID string          `json:"invoiceEntityID,omitempty"`
-	Service         string          `json:"service,omitempty"`
-	Category        string          `json:"category,omitempty"`
-	Labels          CloudCostLabels `json:"labels,omitempty"`
+	ProviderID        string          `json:"providerID,omitempty"`
+	Provider          string          `json:"provider,omitempty"`
+	AccountID         string          `json:"accountID,omitempty"`
+	AccountName       string          `json:"accountName,omitempty"` // @bingen:field[version=3]
+	InvoiceEntityID   string          `json:"invoiceEntityID,omitempty"`
+	InvoiceEntityName string          `json:"invoiceEntityName,omitempty"` // @bingen:field[version=3]
+	RegionID          string          `json:"regionID,omitempty"`          // @bingen:field[version=3]
+	AvailabilityZone  string          `json:"availabilityZone,omitempty"`  // @bingen:field[version=3]
+	Service           string          `json:"service,omitempty"`
+	Category          string          `json:"category,omitempty"`
+	Labels            CloudCostLabels `json:"labels,omitempty"`
 }
 
 func (ccp *CloudCostProperties) Equal(that *CloudCostProperties) bool {
 	return ccp.ProviderID == that.ProviderID &&
 		ccp.Provider == that.Provider &&
 		ccp.AccountID == that.AccountID &&
+		ccp.AccountName == that.AccountName &&
 		ccp.InvoiceEntityID == that.InvoiceEntityID &&
+		ccp.InvoiceEntityName == that.InvoiceEntityName &&
+		ccp.RegionID == that.RegionID &&
+		ccp.AvailabilityZone == that.AvailabilityZone &&
 		ccp.Service == that.Service &&
 		ccp.Category == that.Category &&
 		ccp.Labels.Equal(that.Labels)
@@ -173,13 +193,17 @@ func (ccp *CloudCostProperties) Equal(that *CloudCostProperties) bool {
 
 func (ccp *CloudCostProperties) Clone() *CloudCostProperties {
 	return &CloudCostProperties{
-		ProviderID:      ccp.ProviderID,
-		Provider:        ccp.Provider,
-		AccountID:       ccp.AccountID,
-		InvoiceEntityID: ccp.InvoiceEntityID,
-		Service:         ccp.Service,
-		Category:        ccp.Category,
-		Labels:          ccp.Labels.Clone(),
+		ProviderID:        ccp.ProviderID,
+		Provider:          ccp.Provider,
+		AccountID:         ccp.AccountID,
+		AccountName:       ccp.AccountName,
+		InvoiceEntityID:   ccp.InvoiceEntityID,
+		InvoiceEntityName: ccp.InvoiceEntityName,
+		RegionID:          ccp.RegionID,
+		AvailabilityZone:  ccp.AvailabilityZone,
+		Service:           ccp.Service,
+		Category:          ccp.Category,
+		Labels:            ccp.Labels.Clone(),
 	}
 }
 
@@ -206,9 +230,21 @@ func (ccp *CloudCostProperties) Intersection(that *CloudCostProperties) *CloudCo
 	if ccp.AccountID == that.AccountID {
 		intersectionCCP.AccountID = ccp.AccountID
 	}
+	if ccp.AccountName == that.AccountName {
+		intersectionCCP.AccountName = ccp.AccountName
+	}
 	if ccp.InvoiceEntityID == that.InvoiceEntityID {
 		intersectionCCP.InvoiceEntityID = ccp.InvoiceEntityID
 	}
+	if ccp.InvoiceEntityName == that.InvoiceEntityName {
+		intersectionCCP.InvoiceEntityName = ccp.InvoiceEntityName
+	}
+	if ccp.RegionID == that.RegionID {
+		intersectionCCP.RegionID = ccp.RegionID
+	}
+	if ccp.AvailabilityZone == that.AvailabilityZone {
+		intersectionCCP.AvailabilityZone = ccp.AvailabilityZone
+	}
 	if ccp.Service == that.Service {
 		intersectionCCP.Service = ccp.Service
 	}
@@ -220,15 +256,6 @@ func (ccp *CloudCostProperties) Intersection(that *CloudCostProperties) *CloudCo
 	return intersectionCCP
 }
 
-var cloudCostDefaultKeyProperties = []string{
-	CloudCostProviderProp,
-	CloudCostInvoiceEntityIDProp,
-	CloudCostAccountIDProp,
-	CloudCostCategoryProp,
-	CloudCostServiceProp,
-	CloudCostProviderIDProp,
-}
-
 // GenerateKey takes a list of properties and creates a "/" seperated key based on the values of the requested properties.
 // Invalid values are ignored with a warning. A nil input returns the default key, while an empty slice  returns the empty string
 func (ccp *CloudCostProperties) GenerateKey(props []string) string {
@@ -259,10 +286,26 @@ func (ccp *CloudCostProperties) GenerateKey(props []string) string {
 			if ccp.InvoiceEntityID != "" {
 				propVal = ccp.InvoiceEntityID
 			}
+		case prop == CloudCostInvoiceEntityNameProp:
+			if ccp.InvoiceEntityName != "" {
+				propVal = ccp.InvoiceEntityName
+			}
 		case prop == CloudCostAccountIDProp:
 			if ccp.AccountID != "" {
 				propVal = ccp.AccountID
 			}
+		case prop == CloudCostAccountNameProp:
+			if ccp.AccountName != "" {
+				propVal = ccp.AccountName
+			}
+		case prop == CloudCostRegionIDProp:
+			if ccp.RegionID != "" {
+				propVal = ccp.RegionID
+			}
+		case prop == CloudCostAvailabilityZoneProp:
+			if ccp.AvailabilityZone != "" {
+				propVal = ccp.AvailabilityZone
+			}
 		case prop == CloudCostServiceProp:
 			if ccp.Service != "" {
 				propVal = ccp.Service
@@ -298,7 +341,11 @@ func (ccp *CloudCostProperties) hashKey() string {
 	builder.WriteString(ccp.ProviderID)
 	builder.WriteString(ccp.Provider)
 	builder.WriteString(ccp.AccountID)
+	builder.WriteString(ccp.AccountName)
 	builder.WriteString(ccp.InvoiceEntityID)
+	builder.WriteString(ccp.InvoiceEntityName)
+	builder.WriteString(ccp.RegionID)
+	builder.WriteString(ccp.AvailabilityZone)
 	builder.WriteString(ccp.Service)
 	builder.WriteString(ccp.Category)
 

+ 200 - 95
core/pkg/opencost/cloudcostprops_test.go

@@ -10,34 +10,46 @@ func TestCloudCostPropertiesIntersection(t *testing.T) {
 	}{
 		"When properties match between both CloudCostProperties": {
 			baseCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			intCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			expectedCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
@@ -45,34 +57,46 @@ func TestCloudCostPropertiesIntersection(t *testing.T) {
 		},
 		"When one of the properties differ in the two CloudCostProperties": {
 			baseCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			intCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service2",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service2",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			expectedCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
@@ -80,47 +104,108 @@ func TestCloudCostPropertiesIntersection(t *testing.T) {
 		},
 		"When two of the properties differ in the two CloudCostProperties": {
 			baseCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			intCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID2",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service2",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID2",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service2",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 			expectedCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 				},
 			},
 		},
+		"When all properties differ in the two CloudCostProperties": {
+			baseCCP: &CloudCostProperties{
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
+				Labels: map[string]string{
+					"key1": "value1",
+				},
+			},
+			intCCP: &CloudCostProperties{
+				Provider:          "CustomProvider2",
+				ProviderID:        "ProviderID2",
+				AccountID:         "WorkGroupID2",
+				AccountName:       "AccountName2",
+				InvoiceEntityID:   "InvoiceEntityID2",
+				InvoiceEntityName: "InvoiceEntityName2",
+				RegionID:          "RegionID2",
+				AvailabilityZone:  "AvailabilityZone2",
+				Service:           "Service2",
+				Category:          "Category2",
+				Labels: map[string]string{
+					"key2": "value2",
+				},
+			},
+			expectedCCP: &CloudCostProperties{
+				Provider:          "",
+				ProviderID:        "",
+				AccountID:         "",
+				AccountName:       "",
+				InvoiceEntityID:   "",
+				InvoiceEntityName: "",
+				RegionID:          "",
+				AvailabilityZone:  "",
+				Service:           "",
+				Category:          "",
+				Labels:            map[string]string{},
+			},
+		},
 		"When labels differ": {
 			baseCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value1",
 					"key2": "value2",
@@ -128,12 +213,16 @@ func TestCloudCostPropertiesIntersection(t *testing.T) {
 				},
 			},
 			intCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key1": "value2",
 					"key2": "value2",
@@ -141,12 +230,16 @@ func TestCloudCostPropertiesIntersection(t *testing.T) {
 				},
 			},
 			expectedCCP: &CloudCostProperties{
-				Provider:        "CustomProvider",
-				ProviderID:      "ProviderID1",
-				AccountID:       "WorkGroupID1",
-				InvoiceEntityID: "InvoiceEntityID1",
-				Service:         "Service1",
-				Category:        "Category1",
+				Provider:          "CustomProvider",
+				ProviderID:        "ProviderID1",
+				AccountID:         "WorkGroupID1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "InvoiceEntityID1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "Service1",
+				Category:          "Category1",
 				Labels: map[string]string{
 					"key2": "value2",
 				},
@@ -176,45 +269,57 @@ func TestCloudCostProperties_hashKey(t *testing.T) {
 		},
 		"All props no labels": {
 			props: &CloudCostProperties{
-				ProviderID:      "providerid1",
-				Provider:        "provider1",
-				AccountID:       "workgroup1",
-				InvoiceEntityID: "billing1",
-				Service:         "service1",
-				Category:        "category1",
-				Labels:          map[string]string{},
-			},
-			want: "a19b7dddf0032572",
+				ProviderID:        "providerid1",
+				Provider:          "provider1",
+				AccountID:         "workgroup1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "billing1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "service1",
+				Category:          "category1",
+				Labels:            map[string]string{},
+			},
+			want: "d07ffd0bd6d5eaf1",
 		},
 		"All props": {
 			props: &CloudCostProperties{
-				ProviderID:      "providerid1",
-				Provider:        "provider1",
-				AccountID:       "workgroup1",
-				InvoiceEntityID: "billing1",
-				Service:         "service1",
-				Category:        "category1",
+				ProviderID:        "providerid1",
+				Provider:          "provider1",
+				AccountID:         "workgroup1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "billing1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "service1",
+				Category:          "category1",
 				Labels: map[string]string{
 					"label1": "value1",
 					"label2": "value2",
 				},
 			},
-			want: "9d54403e40ad4db6",
+			want: "318cb6294bf9e2d5",
 		},
 		"All props swap labels": {
 			props: &CloudCostProperties{
-				ProviderID:      "providerid1",
-				Provider:        "provider1",
-				AccountID:       "workgroup1",
-				InvoiceEntityID: "billing1",
-				Service:         "service1",
-				Category:        "category1",
+				ProviderID:        "providerid1",
+				Provider:          "provider1",
+				AccountID:         "workgroup1",
+				AccountName:       "AccountName1",
+				InvoiceEntityID:   "billing1",
+				InvoiceEntityName: "InvoiceEntityName1",
+				RegionID:          "RegionID1",
+				AvailabilityZone:  "AvailabilityZone1",
+				Service:           "service1",
+				Category:          "category1",
 				Labels: map[string]string{
 					"label2": "value2",
 					"label1": "value1",
 				},
 			},
-			want: "9d54403e40ad4db6",
+			want: "318cb6294bf9e2d5",
 		},
 	}
 	for name, tt := range tests {

+ 154 - 47
core/pkg/opencost/opencost_codecs.go

@@ -13,12 +13,11 @@ package opencost
 
 import (
 	"fmt"
+	util "github.com/opencost/opencost/core/pkg/util"
 	"reflect"
 	"strings"
 	"sync"
 	"time"
-
-	util "github.com/opencost/opencost/core/pkg/util"
 )
 
 const (
@@ -41,10 +40,10 @@ const (
 	AssetsCodecVersion uint8 = 21
 
 	// AllocationCodecVersion is used for any resources listed in the Allocation version set
-	AllocationCodecVersion uint8 = 21
+	AllocationCodecVersion uint8 = 22
 
 	// CloudCostCodecVersion is used for any resources listed in the CloudCost version set
-	CloudCostCodecVersion uint8 = 2
+	CloudCostCodecVersion uint8 = 3
 )
 
 //--------------------------------------------------------------------------
@@ -457,6 +456,8 @@ func (target *Allocation) MarshalBinaryWithContext(ctx *EncodingContext) (err er
 	}
 	// --- [end][write][alias](LbAllocations) ---
 
+	buff.WriteFloat64(target.GPURequestAverage) // write float64
+	buff.WriteFloat64(target.GPUUsageAverage)   // write float64
 	return nil
 }
 
@@ -769,6 +770,24 @@ func (target *Allocation) UnmarshalBinaryWithContext(ctx *DecodingContext) (err
 	} else {
 	}
 
+	// field version check
+	if uint8(22) <= version {
+		fff := buff.ReadFloat64() // read float64
+		target.GPURequestAverage = fff
+
+	} else {
+		target.GPURequestAverage = float64(0) // default
+	}
+
+	// field version check
+	if uint8(22) <= version {
+		ggg := buff.ReadFloat64() // read float64
+		target.GPUUsageAverage = ggg
+
+	} else {
+		target.GPUUsageAverage = float64(0) // default
+	}
+
 	return nil
 }
 
@@ -3561,20 +3580,44 @@ func (target *CloudCostProperties) MarshalBinaryWithContext(ctx *EncodingContext
 		buff.WriteString(target.AccountID) // write string
 	}
 	if ctx.IsStringTable() {
-		d := ctx.Table.AddOrGet(target.InvoiceEntityID)
+		d := ctx.Table.AddOrGet(target.AccountName)
 		buff.WriteInt(d) // write table index
 	} else {
-		buff.WriteString(target.InvoiceEntityID) // write string
+		buff.WriteString(target.AccountName) // write string
 	}
 	if ctx.IsStringTable() {
-		e := ctx.Table.AddOrGet(target.Service)
+		e := ctx.Table.AddOrGet(target.InvoiceEntityID)
 		buff.WriteInt(e) // write table index
 	} else {
-		buff.WriteString(target.Service) // write string
+		buff.WriteString(target.InvoiceEntityID) // write string
 	}
 	if ctx.IsStringTable() {
-		f := ctx.Table.AddOrGet(target.Category)
+		f := ctx.Table.AddOrGet(target.InvoiceEntityName)
 		buff.WriteInt(f) // write table index
+	} else {
+		buff.WriteString(target.InvoiceEntityName) // write string
+	}
+	if ctx.IsStringTable() {
+		g := ctx.Table.AddOrGet(target.RegionID)
+		buff.WriteInt(g) // write table index
+	} else {
+		buff.WriteString(target.RegionID) // write string
+	}
+	if ctx.IsStringTable() {
+		h := ctx.Table.AddOrGet(target.AvailabilityZone)
+		buff.WriteInt(h) // write table index
+	} else {
+		buff.WriteString(target.AvailabilityZone) // write string
+	}
+	if ctx.IsStringTable() {
+		k := ctx.Table.AddOrGet(target.Service)
+		buff.WriteInt(k) // write table index
+	} else {
+		buff.WriteString(target.Service) // write string
+	}
+	if ctx.IsStringTable() {
+		l := ctx.Table.AddOrGet(target.Category)
+		buff.WriteInt(l) // write table index
 	} else {
 		buff.WriteString(target.Category) // write string
 	}
@@ -3588,14 +3631,14 @@ func (target *CloudCostProperties) MarshalBinaryWithContext(ctx *EncodingContext
 		buff.WriteInt(len(map[string]string(target.Labels))) // map length
 		for v, z := range map[string]string(target.Labels) {
 			if ctx.IsStringTable() {
-				g := ctx.Table.AddOrGet(v)
-				buff.WriteInt(g) // write table index
+				m := ctx.Table.AddOrGet(v)
+				buff.WriteInt(m) // write table index
 			} else {
 				buff.WriteString(v) // write string
 			}
 			if ctx.IsStringTable() {
-				h := ctx.Table.AddOrGet(z)
-				buff.WriteInt(h) // write table index
+				n := ctx.Table.AddOrGet(z)
+				buff.WriteInt(n) // write table index
 			} else {
 				buff.WriteString(z) // write string
 			}
@@ -3692,15 +3735,21 @@ func (target *CloudCostProperties) UnmarshalBinaryWithContext(ctx *DecodingConte
 	g := h
 	target.AccountID = g
 
-	var m string
-	if ctx.IsStringTable() {
-		n := buff.ReadInt() // read string index
-		m = ctx.Table[n]
+	// field version check
+	if uint8(3) <= version {
+		var m string
+		if ctx.IsStringTable() {
+			n := buff.ReadInt() // read string index
+			m = ctx.Table[n]
+		} else {
+			m = buff.ReadString() // read string
+		}
+		l := m
+		target.AccountName = l
+
 	} else {
-		m = buff.ReadString() // read string
+		target.AccountName = "" // default
 	}
-	l := m
-	target.InvoiceEntityID = l
 
 	var p string
 	if ctx.IsStringTable() {
@@ -3710,56 +3759,114 @@ func (target *CloudCostProperties) UnmarshalBinaryWithContext(ctx *DecodingConte
 		p = buff.ReadString() // read string
 	}
 	o := p
-	target.Service = o
+	target.InvoiceEntityID = o
 
-	var s string
+	// field version check
+	if uint8(3) <= version {
+		var s string
+		if ctx.IsStringTable() {
+			t := buff.ReadInt() // read string index
+			s = ctx.Table[t]
+		} else {
+			s = buff.ReadString() // read string
+		}
+		r := s
+		target.InvoiceEntityName = r
+
+	} else {
+		target.InvoiceEntityName = "" // default
+	}
+
+	// field version check
+	if uint8(3) <= version {
+		var w string
+		if ctx.IsStringTable() {
+			x := buff.ReadInt() // read string index
+			w = ctx.Table[x]
+		} else {
+			w = buff.ReadString() // read string
+		}
+		u := w
+		target.RegionID = u
+
+	} else {
+		target.RegionID = "" // default
+	}
+
+	// field version check
+	if uint8(3) <= version {
+		var aa string
+		if ctx.IsStringTable() {
+			bb := buff.ReadInt() // read string index
+			aa = ctx.Table[bb]
+		} else {
+			aa = buff.ReadString() // read string
+		}
+		y := aa
+		target.AvailabilityZone = y
+
+	} else {
+		target.AvailabilityZone = "" // default
+	}
+
+	var dd string
 	if ctx.IsStringTable() {
-		t := buff.ReadInt() // read string index
-		s = ctx.Table[t]
+		ee := buff.ReadInt() // read string index
+		dd = ctx.Table[ee]
 	} else {
-		s = buff.ReadString() // read string
+		dd = buff.ReadString() // read string
 	}
-	r := s
-	target.Category = r
+	cc := dd
+	target.Service = cc
+
+	var gg string
+	if ctx.IsStringTable() {
+		hh := buff.ReadInt() // read string index
+		gg = ctx.Table[hh]
+	} else {
+		gg = buff.ReadString() // read string
+	}
+	ff := gg
+	target.Category = ff
 
 	// --- [begin][read][alias](CloudCostLabels) ---
-	var u map[string]string
+	var kk map[string]string
 	if buff.ReadUInt8() == uint8(0) {
-		u = nil
+		kk = nil
 	} else {
 		// --- [begin][read][map](map[string]string) ---
-		x := buff.ReadInt() // map len
-		w := make(map[string]string, x)
-		for i := 0; i < x; i++ {
+		mm := buff.ReadInt() // map len
+		ll := make(map[string]string, mm)
+		for i := 0; i < mm; i++ {
 			var v string
-			var aa string
+			var oo string
 			if ctx.IsStringTable() {
-				bb := buff.ReadInt() // read string index
-				aa = ctx.Table[bb]
+				pp := buff.ReadInt() // read string index
+				oo = ctx.Table[pp]
 			} else {
-				aa = buff.ReadString() // read string
+				oo = buff.ReadString() // read string
 			}
-			y := aa
-			v = y
+			nn := oo
+			v = nn
 
 			var z string
-			var dd string
+			var rr string
 			if ctx.IsStringTable() {
-				ee := buff.ReadInt() // read string index
-				dd = ctx.Table[ee]
+				ss := buff.ReadInt() // read string index
+				rr = ctx.Table[ss]
 			} else {
-				dd = buff.ReadString() // read string
+				rr = buff.ReadString() // read string
 			}
-			cc := dd
-			z = cc
+			qq := rr
+			z = qq
 
-			w[v] = z
+			ll[v] = z
 		}
-		u = w
+		kk = ll
 		// --- [end][read][map](map[string]string) ---
 
 	}
-	target.Labels = CloudCostLabels(u)
+	target.Labels = CloudCostLabels(kk)
 	// --- [end][read][alias](CloudCostLabels) ---
 
 	return nil

+ 31 - 1
core/pkg/opencost/summaryallocation.go

@@ -29,17 +29,23 @@ type SummaryAllocation struct {
 	CPUCoreRequestAverage  float64               `json:"cpuCoreRequestAverage"`
 	CPUCoreUsageAverage    float64               `json:"cpuCoreUsageAverage"`
 	CPUCost                float64               `json:"cpuCost"`
+	CPUCostIdle            float64               `json:"cpuCostIdle"`
+	GPURequestAverage      float64               `json:"gpuRequestAverage"`
+	GPUUsageAverage        float64               `json:"gpuUsageAverage"`
 	GPUCost                float64               `json:"gpuCost"`
+	GPUCostIdle            float64               `json:"gpuCostIdle"`
 	NetworkCost            float64               `json:"networkCost"`
 	LoadBalancerCost       float64               `json:"loadBalancerCost"`
 	PVCost                 float64               `json:"pvCost"`
 	RAMBytesRequestAverage float64               `json:"ramByteRequestAverage"`
 	RAMBytesUsageAverage   float64               `json:"ramByteUsageAverage"`
 	RAMCost                float64               `json:"ramCost"`
+	RAMCostIdle            float64               `json:"ramCostIdle"`
 	SharedCost             float64               `json:"sharedCost"`
 	ExternalCost           float64               `json:"externalCost"`
 	Share                  bool                  `json:"-"`
 	UnmountedPVCost        float64               `json:"-"`
+	Efficiency             float64               `json:"efficiency"`
 }
 
 // NewSummaryAllocation converts an Allocation to a SummaryAllocation by
@@ -59,6 +65,8 @@ func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *
 		CPUCoreRequestAverage:  alloc.CPUCoreRequestAverage,
 		CPUCoreUsageAverage:    alloc.CPUCoreUsageAverage,
 		CPUCost:                alloc.CPUCost + alloc.CPUCostAdjustment,
+		GPURequestAverage:      alloc.GPURequestAverage,
+		GPUUsageAverage:        alloc.GPUUsageAverage,
 		GPUCost:                alloc.GPUCost + alloc.GPUCostAdjustment,
 		NetworkCost:            alloc.NetworkCost + alloc.NetworkCostAdjustment,
 		LoadBalancerCost:       alloc.LoadBalancerCost + alloc.LoadBalancerCostAdjustment,
@@ -88,7 +96,7 @@ func NewSummaryAllocation(alloc *Allocation, reconcile, reconcileNetwork bool) *
 	if sa.IsUnmounted() {
 		sa.UnmountedPVCost = sa.PVCost
 	}
-
+	sa.Efficiency = sa.TotalEfficiency()
 	return sa
 }
 
@@ -120,6 +128,12 @@ func (sa *SummaryAllocation) Add(that *SummaryAllocation) error {
 	ramUseByteMins := sa.RAMBytesUsageAverage * sa.Minutes()
 	ramUseByteMins += that.RAMBytesUsageAverage * that.Minutes()
 
+	gpuReqMins := sa.GPURequestAverage * sa.Minutes()
+	gpuReqMins += that.GPURequestAverage * that.Minutes()
+
+	gpuUseMins := sa.GPUUsageAverage * sa.Minutes()
+	gpuUseMins += that.GPUUsageAverage * that.Minutes()
+
 	// Expand Start and End to be the "max" of among the given Allocations
 	if that.Start.Before(sa.Start) {
 		sa.Start = that.Start
@@ -134,11 +148,15 @@ func (sa *SummaryAllocation) Add(that *SummaryAllocation) error {
 		sa.CPUCoreUsageAverage = cpuUseCoreMins / sa.Minutes()
 		sa.RAMBytesRequestAverage = ramReqByteMins / sa.Minutes()
 		sa.RAMBytesUsageAverage = ramUseByteMins / sa.Minutes()
+		sa.GPURequestAverage = gpuReqMins / sa.Minutes()
+		sa.GPUUsageAverage = gpuUseMins / sa.Minutes()
 	} else {
 		sa.CPUCoreRequestAverage = 0.0
 		sa.CPUCoreUsageAverage = 0.0
 		sa.RAMBytesRequestAverage = 0.0
 		sa.RAMBytesUsageAverage = 0.0
+		sa.GPURequestAverage = 0.0
+		sa.GPUUsageAverage = 0.0
 	}
 
 	// Sum all cumulative cost fields
@@ -151,6 +169,7 @@ func (sa *SummaryAllocation) Add(that *SummaryAllocation) error {
 	sa.RAMCost += that.RAMCost
 	sa.SharedCost += that.SharedCost
 
+	sa.Efficiency = sa.TotalEfficiency()
 	return nil
 }
 
@@ -164,6 +183,8 @@ func (sa *SummaryAllocation) Clone() *SummaryAllocation {
 		CPUCoreRequestAverage:  sa.CPUCoreRequestAverage,
 		CPUCoreUsageAverage:    sa.CPUCoreUsageAverage,
 		CPUCost:                sa.CPUCost,
+		GPURequestAverage:      sa.GPURequestAverage,
+		GPUUsageAverage:        sa.GPUUsageAverage,
 		GPUCost:                sa.GPUCost,
 		NetworkCost:            sa.NetworkCost,
 		LoadBalancerCost:       sa.LoadBalancerCost,
@@ -173,6 +194,7 @@ func (sa *SummaryAllocation) Clone() *SummaryAllocation {
 		RAMCost:                sa.RAMCost,
 		SharedCost:             sa.SharedCost,
 		ExternalCost:           sa.ExternalCost,
+		Efficiency:             sa.Efficiency,
 	}
 }
 
@@ -224,6 +246,14 @@ func (sa *SummaryAllocation) Equal(that *SummaryAllocation) bool {
 		return false
 	}
 
+	if sa.GPURequestAverage != that.GPURequestAverage {
+		return false
+	}
+
+	if sa.GPUUsageAverage != that.GPUUsageAverage {
+		return false
+	}
+
 	if sa.GPUCost != that.GPUCost {
 		return false
 	}

+ 21 - 1
core/pkg/opencost/summaryallocation_json.go

@@ -15,13 +15,18 @@ type SummaryAllocationResponse struct {
 	CPUCoreRequestAverage  *float64  `json:"cpuCoreRequestAverage"`
 	CPUCoreUsageAverage    *float64  `json:"cpuCoreUsageAverage"`
 	CPUCost                *float64  `json:"cpuCost"`
+	CPUCostIdle            *float64  `json:"cpuCostIdle"`
+	GPURequestAverage      *float64  `json:"gpuRequestAverage"`
+	GPUUsageAverage        *float64  `json:"gpuUsageAverage"`
 	GPUCost                *float64  `json:"gpuCost"`
+	GPUCostIdle            *float64  `json:"gpuCostIdle"`
 	NetworkCost            *float64  `json:"networkCost"`
 	LoadBalancerCost       *float64  `json:"loadBalancerCost"`
 	PVCost                 *float64  `json:"pvCost"`
 	RAMBytesRequestAverage *float64  `json:"ramByteRequestAverage"`
 	RAMBytesUsageAverage   *float64  `json:"ramByteUsageAverage"`
 	RAMCost                *float64  `json:"ramCost"`
+	RAMCostIdle            *float64  `json:"ramCostIdle"`
 	SharedCost             *float64  `json:"sharedCost"`
 	ExternalCost           *float64  `json:"externalCost"`
 	TotalEfficiency        *float64  `json:"totalEfficiency"`
@@ -35,6 +40,16 @@ func (sa *SummaryAllocation) ToResponse() *SummaryAllocationResponse {
 		return nil
 	}
 
+	// if the efficiency has already been set,
+	// prefer that since it has been calculated elsewhere
+	// and matches the sorting criteria more closely
+	efficiency := sa.Efficiency
+	if efficiency == 0 {
+		// if efficiency has not been set by SQL or otherwise, calculate it
+		// using the object method
+		efficiency = sa.TotalEfficiency()
+
+	}
 	return &SummaryAllocationResponse{
 		Name:                   sa.Name,
 		Start:                  sa.Start,
@@ -42,16 +57,21 @@ func (sa *SummaryAllocation) ToResponse() *SummaryAllocationResponse {
 		CPUCoreRequestAverage:  formatutil.Float64ToResponse(sa.CPUCoreRequestAverage),
 		CPUCoreUsageAverage:    formatutil.Float64ToResponse(sa.CPUCoreUsageAverage),
 		CPUCost:                formatutil.Float64ToResponse(sa.CPUCost),
+		CPUCostIdle:            formatutil.Float64ToResponse(sa.CPUCostIdle),
+		GPURequestAverage:      formatutil.Float64ToResponse(sa.GPURequestAverage),
+		GPUUsageAverage:        formatutil.Float64ToResponse(sa.GPUUsageAverage),
 		GPUCost:                formatutil.Float64ToResponse(sa.GPUCost),
+		GPUCostIdle:            formatutil.Float64ToResponse(sa.GPUCostIdle),
 		NetworkCost:            formatutil.Float64ToResponse(sa.NetworkCost),
 		LoadBalancerCost:       formatutil.Float64ToResponse(sa.LoadBalancerCost),
 		PVCost:                 formatutil.Float64ToResponse(sa.PVCost),
 		RAMBytesRequestAverage: formatutil.Float64ToResponse(sa.RAMBytesRequestAverage),
 		RAMBytesUsageAverage:   formatutil.Float64ToResponse(sa.RAMBytesUsageAverage),
 		RAMCost:                formatutil.Float64ToResponse(sa.RAMCost),
+		RAMCostIdle:            formatutil.Float64ToResponse(sa.RAMCostIdle),
 		SharedCost:             formatutil.Float64ToResponse(sa.SharedCost),
 		ExternalCost:           formatutil.Float64ToResponse(sa.ExternalCost),
-		TotalEfficiency:        formatutil.Float64ToResponse(sa.TotalEfficiency()),
+		TotalEfficiency:        formatutil.Float64ToResponse(efficiency),
 		TotalCost:              formatutil.Float64ToResponse(sa.TotalCost()),
 	}
 }

+ 7 - 49
core/pkg/opencost/window.go

@@ -298,13 +298,18 @@ func parseWindow(window string, now time.Time) (Window, error) {
 		end := now
 		start := end.Add(-time.Duration(num) * dur)
 
-		// when using windows such as "7d" and "1w", we have to have a definition for what "the past X days" means.
+		// when using windows such as "7d", "1w", and "2h", we need a definition for what "the past X days/hours" means.
 		// let "the past X days" be defined as the entirety of today plus the entirety of the past X-1 days, where
 		// "entirety" is defined as midnight to midnight, UTC. given this definition, we round forward the calculated
 		// start and end times to the nearest day to align with midnight boundaries
-		if match[2] == "d" || match[2] == "w" {
+		// an analogous definition applies to "the past X weeks" and "the past X hours"
+		if match[2] == "w" {
+			// special case - with a week, we say the week ends today
 			end = end.Truncate(timeutil.Day).Add(timeutil.Day)
 			start = start.Truncate(timeutil.Day).Add(timeutil.Day)
+		} else {
+			end = now.Truncate(dur).Add(dur)
+			start = end.Add(-time.Duration(num) * dur)
 		}
 
 		return NewWindow(&start, &end), nil
@@ -743,53 +748,6 @@ func (w Window) DurationOffset() (time.Duration, time.Duration, error) {
 	return duration, offset, nil
 }
 
-// DurationOffsetForPrometheus returns strings representing durations for the
-// duration and offset of the given window, factoring in the Thanos offset if
-// necessary. Whereas duration is a simple duration string (e.g. "1d"), the
-// offset includes the word "offset" (e.g. " offset 2d") so that the values
-// returned can be used directly in the formatting string "some_metric[%s]%s"
-// to generate the query "some_metric[1d] offset 2d".
-func (w Window) DurationOffsetForPrometheus() (string, string, error) {
-	duration, offset, err := w.DurationOffset()
-	if err != nil {
-		return "", "", err
-	}
-
-	// If using Thanos, increase offset to 3 hours, reducing the duration by
-	// equal measure to maintain the same starting point.
-	// TODO: This logic should technically be decoupled from this type, but
-	// TODO: current use cases are unclear. To ensure we do not break existing
-	// TODO: (or legacy) use-cases, temporarily support this one-off logic.
-	thanosDur := thanosOffset()
-	if offset < thanosDur && isThanosEnabled() {
-		diff := thanosDur - offset
-		offset += diff
-		duration -= diff
-	}
-
-	// If duration < 0, return an error
-	if duration < 0 {
-		return "", "", fmt.Errorf("negative duration: %s", duration)
-	}
-
-	// Negative offset means that the end time is in the future. Prometheus
-	// fails for non-positive offset values, so shrink the duration and
-	// remove the offset altogether.
-	if offset < 0 {
-		duration = duration + offset
-		offset = 0
-	}
-
-	durStr, offStr := timeutil.DurationOffsetStrings(duration, offset)
-	if offset < time.Minute {
-		offStr = ""
-	} else {
-		offStr = " offset " + offStr
-	}
-
-	return durStr, offStr, nil
-}
-
 // DurationOffsetStrings returns formatted, Prometheus-compatible strings representing
 // the duration and offset of the window in terms of days, hours, minutes, or seconds;
 // e.g. ("7d", "1441m", "30m", "1s", "")

+ 45 - 111
core/pkg/opencost/window_test.go

@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"strings"
 	"testing"
 	"time"
 
@@ -653,7 +652,39 @@ func TestWindow_DurationOffsetStrings(t *testing.T) {
 	}
 }
 
-func TestWindow_DurationOffsetForPrometheus(t *testing.T) {
+func TestParse_Window(t *testing.T) {
+	now := time.Date(2024, time.May, 3, 8, 1, 4, 6, time.UTC)
+	win, err := parseWindow("2h", now)
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "2h": %s`, err)
+	}
+
+	expectedStart := time.Date(2024, time.May, 3, 7, 0, 0, 0, time.UTC)
+	expectedEnd := time.Date(2024, time.May, 3, 9, 0, 0, 0, time.UTC)
+
+	if !win.start.Equal(expectedStart) {
+		t.Fatalf(`expect: window start to be %s; actual: %s`, expectedStart, win.start)
+	}
+	if !win.end.Equal(expectedEnd) {
+		t.Fatalf(`expect: window end to be %s; actual: %s`, expectedEnd, win.end)
+	}
+
+	win, err = parseWindow("3d", now)
+	if err != nil {
+		t.Fatalf(`unexpected error parsing "3d": %s`, err)
+	}
+
+	expectedStart = time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC)
+	expectedEnd = time.Date(2024, time.May, 4, 0, 0, 0, 0, time.UTC)
+
+	if !win.start.Equal(expectedStart) {
+		t.Fatalf(`expect: window start to be %s; actual: %s`, expectedStart, win.start)
+	}
+	if !win.end.Equal(expectedEnd) {
+		t.Fatalf(`expect: window end to be %s; actual: %s`, expectedEnd, win.end)
+	}
+}
+func TestWindow_Duration(t *testing.T) {
 	// Set-up and tear-down
 	thanosEnabled := env.GetBool(ThanosEnabledEnvVarName, false)
 	defer env.SetBool(ThanosEnabledEnvVarName, thanosEnabled)
@@ -665,144 +696,47 @@ func TestWindow_DurationOffsetForPrometheus(t *testing.T) {
 	}
 
 	now := time.Now().UTC()
-	startOfToday := now.Truncate(timeutil.Day)
 	w, err := parseWindow("1d", now)
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "1d": %s`, err)
 	}
-
-	dur, off, err := w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	// We can get a response in seconds OR minutes. Check seconds first as it
-	// is higher resolution.
-	expDurSec := int(now.Sub(startOfToday).Seconds())
-	expDurSecStr := fmt.Sprintf("%ds", expDurSec)
-	expDurMin := int(now.Sub(startOfToday).Minutes())
-	expDurMinStr := fmt.Sprintf("%dm", expDurMin)
-	if dur != expDurSecStr && dur != expDurMinStr {
-		t.Fatalf(`expect: window to be "%s" (or "%s"); actual: "%s"`, expDurSecStr, expDurMinStr, dur)
-	}
-	if off != "" {
-		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+	if w.Duration() != 24*time.Hour {
+		t.Fatalf(`expect: window to be 24 hours; actual: %s`, w.Duration())
 	}
 
 	w, err = ParseWindowUTC("2h")
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "2h": %s`, err)
 	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "2h" {
-		t.Fatalf(`expect: window to be "2h"; actual: "%s"`, dur)
-	}
-	if off != "" {
-		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
-	}
 
+	if w.Duration().String() != "2h0m0s" {
+		t.Fatalf(`expect: window to be "2h"; actual: "%s"`, w.Duration().String())
+	}
 	w, err = ParseWindowUTC("10m")
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "10m": %s`, err)
 	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "10m" {
-		t.Fatalf(`expect: window to be "10m"; actual: "%s"`, dur)
-	}
-	if off != "" {
-		t.Fatalf(`expect: offset to be ""; actual: "%s"`, off)
+
+	if w.Duration().String() != "10m0s" {
+		t.Fatalf(`expect: window to be "10m"; actual: "%s"`, w.Duration().String())
 	}
 
 	w, err = ParseWindowUTC("1589448338,1589534798")
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "1589448338,1589534798": %s`, err)
 	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "1441m" {
-		t.Fatalf(`expect: window to be "1441m"; actual: "%s"`, dur)
-	}
-	if !strings.HasPrefix(off, " offset ") {
-		t.Fatalf(`expect: offset to start with " offset "; actual: "%s"`, off)
+	if w.Duration().String() != "24h1m0s" {
+		t.Fatalf(`expect: window to be "24h1m0s"; actual: "%s"`, w.Duration().String())
 	}
 
 	w, err = ParseWindowUTC("yesterday")
 	if err != nil {
 		t.Fatalf(`unexpected error parsing "yesterday": %s`, err)
 	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "1d" {
-		t.Fatalf(`expect: window to be "1d"; actual: "%s"`, dur)
-	}
-	if !strings.HasPrefix(off, " offset ") {
-		t.Fatalf(`expect: offset to start with " offset "; actual: "%s"`, off)
-	}
-
-	// Test for Thanos (env.IsThanosEnabled() == true)
-	env.SetBool(ThanosEnabledEnvVarName, true)
-	if !env.GetBool(ThanosEnabledEnvVarName, false) {
-		t.Fatalf("expected env.IsThanosEnabled() == true")
-	}
-
-	// Note - with the updated logic of 1d, 1w, etc. rounding the start and end times forward to the nearest midnight,
-	// DurationOffsetForPrometheus may fail if not using a window using "Xh" as the string to parse
-	w, err = ParseWindowUTC("24h")
-	if err != nil {
-		t.Fatalf(`unexpected error parsing "24h": %s`, err)
-	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "21h" {
-		t.Fatalf(`expect: window to be "21d"; actual: "%s"`, dur)
-	}
-	if off != " offset 3h" {
-		t.Fatalf(`expect: offset to be " offset 3h"; actual: "%s"`, off)
+	if w.Duration().String() != "24h0m0s" {
+		t.Fatalf(`expect: window to be "24h0m0s"; actual: "%s"`, w.Duration().String())
 	}
 
-	w, err = ParseWindowUTC("2h")
-	if err != nil {
-		t.Fatalf(`unexpected error parsing "2h": %s`, err)
-	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err == nil {
-		t.Fatalf(`expected error (negative duration); got ("%s", "%s")`, dur, off)
-	}
-
-	w, err = ParseWindowUTC("10m")
-	if err != nil {
-		t.Fatalf(`unexpected error parsing "1d": %s`, err)
-	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err == nil {
-		t.Fatalf(`expected error (negative duration); got ("%s", "%s")`, dur, off)
-	}
-
-	w, err = ParseWindowUTC("1589448338,1589534798")
-	if err != nil {
-		t.Fatalf(`unexpected error parsing "1589448338,1589534798": %s`, err)
-	}
-	dur, off, err = w.DurationOffsetForPrometheus()
-	if err != nil {
-		t.Fatalf("unexpected error: %s", err)
-	}
-	if dur != "1441m" {
-		t.Fatalf(`expect: window to be "1441m"; actual: "%s"`, dur)
-	}
-	if !strings.HasPrefix(off, " offset ") {
-		t.Fatalf(`expect: offset to start with " offset "; actual: "%s"`, off)
-	}
 }
 
 // TODO

+ 4 - 0
core/pkg/util/buffer.go

@@ -114,6 +114,10 @@ func (b *Buffer) WriteFloat64(i float64) {
 // WriteString writes the string's length as a uint16 followed by the string contents.
 func (b *Buffer) WriteString(i string) {
 	s := stringToBytes(i)
+	// string lengths are limited to uint16 - See ReadString()
+	if len(s) > math.MaxUint16 {
+		s = s[:math.MaxUint16]
+	}
 	write(b.b, uint16(len(s)))
 	b.b.Write(s)
 }

+ 274 - 0
core/pkg/util/buffer_test.go

@@ -0,0 +1,274 @@
+package util
+
+import (
+	"bytes"
+	"math"
+	"math/rand"
+	"testing"
+)
+
+func TestBufferReadWrite(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteBool(true)
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+	buf.WriteString("Testing, 1, 2, 3!")
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+
+	boolVal := readBuf.ReadBool()
+	intVal := readBuf.ReadInt()
+	floatVal := readBuf.ReadFloat64()
+	stringVal := readBuf.ReadString()
+
+	if boolVal != true {
+		t.Errorf("Expected bool value to be true, got %v", boolVal)
+	}
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+	if stringVal != "Testing, 1, 2, 3!" {
+		t.Errorf("Expected string value to be 'Testing, 1, 2, 3!', got %v", stringVal)
+	}
+}
+
+func TestBufferWriteReadBytes(t *testing.T) {
+	buf := NewBuffer()
+
+	bytesToWrite := []byte{0x01, 0x02, 0x03, 0x04}
+	buf.WriteBytes(bytesToWrite)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readBytes := readBuf.ReadBytes(len(bytesToWrite))
+
+	if !bytes.Equal(readBytes, bytesToWrite) {
+		t.Errorf("Expected bytes to be %v, got %v", bytesToWrite, readBytes)
+	}
+}
+
+func TestBufferWriteReadUInt64(t *testing.T) {
+	buf := NewBuffer()
+
+	uint64Val := uint64(1234567890)
+	buf.WriteUInt64(uint64Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt64 := readBuf.ReadUInt64()
+
+	if readUInt64 != uint64Val {
+		t.Errorf("Expected uint64 value to be %v, got %v", uint64Val, readUInt64)
+	}
+}
+
+func TestBufferWriteReadFloat32(t *testing.T) {
+	buf := NewBuffer()
+
+	float32Val := float32(3.14159)
+	buf.WriteFloat32(float32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readFloat32 := readBuf.ReadFloat32()
+
+	if readFloat32 != float32Val {
+		t.Errorf("Expected float32 value to be %v, got %v", float32Val, readFloat32)
+	}
+}
+
+func TestBufferWriteReadInt8(t *testing.T) {
+	buf := NewBuffer()
+
+	int8Val := int8(-42)
+	buf.WriteInt8(int8Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt8 := readBuf.ReadInt8()
+
+	if readInt8 != int8Val {
+		t.Errorf("Expected int8 value to be %v, got %v", int8Val, readInt8)
+	}
+}
+
+func TestBufferWriteReadUInt16(t *testing.T) {
+	buf := NewBuffer()
+
+	uint16Val := uint16(65535)
+	buf.WriteUInt16(uint16Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt16 := readBuf.ReadUInt16()
+
+	if readUInt16 != uint16Val {
+		t.Errorf("Expected uint16 value to be %v, got %v", uint16Val, readUInt16)
+	}
+}
+
+func TestBufferWriteReadInt32(t *testing.T) {
+	buf := NewBuffer()
+
+	int32Val := int32(-1234567890)
+	buf.WriteInt32(int32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt32 := readBuf.ReadInt32()
+
+	if readInt32 != int32Val {
+		t.Errorf("Expected int32 value to be %v, got %v", int32Val, readInt32)
+	}
+}
+
+func TestBufferWriteReadUInt8(t *testing.T) {
+	buf := NewBuffer()
+
+	uint8Val := uint8(255)
+	buf.WriteUInt8(uint8Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt8 := readBuf.ReadUInt8()
+
+	if readUInt8 != uint8Val {
+		t.Errorf("Expected uint8 value to be %v, got %v", uint8Val, readUInt8)
+	}
+}
+
+func TestBufferWriteReadInt16(t *testing.T) {
+	buf := NewBuffer()
+
+	int16Val := int16(-32768)
+	buf.WriteInt16(int16Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt16 := readBuf.ReadInt16()
+
+	if readInt16 != int16Val {
+		t.Errorf("Expected int16 value to be %v, got %v", int16Val, readInt16)
+	}
+}
+
+func TestBufferWriteReadUInt32(t *testing.T) {
+	buf := NewBuffer()
+
+	uint32Val := uint32(4294967295)
+	buf.WriteUInt32(uint32Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readUInt32 := readBuf.ReadUInt32()
+
+	if readUInt32 != uint32Val {
+		t.Errorf("Expected uint32 value to be %v, got %v", uint32Val, readUInt32)
+	}
+}
+
+func TestBufferWriteReadInt64(t *testing.T) {
+	buf := NewBuffer()
+
+	int64Val := int64(-9223372036854775808)
+	buf.WriteInt64(int64Val)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+	readInt64 := readBuf.ReadInt64()
+
+	if readInt64 != int64Val {
+		t.Errorf("Expected int64 value to be %v, got %v", int64Val, readInt64)
+	}
+}
+
+func TestBufferBytes(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+
+	unreadBytes := buf.Bytes()
+
+	newBuf := NewBufferFromBytes(unreadBytes)
+
+	intVal := newBuf.ReadInt()
+	floatVal := newBuf.ReadFloat64()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+}
+
+func TestBufferNewBufferFrom(t *testing.T) {
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+
+	newBuf := NewBufferFrom(buf)
+
+	intVal := newBuf.ReadInt()
+	floatVal := newBuf.ReadFloat64()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+}
+
+const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func generateRandomString(ln int) string {
+	b := make([]byte, ln)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+func TestTooLargeStringTruncate(t *testing.T) {
+	normalStr := generateRandomString(100)
+	bigStr := generateRandomString(math.MaxUint16 + (math.MaxUint16 / 2))
+	expectedBigStrRead := bigStr[:math.MaxUint16]
+
+	otherBigStr := generateRandomString(math.MaxUint16)
+	plusOne := generateRandomString(math.MaxUint16 + 1)
+	expectedPlusOne := plusOne[:math.MaxUint16]
+
+	buf := NewBuffer()
+
+	buf.WriteInt(42)
+	buf.WriteFloat64(3.14)
+	buf.WriteString(normalStr)
+	buf.WriteString(bigStr)
+	buf.WriteString(otherBigStr)
+	buf.WriteString(plusOne)
+
+	readBuf := NewBufferFromBytes(buf.Bytes())
+
+	intVal := readBuf.ReadInt()
+	floatVal := readBuf.ReadFloat64()
+	normalStrRead := readBuf.ReadString()
+	bigStrRead := readBuf.ReadString()
+	otherBigStrRead := readBuf.ReadString()
+	plusOneRead := readBuf.ReadString()
+
+	if intVal != 42 {
+		t.Errorf("Expected int value to be 42, got %v", intVal)
+	}
+	if floatVal != 3.14 {
+		t.Errorf("Expected float value to be 3.14, got %v", floatVal)
+	}
+	if normalStrRead != normalStr {
+		t.Errorf("Expected string value to be %v, got %v", normalStr, normalStrRead)
+	}
+	if bigStrRead != expectedBigStrRead {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+	if otherBigStrRead != otherBigStr {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+	if plusOneRead != expectedPlusOne {
+		t.Errorf("Expected large string values to be equivalent!")
+	}
+}

+ 11 - 0
core/pkg/util/filterutil/filterparams.go

@@ -3,6 +3,7 @@ package filterutil
 import (
 	"reflect"
 
+	"github.com/opencost/opencost/core/pkg/filter/fieldstrings"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 	"github.com/opencost/opencost/core/pkg/util/mapper"
@@ -94,6 +95,16 @@ var AssetPropToV1FilterParamKey = map[opencost.AssetProperty]string{
 	opencost.AssetServiceProp:    ParamFilterServices,
 }
 
+var CloudPropToV1FilterParamKey = map[string]string{
+	fieldstrings.FieldAccountID:       ParamFilterAccountIDs,
+	fieldstrings.FieldCategory:        ParamFilterCategories,
+	fieldstrings.FieldInvoiceEntityID: ParamFilterInvoiceEntityIDs,
+	fieldstrings.FieldLabel:           ParamFilterLabels,
+	fieldstrings.FieldProvider:        ParamFilterProviders,
+	fieldstrings.FieldProviderID:      ParamFilterProviderIDs,
+	fieldstrings.FieldService:         ParamFilterServices,
+}
+
 // AllHTTPParamKeys returns all HTTP GET parameters used for v1 filters. It is
 // intended to help validate HTTP queries in handlers to help avoid e.g.
 // spelling errors.

+ 3 - 3
core/pkg/util/filterutil/filterutil.go

@@ -15,7 +15,6 @@ import (
 	assetfilter "github.com/opencost/opencost/core/pkg/filter/asset"
 	"github.com/opencost/opencost/core/pkg/filter/ast"
 	cloudcostfilter "github.com/opencost/opencost/core/pkg/filter/cloudcost"
-	// cloudfilter "github.com/opencost/opencost/core/pkg/filter/cloud"
 )
 
 // ============================================================================
@@ -29,8 +28,9 @@ import (
 // funcs by Field type.
 var defaultFieldByType = map[string]any{
 	// typeutil.TypeOf[cloudfilter.CloudAggregationField](): cloudfilter.DefaultFieldByName,
-	typeutil.TypeOf[afilter.AllocationField](): afilter.DefaultFieldByName,
-	typeutil.TypeOf[assetfilter.AssetField]():  assetfilter.DefaultFieldByName,
+	typeutil.TypeOf[afilter.AllocationField]():        afilter.DefaultFieldByName,
+	typeutil.TypeOf[assetfilter.AssetField]():         assetfilter.DefaultFieldByName,
+	typeutil.TypeOf[cloudcostfilter.CloudCostField](): cloudcostfilter.DefaultFieldByName,
 }
 
 // DefaultFieldByName looks up a specific T field instance by name and returns the default

+ 8 - 9
core/pkg/util/timeutil/timeutil_test.go

@@ -1,7 +1,6 @@
 package timeutil
 
 import (
-	"fmt"
 	"testing"
 	"time"
 )
@@ -393,27 +392,27 @@ func Test_FormatDurationStringDaysToHours(t *testing.T) {
 func TestRoundToStartOfWeek(t *testing.T) {
 	sunday := time.Date(2023, 03, 26, 12, 12, 12, 12, time.UTC)
 	roundedFromSunday := RoundToStartOfWeek(sunday)
-	if roundedFromSunday.Day() != 26 || roundedFromSunday.Weekday() == time.Sunday {
-		fmt.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromSunday.Day(), roundedFromSunday.Weekday().String())
+	if roundedFromSunday.Day() != 26 || roundedFromSunday.Weekday() != time.Sunday {
+		t.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromSunday.Day(), roundedFromSunday.Weekday().String())
 	}
 
 	tuesday := time.Date(2023, 03, 28, 12, 12, 12, 12, time.UTC)
 	roundedFromTuesday := RoundToStartOfWeek(tuesday)
-	if roundedFromTuesday.Day() != 26 || roundedFromTuesday.Weekday() == time.Sunday {
-		fmt.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromTuesday.Day(), roundedFromTuesday.Weekday().String())
+	if roundedFromTuesday.Day() != 26 || roundedFromTuesday.Weekday() != time.Sunday {
+		t.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromTuesday.Day(), roundedFromTuesday.Weekday().String())
 	}
 }
 
 func TestRoundToStartOfFollowingWeek(t *testing.T) {
 	sunday := time.Date(2023, 03, 26, 12, 12, 12, 12, time.UTC)
 	roundedFromSunday := RoundToStartOfFollowingWeek(sunday)
-	if roundedFromSunday.Month() != 4 || roundedFromSunday.Day() != 2 || roundedFromSunday.Weekday() == time.Sunday {
-		fmt.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromSunday.Day(), roundedFromSunday.Weekday().String())
+	if roundedFromSunday.Month() != 4 || roundedFromSunday.Day() != 2 || roundedFromSunday.Weekday() != time.Sunday {
+		t.Errorf("expected date to be rounded to the following sunday, got: %d, %s", roundedFromSunday.Day(), roundedFromSunday.Weekday().String())
 	}
 
 	tuesday := time.Date(2023, 03, 28, 12, 12, 12, 12, time.UTC)
 	roundedFromTuesday := RoundToStartOfFollowingWeek(tuesday)
-	if roundedFromTuesday.Month() != 4 || roundedFromTuesday.Day() != 2 || roundedFromTuesday.Weekday() == time.Sunday {
-		fmt.Errorf("expected date to be rounded to the same sunday, got: %d, %s", roundedFromTuesday.Day(), roundedFromTuesday.Weekday().String())
+	if roundedFromTuesday.Month() != 4 || roundedFromTuesday.Day() != 2 || roundedFromTuesday.Weekday() != time.Sunday {
+		t.Errorf("expected date to be rounded to the following sunday, got: %d, %s", roundedFromTuesday.Day(), roundedFromTuesday.Weekday().String())
 	}
 }

+ 160 - 5
core/pkg/util/worker/worker.go

@@ -9,6 +9,9 @@ import (
 	"github.com/opencost/opencost/core/pkg/collections"
 )
 
+// Runner is a function type that takes a single input and returns nothing.
+type Runner[T any] func(T)
+
 // Worker is a transformation function from input type T to output type U.
 type Worker[T any, U any] func(T) U
 
@@ -54,7 +57,7 @@ type queuedWorkerPool[T any, U any] struct {
 type ordered[T any, U any] struct {
 	workPool WorkerPool[T, U]
 	results  []U
-	wg       *sync.WaitGroup
+	wg       sync.WaitGroup
 	count    int
 }
 
@@ -129,7 +132,6 @@ func NewOrderedGroup[T any, U any](pool WorkerPool[T, U], size int) WorkGroup[T,
 	return &ordered[T, U]{
 		workPool: pool,
 		results:  make([]U, size),
-		wg:       new(sync.WaitGroup),
 		count:    0,
 	}
 }
@@ -166,6 +168,96 @@ func (ow *ordered[T, U]) Wait() []U {
 	return ow.results
 }
 
+// noResultGroup is a WorkGroup implementation which arbitrarily pushes inputs to
+// a runner pool to be executed concurrently. This group does not collect results.
+type noResultGroup[T any] struct {
+	workPool WorkerPool[T, struct{}]
+	wg       sync.WaitGroup
+}
+
+// NewNoResultGroup creates a new WorkGroup implementation for processing a group of inputs concurrently. This
+// work group implementation does not collect results, and therefore, requires a worker pool with a struct{} output.
+func NewNoResultGroup[T any](pool WorkerPool[T, struct{}]) WorkGroup[T, struct{}] {
+	return &noResultGroup[T]{
+		workPool: pool,
+	}
+}
+
+// Push adds a new input to the work group.
+func (ow *noResultGroup[T]) Push(input T) error {
+	onComplete := make(chan struct{})
+	err := ow.workPool.Run(input, onComplete)
+	if err != nil {
+		return err
+	}
+
+	ow.wg.Add(1)
+
+	go func() {
+		defer close(onComplete)
+		defer ow.wg.Done()
+
+		<-onComplete
+	}()
+
+	return nil
+}
+
+// Wait waits for all pending worker tasks to complete, then returns all the results.
+func (ow *noResultGroup[T]) Wait() []struct{} {
+	ow.wg.Wait()
+	return []struct{}{}
+}
+
+// collector is a WorkGroup implementation which collects non-nil results into the results slice
+// and ignores any nil results.
+type collector[T any, U any] struct {
+	workPool   WorkerPool[T, *U]
+	resultLock sync.Mutex
+	results    []*U
+	wg         sync.WaitGroup
+}
+
+// NewCollectionGroup creates a new WorkGroup implementation for processing a group of inputs concurrently. The
+// collection group implementation will collect all non-nil results into the output slice. Thus, the worker pool
+// parameter requires the output type to be a pointer.
+func NewCollectionGroup[T any, U any](pool WorkerPool[T, *U]) WorkGroup[T, *U] {
+	return &collector[T, U]{
+		workPool: pool,
+	}
+}
+
+// Push adds a new input to the work group.
+func (ow *collector[T, U]) Push(input T) error {
+	onComplete := make(chan *U)
+	err := ow.workPool.Run(input, onComplete)
+	if err != nil {
+		return err
+	}
+
+	ow.wg.Add(1)
+
+	go func() {
+		defer ow.wg.Done()
+		defer close(onComplete)
+
+		result := <-onComplete
+		if result != nil {
+			ow.resultLock.Lock()
+			ow.results = append(ow.results, result)
+			ow.resultLock.Unlock()
+		}
+	}()
+
+	return nil
+}
+
+// Wait waits for all pending worker tasks to complete, then returns all the results.
+func (ow *collector[T, U]) Wait() []*U {
+	ow.wg.Wait()
+	return ow.results
+}
+
 // these constraints protect against the possibility of unexpected output from runtime.NumCPU()
 const (
 	defaultMinWorkers = 4
@@ -190,10 +282,21 @@ func OptimalWorkerCountInRange(min int, max int) int {
 	return cores
 }
 
-// ConcurrentDo runs a pool of workers which concurrently call the provided worker func on each input to get ordered
-// output corresponding to the inputs
+// ConcurrentDo runs a pool of N workers which concurrently call the provided worker func on each
+// input to get ordered output corresponding to the inputs. The total number of workers is determined
+// by the total number of CPUs available, bound to a range from 4-16.
 func ConcurrentDo[T any, U any](worker Worker[T, U], inputs []T) []U {
-	workerPool := NewWorkerPool(OptimalWorkerCount(), worker)
+	return ConcurrentDoWith(OptimalWorkerCount(), worker, inputs)
+}
+
+// ConcurrentDoWith runs a pool of workers of the specified size which concurrently call the provided worker func
+// on each input to get ordered output corresponding to the inputs. Size inputs < 1 will automatically be set to 1.
+func ConcurrentDoWith[T any, U any](size int, worker Worker[T, U], inputs []T) []U {
+	if size < 1 {
+		size = 1
+	}
+
+	workerPool := NewWorkerPool(size, worker)
 	defer workerPool.Shutdown()
 
 	workGroup := NewOrderedGroup(workerPool, len(inputs))
@@ -203,3 +306,55 @@ func ConcurrentDo[T any, U any](worker Worker[T, U], inputs []T) []U {
 
 	return workGroup.Wait()
 }
+
+// ConcurrentCollect runs a pool of N workers which concurrently call the provided worker func on each
+// input to get a result slice of non-nil outputs. The total number of workers is determined
+// by the total number of CPUs available, bound to a range from 4-16.
+func ConcurrentCollect[T any, U any](workerFunc Worker[T, *U], inputs []T) []*U {
+	return ConcurrentCollectWith(OptimalWorkerCount(), workerFunc, inputs)
+}
+
+// ConcurrentCollectWith runs a pool of workers of the specified size which concurrently call the provided worker
+// func on each input to get a result slice of non-nil outputs. Size inputs < 1 will automatically be set to 1.
+func ConcurrentCollectWith[T any, U any](size int, workerFunc Worker[T, *U], inputs []T) []*U {
+	if size < 1 {
+		size = 1
+	}
+
+	workerPool := NewWorkerPool(size, workerFunc)
+	defer workerPool.Shutdown()
+
+	workGroup := NewCollectionGroup(workerPool)
+	for _, input := range inputs {
+		workGroup.Push(input)
+	}
+
+	return workGroup.Wait()
+}
+
+// ConcurrentRun runs a pool of N workers which concurrently call the provided runner func on each
+// input. The total number of workers is determined by the total number of CPUs available, bound to
+// a range from 4-16.
+func ConcurrentRun[T any](runner Runner[T], inputs []T) {
+	ConcurrentRunWith(OptimalWorkerCount(), runner, inputs)
+}
+
+// ConcurrentRunWith runs a pool of runners of the specified size which concurrently call the provided runner
+// func on each input. Size inputs < 1 will automatically be set to 1.
+func ConcurrentRunWith[T any](size int, runner Runner[T], inputs []T) {
+	if size < 1 {
+		size = 1
+	}
+
+	workerPool := NewWorkerPool(size, func(input T) (void struct{}) {
+		runner(input)
+		return
+	})
+
+	workGroup := NewNoResultGroup(workerPool)
+	for _, input := range inputs {
+		workGroup.Push(input)
+	}
+
+	workGroup.Wait()
+}

+ 137 - 1
core/pkg/util/worker/worker_test.go

@@ -74,7 +74,6 @@ func TestWorkerPoolExactWorkers(t *testing.T) {
 	case <-time.After(5 * time.Second):
 		t.Errorf("Failed to Complete Run for %d jobs in 5s\n", workers)
 	}
-
 }
 
 func TestOrderedWorkGroup(t *testing.T) {
@@ -116,6 +115,38 @@ func TestOrderedWorkGroup(t *testing.T) {
 	// above assertion
 }
 
+func TestConcurrentRun(t *testing.T) {
+	const tasks = 50
+
+	var wg sync.WaitGroup
+	wg.Add(tasks)
+
+	// runner func logs start/finish for simulated work; ConcurrentRun collects
+	// no results, so completion is tracked via the WaitGroup instead
+	work := func(i int) {
+		defer wg.Done()
+
+		t.Logf("Starting Work: %d\n", i)
+		time.Sleep(time.Duration(rand.Intn(250)+250) * time.Millisecond)
+		t.Logf("Finished Work: %d\n", i)
+	}
+
+	// pre-build inputs
+	input := make([]int, tasks)
+	for i := 0; i < tasks; i++ {
+		input[i] = i + 1
+	}
+
+	// get results and verify they match the recorded inputs
+	ConcurrentRunWith(10, work, input)
+
+	select {
+	case <-waitChannelFor(&wg):
+	case <-time.After(5 * time.Second):
+		t.Errorf("Failed to Complete Run for %d jobs in 5s\n", tasks)
+	}
+}
+
 func TestConcurrentDoOrdered(t *testing.T) {
 	// Perform a similar test to the above ordered test, but use the helper func with pre-built inputs
 	const tasks = 50
@@ -147,3 +178,108 @@ func TestConcurrentDoOrdered(t *testing.T) {
 	// the result collection handles the ordering in the group, which is what we want to ensure in the
 	// above assertion
 }
+
+func TestConcurrentCollect(t *testing.T) {
+	type A struct {
+		Value int
+	}
+
+	type B struct {
+		Value int
+	}
+
+	// Perform a similar test to the above ordered test, but use the helper func with pre-built inputs
+	const tasks = 100
+	const expectedResults = 50
+
+	var inputs []*A
+	for i := 0; i < tasks; i++ {
+		inputs = append(inputs, &A{Value: i})
+	}
+
+	workerFunc := func(a *A) *B {
+		time.Sleep(time.Duration(rand.Intn(150)+100) * time.Millisecond)
+
+		if a.Value%2 == 0 {
+			return &B{Value: a.Value}
+		}
+
+		return nil
+	}
+
+	results := ConcurrentCollect(workerFunc, inputs)
+
+	if len(results) != expectedResults {
+		t.Errorf("Expected 50 results, got %d", len(results))
+	}
+
+	seen := map[int]bool{}
+	for _, result := range results {
+		if seen[result.Value] {
+			t.Errorf("Duplicate result: %d", result.Value)
+		}
+		seen[result.Value] = true
+
+		if result.Value%2 != 0 {
+			t.Errorf("Found odd value: %d", result.Value)
+		}
+	}
+}
+
+func TestConcurrentDoWithLessThanOne(t *testing.T) {
+	const tasks = 4
+
+	var wg sync.WaitGroup
+	wg.Add(tasks)
+
+	now := time.Now()
+
+	doIt := func(i int) int {
+		defer wg.Done()
+		time.Sleep(250 * time.Millisecond)
+		return i
+	}
+
+	results := ConcurrentDoWith(-1, doIt, []int{1, 2, 3, 4})
+
+	select {
+	case <-waitChannelFor(&wg):
+	case <-time.After(2 * time.Second):
+		t.Errorf("Failed to Complete Run for %d jobs in 2s\n", tasks)
+	}
+
+	if time.Since(now) > 1500*time.Millisecond {
+		t.Errorf("Expected to complete in 1.5s, took %dms", time.Since(now).Milliseconds())
+	}
+	for i := 1; i <= tasks; i++ {
+		if results[i-1] != i {
+			t.Errorf("Expected %d, got %d", i, results[i-1])
+		}
+	}
+}
+
+func TestConcurrentRunWithLessThanOne(t *testing.T) {
+	const tasks = 4
+
+	var wg sync.WaitGroup
+	wg.Add(tasks)
+
+	now := time.Now()
+
+	doIt := func(i int) {
+		defer wg.Done()
+		time.Sleep(250 * time.Millisecond)
+	}
+
+	ConcurrentRunWith(-1, doIt, []int{1, 2, 3, 4})
+
+	select {
+	case <-waitChannelFor(&wg):
+	case <-time.After(2 * time.Second):
+		t.Errorf("Failed to Complete Run for %d jobs in 2s\n", tasks)
+	}
+
+	if time.Since(now) > 1500*time.Millisecond {
+		t.Errorf("Expected to complete in 1.5s, took %dms", time.Since(now).Milliseconds())
+	}
+}

+ 50 - 45
go.mod

@@ -6,12 +6,12 @@ replace (
 )
 
 require (
-	cloud.google.com/go/bigquery v1.59.1
-	cloud.google.com/go/compute/metadata v0.2.3
-	cloud.google.com/go/storage v1.37.0
+	cloud.google.com/go/bigquery v1.61.0
+	cloud.google.com/go/compute/metadata v0.3.0
+	cloud.google.com/go/storage v1.42.0
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0
 	github.com/Azure/go-autorest/autorest v0.11.28
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.11
@@ -28,6 +28,7 @@ require (
 	github.com/aws/smithy-go v1.20.1
 	github.com/davecgh/go-spew v1.1.1
 	github.com/getsentry/sentry-go v0.25.0
+	github.com/google/martian v2.1.0+incompatible
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-hclog v1.6.2
 	github.com/hashicorp/go-plugin v1.6.0
@@ -36,7 +37,7 @@ require (
 	github.com/kubecost/events v0.0.6
 	github.com/lib/pq v1.2.0
 	github.com/microcosm-cc/bluemonday v1.0.23
-	github.com/minio/minio-go/v7 v7.0.50
+	github.com/minio/minio-go/v7 v7.0.72
 	github.com/opencost/opencost/core v0.0.0-00010101000000-000000000000
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
@@ -48,27 +49,33 @@ require (
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/viper v1.8.1
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	go.etcd.io/bbolt v1.3.5
-	go.opentelemetry.io/otel v1.22.0
+	go.opentelemetry.io/otel v1.24.0
 	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
-	golang.org/x/oauth2 v0.16.0
-	golang.org/x/sync v0.6.0
-	golang.org/x/text v0.14.0
-	google.golang.org/api v0.162.0
-	google.golang.org/protobuf v1.33.0
+	golang.org/x/oauth2 v0.21.0
+	golang.org/x/sync v0.7.0
+	golang.org/x/text v0.16.0
+	google.golang.org/api v0.183.0
+	google.golang.org/protobuf v1.34.1
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.30.0
-	k8s.io/apimachinery v0.30.0
-	k8s.io/client-go v0.30.0
+	k8s.io/api v0.30.2
+	k8s.io/apimachinery v0.30.2
+	k8s.io/client-go v0.30.2
 	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
-	cloud.google.com/go v0.112.0 // indirect
-	cloud.google.com/go/compute v1.24.0 // indirect
-	cloud.google.com/go/iam v1.1.6 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+	github.com/gofrs/flock v0.8.1 // indirect
+	github.com/sony/gobreaker v0.5.0 // indirect
+)
+
+require (
+	cloud.google.com/go v0.114.0 // indirect
+	cloud.google.com/go/auth v0.5.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+	cloud.google.com/go/iam v1.1.8 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
 	github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
@@ -77,8 +84,8 @@ require (
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
-	github.com/apache/arrow/go/v14 v14.0.2 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	github.com/apache/arrow/go/v15 v15.0.2 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.1 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.1 // indirect
@@ -109,7 +116,7 @@ require (
 	github.com/gofrs/uuid v4.2.0+incompatible // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/flatbuffers v23.5.26+incompatible // indirect
@@ -118,7 +125,7 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
 	github.com/gorilla/css v1.0.0 // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -129,8 +136,8 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/klauspost/compress v1.17.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -138,7 +145,6 @@ require (
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -147,14 +153,14 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/oklog/run v1.1.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
+	github.com/oracle/oci-go-sdk/v65 v65.71.0
 	github.com/pelletier/go-toml v1.9.3 // indirect
 	github.com/pierrec/lz4/v4 v4.1.18 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/procfs v0.11.1 // indirect
-	github.com/rs/xid v1.4.0 // indirect
+	github.com/rs/xid v1.5.0 // indirect
 	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.3.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -162,24 +168,23 @@ require (
 	github.com/subosito/gotenv v1.2.0 // indirect
 	github.com/zeebo/xxh3 v1.0.2 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
-	go.opentelemetry.io/otel/metric v1.22.0 // indirect
-	go.opentelemetry.io/otel/trace v1.22.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+	go.opentelemetry.io/otel/metric v1.24.0 // indirect
+	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/mod v0.15.0 // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/sys v0.18.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/mod v0.17.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/tools v0.18.0 // indirect
+	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
-	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
-	google.golang.org/grpc v1.62.0 // indirect
+	google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/grpc v1.64.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -192,4 +197,4 @@ require (
 
 go 1.22.0
 
-toolchain go1.22.2
+toolchain go1.22.4

+ 103 - 113
go.sum

@@ -18,29 +18,31 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY=
+cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
+cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/bigquery v1.59.1 h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4=
-cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc=
-cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
-cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/datacatalog v1.19.3 h1:A0vKYCQdxQuV4Pi0LL9p39Vwvg4jH5yYveMv50gU5Tw=
-cloud.google.com/go/datacatalog v1.19.3/go.mod h1:ra8V3UAsciBpJKQ+z9Whkxzxv7jmQg1hfODr3N3YPJ4=
+cloud.google.com/go/bigquery v1.61.0 h1:w2Goy9n6gh91LVi6B2Sc+HpBl8WbWhIyzdvVvrAuEIw=
+cloud.google.com/go/bigquery v1.61.0/go.mod h1:PjZUje0IocbuTOdq4DBOJLNYB0WF3pAKBHzAYyxCwFo=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/datacatalog v1.20.1 h1:czcba5mxwRM5V//jSadyig0y+8aOHmN7gUl9GbHu59E=
+cloud.google.com/go/datacatalog v1.20.1/go.mod h1:Jzc2CoHudhuZhpv78UBAjMEg3w7I9jHA11SbRshWUjk=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
-cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=
-cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s=
+cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
+cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
+cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
+cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -50,17 +52,17 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4=
-cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k=
+cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU=
+cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY=
@@ -90,15 +92,15 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/aliyun/alibaba-cloud-sdk-go v1.62.3 h1:kWY5c/9JOhSYBogi3mtNG7G9TxXS0CddtQ6RKOI3mvY=
 github.com/aliyun/alibaba-cloud-sdk-go v1.62.3/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw=
-github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY=
+github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE=
+github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -164,8 +166,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -188,8 +188,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
@@ -223,6 +221,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
 github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -232,8 +232,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
 github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
-github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -294,8 +294,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -319,8 +319,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
 github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
@@ -385,12 +385,11 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
+github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
-github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -428,10 +427,8 @@ github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgS
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.50 h1:4IL4V8m/kI90ZL6GupCARZVrBv8/XrcKcJhaJ3iz68k=
-github.com/minio/minio-go/v7 v7.0.50/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/minio-go/v7 v7.0.72 h1:ZSbxs2BfJensLyHdVOgHv+pfmvxYraaUy07ER04dWnA=
+github.com/minio/minio-go/v7 v7.0.72/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -465,6 +462,8 @@ github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
 github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
+github.com/oracle/oci-go-sdk/v65 v65.71.0 h1:eEnFD/CzcoqdAA0xu+EmK32kJL3jfV0oLYNWVzoKNyo=
+github.com/oracle/oci-go-sdk/v65 v65.71.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
@@ -494,13 +493,13 @@ github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwa
 github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
 github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
-github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
 github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -511,10 +510,10 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
+github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
@@ -530,6 +529,8 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -540,8 +541,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -554,7 +556,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
 github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
 github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
@@ -573,18 +574,18 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
-go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
-go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
-go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
-go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
-go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
@@ -602,8 +603,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -641,9 +642,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
-golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -682,9 +682,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -697,8 +696,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -710,9 +709,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -761,20 +759,17 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -783,10 +778,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -844,9 +837,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -877,8 +869,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
 google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
 google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
 google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.162.0 h1:Vhs54HkaEpkMBdgGdOT2P6F0csGG/vxDS0hWHJzmmps=
-google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
+google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
+google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -886,8 +878,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -929,12 +919,12 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
 google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
-google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
-google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
-google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
+google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE=
+google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -955,8 +945,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
-google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -969,8 +959,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -999,12 +989,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA=
-k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE=
-k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA=
-k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ=
-k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY=
+k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
+k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
+k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
+k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=

+ 10 - 1
justfile

@@ -6,11 +6,20 @@ commit := `git rev-parse --short HEAD`
 default:
     just --list
 
+# run core unit tests
+test-core: 
+    {{commonenv}} cd ./core && go test ./... -coverprofile=coverage.out
+    {{commonenv}} cd ./core && go vet ./...
+
 # Run unit tests
-test:
+test: test-core
     {{commonenv}} go test ./... -coverprofile=coverage.out
     {{commonenv}} go vet ./...
 
+# Run unit tests and integration tests
+test-integration:
+    {{commonenv}} INTEGRATION=true go test ./... -coverprofile=coverage.out
+
 # Compile a local binary
 build-local:
     cd ./cmd/costmodel && \

+ 54 - 45
pkg/cloud/alibaba/provider.go

@@ -66,9 +66,8 @@ var (
 )
 
 // Variable to keep track of instance families that fail in DescribePrice API due improper defaulting of systemDisk if the information is not available
-var alibabaDefaultToCloudEssd = []string{"g6e", "r6e", "r7", "g7", "g7a", "r7a"}
+var alibabaDefaultToCloudEssd = []string{"g6e", "r6e"}
 
-// Why predefined and dependency on code? Can be converted to API call - https://www.alibabacloud.com/help/en/elastic-compute-service/latest/regions-describeregions
 var alibabaRegions = []string{
 	"cn-qingdao",
 	"cn-beijing",
@@ -78,48 +77,28 @@ var alibabaRegions = []string{
 	"cn-hangzhou",
 	"cn-shanghai",
 	"cn-nanjing",
-	"cn-fuzhou",
 	"cn-shenzhen",
+	"cn-heyuan",
 	"cn-guangzhou",
+	"cn-fuzhou",
+	"cn-wuhan-lr",
 	"cn-chengdu",
 	"cn-hongkong",
+	"ap-northeast-1",
+	"ap-northeast-2",
 	"ap-southeast-1",
 	"ap-southeast-2",
 	"ap-southeast-3",
-	"ap-southeast-5",
 	"ap-southeast-6",
-	"ap-southeast-7",
+	"ap-southeast-5",
 	"ap-south-1",
-	"ap-northeast-1",
-	"ap-northeast-2",
-	"us-west-1",
+	"ap-southeast-7",
 	"us-east-1",
-	"eu-central-1",
+	"us-west-1",
+	"eu-west-1",
 	"me-east-1",
-}
-
-// To-Do: Convert to API call - https://www.alibabacloud.com/help/en/elastic-compute-service/latest/describeinstancetypefamilies
-// Also first pass only completely tested pricing API for General pupose instances families & memory optimized instance families
-var alibabaInstanceFamilies = []string{
-	"g7",
-	"g7a",
-	"g6e",
-	"g6",
-	"g5",
-	"sn2",
-	"sn2ne",
-	"r7",
-	"r7a",
-	"r6e",
-	"r6a",
-	"r6",
-	"r5",
-	"se1",
-	"se1ne",
-	"re6",
-	"re6p",
-	"re4",
-	"se1",
+	"me-central-1",
+	"eu-central-1",
 }
 
 // AlibabaInfo contains configuration for Alibaba's CUR integration
@@ -429,7 +408,6 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	var lookupKey string
 	alibaba.clients = make(map[string]*sdk.Client)
 	alibaba.Pricing = make(map[string]*AlibabaPricing)
-
 	for _, node := range nodeList {
 		pricingObj := &AlibabaPricing{}
 		slimK8sNode := generateSlimK8sNodeFromV1Node(node)
@@ -533,8 +511,17 @@ func (alibaba *Alibaba) NodePricing(key models.Key) (*models.Node, models.Pricin
 
 	pricing, ok := alibaba.Pricing[keyFeature]
 	if !ok {
-		log.Errorf("Node pricing information not found for node with feature: %s", keyFeature)
-		return nil, meta, fmt.Errorf("Node pricing information not found for node with feature: %s letting it use default values", keyFeature)
+		keys := make([]string, 0, len(alibaba.Pricing))
+		for k := range alibaba.Pricing {
+			keys = append(keys, k)
+		}
+		kf := key.(*AlibabaNodeKey)
+		// Try to look up pricing with no disk attached
+		pricing, ok = alibaba.Pricing[kf.FeaturesWithOtherDisk("")]
+		if !ok {
+			log.Errorf("Node pricing information not found for node with feature: %s . Existing keys are: %+v", keyFeature, keys)
+			return nil, meta, fmt.Errorf("Node pricing information not found for node with feature: %s letting it use default values", keyFeature)
+		}
 	}
 
 	log.Debugf("returning the node price for the node with feature: %s", keyFeature)
@@ -553,7 +540,7 @@ func (alibaba *Alibaba) PVPricing(pvk models.PVKey) (*models.PV, error) {
 	pricing, ok := alibaba.Pricing[keyFeature]
 
 	if !ok {
-		log.Errorf("Persistent Volume pricing not found for PV with feature: %s", keyFeature)
+		log.Debugf("Persistent Volume pricing not found for PV with feature: %s", keyFeature)
 		return nil, fmt.Errorf("Persistent Volume pricing not found for PV with feature: %s letting it use default values", keyFeature)
 	}
 
@@ -844,6 +831,12 @@ func (alibabaNodeKey *AlibabaNodeKey) Features() string {
 	return strings.Join(keyLookup, "::")
 }
 
+func (alibabaNodeKey *AlibabaNodeKey) FeaturesWithOtherDisk(overrideDiskCategory string) string {
+	keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{alibabaNodeKey.RegionID, alibabaNodeKey.InstanceType, alibabaNodeKey.OSType,
+		alibabaNodeKey.OptimizedKeyword, overrideDiskCategory, alibabaNodeKey.SystemDiskSizeInGiB, alibabaNodeKey.SystemDiskPerformanceLevel})
+	return strings.Join(keyLookup, "::")
+}
+
 func (alibabaNodeKey *AlibabaNodeKey) GPUType() string {
 	return ""
 }
@@ -986,9 +979,9 @@ func createDescribePriceACSRequest(i interface{}) (*requests.CommonRequest, erro
 				request.QueryParams["SystemDisk.PerformanceLevel"] = node.SystemDisk.PerformanceLevel
 			}
 		} else {
-			// When System Disk information is not available for instance family g6e, r7 and r6e the defaults in
-			// DescribePrice dont default rightly to cloud_essd for these instances.
-			if slices.Contains(alibabaDefaultToCloudEssd, node.InstanceTypeFamily) {
+			// When the system disk information is not available, and the instance family is g6e or r6e,
+			// or the instance generation is 6 or above, the default disk category in DescribePrice should be cloud_essd.
+			if slices.Contains(alibabaDefaultToCloudEssd, node.InstanceTypeFamily) || getInstanceFamilyGenerationFromType(node.InstanceType) > 6 {
 				request.QueryParams["SystemDisk.Category"] = ALIBABA_DISK_CLOUD_ESSD_CATEGORY
 			}
 		}
@@ -1095,7 +1088,7 @@ func processDescribePriceAndCreateAlibabaPricing(client *sdk.Client, i interface
 		resp, err := client.ProcessCommonRequestWithSigner(req, signer)
 		pricing.NodeAttributes = NewAlibabaNodeAttributes(node)
 		if err != nil || resp.GetHttpStatus() != 200 {
-			// Can be defaulted to some value here?
+			// Try again but default the disk to something else
 			return nil, fmt.Errorf("unable to fetch information for node with InstanceType: %v", node.InstanceType)
 		} else {
 			// This is where population of Pricing happens
@@ -1151,13 +1144,29 @@ func getInstanceFamilyFromType(instanceType string) string {
 		log.Warnf("unable to find the family of the instance type %s, returning its family type unknown", instanceType)
 		return ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE
 	}
-	if !slices.Contains(alibabaInstanceFamilies, splitinstanceType[1]) {
-		log.Warnf("currently the instance family type %s is not valid or not tested completely for pricing API", instanceType)
-		return ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE
-	}
 	return splitinstanceType[1]
 }
 
+// This function is used to obtain the generation of the instance family from the InstanceType,
+// because when the generation is higher than or equal to 7, the instance disk type will not support cloud_efficiency.
+// In such cases, when calling the DescribePrice interface, the system disk type will default to cloud_essd.
+func getInstanceFamilyGenerationFromType(instanceType string) int {
+	// FamilyName format: g7ne or g7 or r7 or r6e,
+	familyName := getInstanceFamilyFromType(instanceType)
+	re := regexp.MustCompile(`(\d+)`)
+	match := re.FindString(familyName)
+	if match != "" {
+		generation, err := strconv.Atoi(match)
+		if err != nil {
+			log.Errorf("unable to convert the generation of the instance type %s to integer", instanceType)
+		} else {
+			return generation
+		}
+	}
+	log.Warnf("unable to find the generation of the instance type %s,", instanceType)
+	return -1
+}
+
 // getInstanceIDFromProviderID returns the instance ID associated with the Node. A *v1.Node providerID in Alibaba cloud
 // is of <REGION-ID>.<INSTANCE-ID>. This function returns the Instance ID for the given ProviderID. if its unable to interpret
 // it defaults to empty string.

+ 143 - 5
pkg/cloud/alibaba/provider_test.go

@@ -113,6 +113,20 @@ func TestProcessDescribePriceAndCreateAlibabaPricing(t *testing.T) {
 			},
 			expectedError: nil,
 		},
+		{
+			name: "test General Purpose Type g8a instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g8a.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-01c",
+				InstanceTypeFamily: "g8a",
+			},
+			expectedError: nil,
+		},
 		{
 			name: "test Enhanced General Purpose Type g6e instance family",
 			teststruct: &SlimK8sNode{
@@ -409,6 +423,30 @@ func TestProcessDescribePriceAndCreateAlibabaPricing(t *testing.T) {
 			},
 			expectedError: nil,
 		},
+		{
+			name: "test incorrect disk type",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g6.xlarge",
+				RegionID:           "ap-northeast-1",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-15",
+				InstanceTypeFamily: "se1",
+				SystemDisk: &SlimK8sDisk{
+					DiskType:         "data",
+					RegionID:         "ap-northeast-1",
+					PriceUnit:        "Hour",
+					SizeInGiB:        "40",
+					DiskCategory:     "cloud_essd",
+					PerformanceLevel: "PL1",
+					ProviderID:       "d-Ali-cloud-XXX-04",
+					StorageClass:     "temp",
+				},
+			},
+			expectedError: nil,
+		},
 	}
 	custom := &models.CustomPricing{}
 	for _, c := range cases {
@@ -443,11 +481,6 @@ func TestGetInstanceFamilyFromType(t *testing.T) {
 			instanceType:           "random.value",
 			expectedInstanceFamily: ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE,
 		},
-		{
-			name:                   "test if random instance family gives you ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE value ",
-			instanceType:           "ecs.g7e.2xlarge",
-			expectedInstanceFamily: ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE,
-		},
 	}
 
 	for _, c := range cases {
@@ -838,3 +871,108 @@ func TestDeterminePVRegion(t *testing.T) {
 	}
 
 }
+
+func TestGetInstanceFamilyGenerationFromType(t *testing.T) {
+	cases := []struct {
+		name                             string
+		instanceType                     string
+		expectedInstanceFamilyGeneration int
+	}{
+		{
+			name:                             "test if ecs.[instance-family].[different-type] work",
+			instanceType:                     "ecs.sn2ne.2xlarge",
+			expectedInstanceFamilyGeneration: 2,
+		},
+		{
+			name:                             "test if ecs.[instance-family].[different-type] work",
+			instanceType:                     "ecs.g7.large",
+			expectedInstanceFamilyGeneration: 7,
+		},
+		{
+			name:                             "test if random word gives you ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE value ",
+			instanceType:                     "random.value",
+			expectedInstanceFamilyGeneration: -1,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnValue := getInstanceFamilyGenerationFromType(c.instanceType)
+			if returnValue != c.expectedInstanceFamilyGeneration {
+				t.Fatalf("Case name %s: expected instance family generation of type %d but got %d", c.name, c.expectedInstanceFamilyGeneration, returnValue)
+			}
+		})
+	}
+}
+
+func TestCreateDescribeNodePriceACSRequest(t *testing.T) {
+
+	cases := []struct {
+		name                 string
+		testStruct           interface{}
+		expectedError        error
+		expectedDiskCategory string
+	}{
+		{
+			// Test case for instance type ecs.g6.large
+			name: "test request parma when instance type is ecs.g6.large",
+			testStruct: &SlimK8sNode{
+				InstanceType:       "ecs.g6.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "Ali-XXX-node-01",
+				InstanceTypeFamily: "g6",
+			},
+			expectedError:        nil,
+			expectedDiskCategory: "",
+		},
+		{
+			// Test case for instance type ecs.g7.large
+			name: "test request parma when instance type is ecs.g7.large",
+			testStruct: &SlimK8sNode{
+				InstanceType:       "ecs.g7.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "Ali-XXX-node-02",
+				InstanceTypeFamily: "g7",
+			},
+			expectedError:        nil,
+			expectedDiskCategory: ALIBABA_DISK_CLOUD_ESSD_CATEGORY,
+		},
+		{
+			// Test case for instance type ecs.g7.large, this instance type is in 'alibabaDefaultToCloudEssd'
+			name: "test request parma when instance type is ecs.g6e.large",
+			testStruct: &SlimK8sNode{
+				InstanceType:       "ecs.g6e.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "Ali-XXX-node-03",
+				InstanceTypeFamily: "g6e",
+			},
+			expectedError:        nil,
+			expectedDiskCategory: ALIBABA_DISK_CLOUD_ESSD_CATEGORY,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			req, err := createDescribePriceACSRequest(c.testStruct)
+			t.Logf("Request Params SystemDisk.Category: %v", req.QueryParams["SystemDisk.Category"])
+			if err != nil && c.expectedError != nil {
+				t.Fatalf("Case name %s: Error converting to Alibaba cloud request", c.name)
+			}
+			if c.expectedDiskCategory != req.QueryParams["SystemDisk.Category"] {
+				t.Fatalf("Case name %s: Disk Category is not set correctly", c.name)
+			}
+		})
+	}
+}

+ 31 - 9
pkg/cloud/aws/athenaintegration.go

@@ -14,6 +14,8 @@ import (
 )
 
 const LabelColumnPrefix = "resource_tags_user_"
+const AWSLabelColumnPrefix = "resource_tags_aws_"
+const AthenaResourceTagPrefix = "resource_tags_"
 
 // athenaDateLayout is the default AWS date format
 const AthenaDateLayout = "2006-01-02 15:04:05.000"
@@ -52,6 +54,7 @@ type AthenaQueryIndexes struct {
 	Query                  string
 	ColumnIndexes          map[string]int
 	TagColumns             []string
+	AWSTagColumns          []string
 	ListCostColumn         string
 	NetCostColumn          string
 	AmortizedNetCostColumn string
@@ -80,6 +83,8 @@ func (ai *AthenaIntegration) GetCloudCost(start, end time.Time) (*opencost.Cloud
 		"line_item_usage_account_id",
 		"line_item_product_code",
 		"line_item_usage_type",
+		"product_region_code",
+		"line_item_availability_zone",
 	}
 
 	// Create query indices
@@ -98,6 +103,10 @@ func (ai *AthenaIntegration) GetCloudCost(start, end time.Time) (*opencost.Cloud
 			groupByColumns = append(groupByColumns, quotedTag)
 			aqi.TagColumns = append(aqi.TagColumns, quotedTag)
 		}
+		if strings.HasPrefix(column, AWSLabelColumnPrefix) {
+			groupByColumns = append(groupByColumns, column)
+			aqi.AWSTagColumns = append(aqi.AWSTagColumns, column)
+		}
 	}
 	var selectColumns []string
 
@@ -333,7 +342,6 @@ func (ai *AthenaIntegration) RowToCloudCost(row types.Row, aqi AthenaQueryIndexe
 	// Iterate through the slice of tag columns, assigning
 	// values to the column names, minus the tag prefix.
 	labels := opencost.CloudCostLabels{}
-	labelValues := []string{}
 	for _, tagColumnName := range aqi.TagColumns {
 		// remove quotes
 		labelName := strings.TrimPrefix(tagColumnName, `"`)
@@ -343,7 +351,15 @@ func (ai *AthenaIntegration) RowToCloudCost(row types.Row, aqi AthenaQueryIndexe
 		value := GetAthenaRowValue(row, aqi.ColumnIndexes, tagColumnName)
 		if value != "" {
 			labels[labelName] = value
-			labelValues = append(labelValues, value)
+		}
+	}
+
+	for _, awsColumnName := range aqi.AWSTagColumns {
+		// partially remove prefix leaving "aws_"
+		labelName := strings.TrimPrefix(awsColumnName, AthenaResourceTagPrefix)
+		value := GetAthenaRowValue(row, aqi.ColumnIndexes, awsColumnName)
+		if value != "" {
+			labels[labelName] = value
 		}
 	}
 
@@ -353,6 +369,8 @@ func (ai *AthenaIntegration) RowToCloudCost(row types.Row, aqi AthenaQueryIndexe
 	providerID := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_resource_id")
 	productCode := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_product_code")
 	usageType := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_usage_type")
+	regionCode := GetAthenaRowValue(row, aqi.ColumnIndexes, "product_region_code")
+	availabilityZone := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_availability_zone")
 	isK8s, _ := strconv.ParseBool(GetAthenaRowValue(row, aqi.ColumnIndexes, aqi.IsK8sColumn))
 	k8sPct := 0.0
 	if isK8s {
@@ -396,13 +414,17 @@ func (ai *AthenaIntegration) RowToCloudCost(row types.Row, aqi AthenaQueryIndexe
 	}
 
 	properties := opencost.CloudCostProperties{
-		ProviderID:      providerID,
-		Provider:        opencost.AWSProvider,
-		AccountID:       accountID,
-		InvoiceEntityID: invoiceEntityID,
-		Service:         productCode,
-		Category:        category,
-		Labels:          labels,
+		ProviderID:        providerID,
+		Provider:          opencost.AWSProvider,
+		AccountID:         accountID,
+		AccountName:       accountID,
+		InvoiceEntityID:   invoiceEntityID,
+		InvoiceEntityName: invoiceEntityID,
+		RegionID:          regionCode,
+		AvailabilityZone:  availabilityZone,
+		Service:           productCode,
+		Category:          category,
+		Labels:            labels,
 	}
 
 	start, err := time.Parse(AthenaDateLayout, startStr)

+ 4 - 0
pkg/cloud/aws/athenaquerier.go

@@ -208,6 +208,10 @@ func SelectAWSCategory(providerID, usageType, service string) string {
 	// The node and volume conditions are mutually exclusive.
 	// Provider ID has prefix "i-"
 	if strings.HasPrefix(providerID, "i-") {
+		// GuardDuty has a ProviderID prefix of "i-", but should not be categorized as compute
+		if strings.ToUpper(service) == "AMAZONGUARDDUTY" {
+			return opencost.OtherCategory
+		}
 		return opencost.ComputeCategory
 	}
 	// Provider ID has prefix "vol-"

+ 2 - 3
pkg/cloud/aws/authorizer.go

@@ -15,7 +15,7 @@ import (
 const AccessKeyAuthorizerType = "AWSAccessKey"
 const ServiceAccountAuthorizerType = "AWSServiceAccount"
 const AssumeRoleAuthorizerType = "AWSAssumeRole"
-const WebIdentityAuthorizerType = "WebIdentity"
+const WebIdentityAuthorizerType = "AWSWebIdentity"
 
 // Authorizer implementations provide aws.Config for AWS SDK calls
 type Authorizer interface {
@@ -269,7 +269,7 @@ func (wea *WebIdentity) CreateAWSConfig(region string) (aws.Config, error) {
 }
 
 func (wea *WebIdentity) MarshalJSON() ([]byte, error) {
-	fmap := make(map[string]any, 1)
+	fmap := make(map[string]any, 4)
 	fmap[cloud.AuthorizerTypeProperty] = WebIdentityAuthorizerType
 	fmap["roleARN"] = wea.RoleARN
 	fmap["identityProvider"] = wea.IdentityProvider
@@ -314,7 +314,6 @@ func (wea *WebIdentity) UnmarshalJSON(b []byte) error {
 	switch idp {
 	case "Google":
 		tokenRetriever = &GoogleIDTokenRetriever{}
-
 	}
 
 	err = json.Unmarshal(trb, &tokenRetriever)

+ 13 - 39
pkg/cloud/aws/provider.go

@@ -246,6 +246,7 @@ type AWSProduct struct {
 // AWSProductAttributes represents metadata about the product used to map to a node.
 type AWSProductAttributes struct {
 	Location        string `json:"location"`
+	RegionCode      string `json:"regionCode"`
 	InstanceType    string `json:"instanceType"`
 	Memory          string `json:"memory"`
 	Storage         string `json:"storage"`
@@ -256,6 +257,7 @@ type AWSProductAttributes struct {
 	InstanceFamily  string `json:"instanceFamily"`
 	CapacityStatus  string `json:"capacitystatus"`
 	GPU             string `json:"gpu"` // GPU represents the number of GPU on the instance
+	MarketOption    string `json:"marketOption"`
 }
 
 // AWSPricingTerms are how you pay for the node: OnDemand, Reserved, or (TODO) Spot
@@ -344,36 +346,6 @@ var volTypes = map[string]string{
 	"st1":                    "EBS:VolumeUsage.st1",
 }
 
-// locationToRegion maps AWS region names (As they come from Billing)
-// to actual region identifiers
-var locationToRegion = map[string]string{
-	"US East (Ohio)":            "us-east-2",
-	"US East (N. Virginia)":     "us-east-1",
-	"US West (N. California)":   "us-west-1",
-	"US West (Oregon)":          "us-west-2",
-	"Asia Pacific (Hong Kong)":  "ap-east-1",
-	"Asia Pacific (Mumbai)":     "ap-south-1",
-	"Asia Pacific (Osaka)":      "ap-northeast-3",
-	"Asia Pacific (Seoul)":      "ap-northeast-2",
-	"Asia Pacific (Singapore)":  "ap-southeast-1",
-	"Asia Pacific (Sydney)":     "ap-southeast-2",
-	"Asia Pacific (Tokyo)":      "ap-northeast-1",
-	"Asia Pacific (Jakarta)":    "ap-southeast-3",
-	"Canada (Central)":          "ca-central-1",
-	"China (Beijing)":           "cn-north-1",
-	"China (Ningxia)":           "cn-northwest-1",
-	"EU (Frankfurt)":            "eu-central-1",
-	"EU (Ireland)":              "eu-west-1",
-	"EU (London)":               "eu-west-2",
-	"EU (Paris)":                "eu-west-3",
-	"EU (Stockholm)":            "eu-north-1",
-	"EU (Milan)":                "eu-south-1",
-	"South America (Sao Paulo)": "sa-east-1",
-	"Africa (Cape Town)":        "af-south-1",
-	"AWS GovCloud (US-East)":    "us-gov-east-1",
-	"AWS GovCloud (US-West)":    "us-gov-west-1",
-}
-
 var loadedAWSSecret bool = false
 var awsSecret *AWSAccessKey = nil
 
@@ -381,11 +353,10 @@ func (aws *AWS) GetLocalStorageQuery(window, offset time.Duration, rate bool, us
 	return ""
 }
 
-// KubeAttrConversion maps the k8s labels for region to an aws region
-func (aws *AWS) KubeAttrConversion(location, instanceType, operatingSystem string) string {
+// KubeAttrConversion maps the k8s labels for region to an AWS key
+func (aws *AWS) KubeAttrConversion(region, instanceType, operatingSystem string) string {
 	operatingSystem = strings.ToLower(operatingSystem)
 
-	region := locationToRegion[location]
 	return region + "," + instanceType + "," + operatingSystem
 }
 
@@ -1027,8 +998,9 @@ func (aws *AWS) populatePricing(resp *http.Response, inputkeys map[string]bool)
 
 				if product.Attributes.PreInstalledSw == "NA" &&
 					(strings.HasPrefix(product.Attributes.UsageType, "BoxUsage") || strings.Contains(product.Attributes.UsageType, "-BoxUsage")) &&
-					product.Attributes.CapacityStatus == "Used" {
-					key := aws.KubeAttrConversion(product.Attributes.Location, product.Attributes.InstanceType, product.Attributes.OperatingSystem)
+					product.Attributes.CapacityStatus == "Used" &&
+					product.Attributes.MarketOption == "OnDemand" {
+					key := aws.KubeAttrConversion(product.Attributes.RegionCode, product.Attributes.InstanceType, product.Attributes.OperatingSystem)
 					spotKey := key + ",preemptible"
 					if inputkeys[key] || inputkeys[spotKey] { // Just grab the sku even if spot, and change the price later.
 						productTerms := &AWSProductTerms{
@@ -1049,11 +1021,11 @@ func (aws *AWS) populatePricing(resp *http.Response, inputkeys map[string]bool)
 					// volTypes to keep lookups generic
 					usageTypeMatch := usageTypeRegx.FindStringSubmatch(product.Attributes.UsageType)
 					usageTypeNoRegion := usageTypeMatch[len(usageTypeMatch)-1]
-					key := locationToRegion[product.Attributes.Location] + "," + usageTypeNoRegion
+					key := product.Attributes.RegionCode + "," + usageTypeNoRegion
 					spotKey := key + ",preemptible"
 					pv := &models.PV{
 						Class:  volTypes[usageTypeNoRegion],
-						Region: locationToRegion[product.Attributes.Location],
+						Region: product.Attributes.RegionCode,
 					}
 					productTerms := &AWSProductTerms{
 						Sku: product.Sku,
@@ -1380,8 +1352,7 @@ func (aws *AWS) createNode(terms *AWSProductTerms, usageType string, k models.Ke
 	}
 	// Throw error if public price is not found
 	if !publicPricingFound {
-		log.Errorf("Could not fetch data for \"%s\"", k.ID())
-		return nil, meta, fmt.Errorf("Could not fetch data for \"%s\"", k.ID())
+		return nil, meta, fmt.Errorf("for node \"%s\", cannot find the following key in OnDemand pricing data \"%s\"", k.ID(), k.Features())
 	}
 
 	return &models.Node{
@@ -1411,6 +1382,9 @@ func (aws *AWS) NodePricing(k models.Key) (*models.Node, models.PricingMetadata,
 	meta := models.PricingMetadata{}
 
 	terms, ok := aws.Pricing[key]
+	if termsStr, err := json.Marshal(terms); err == nil {
+		log.Debugf("NodePricing: for key \"%s\" found the following OnDemand data: %s", key, string(termsStr))
+	}
 	if ok {
 		return aws.createNode(terms, usageType, k)
 	} else if _, ok := aws.ValidPricingKeys[key]; ok {

+ 259 - 9
pkg/cloud/aws/provider_test.go

@@ -2,9 +2,11 @@ package aws
 
 import (
 	"bytes"
+	"encoding/json"
 	"io"
 	"net/http"
 	"net/url"
+	"os"
 	"reflect"
 	"testing"
 
@@ -102,17 +104,73 @@ func Test_awsKey_getUsageType(t *testing.T) {
 	}
 }
 
+// Test_PricingData_Regression
+//
+// Objective: To test the pricing data download and validate the schema is still
+// as expected
+//
+// These tests may take a long time to complete. It is downloading AWS Pricing
+// data files (~500MB) for each region.
+func Test_PricingData_Regression(t *testing.T) {
+	if os.Getenv("INTEGRATION") == "" {
+		t.Skip("skipping integration tests, set environment variable INTEGRATION")
+	}
+
+	awsRegions := []string{"us-east-1", "eu-west-1"}
+
+	// Check pricing data produced for each region
+	for _, region := range awsRegions {
+		node := v1.Node{}
+		node.SetLabels(map[string]string{"topology.kubernetes.io/region": region})
+
+		awsTest := AWS{}
+		res, _, err := awsTest.getRegionPricing([]*v1.Node{&node})
+		if err != nil {
+			t.Errorf("Failed to download pricing data for region %s: %v", region, err)
+		}
+
+		// Unmarshal pricing data into AWSPricing
+		var pricingData AWSPricing
+		body, err := io.ReadAll(res.Body)
+		if err != nil {
+			t.Errorf("Failed to read pricing data for region %s: %v", region, err)
+		}
+		err = json.Unmarshal(body, &pricingData)
+		if err != nil {
+			t.Errorf("Failed to unmarshal pricing data for region %s: %v", region, err)
+		}
+
+		// ASSERTION. We only anticipate "OnDemand" or "CapacityBlock" in the
+		// pricing data.
+		//
+		// Failing this test does not necessarily mean we have regressed. Just
+		// that we need to revisit this code to ensure OnDemand pricing is still
+		// functioning as expected.
+		for _, product := range pricingData.Products {
+			if product.Attributes.MarketOption != "OnDemand" && product.Attributes.MarketOption != "CapacityBlock" && product.Attributes.MarketOption != "" {
+				t.Errorf("Invalid marketOption for product %s: %s", product.Sku, product.Attributes.MarketOption)
+			}
+		}
+	}
+}
+
 // Test_populate_pricing
 //
 // Objective: To test core pricing population logic for AWS
 //
-//	Case 0: US endpoints
-//	 Take a portion of json returned from ondemand terms in us endpoints
-//	 load the request into the http response and give it to the function
-//	 inspect the resulting aws object after the function returns and validate fields
-//	Case 1: Chinese endpoints
-//	 Same as above US test case, except using CN PV offer codes
-//	 Validate populated fields in AWS object
+// Case 0: US endpoints
+// Take a portion of json returned from ondemand terms in us endpoints load the
+// request into the http response and give it to the function inspect the
+// resulting aws object after the function returns and validate fields
+//
+// Case 1: Ensure marketOption=OnDemand
+// AWS introduced the field marketOption. We need to further filter for
+// marketOption=OnDemand to ensure we are not getting pricing from a line item
+// such as marketOption=CapacityBlock
+//
+// Case 2: Chinese endpoints
+// Same as above US test case, except using CN PV offer codes. Validate
+// populated fields in AWS object
 func Test_populate_pricing(t *testing.T) {
 	awsTest := AWS{
 		ValidPricingKeys: map[string]bool{},
@@ -230,7 +288,7 @@ func Test_populate_pricing(t *testing.T) {
 				  "servicename" : "Amazon Elastic Compute Cloud",
 				  "volumeApiName" : "gp3"
 				}
-			  }
+			}
 		},
 		"terms" : {
 			"OnDemand" : {
@@ -383,7 +441,199 @@ func Test_populate_pricing(t *testing.T) {
 		t.Fatalf("expected parsed pricing did not match actual parsed result (us-east-1)")
 	}
 
-	// Case 1
+	// Case 1 - Only accept `"marketoption":"OnDemand"`
+	inputkeysCase1 := map[string]bool{
+		"us-east-1,p4d.24xlarge,linux": true,
+	}
+	pricingCase1 := `
+	{
+		"formatVersion" : "v1.0",
+		"disclaimer" : "This pricing list is for informational purposes only. All prices are subject to the additional terms included in the pricing pages on http://aws.amazon.com. All Free Tier prices are also subject to the terms included at https://aws.amazon.com/free/",
+		"offerCode" : "AmazonEC2",
+		"version" : "20240528203522",
+		"publicationDate" : "2024-05-28T20:35:22Z",
+		"products" : {
+			"H7NGEAC6UEHNTKSJ" : {
+				"sku" : "H7NGEAC6UEHNTKSJ",
+				"productFamily" : "Compute Instance",
+				"attributes" : {
+					"servicecode" : "AmazonEC2",
+					"location" : "US East (N. Virginia)",
+					"locationType" : "AWS Region",
+					"instanceType" : "p4d.24xlarge",
+					"currentGeneration" : "Yes",
+					"instanceFamily" : "GPU instance",
+					"vcpu" : "96",
+					"physicalProcessor" : "Intel Xeon Platinum 8275L",
+					"clockSpeed" : "3 GHz",
+					"memory" : "1152 GiB",
+					"storage" : "8 x 1000 SSD",
+					"networkPerformance" : "400 Gigabit",
+					"processorArchitecture" : "64-bit",
+					"tenancy" : "Shared",
+					"operatingSystem" : "Linux",
+					"licenseModel" : "No License required",
+					"usagetype" : "BoxUsage:p4d.24xlarge",
+					"operation" : "RunInstances",
+					"availabilityzone" : "NA",
+					"capacitystatus" : "Used",
+					"classicnetworkingsupport" : "false",
+					"dedicatedEbsThroughput" : "19000 Mbps",
+					"ecu" : "345",
+					"enhancedNetworkingSupported" : "No",
+					"gpu" : "8",
+					"gpuMemory" : "NA",
+					"intelAvxAvailable" : "Yes",
+					"intelAvx2Available" : "Yes",
+					"intelTurboAvailable" : "Yes",
+					"marketoption" : "OnDemand",
+					"normalizationSizeFactor" : "192",
+					"preInstalledSw" : "NA",
+					"processorFeatures" : "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
+					"regionCode" : "us-east-1",
+					"servicename" : "Amazon Elastic Compute Cloud",
+					"vpcnetworkingsupport" : "true"
+				}
+			},
+			"YSXJGN78QTXNVGDQ" : {
+				"sku" : "YSXJGN78QTXNVGDQ",
+				"productFamily" : "Compute Instance",
+				"attributes" : {
+					"servicecode" : "AmazonEC2",
+					"location" : "US East (N. Virginia)",
+					"locationType" : "AWS Region",
+					"instanceType" : "p4d.24xlarge",
+					"currentGeneration" : "Yes",
+					"instanceFamily" : "GPU instance",
+					"vcpu" : "96",
+					"physicalProcessor" : "Intel Xeon Platinum 8275L",
+					"clockSpeed" : "3 GHz",
+					"memory" : "1152 GiB",
+					"storage" : "8 x 1000 SSD",
+					"networkPerformance" : "400 Gigabit",
+					"processorArchitecture" : "64-bit",
+					"tenancy" : "Shared",
+					"operatingSystem" : "Linux",
+					"licenseModel" : "No License required",
+					"usagetype" : "BoxUsage:p4d.24xlarge",
+					"operation" : "RunInstances:CB",
+					"availabilityzone" : "NA",
+					"capacitystatus" : "Used",
+					"classicnetworkingsupport" : "false",
+					"dedicatedEbsThroughput" : "19000 Mbps",
+					"ecu" : "345",
+					"enhancedNetworkingSupported" : "No",
+					"gpu" : "8",
+					"gpuMemory" : "NA",
+					"intelAvxAvailable" : "Yes",
+					"intelAvx2Available" : "Yes",
+					"intelTurboAvailable" : "Yes",
+					"marketoption" : "CapacityBlock",
+					"normalizationSizeFactor" : "192",
+					"preInstalledSw" : "NA",
+					"processorFeatures" : "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
+					"regionCode" : "us-east-1",
+					"servicename" : "Amazon Elastic Compute Cloud",
+					"vpcnetworkingsupport" : "true"
+				}
+			}
+		},
+		"terms" : {
+			"OnDemand" : {
+				"H7NGEAC6UEHNTKSJ" : {
+					"H7NGEAC6UEHNTKSJ.JRTCKXETXF" : {
+						"offerTermCode" : "JRTCKXETXF",
+						"sku" : "H7NGEAC6UEHNTKSJ",
+						"effectiveDate" : "2024-05-01T00:00:00Z",
+						"priceDimensions" : {
+							"H7NGEAC6UEHNTKSJ.JRTCKXETXF.6YS6EN2CT7" : {
+								"rateCode" : "H7NGEAC6UEHNTKSJ.JRTCKXETXF.6YS6EN2CT7",
+								"description" : "$32.7726 per On Demand Linux p4d.24xlarge Instance Hour",
+								"beginRange" : "0",
+								"endRange" : "Inf",
+								"unit" : "Hrs",
+								"pricePerUnit" : {
+									"USD" : "32.7726000000"
+								},
+								"appliesTo" : [ ]
+							}
+						},
+						"termAttributes" : { }
+					}
+				},
+				"YSXJGN78QTXNVGDQ" : {
+					"YSXJGN78QTXNVGDQ.JRTCKXETXF" : {
+						"offerTermCode" : "JRTCKXETXF",
+						"sku" : "YSXJGN78QTXNVGDQ",
+						"effectiveDate" : "2024-05-01T00:00:00Z",
+						"priceDimensions" : {
+							"YSXJGN78QTXNVGDQ.JRTCKXETXF.6YS6EN2CT7" : {
+							"rateCode" : "YSXJGN78QTXNVGDQ.JRTCKXETXF.6YS6EN2CT7",
+							"description" : "$0.00 per Capacity Block Linux p4d.24xlarge Instance Hour",
+							"beginRange" : "0",
+							"endRange" : "Inf",
+							"unit" : "Hrs",
+							"pricePerUnit" : {
+								"USD" : "0.0000000000"
+							},
+							"appliesTo" : [ ]
+						}
+					},
+					"termAttributes" : { }
+					}
+				},
+			}
+		},
+		"attributesList" : { }
+	}
+	`
+
+	testResponseCase1 := http.Response{
+		Body: io.NopCloser(bytes.NewBufferString(pricingCase1)),
+		Request: &http.Request{
+			URL: &url.URL{
+				Scheme: "https",
+				Host:   "test-aws-http-endpoint:443",
+			},
+		},
+	}
+
+	awsTest.populatePricing(&testResponseCase1, inputkeysCase1)
+
+	expectedProdTermsInstanceOndemandCase1 := &AWSProductTerms{
+		Sku:     "H7NGEAC6UEHNTKSJ",
+		Memory:  "1152 GiB",
+		Storage: "8 x 1000 SSD",
+		VCpu:    "96",
+		GPU:     "8",
+		OnDemand: &AWSOfferTerm{
+			Sku:           "H7NGEAC6UEHNTKSJ",
+			OfferTermCode: "JRTCKXETXF",
+			PriceDimensions: map[string]*AWSRateCode{
+				"H7NGEAC6UEHNTKSJ.JRTCKXETXF.6YS6EN2CT7": {
+					Unit: "Hrs",
+					PricePerUnit: AWSCurrencyCode{
+						USD: "32.7726000000",
+					},
+				},
+			},
+		},
+	}
+
+	expectedPricingCase1 := map[string]*AWSProductTerms{
+		"us-east-1,p4d.24xlarge,linux":             expectedProdTermsInstanceOndemandCase1,
+		"us-east-1,p4d.24xlarge,linux,preemptible": expectedProdTermsInstanceOndemandCase1,
+	}
+
+	if !reflect.DeepEqual(expectedPricingCase1, awsTest.Pricing) {
+		expectedJsonString, _ := json.MarshalIndent(expectedPricingCase1, "", "  ")
+		resultJsonString, _ := json.MarshalIndent(awsTest.Pricing, "", "  ")
+		t.Logf("Expected: %s", string(expectedJsonString))
+		t.Logf("Result: %s", string(resultJsonString))
+		t.Fatalf("expected parsed pricing did not match actual parsed result (us-east-1)")
+	}
+
+	// Case 2
 	awsCnString := `
 	{
 		"formatVersion" : "v1.0",

+ 178 - 76
pkg/cloud/aws/s3selectintegration.go

@@ -7,7 +7,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/core/pkg/opencost"
 )
@@ -15,13 +14,15 @@ import (
 const S3SelectDateLayout = "2006-01-02T15:04:05Z"
 
 // S3Object is aliased as "s" in queries
-const S3SelectAccountID = `s."bill/PayerAccountId"`
-
+const S3SelectBillPayerAccountID = `s."bill/PayerAccountId"`
+const S3SelectAccountID = `s."lineItem/UsageAccountId"`
 const S3SelectItemType = `s."lineItem/LineItemType"`
 const S3SelectStartDate = `s."lineItem/UsageStartDate"`
 const S3SelectProductCode = `s."lineItem/ProductCode"`
 const S3SelectResourceID = `s."lineItem/ResourceId"`
 const S3SelectUsageType = `s."lineItem/UsageType"`
+const S3SelectRegionCode = `s."product/regionCode"`
+const S3SelectAvailabilityZone = `s."lineItem/AvailabilityZone"`
 
 const S3SelectListCost = `s."lineItem/UnblendedCost"`
 const S3SelectNetCost = `s."lineItem/NetUnblendedCost"`
@@ -29,6 +30,12 @@ const S3SelectNetCost = `s."lineItem/NetUnblendedCost"`
 // These two may be used for Amortized<Net>Cost
 const S3SelectRICost = `s."reservation/EffectiveCost"`
 const S3SelectSPCost = `s."savingsPlan/SavingsPlanEffectiveCost"`
+const S3SelectNetRICost = `s."reservation/NetEffectiveCost"`
+const S3SelectNetSPCost = `s."savingsPlan/NetSavingsPlanEffectiveCost"`
+
+const S3SelectUserLabelPrefix = "resourceTags/user:"
+const S3SelectAWSLabelPrefix = "resourceTags/aws:"
+const S3SelectResourceTagsPrefix = "resourceTags/"
 
 type S3SelectIntegration struct {
 	S3SelectQuerier
@@ -44,15 +51,6 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 		opencost.NewWindow(&start, &end).String(),
 	)
 
-	// Set midnight yesterday as last point in time reconciliation data
-	// can be pulled from to ensure complete days of data
-	midnightYesterday := time.Now().In(
-		time.UTC,
-	).Truncate(time.Hour*24).AddDate(0, 0, -1)
-	if end.After(midnightYesterday) {
-		end = midnightYesterday
-	}
-
 	// ccsr to populate with cloudcosts.
 	ccsr, err := opencost.NewCloudCostSetRange(
 		start,
@@ -74,40 +72,71 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 		return nil, err
 	}
 	// Acquire headers
-	headers, err := s3si.GetHeaders(queryKeys, client)
+	headers, err := s3si.GetHeaders(queryKeys[0], client)
 	if err != nil {
 		return nil, err
 	}
-	// Exactly what it says on the tin. Though is there a set equivalent
-	// in Go? This seems like a good use case for that.
-	allColumns := map[string]bool{}
+
+	allColumns := map[string]struct{}{}
 	for _, header := range headers {
-		allColumns[header] = true
+		allColumns[header] = struct{}{}
 	}
 
 	formattedStart := start.Format("2006-01-02")
 	formattedEnd := end.Format("2006-01-02")
 	selectColumns := []string{
 		S3SelectStartDate,
+		S3SelectBillPayerAccountID,
 		S3SelectAccountID,
 		S3SelectResourceID,
 		S3SelectItemType,
 		S3SelectProductCode,
 		S3SelectUsageType,
+		S3SelectRegionCode,
+		S3SelectAvailabilityZone,
 		S3SelectListCost,
 	}
-	// OC equivalent to KCM env flags relevant at all?
+	_, checkNet := allColumns[S3SelectNetCost]
+	if checkNet {
+		selectColumns = append(selectColumns, S3SelectNetCost)
+	}
+
 	// Check for Reservation columns in CUR and query if available
-	checkReservations := allColumns[S3SelectRICost]
+	_, checkReservations := allColumns[S3SelectRICost]
 	if checkReservations {
 		selectColumns = append(selectColumns, S3SelectRICost)
 	}
+	_, checkNetReservations := allColumns[S3SelectNetRICost]
+	if checkNetReservations {
+		selectColumns = append(selectColumns, S3SelectNetRICost)
+	}
 
 	// Check for Savings Plan Columns in CUR and query if available
-	checkSavingsPlan := allColumns[S3SelectSPCost]
+	_, checkSavingsPlan := allColumns[S3SelectSPCost]
 	if checkSavingsPlan {
 		selectColumns = append(selectColumns, S3SelectSPCost)
 	}
+	_, checkNetSavingsPlan := allColumns[S3SelectNetSPCost]
+	if checkNetSavingsPlan {
+		selectColumns = append(selectColumns, S3SelectNetSPCost)
+	}
+
+	// Determine which columns are user-defined tags and add those to the list
+	// of columns to query.
+	labelColumns := []string{}
+	awsLabelColumns := []string{}
+	for column := range allColumns {
+		if strings.HasPrefix(column, S3SelectUserLabelPrefix) {
+			quotedTag := fmt.Sprintf(`s."%s"`, column)
+			selectColumns = append(selectColumns, quotedTag)
+			labelColumns = append(labelColumns, quotedTag)
+		}
+		if strings.HasPrefix(column, S3SelectAWSLabelPrefix) {
+			quotedTag := fmt.Sprintf(`s."%s"`, column)
+			selectColumns = append(selectColumns, quotedTag)
+			awsLabelColumns = append(awsLabelColumns, quotedTag)
+		}
+	}
 
 	// Build map of query columns to use for parsing query
 	columnIndexes := map[string]int{}
@@ -118,17 +147,8 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 	selectStr := strings.Join(selectColumns, ", ")
 	queryStr := `SELECT %s FROM s3object s
 	WHERE (CAST(s."lineItem/UsageStartDate" AS TIMESTAMP) BETWEEN CAST('%s' AS TIMESTAMP) AND CAST('%s' AS TIMESTAMP))
-	AND s."lineItem/ResourceId" <> ''
-	AND (
-		(
-			s."lineItem/ProductCode" = 'AmazonEC2' AND (
-				SUBSTRING(s."lineItem/ResourceId",1,2) = 'i-'
-				OR SUBSTRING(s."lineItem/ResourceId",1,4) = 'vol-'
-			)
-		)
-		OR s."lineItem/ProductCode" = 'AWSELB'
-       OR s."lineItem/ProductCode" = 'AmazonFSx'
-	)`
+	AND (s."lineItem/LineItemType" = 'Usage' OR s."lineItem/LineItemType" = 'DiscountedUsage' OR s."lineItem/LineItemType" = 'SavingsPlanCoveredUsage' OR s."lineItem/LineItemType" = 'EdpDiscount' OR s."lineItem/LineItemType" = 'PrivateRateDiscount')
+	`
 	query := fmt.Sprintf(queryStr, selectStr, formattedStart, formattedEnd)
 
 	processResults := func(reader *csv.Reader) error {
@@ -143,45 +163,107 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 			}
 
 			startStr := GetCSVRowValue(row, columnIndexes, S3SelectStartDate)
+			billPayerAccountID := GetCSVRowValue(row, columnIndexes, S3SelectBillPayerAccountID)
 			itemAccountID := GetCSVRowValue(row, columnIndexes, S3SelectAccountID)
 			itemProviderID := GetCSVRowValue(row, columnIndexes, S3SelectResourceID)
 			lineItemType := GetCSVRowValue(row, columnIndexes, S3SelectItemType)
 			itemProductCode := GetCSVRowValue(row, columnIndexes, S3SelectProductCode)
 			usageType := GetCSVRowValue(row, columnIndexes, S3SelectUsageType)
+			regionCode := GetCSVRowValue(row, columnIndexes, S3SelectRegionCode)
+			availabilityZone := GetCSVRowValue(row, columnIndexes, S3SelectAvailabilityZone)
+
+			// Iterate through the slice of tag columns, assigning
+			// values to the column names, minus the tag prefix.
+			labels := opencost.CloudCostLabels{}
+			for _, labelColumnName := range labelColumns {
+				// remove quotes
+				labelName := strings.TrimPrefix(labelColumnName, `s."`)
+				labelName = strings.TrimSuffix(labelName, `"`)
+				// remove prefix
+				labelName = strings.TrimPrefix(labelName, S3SelectUserLabelPrefix)
+				value := GetCSVRowValue(row, columnIndexes, labelColumnName)
+				if value != "" {
+					labels[labelName] = value
+				}
+			}
+			for _, awsLabelColumnName := range awsLabelColumns {
+				// remove quotes
+				labelName := strings.TrimPrefix(awsLabelColumnName, `s."`)
+				labelName = strings.TrimSuffix(labelName, `"`)
+				// partially remove prefix leaving "aws:"
+				labelName = strings.TrimPrefix(labelName, S3SelectResourceTagsPrefix)
+				value := GetCSVRowValue(row, columnIndexes, awsLabelColumnName)
+				if value != "" {
+					labels[labelName] = value
+				}
+			}
+
+			isKubernetes := 0.0
+			if itemProductCode == "AmazonEKS" || hasK8sLabel(labels) {
+				isKubernetes = 1.0
+			}
 
 			var (
-				amortizedCost float64
-				listCost      float64
-				netCost       float64
+				amortizedCost    float64
+				amortizedNetCost float64
+				listCost         float64
+				netCost          float64
 			)
 			// Get list and net costs
-			listCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectListCost)
-			if err != nil {
-				return err
+			if lineItemType != "EdpDiscount" && lineItemType != "PrivateRateDiscount" {
+				listCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectListCost)
+				if err != nil {
+					return err
+				}
 			}
-			netCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectNetCost)
-			if err != nil {
-				return err
+
+			// Get net cost if available
+			netCost = listCost
+			if checkNet {
+				netCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectNetCost)
+				if err != nil {
+					return err
+				}
 			}
 
 			// If there is a reservation_reservation_a_r_n on the line item use the awsRIPricingSUMColumn as cost
-			if checkReservations && lineItemType == "DiscountedUsage" {
-				amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectRICost)
-				if err != nil {
-					log.Errorf(err.Error())
-					continue
+			amortizedCost = listCost
+			amortizedNetCost = listCost
+			if lineItemType == "DiscountedUsage" {
+				if checkReservations {
+					amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectRICost)
+					if err != nil {
+						log.Errorf(err.Error())
+						continue
+					}
+					amortizedNetCost = amortizedCost
+				}
+				if checkNetReservations {
+					amortizedNetCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectNetRICost)
+					if err != nil {
+						log.Errorf(err.Error())
+						continue
+					}
 				}
 				// If there is a lineItemType of SavingsPlanCoveredUsage use the awsSPPricingSUMColumn
-			} else if checkSavingsPlan && lineItemType == "SavingsPlanCoveredUsage" {
-				amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectSPCost)
-				if err != nil {
-					log.Errorf(err.Error())
-					continue
+			} else if lineItemType == "SavingsPlanCoveredUsage" {
+				if checkSavingsPlan {
+					amortizedCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectSPCost)
+					if err != nil {
+						log.Errorf(err.Error())
+						continue
+					}
+					amortizedNetCost = amortizedCost
+				}
+				if checkNetSavingsPlan {
+					amortizedNetCost, err = GetCSVRowValueFloat(row, columnIndexes, S3SelectNetSPCost)
+					if err != nil {
+						log.Errorf(err.Error())
+						continue
+					}
 				}
-			} else {
-				// Default to listCost
-				amortizedCost = listCost
 			}
+
 			category := SelectAWSCategory(itemProviderID, usageType, itemProductCode)
 			// Retrieve final stanza of product code for ProviderID
 			if itemProductCode == "AWSELB" || itemProductCode == "AmazonFSx" {
@@ -190,10 +272,16 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 
 			properties := opencost.CloudCostProperties{}
 			properties.Provider = opencost.AWSProvider
+			properties.InvoiceEntityID = billPayerAccountID
+			properties.InvoiceEntityName = billPayerAccountID
 			properties.AccountID = itemAccountID
+			properties.AccountName = itemAccountID
 			properties.Category = category
 			properties.Service = itemProductCode
 			properties.ProviderID = itemProviderID
+			properties.RegionID = regionCode
+			properties.AvailabilityZone = availabilityZone
+			properties.Labels = labels
 
 			itemStart, err := time.Parse(S3SelectDateLayout, startStr)
 			if err != nil {
@@ -211,19 +299,24 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 				Properties: &properties,
 				Window:     opencost.NewWindow(&itemStart, &itemEnd),
 				ListCost: opencost.CostMetric{
-					Cost: listCost,
+					Cost:              listCost,
+					KubernetesPercent: isKubernetes,
 				},
 				NetCost: opencost.CostMetric{
-					Cost: netCost,
+					Cost:              netCost,
+					KubernetesPercent: isKubernetes,
 				},
 				AmortizedNetCost: opencost.CostMetric{
-					Cost: amortizedCost,
+					Cost:              amortizedCost,
+					KubernetesPercent: isKubernetes,
 				},
 				AmortizedCost: opencost.CostMetric{
-					Cost: amortizedCost,
+					Cost:              amortizedNetCost,
+					KubernetesPercent: isKubernetes,
 				},
 				InvoicedCost: opencost.CostMetric{
-					Cost: netCost,
+					Cost:              netCost,
+					KubernetesPercent: isKubernetes,
 				},
 			}
 			ccsr.LoadCloudCost(cc)
@@ -237,25 +330,34 @@ func (s3si *S3SelectIntegration) GetCloudCost(
 	return ccsr, nil
 }
 
-func (s3si *S3SelectIntegration) GetHeaders(queryKeys []string, client *s3.Client) ([]string, error) {
-	// Query to grab only header line from file
-	query := "SELECT * FROM S3OBJECT LIMIT 1"
-	var record []string
+const (
+	TagAWSEKSClusterName     = "aws:eks:cluster-name"
+	TagEKSClusterName        = "eks:cluster-name"
+	TagEKSCtlClusterName     = "alpha.eksctl.io/cluster-name"
+	TagKubernetesServiceName = "kubernetes.io/service-name"
+	TagKubernetesPVCName     = "kubernetes.io/created-for/pvc/name"
+	TagKubernetesPVName      = "kubernetes.io/created-for/pv/name"
+)
 
-	proccessheaders := func(reader *csv.Reader) error {
-		var err error
-		record, err = reader.Read()
-		if err != nil {
-			return err
-		}
-		return nil
+// hasK8sLabel checks if the labels contain a k8s label
+func hasK8sLabel(labels opencost.CloudCostLabels) bool {
+	if _, ok := labels[TagAWSEKSClusterName]; ok {
+		return true
 	}
-
-	// Use only the first query key with assumption that files share schema
-	err := s3si.Query(query, []string{queryKeys[0]}, client, proccessheaders)
-	if err != nil {
-		return nil, err
+	if _, ok := labels[TagEKSClusterName]; ok {
+		return true
 	}
-
-	return record, nil
+	if _, ok := labels[TagEKSCtlClusterName]; ok {
+		return true
+	}
+	if _, ok := labels[TagKubernetesServiceName]; ok {
+		return true
+	}
+	if _, ok := labels[TagKubernetesPVCName]; ok {
+		return true
+	}
+	if _, ok := labels[TagKubernetesPVName]; ok {
+		return true
+	}
+	return false
 }

+ 12 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -45,6 +45,18 @@ func (s3sq *S3SelectQuerier) Query(query string, queryKeys []string, cli *s3.Cli
 	return nil
 }
 
+func (s3sq *S3SelectQuerier) GetHeaders(queryKey string, cli *s3.Client) ([]string, error) {
+	reader, err := s3sq.fetchCSVReader("SELECT * FROM S3Object LIMIT 1", queryKey, cli, s3Types.FileHeaderInfoNone)
+	if err != nil {
+		return nil, err
+	}
+	record, err := reader.Read()
+	if err != nil {
+		return nil, err
+	}
+	return record, nil
+}
+
 // GetQueryKeys returns a list of s3 object names, where the there are 1 object for each month within the range between
 // start and end
 func (s3sq *S3SelectQuerier) GetQueryKeys(start, end time.Time, client *s3.Client) ([]string, error) {

+ 10 - 7
pkg/cloud/azure/azurestorageintegration.go

@@ -34,13 +34,16 @@ func (asi *AzureStorageIntegration) GetCloudCost(start, end time.Time) (*opencos
 		// until we can revisit and spend the time to do the calculations correctly
 		cc := &opencost.CloudCost{
 			Properties: &opencost.CloudCostProperties{
-				ProviderID:      providerID,
-				Provider:        opencost.AzureProvider,
-				AccountID:       abv.SubscriptionID,
-				InvoiceEntityID: abv.InvoiceEntityID,
-				Service:         abv.Service,
-				Category:        SelectAzureCategory(abv.MeterCategory),
-				Labels:          abv.Tags,
+				ProviderID:        providerID,
+				Provider:          opencost.AzureProvider,
+				AccountID:         abv.SubscriptionID,
+				AccountName:       abv.SubscriptionName,
+				InvoiceEntityID:   abv.InvoiceEntityID,
+				InvoiceEntityName: abv.InvoiceEntityName,
+				RegionID:          abv.Region,
+				Service:           abv.Service,
+				Category:          SelectAzureCategory(abv.MeterCategory),
+				Labels:            abv.Tags,
 			},
 			Window: window,
 			AmortizedNetCost: opencost.CostMetric{

+ 68 - 32
pkg/cloud/azure/billingexportparser.go

@@ -19,16 +19,19 @@ var groupRegex = regexp.MustCompile("(/[^/]+)")
 
 // BillingRowValues holder for Azure Billing Values
 type BillingRowValues struct {
-	Date            time.Time
-	MeterCategory   string
-	SubscriptionID  string
-	InvoiceEntityID string
-	InstanceID      string
-	Service         string
-	Tags            map[string]string
-	AdditionalInfo  map[string]any
-	Cost            float64
-	NetCost         float64
+	Date              time.Time
+	MeterCategory     string
+	SubscriptionID    string
+	SubscriptionName  string
+	InvoiceEntityID   string
+	InvoiceEntityName string
+	Region            string
+	InstanceID        string
+	Service           string
+	Tags              map[string]string
+	AdditionalInfo    map[string]any
+	Cost              float64
+	NetCost           float64
 }
 
 func (brv *BillingRowValues) IsCompute(category string) bool {
@@ -52,17 +55,20 @@ func (brv *BillingRowValues) IsCompute(category string) bool {
 
 // BillingExportParser holds indexes of relevant fields in Azure Billing CSV in addition to the correct data format
 type BillingExportParser struct {
-	Date            int
-	MeterCategory   int
-	InvoiceEntityID int
-	SubscriptionID  int
-	InstanceID      int
-	Service         int
-	Tags            int
-	AdditionalInfo  int
-	Cost            int
-	NetCost         int
-	DateFormat      string
+	Date              int
+	MeterCategory     int
+	InvoiceEntityID   int
+	InvoiceEntityName int
+	SubscriptionID    int
+	SubscriptionName  int
+	Region            int
+	InstanceID        int
+	Service           int
+	Tags              int
+	AdditionalInfo    int
+	Cost              int
+	NetCost           int
+	DateFormat        string
 }
 
 // match "SubscriptionGuid" in "Abonnement-GUID (SubscriptionGuid)"
@@ -106,6 +112,14 @@ func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
 		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Subscription ID field")
 	}
 
+	// set Subscription Name
+	if i, ok := headerIndexes["subscriptionname"]; ok {
+		abp.SubscriptionName = i
+	} else {
+		// if no subscription name column is present, use the subscription ID column
+		abp.SubscriptionName = abp.SubscriptionID
+	}
+
 	// Set Billing ID
 	if i, ok := headerIndexes["billingaccountid"]; ok {
 		abp.InvoiceEntityID = i
@@ -116,6 +130,25 @@ func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
 		abp.InvoiceEntityID = abp.SubscriptionID
 	}
 
+	// Set Billing Account Name
+	if i, ok := headerIndexes["billingaccountname"]; ok {
+		abp.InvoiceEntityName = i
+	} else {
+		// if no billing name column is present use billing ID index
+		abp.InvoiceEntityName = abp.InvoiceEntityID
+	}
+
+	// Set Region
+	if i, ok := headerIndexes["resourcelocation"]; ok {
+		abp.Region = i
+	} else if j, ok2 := headerIndexes["meterregion"]; ok2 {
+		abp.Region = j
+	} else if k, ok3 := headerIndexes["location"]; ok3 {
+		abp.Region = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Region field")
+	}
+
 	// Set Instance ID
 	if i, ok := headerIndexes["instanceid"]; ok {
 		abp.InstanceID = i
@@ -237,16 +270,19 @@ func (bep *BillingExportParser) ParseRow(start, end time.Time, record []string)
 	}
 
 	return &BillingRowValues{
-		Date:            usageDate,
-		MeterCategory:   record[bep.MeterCategory],
-		SubscriptionID:  record[bep.SubscriptionID],
-		InvoiceEntityID: record[bep.InvoiceEntityID],
-		InstanceID:      record[bep.InstanceID],
-		Service:         record[bep.Service],
-		Tags:            tags,
-		AdditionalInfo:  additionalInfo,
-		Cost:            cost,
-		NetCost:         netCost,
+		Date:              usageDate,
+		MeterCategory:     record[bep.MeterCategory],
+		SubscriptionID:    record[bep.SubscriptionID],
+		SubscriptionName:  record[bep.SubscriptionName],
+		InvoiceEntityID:   record[bep.InvoiceEntityID],
+		InvoiceEntityName: record[bep.InvoiceEntityName],
+		Region:            record[bep.Region],
+		InstanceID:        record[bep.InstanceID],
+		Service:           record[bep.Service],
+		Tags:              tags,
+		AdditionalInfo:    additionalInfo,
+		Cost:              cost,
+		NetCost:           netCost,
 	}
 }
 
@@ -285,7 +321,7 @@ func AzureSetProviderID(abv *BillingRowValues) (providerID string, isVMSSShared
 }
 
 func SelectAzureCategory(meterCategory string) string {
-	if meterCategory == "Virtual Machines" {
+	if meterCategory == "Virtual Machines" || meterCategory == "Virtual Machines Licenses" {
 		return opencost.ComputeCategory
 	} else if meterCategory == "Storage" {
 		return opencost.StorageCategory

+ 6 - 1
pkg/cloud/azure/provider.go

@@ -844,7 +844,12 @@ func (az *Azure) DownloadPricingData() error {
 	rateCardFilter := fmt.Sprintf("OfferDurableId eq '%s' and Currency eq '%s' and Locale eq 'en-US' and RegionInfo eq '%s'", config.AzureOfferDurableID, config.CurrencyCode, config.AzureBillingRegion)
 
 	log.Infof("Using ratecard query %s", rateCardFilter)
-	result, err := rcClient.Get(context.TODO(), rateCardFilter)
+	// rate-card client is old, it can hang indefinitely in some cases
+	// this happens on the main thread, so it may block the whole app
+	// there may be a better way to set a timeout for the client
+	ctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second)
+	defer cancel()
+	result, err := rcClient.Get(ctx, rateCardFilter)
 	if err != nil {
 		log.Warnf("Error in pricing download query from API")
 		az.rateCardPricingError = err

+ 2 - 2
pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv

@@ -1,2 +1,2 @@
-subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
-11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2"
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo,resourcelocation
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2",""

+ 1 - 1
pkg/cloud/azure/resources/billingexports/values/Template.csv

@@ -1,4 +1,4 @@
-subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo,resourcelocation
 11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
 11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
 11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",

+ 2 - 2
pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv

@@ -1,2 +1,2 @@
-subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
-11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }"
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo,resourcelocation
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }",""

+ 99 - 61
pkg/cloud/azure/storagebillingparser.go

@@ -3,6 +3,7 @@ package azure
 import (
 	"context"
 	"encoding/csv"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
@@ -46,28 +47,32 @@ func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, re
 		return err
 	}
 	ctx := context.Background()
+	// most recent blob list contains information on each blob, including its name and last-modified time
 	// Example blobNames: [ export/myExport/20240101-20240131/myExport_758a42af-0731-4edb-b498-1e523bb40f12.csv ]
-	blobNames, err := asbp.getMostRecentBlobs(start, end, client, ctx)
+	blobInfos, err := asbp.getMostRecentBlobs(start, end, client, ctx)
 	if err != nil {
 		asbp.ConnectionStatus = cloud.FailedConnection
 		return err
 	}
 
-	if len(blobNames) == 0 && asbp.ConnectionStatus != cloud.SuccessfulConnection {
+	if len(blobInfos) == 0 && asbp.ConnectionStatus != cloud.SuccessfulConnection {
 		asbp.ConnectionStatus = cloud.MissingData
 		return nil
 	}
 
-	for _, blobName := range blobNames {
-		if env.IsAzureDownloadBillingDataToDisk() {
-			localPath := filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "db", "cloudcost")
-			localFilePath := filepath.Join(localPath, filepath.Base(blobName))
+	if env.IsAzureDownloadBillingDataToDisk() {
+		// clean up old files that have been saved to disk before downloading new ones
+		localPath := filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), "db", "cloudcost")
+		if _, err := asbp.deleteFilesOlderThan7d(localPath); err != nil {
+			log.Warnf("CloudCost: Azure: ParseBillingData: failed to remove the following stale files: %v", err)
+		}
+		for _, blob := range blobInfos {
+			blobName := *blob.Name
 
-			if _, err := asbp.deleteFilesOlderThan7d(localPath); err != nil {
-				log.Warnf("CloudCost: Azure: ParseBillingData: failed to remove the following stale files: %v", err)
-			}
+			// Use entire blob name to prevent collision with other files from previous months or other integrations (ex "part_0_0001.csv")
+			localFilePath := filepath.Join(localPath, strings.ReplaceAll(blobName, "/", "_"))
 
-			err := asbp.DownloadBlobToFile(localFilePath, blobName, client, ctx)
+			err := asbp.DownloadBlobToFile(localFilePath, blob, client, ctx)
 			if err != nil {
 				asbp.ConnectionStatus = cloud.FailedConnection
 				return err
@@ -84,7 +89,11 @@ func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, re
 				asbp.ConnectionStatus = cloud.ParseError
 				return err
 			}
-		} else {
+
+		}
+	} else {
+		for _, blobInfo := range blobInfos {
+			blobName := *blobInfo.Name
 			streamReader, err2 := asbp.StreamBlob(blobName, client)
 			if err2 != nil {
 				asbp.ConnectionStatus = cloud.FailedConnection
@@ -98,6 +107,7 @@ func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, re
 			}
 		}
 	}
+
 	asbp.ConnectionStatus = cloud.SuccessfulConnection
 	return nil
 }
@@ -133,10 +143,10 @@ func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *cs
 	return nil
 }
 
-// getMostRecentBlobs returns a list of filepaths on the Azure Storage
+// getMostRecentBlobs returns a list of blobs in the Azure Storage
 // Container. It uses the "Last Modified Time" of the file to determine which
 // has the latest month-to-date billing data.
-func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, client *azblob.Client, ctx context.Context) ([]string, error) {
+func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, client *azblob.Client, ctx context.Context) ([]container.BlobItem, error) {
 	log.Infof("Azure Storage: retrieving most recent reports from: %v - %v", start, end)
 
 	// Get list of month substrings for months contained in the start to end range
@@ -144,7 +154,9 @@ func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time,
 	if err != nil {
 		return nil, err
 	}
-	mostRecentBlobs := make(map[string]container.BlobItem)
+
+	// Build map of blobs keyed by month string and blob name
+	blobsForMonth := make(map[string]map[string]container.BlobItem)
 
 	pager := client.NewListBlobsFlatPager(asbp.Container, &azblob.ListBlobsFlatOptions{
 		Include: container.ListBlobsInclude{Deleted: false, Versions: false},
@@ -167,27 +179,88 @@ func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time,
 			}
 			for _, month := range monthStrs {
 				if strings.Contains(*blobInfo.Name, month) {
-					// check if blob is the newest seen for this month
-					if prevBlob, ok := mostRecentBlobs[month]; ok {
-						if prevBlob.Properties.CreationTime.After(*blobInfo.Properties.CreationTime) {
-							continue
-						}
+					if _, ok := blobsForMonth[month]; !ok {
+						blobsForMonth[month] = make(map[string]container.BlobItem)
 					}
-					mostRecentBlobs[month] = *blobInfo
+					blobsForMonth[month][*blobInfo.Name] = *blobInfo
 				}
 			}
 		}
 	}
 
-	// convert blob names into blob urls and move from map into ordered list of blob names
-	var blobNames []string
-	for _, month := range monthStrs {
-		if blob, ok := mostRecentBlobs[month]; ok {
-			blobNames = append(blobNames, *blob.Name)
+	// build list of most recent blobs that are needed to fulfil a query on the given date range
+	var blobs []container.BlobItem
+	for _, monthBlobs := range blobsForMonth {
+		// Find most recent blob
+		var mostRecentBlob *container.BlobItem
+		var mostRecentManifest *container.BlobItem
+
+		for name := range monthBlobs {
+			blob := monthBlobs[name]
+			lastMod := *blob.Properties.LastModified
+			// Handle manifest files
+			if strings.HasSuffix(*blob.Name, "manifest.json") {
+				if mostRecentManifest == nil {
+					mostRecentManifest = &blob
+
+					continue
+				}
+				if mostRecentManifest.Properties.LastModified.Before(lastMod) {
+					mostRecentManifest = &blob
+				}
+				// Only look at non-manifest blobs if manifests are not present
+			} else if mostRecentManifest == nil {
+				if mostRecentBlob == nil {
+					mostRecentBlob = &blob
+					continue
+				}
+				if mostRecentBlob.Properties.LastModified.Before(lastMod) {
+					mostRecentBlob = &blob
+				}
+			}
+		}
+
+		// In the absence of a manifest, add the most recent blob
+		if mostRecentManifest == nil {
+			if mostRecentBlob != nil {
+				blobs = append(blobs, *mostRecentBlob)
+			}
+			continue
+		}
+
+		// download manifest for the month
+		manifestBytes, err := asbp.DownloadBlob(*mostRecentManifest.Name, client, ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to retrieve manifest %w", err)
+		}
+
+		var manifest manifestJson
+		err = json.Unmarshal(manifestBytes, &manifest)
+		if err != nil {
+			return nil, fmt.Errorf("failed to unmarshal manifest %w", err)
+		}
+
+		// Add all partitioned blobs named in the manifest to the list of blobs to be retrieved
+		for _, mb := range manifest.Blobs {
+			namedBlob, ok := monthBlobs[mb.BlobName]
+			if !ok {
+				log.Errorf("AzureStorage: failed to find blob named in manifest '%s'", mb.BlobName)
+				continue
+			}
+			blobs = append(blobs, namedBlob)
 		}
 	}
 
-	return blobNames, nil
+	return blobs, nil
+}
+
+// manifestJson is a struct for unmarshalling manifest.json files associated with the azure billing export
+type manifestJson struct {
+	Blobs []manifestBlob `json:"blobs"`
+}
+
+type manifestBlob struct {
+	BlobName string `json:"blobName"`
 }
 
 // getMonthStrings returns a list of month strings in the format
@@ -222,38 +295,3 @@ func (asbp *AzureStorageBillingParser) timeToMonthString(input time.Time) string
 	endOfMonth := input.AddDate(0, 1, -input.Day())
 	return startOfMonth.Format(format) + "-" + endOfMonth.Format(format)
 }
-
-// deleteFilesOlderThan7d recursively walks the directory specified and deletes
-// files which have not been modified in the last 7 days. Returns a list of
-// files deleted.
-func (asbp *AzureStorageBillingParser) deleteFilesOlderThan7d(localPath string) ([]string, error) {
-	duration := 7 * 24 * time.Hour
-	cleaned := []string{}
-	errs := []string{}
-
-	if _, err := os.Stat(localPath); err != nil {
-		return cleaned, nil // localPath does not exist
-	}
-
-	filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			errs = append(errs, err.Error())
-			return err
-		}
-
-		if time.Since(info.ModTime()) > duration {
-			err := os.Remove(path)
-			if err != nil {
-				errs = append(errs, err.Error())
-			}
-			cleaned = append(cleaned, path)
-		}
-		return nil
-	})
-
-	if len(errs) == 0 {
-		return cleaned, nil
-	} else {
-		return cleaned, fmt.Errorf("deleteFilesOlderThan7d: %v", errs)
-	}
-}

+ 70 - 6
pkg/cloud/azure/storageconnection.go

@@ -7,8 +7,11 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"sync"
+	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
 	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/pkg/cloud"
 )
@@ -16,6 +19,7 @@ import (
 // StorageConnection provides access to Azure Storage
 type StorageConnection struct {
 	StorageConfiguration
+	lock             sync.Mutex
 	ConnectionStatus cloud.ConnectionStatus
 }
 
@@ -42,6 +46,9 @@ func (sc *StorageConnection) getBlobURLTemplate() string {
 	// Use gov cloud blob url if gov is detected in AzureCloud
 	if strings.Contains(strings.ToLower(sc.Cloud), "gov") {
 		return "https://%s.blob.core.usgovcloudapi.net/%s"
+	} else if strings.Contains(strings.ToLower(sc.Cloud), "china") {
+		// Use China cloud blob url if china is detected in AzureCloud
+		return "https://%s.blob.core.chinacloudapi.cn/%s"
 	}
 	// default to Public Cloud template
 	return "https://%s.blob.core.windows.net/%s"
@@ -77,11 +84,21 @@ func (sc *StorageConnection) StreamBlob(blobName string, client *azblob.Client)
 }
 
 // DownloadBlobToFile downloads the Azure Billing CSV to a local file
-func (sc *StorageConnection) DownloadBlobToFile(localFilePath string, blobName string, client *azblob.Client, ctx context.Context) error {
-	// If file exists, don't download it again
-	if _, err := os.Stat(localFilePath); err == nil {
-		log.DedupedInfof(3, "CloudCost: Azure: DownloadBlobToFile: file %v already exists, not downloading %v", localFilePath, blobName)
-		return nil
+func (sc *StorageConnection) DownloadBlobToFile(localFilePath string, blob container.BlobItem, client *azblob.Client, ctx context.Context) error {
+	// Lock to prevent accessing a file which may not be fully downloaded
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+	blobName := *blob.Name
+	// Check if file already exists
+	if fileInfo, err := os.Stat(localFilePath); err == nil {
+		blobModTime := *blob.Properties.LastModified
+		// Check if the blob was last modified before the file was modified, indicating that the
+		// file is the most recent version of the blob
+		if blobModTime.Before(fileInfo.ModTime()) {
+			log.Debugf("CloudCost: Azure: DownloadBlobToFile: file %s is more recent than corresponding blob %s", localFilePath, blobName)
+			return nil
+		}
+
 	}
 
 	// Create filepath
@@ -96,12 +113,59 @@ func (sc *StorageConnection) DownloadBlobToFile(localFilePath string, blobName s
 	defer fp.Close()
 
 	// Download newest Azure Billing CSV to disk
+
+	// Time out to prevent deadlock on download
+	timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Minute)
+	defer cancel()
+
 	log.Infof("CloudCost: Azure: DownloadBlobToFile: retrieving blob: %v", blobName)
-	filesize, err := client.DownloadFile(ctx, sc.Container, blobName, fp, nil)
+	filesize, err := client.DownloadFile(timeoutCtx, sc.Container, blobName, fp, nil)
 	if err != nil {
+		// Clean up file from failed download
+		err2 := os.Remove(localFilePath)
+		if err2 != nil {
+			log.Errorf("CloudCost: Azure: DownloadBlobToFile: failed to remove file %s after failed download %s", localFilePath, err2.Error())
+		}
 		return fmt.Errorf("CloudCost: Azure: DownloadBlobToFile: failed to download %w", err)
 	}
 	log.Infof("CloudCost: Azure: DownloadBlobToFile: retrieved %v of size %dMB", blobName, filesize/1024/1024)
 
 	return nil
 }
+
+// deleteFilesOlderThan7d recursively walks the directory specified and deletes
+// files which have not been modified in the last 7 days. Returns a list of
+// files deleted.
+func (sc *StorageConnection) deleteFilesOlderThan7d(localPath string) ([]string, error) {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+	duration := 7 * 24 * time.Hour
+	cleaned := []string{}
+	errs := []string{}
+
+	if _, err := os.Stat(localPath); err != nil {
+		return cleaned, nil // localPath does not exist
+	}
+
+	filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			errs = append(errs, err.Error())
+			return err
+		}
+
+		if time.Since(info.ModTime()) > duration {
+			err := os.Remove(path)
+			if err != nil {
+				errs = append(errs, err.Error())
+			}
+			cleaned = append(cleaned, path)
+		}
+		return nil
+	})
+
+	if len(errs) == 0 {
+		return cleaned, nil
+	} else {
+		return cleaned, fmt.Errorf("deleteFilesOlderThan7d: %v", errs)
+	}
+}

+ 59 - 9
pkg/cloud/config/configurations.go

@@ -10,6 +10,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/oracle"
 )
 
 // MultiCloudConfig struct is used to unmarshal cloud configs for each provider out of cloud-integration file
@@ -66,22 +67,31 @@ type Configurations struct {
 	GCP     *GCPConfigs     `json:"gcp,omitempty"`
 	Azure   *AzureConfigs   `json:"azure,omitempty"`
 	Alibaba *AlibabaConfigs `json:"alibaba,omitempty"`
+	OCI     *OCIConfigs     `json:"oci,omitempty"`
 }
 
 // UnmarshalJSON custom json unmarshalling to maintain support for MultiCloudConfig format
 func (c *Configurations) UnmarshalJSON(bytes []byte) error {
-	// Attempt to unmarshal into old config object
-	multiConfig := &MultiCloudConfig{}
-	err := json.Unmarshal(bytes, multiConfig)
-	// If unmarshal is successful, move values into config and return
-	if err == nil {
-		multiConfig.loadConfigurations(c)
-		return nil
-	}
+	// This has been tested for backwards compatibility, and it works in both config formats.
+	// It also coincidentally works if you mix-and-match both the old format and the new
+	// format.
 	// Create inline type to gain access to default Unmarshalling
 	type ConfUnmarshaller *Configurations
 	var conf ConfUnmarshaller = c
-	return json.Unmarshal(bytes, conf)
+	err := json.Unmarshal(bytes, conf)
+	// If unmarshal is successful, return
+	if err == nil {
+		return nil
+	}
+
+	// Attempt to unmarshal into old config object
+	multiConfig := &MultiCloudConfig{}
+	err = json.Unmarshal(bytes, multiConfig)
+	if err != nil {
+		return err
+	}
+	multiConfig.loadConfigurations(c)
+	return nil
 }
 
 func (c *Configurations) Equals(that *Configurations) bool {
@@ -108,6 +118,10 @@ func (c *Configurations) Equals(that *Configurations) bool {
 		return false
 	}
 
+	if !c.OCI.Equals(that.OCI) {
+		return false
+	}
+
 	return true
 }
 
@@ -138,6 +152,11 @@ func (c *Configurations) Insert(keyedConfig cloud.Config) error {
 			c.Alibaba = &AlibabaConfigs{}
 		}
 		c.Alibaba.BOA = append(c.Alibaba.BOA, keyedConfig.(*alibaba.BOAConfiguration))
+	case *oracle.UsageApiConfiguration:
+		if c.OCI == nil {
+			c.OCI = &OCIConfigs{}
+		}
+		c.OCI.UsageAPI = append(c.OCI.UsageAPI, keyedConfig.(*oracle.UsageApiConfiguration))
 	default:
 		return fmt.Errorf("Configurations: Insert: failed to insert config of type: %T", keyedConfig)
 	}
@@ -174,6 +193,12 @@ func (c *Configurations) ToSlice() []cloud.KeyedConfig {
 		}
 	}
 
+	if c.OCI != nil {
+		for _, usageConfig := range c.OCI.UsageAPI {
+			keyedConfigs = append(keyedConfigs, usageConfig)
+		}
+	}
+
 	return keyedConfigs
 
 }
@@ -289,3 +314,28 @@ func (ac *AlibabaConfigs) Equals(that *AlibabaConfigs) bool {
 
 	return true
 }
+
+type OCIConfigs struct {
+	UsageAPI []*oracle.UsageApiConfiguration `json:"usageApi,omitempty"`
+}
+
+func (oc *OCIConfigs) Equals(that *OCIConfigs) bool {
+	if oc == nil && that == nil {
+		return true
+	}
+	if oc == nil || that == nil {
+		return false
+	}
+	// Check Usage API
+	if len(oc.UsageAPI) != len(that.UsageAPI) {
+		return false
+	}
+	for i, thisUsageAPI := range oc.UsageAPI {
+		thatUsageAPI := that.UsageAPI[i]
+		if !thisUsageAPI.Equals(thatUsageAPI) {
+			return false
+		}
+	}
+
+	return true
+}

+ 115 - 57
pkg/cloud/config/controller.go

@@ -12,7 +12,6 @@ import (
 	"github.com/opencost/opencost/core/pkg/util/timeutil"
 	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
-	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/env"
 )
 
@@ -22,27 +21,47 @@ const configFile = "cloud-configurations.json"
 // methods. To do this it has a map of config watchers mapped on configuration source and a list Observers that it updates
 // upon any change detected from the config watchers.
 type Controller struct {
-	path      string
+	storage   controllerStorage
 	lock      sync.RWMutex
 	observers []Observer
 	watchers  map[ConfigSource]cloud.KeyedConfigWatcher
 }
 
 // NewController initializes an Config Controller
-func NewController(cp models.Provider) *Controller {
-	var watchers map[ConfigSource]cloud.KeyedConfigWatcher
-	if env.IsKubernetesEnabled() && cp != nil {
-		providerConfig := provider.ExtractConfigFromProviders(cp)
-		watchers = GetCloudBillingWatchers(providerConfig)
-	} else {
-		watchers = GetCloudBillingWatchers(nil)
+func NewController(providerConfig models.ProviderConfig) *Controller {
+
+	watchers := GetCloudBillingWatchers(providerConfig)
+
+	storage := &FileControllerStorage{
+		path: filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), configFile),
+	}
+
+	ic := &Controller{
+		storage:  storage,
+		watchers: watchers,
 	}
+
+	ic.start()
+
+	return ic
+}
+
+// NewMemoryController initializes a Config Controller backed in memory
+func NewMemoryController(providerConfig models.ProviderConfig) *Controller {
+	watchers := GetCloudBillingWatchers(providerConfig)
+
 	ic := &Controller{
-		path:     filepath.Join(env.GetConfigPathWithDefault(env.DefaultConfigMountPath), configFile),
+		storage:  &MemoryControllerStorage{},
 		watchers: watchers,
 	}
 
-	ic.pullWatchers()
+	ic.start()
+
+	return ic
+}
+
+func (c *Controller) start() {
+	c.pullWatchers()
 
 	go func() {
 		ticker := timeutil.NewJobTicker()
@@ -53,22 +72,20 @@ func NewController(cp models.Provider) *Controller {
 
 			<-ticker.Ch
 
-			ic.pullWatchers()
+			c.pullWatchers()
 		}
 	}()
-
-	return ic
 }
 
 // pullWatchers retrieve configs from watchers and update configs according to priority of sources
 func (c *Controller) pullWatchers() {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		log.Warnf("Controller: pullWatchers: %s. Proceeding to create the file", err.Error())
 		statuses = Statuses{}
-		err = c.save(statuses)
+		err = c.storage.save(statuses)
 		if err != nil {
 			log.Errorf("Controller: pullWatchers: failed to save statuses %s", err.Error())
 		}
@@ -158,7 +175,7 @@ func (c *Controller) pullWatchers() {
 			if status.Active {
 				c.broadcastAddConfig(conf)
 			}
-			err = c.save(statuses)
+			err = c.storage.save(statuses)
 			if err != nil {
 				log.Errorf("Controller: pullWatchers: failed to save statuses %s", err.Error())
 			}
@@ -178,7 +195,7 @@ func (c *Controller) CreateConfig(conf cloud.KeyedConfig) error {
 		return fmt.Errorf("provided configuration was invalid: %w", err)
 	}
 
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		return fmt.Errorf("failed to load statuses")
 	}
@@ -219,7 +236,7 @@ func (c *Controller) CreateConfig(conf cloud.KeyedConfig) error {
 	}
 
 	c.broadcastAddConfig(conf)
-	err = c.save(statuses)
+	err = c.storage.save(statuses)
 	if err != nil {
 		return fmt.Errorf("failed to save statues: %w", err)
 	}
@@ -231,7 +248,7 @@ func (c *Controller) EnableConfig(key, sourceStr string) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		return fmt.Errorf("failed to load statuses")
 	}
@@ -261,7 +278,7 @@ func (c *Controller) EnableConfig(key, sourceStr string) error {
 
 	cs.Active = true
 	c.broadcastAddConfig(cs.Config)
-	c.save(statuses)
+	c.storage.save(statuses)
 	return nil
 }
 
@@ -269,7 +286,7 @@ func (c *Controller) EnableConfig(key, sourceStr string) error {
 func (c *Controller) DisableConfig(key, sourceStr string) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		return fmt.Errorf("failed to load statuses")
 	}
@@ -284,7 +301,7 @@ func (c *Controller) DisableConfig(key, sourceStr string) error {
 
 	is.Active = false
 	c.broadcastRemoveConfig(key)
-	c.save(statuses)
+	c.storage.save(statuses)
 	return nil
 }
 
@@ -298,7 +315,7 @@ func (c *Controller) DeleteConfig(key, sourceStr string) error {
 		return fmt.Errorf("controller does not own config with key %s from source %s, manage this config at its source", key, source.String())
 	}
 
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		return fmt.Errorf("failed to load statuses")
 	}
@@ -321,37 +338,7 @@ func (c *Controller) deleteConfig(key string, source ConfigSource, statuses Stat
 		c.broadcastRemoveConfig(key)
 	}
 	delete(statuses[source], key)
-	c.save(statuses)
-	return nil
-}
-
-func (c *Controller) load() (Statuses, error) {
-	raw, err := os.ReadFile(c.path)
-	if err != nil {
-		return nil, fmt.Errorf("failed to load config statuses from file: %w", err)
-	}
-
-	statuses := Statuses{}
-	err = json.Unmarshal(raw, &statuses)
-	if err != nil {
-		return nil, fmt.Errorf("failed to marshal config statuses: %s", err.Error())
-	}
-
-	return statuses, nil
-}
-
-func (c *Controller) save(statuses Statuses) error {
-
-	raw, err := json.Marshal(statuses)
-	if err != nil {
-		return fmt.Errorf("failed to marshal config statuses: %s", err)
-	}
-
-	err = os.WriteFile(c.path, raw, 0644)
-	if err != nil {
-		return fmt.Errorf("failed to save config statuses to file: %s", err)
-	}
-
+	c.storage.save(statuses)
 	return nil
 }
 
@@ -386,7 +373,7 @@ func (c *Controller) ExportConfigs(key string) (*Configurations, error) {
 
 func (c *Controller) getActiveConfigs() map[string]cloud.KeyedConfig {
 	activeConfigs := make(map[string]cloud.KeyedConfig)
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		log.Errorf("GetStatus: failed to load cloud statuses")
 	}
@@ -438,7 +425,7 @@ func (c *Controller) GetStatus() []Status {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
 	var status []Status
-	statuses, err := c.load()
+	statuses, err := c.storage.load()
 	if err != nil {
 		log.Errorf("GetStatus: failed to load cloud statuses")
 	}
@@ -447,3 +434,74 @@ func (c *Controller) GetStatus() []Status {
 	}
 	return status
 }
+
+type controllerStorage interface {
+	load() (Statuses, error)
+	save(statuses Statuses) error
+}
+
+type FileControllerStorage struct {
+	path string
+}
+
+func (fcs *FileControllerStorage) load() (Statuses, error) {
+	raw, err := os.ReadFile(fcs.path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load config statuses from file: %w", err)
+	}
+
+	statuses := Statuses{}
+	err = json.Unmarshal(raw, &statuses)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal config statuses: %s", err.Error())
+	}
+
+	return statuses, nil
+}
+
+func (fcs *FileControllerStorage) save(statuses Statuses) error {
+
+	raw, err := json.Marshal(statuses)
+	if err != nil {
+		return fmt.Errorf("failed to marshal config statuses: %s", err)
+	}
+
+	err = os.WriteFile(fcs.path, raw, 0644)
+	if err != nil {
+		return fmt.Errorf("failed to save config statuses to file: %s", err)
+	}
+
+	return nil
+}
+
+// MemoryControllerStorage is a ControllerStorage implementation backed by a byte slice that
+// is marshalled in and out of, to ensure its behaviour is the same as the file-backed version
+type MemoryControllerStorage struct {
+	bytes []byte
+}
+
+func (mcs *MemoryControllerStorage) load() (Statuses, error) {
+	if mcs.bytes == nil {
+		return Statuses{}, nil
+	}
+
+	statuses := Statuses{}
+	err := json.Unmarshal(mcs.bytes, &statuses)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal config statuses: %s", err.Error())
+	}
+
+	return statuses, nil
+}
+
+func (mcs *MemoryControllerStorage) save(statuses Statuses) error {
+
+	raw, err := json.Marshal(statuses)
+	if err != nil {
+		return fmt.Errorf("failed to marshal config statuses: %s", err)
+	}
+
+	mcs.bytes = raw
+
+	return nil
+}

+ 38 - 15
pkg/cloud/config/controller_test.go

@@ -855,12 +855,16 @@ func TestIntegrationController_pullWatchers(t *testing.T) {
 			path := filepath.Join(tempDir, configFile)
 			defer os.Remove(path)
 
+			storage := &FileControllerStorage{
+				path: path,
+			}
+
 			// Initialize controller
 			icd := &Controller{
-				path:     path,
+				storage:  storage,
 				watchers: tc.configWatchers,
 			}
-			err = icd.save(initialStatuses)
+			err = icd.storage.save(initialStatuses)
 			if err != nil {
 				t.Errorf("failed to save initial statuses: %s", err.Error())
 			}
@@ -869,7 +873,7 @@ func TestIntegrationController_pullWatchers(t *testing.T) {
 			icd.pullWatchers()
 
 			// Test Result
-			status, err := icd.load()
+			status, err := icd.storage.load()
 			if err != nil {
 				t.Errorf("failed to load status file: %s", err.Error())
 			}
@@ -964,11 +968,15 @@ func TestIntegrationController_CreateConfig(t *testing.T) {
 			path := filepath.Join(tempDir, configFile)
 			defer os.Remove(path)
 
+			storage := &FileControllerStorage{
+				path: path,
+			}
+
 			// Initialize controller
 			icd := &Controller{
-				path: path,
+				storage: storage,
 			}
-			err = icd.save(initialStatuses)
+			err = icd.storage.save(initialStatuses)
 			if err != nil {
 				t.Errorf("failed to save initial statuses: %s", err.Error())
 			}
@@ -984,7 +992,7 @@ func TestIntegrationController_CreateConfig(t *testing.T) {
 				t.Errorf("no error where expect")
 			}
 
-			status, err := icd.load()
+			status, err := icd.storage.load()
 			if err != nil {
 				t.Errorf("failed to load status file: %s", err.Error())
 			}
@@ -1078,11 +1086,16 @@ func TestIntegrationController_EnableConfig(t *testing.T) {
 			path := filepath.Join(tempDir, configFile)
 			defer os.Remove(path)
 
+			storage := &FileControllerStorage{
+				path: path,
+			}
+
 			// Initialize controller
 			icd := &Controller{
-				path: path,
+				storage: storage,
 			}
-			err = icd.save(initialStatuses)
+
+			err = icd.storage.save(initialStatuses)
 			if err != nil {
 				t.Errorf("failed to save initial statuses: %s", err.Error())
 			}
@@ -1098,7 +1111,7 @@ func TestIntegrationController_EnableConfig(t *testing.T) {
 				t.Errorf("no error where expect")
 			}
 
-			status, err := icd.load()
+			status, err := icd.storage.load()
 			if err != nil {
 				t.Errorf("failed to load status file: %s", err.Error())
 			}
@@ -1194,11 +1207,16 @@ func TestIntegrationController_DisableConfig(t *testing.T) {
 			path := filepath.Join(tempDir, configFile)
 			defer os.Remove(path)
 
+			storage := &FileControllerStorage{
+				path: path,
+			}
+
 			// Initialize controller
 			icd := &Controller{
-				path: path,
+				storage: storage,
 			}
-			err = icd.save(initialStatuses)
+
+			err = icd.storage.save(initialStatuses)
 			if err != nil {
 				t.Errorf("failed to save initial statuses: %s", err.Error())
 			}
@@ -1214,7 +1232,7 @@ func TestIntegrationController_DisableConfig(t *testing.T) {
 				t.Errorf("no error where expect")
 			}
 
-			status, err := icd.load()
+			status, err := icd.storage.load()
 			if err != nil {
 				t.Errorf("failed to load status file: %s", err.Error())
 			}
@@ -1289,11 +1307,16 @@ func TestIntegrationController_DeleteConfig(t *testing.T) {
 			path := filepath.Join(tempDir, configFile)
 			defer os.Remove(path)
 
+			storage := &FileControllerStorage{
+				path: path,
+			}
+
 			// Initialize controller
 			icd := &Controller{
-				path: path,
+				storage: storage,
 			}
-			err = icd.save(initialStatuses)
+
+			err = icd.storage.save(initialStatuses)
 			if err != nil {
 				t.Errorf("failed to save initial statuses: %s", err.Error())
 			}
@@ -1309,7 +1332,7 @@ func TestIntegrationController_DeleteConfig(t *testing.T) {
 				t.Errorf("no error where expect")
 			}
 
-			status, err := icd.load()
+			status, err := icd.storage.load()
 			if err != nil {
 				t.Errorf("failed to load status file: %s", err.Error())
 			}

+ 6 - 0
pkg/cloud/config/statuses.go

@@ -9,6 +9,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/oracle"
 )
 
 const (
@@ -16,6 +17,7 @@ const (
 	AthenaConfigType       = "athena"
 	BigQueryConfigType     = "bigquery"
 	AzureStorageConfigType = "azurestorage"
+	UsageApiConfigType     = "usageapi"
 )
 
 func ConfigTypeFromConfig(config cloud.KeyedConfig) (string, error) {
@@ -28,6 +30,8 @@ func ConfigTypeFromConfig(config cloud.KeyedConfig) (string, error) {
 		return BigQueryConfigType, nil
 	case *azure.StorageConfiguration:
 		return AzureStorageConfigType, nil
+	case *oracle.UsageApiConfiguration:
+		return UsageApiConfigType, nil
 	}
 	return "", fmt.Errorf("failed to config type for config with key: %s, type %T", config.Key(), config)
 }
@@ -114,6 +118,8 @@ func (s *Status) UnmarshalJSON(b []byte) error {
 		config = &gcp.BigQueryConfiguration{}
 	case AzureStorageConfigType:
 		config = &azure.StorageConfiguration{}
+	case UsageApiConfigType:
+		config = &oracle.UsageApiConfiguration{}
 	default:
 		return fmt.Errorf("Status: UnmarshalJSON: config type '%s' is not recognized", configType)
 	}

+ 14 - 2
pkg/cloud/gcp/bigqueryintegration.go

@@ -20,10 +20,14 @@ const (
 	UsageDateColumnName          = "usage_date"
 	BillingAccountIDColumnName   = "billing_id"
 	ProjectIDColumnName          = "project_id"
+	ProjectNameColumnName        = "project_name"
+	RegionColumnName             = "region"
+	ZoneColumnName               = "zone"
 	ServiceDescriptionColumnName = "service"
 	SKUDescriptionColumnName     = "description"
 	LabelsColumnName             = "labels"
 	ResourceNameColumnName       = "resource"
+	ResourceGlobalNameColumnName = "global_resource"
 	CostColumnName               = "cost"
 	ListCostColumnName           = "list_cost"
 	CreditsColumnName            = "credits"
@@ -43,9 +47,13 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*o
 		fmt.Sprintf("TIMESTAMP_TRUNC(usage_start_time, day) as %s", UsageDateColumnName),
 		fmt.Sprintf("billing_account_id as %s", BillingAccountIDColumnName),
 		fmt.Sprintf("project.id as %s", ProjectIDColumnName),
+		fmt.Sprintf("project.name as %s", ProjectNameColumnName),
+		fmt.Sprintf("location.region as %s", RegionColumnName),
+		fmt.Sprintf("location.zone as %s", ZoneColumnName),
 		fmt.Sprintf("service.description as %s", ServiceDescriptionColumnName),
 		fmt.Sprintf("sku.description as %s", SKUDescriptionColumnName),
 		fmt.Sprintf("resource.name as %s", ResourceNameColumnName),
+		fmt.Sprintf("resource.global_name as %s", ResourceGlobalNameColumnName),
 		fmt.Sprintf("TO_JSON_STRING(labels) as %s", LabelsColumnName),
 		fmt.Sprintf("SUM(cost) as %s", CostColumnName),
 		fmt.Sprintf("SUM(cost_at_list) as %s", ListCostColumnName),
@@ -56,10 +64,14 @@ func (bqi *BigQueryIntegration) GetCloudCost(start time.Time, end time.Time) (*o
 		UsageDateColumnName,
 		BillingAccountIDColumnName,
 		ProjectIDColumnName,
+		ProjectNameColumnName,
+		RegionColumnName,
+		ZoneColumnName,
 		ServiceDescriptionColumnName,
 		SKUDescriptionColumnName,
 		LabelsColumnName,
 		ResourceNameColumnName,
+		ResourceGlobalNameColumnName,
 	}
 
 	whereConjuncts := GetWhereConjuncts(start, end)
@@ -178,7 +190,7 @@ func (bqi *BigQueryIntegration) queryFlexibleCUDTotalCosts(start time.Time, end
 		  IFNULL(SUM((Select SUM(amount) FROM bd.credits)),0),
 		FROM %s
 		WHERE %s
-		GROUP BY usage_date, sku.description
+		GROUP BY usage_date
 	`
 
 	table := fmt.Sprintf(" `%s` bd ", bqi.GetBillingDataDataset())
@@ -211,7 +223,7 @@ func (bqi *BigQueryIntegration) queryFlexibleCUDTotalCredits(start time.Time, en
 	FROM %s
 	CROSS JOIN UNNEST(bd.credits) AS credits
 	WHERE %s
-	GROUP BY usage_date, credits.id
+	GROUP BY usage_date
 	`
 
 	table := fmt.Sprintf(" `%s` bd ", bqi.GetBillingDataDataset())

+ 1 - 1
pkg/cloud/gcp/bigqueryintegration_test.go

@@ -13,7 +13,7 @@ import (
 func TestBigQueryIntegration_GetCloudCost(t *testing.T) {
 	bigQueryConfigPath := os.Getenv("BIGQUERY_CONFIGURATION")
 	if bigQueryConfigPath == "" {
-		t.Skip("skipping integration test, set environment variable ATHENA_CONFIGURATION")
+		t.Skip("skipping integration test, set environment variable BIGQUERY_CONFIGURATION")
 	}
 	bigQueryConfigBin, err := os.ReadFile(bigQueryConfigPath)
 	if err != nil {

+ 43 - 0
pkg/cloud/gcp/bigqueryintegration_types.go

@@ -61,6 +61,8 @@ func (ccl *CloudCostLoader) Load(values []bigquery.Value, schema bigquery.Schema
 				invoiceEntityID = ""
 			}
 			properties.InvoiceEntityID = invoiceEntityID
+			// Use InvoiceEntityID as InvoiceEntityName
+			properties.InvoiceEntityName = invoiceEntityID
 		case ProjectIDColumnName:
 			accountID, ok := values[i].(string)
 			if !ok {
@@ -68,6 +70,27 @@ func (ccl *CloudCostLoader) Load(values []bigquery.Value, schema bigquery.Schema
 				accountID = ""
 			}
 			properties.AccountID = accountID
+		case ProjectNameColumnName:
+			accountName, ok := values[i].(string)
+			if !ok {
+				log.DedupedErrorf(5, "error parsing GCP CloudCost %s: %v", ProjectNameColumnName, values[i])
+				accountName = ""
+			}
+			properties.AccountName = accountName
+		case RegionColumnName:
+			regionID, ok := values[i].(string)
+			if !ok {
+				log.DedupedErrorf(5, "error parsing GCP CloudCost %s: %v", RegionColumnName, values[i])
+				regionID = ""
+			}
+			properties.RegionID = regionID
+		case ZoneColumnName:
+			zone, ok := values[i].(string)
+			if !ok {
+				log.DedupedErrorf(5, "error parsing GCP CloudCost %s: %v", ZoneColumnName, values[i])
+				zone = ""
+			}
+			properties.AvailabilityZone = zone
 		case ServiceDescriptionColumnName:
 			service, ok := values[i].(string)
 			if !ok {
@@ -113,6 +136,26 @@ func (ccl *CloudCostLoader) Load(values []bigquery.Value, schema bigquery.Schema
 			}
 
 			properties.ProviderID = ParseProviderID(resource)
+		case ResourceGlobalNameColumnName:
+			// skip if we already got ProviderID from resource.name, as resource.global_name is a fallback for when
+			// resource.name is null
+			if len(properties.ProviderID) > 0 {
+				continue
+			}
+
+			resourceGlobalNameValue := values[i]
+			if resourceGlobalNameValue == nil {
+				properties.ProviderID = ""
+				continue
+			}
+			resourceGlobalName, ok := resourceGlobalNameValue.(string)
+			if !ok {
+				log.DedupedErrorf(5, "error parsing GCP CloudCost %s: %v", ResourceGlobalNameColumnName, values[i])
+				properties.ProviderID = ""
+				continue
+			}
+
+			properties.ProviderID = ParseProviderID(resourceGlobalName)
 		case CostColumnName:
 			costValue, ok := values[i].(float64)
 			if !ok {

+ 75 - 0
pkg/cloud/gcp/bigqueryintegration_types_test.go

@@ -0,0 +1,75 @@
+package gcp
+
+import (
+	"testing"
+	"time"
+
+	"cloud.google.com/go/bigquery"
+	"github.com/opencost/opencost/core/pkg/opencost"
+)
+
+func Test_Load_ResourceFallback(t *testing.T) {
+	schema := bigquery.Schema{
+		&bigquery.FieldSchema{
+			Name: UsageDateColumnName,
+		},
+		&bigquery.FieldSchema{
+			Name: ResourceNameColumnName,
+		},
+		&bigquery.FieldSchema{
+			Name: ResourceGlobalNameColumnName,
+		},
+	}
+
+	testCases := map[string]struct {
+		values             []bigquery.Value
+		expectedProviderID string
+	}{
+		"no data": {
+			values: []bigquery.Value{
+				bigquery.Value(time.Now()),
+				bigquery.Value(nil),
+				bigquery.Value(nil),
+			},
+			expectedProviderID: "",
+		},
+		"resource name only": {
+			values: []bigquery.Value{
+				bigquery.Value(time.Now()),
+				bigquery.Value("resource_name"),
+				bigquery.Value(nil),
+			},
+			expectedProviderID: "resource_name",
+		},
+		"resource global name only": {
+			values: []bigquery.Value{
+				bigquery.Value(time.Now()),
+				bigquery.Value(nil),
+				bigquery.Value("resource_global_name"),
+			},
+			expectedProviderID: "resource_global_name",
+		},
+		"resource name and global name": {
+			values: []bigquery.Value{
+				bigquery.Value(time.Now()),
+				bigquery.Value("resource_name"),
+				bigquery.Value("resource_global_name"),
+			},
+			expectedProviderID: "resource_name",
+		},
+	}
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			ccl := CloudCostLoader{
+				CloudCost: &opencost.CloudCost{},
+			}
+
+			err := ccl.Load(testCase.values, schema)
+			if err != nil {
+				t.Errorf("Other error during testing %s", err)
+			} else if ccl.CloudCost.Properties.ProviderID != testCase.expectedProviderID {
+				t.Errorf("Incorrect result, actual ProviderID: %s, expected: %s", ccl.CloudCost.Properties.ProviderID, testCase.expectedProviderID)
+			}
+		})
+	}
+}

+ 23 - 2
pkg/cloud/gcp/provider.go

@@ -66,18 +66,28 @@ var gcpRegions = []string{
 	"australia-southeast2",
 	"europe-central2",
 	"europe-north1",
+	"europe-southwest1",
 	"europe-west1",
+	"europe-west10",
+	"europe-west12",
 	"europe-west2",
 	"europe-west3",
 	"europe-west4",
 	"europe-west6",
+	"europe-west8",
 	"europe-west9",
+	"me-central1",
+	"me-central2",
+	"me-west1",
 	"northamerica-northeast1",
 	"northamerica-northeast2",
 	"southamerica-east1",
+	"southamerica-west1",
 	"us-central1",
 	"us-east1",
 	"us-east4",
+	"us-east5",
+	"us-south1",
 	"us-west1",
 	"us-west2",
 	"us-west3",
@@ -484,7 +494,8 @@ func (gcp *GCP) GetOrphanedResources() ([]models.OrphanedResource, error) {
 				desc := map[string]string{}
 				if disk.Description != "" {
 					if err := json.Unmarshal([]byte(disk.Description), &desc); err != nil {
-						return nil, fmt.Errorf("error converting string to map: %s", err)
+						log.Errorf("ignoring orphaned disk %s, failed to convert disk description to map: %s", disk.Name, err)
+						continue
 					}
 				}
 
@@ -632,6 +643,16 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]models.Key, pvKeys m
 		} else if err != nil {
 			return nil, "", fmt.Errorf("error parsing GCP pricing page: %s", err)
 		}
+		if t == "error" {
+			errReader := dec.Buffered()
+			buf := new(strings.Builder)
+			_, err = io.Copy(buf, errReader)
+			if err != nil {
+				return nil, "", fmt.Errorf("error response: could not be read %s", err)
+			}
+
+			return nil, "", fmt.Errorf("error response: %s", buf.String())
+		}
 		if t == "skus" {
 			_, err := dec.Token() // consumes [
 			if err != nil {
@@ -1141,7 +1162,7 @@ func (gcp *GCP) PVPricing(pvk models.PVKey) (*models.PV, error) {
 	defer gcp.DownloadPricingDataLock.RUnlock()
 	pricing, ok := gcp.Pricing[pvk.Features()]
 	if !ok {
-		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &models.PV{}, nil
 	}
 	return pricing.PV, nil

+ 180 - 154
pkg/cloud/gcp/provider_test.go

@@ -7,6 +7,7 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/opencost/opencost/pkg/cloud/models"
 )
 
@@ -181,171 +182,196 @@ func TestKeyFeatures(t *testing.T) {
 // Load a reader object on a portion of a GCP api response
 // Confirm that the resting *GCP object contains the correctly parsed pricing info
 func TestParsePage(t *testing.T) {
-	// NOTE: SKUs here are copied directly from GCP Billing API. Some of them
-	// are in currency IDR, which relates directly to ticket GTM-52, for which
-	// some of this work was done. So if the prices look huge... don't panic.
-	// The only thing we're testing here is that, given these instance types
-	// and regions and prices, those same prices get set appropriately into
-	// the returned pricing map.
-	skuFilePath := "./test/skus.json"
-	fileBytes, err := os.ReadFile(skuFilePath)
-	if err != nil {
-		t.Fatalf("failed to open file '%s': %s", skuFilePath, err)
-	}
-	reader := bytes.NewReader(fileBytes)
-
-	testGcp := &GCP{}
 
-	inputKeys := map[string]models.Key{
-		"us-central1,a2highgpu,ondemand,gpu": &gcpKey{
-			Labels: map[string]string{
-				"node.kubernetes.io/instance-type": "a2-highgpu-1g",
-				"cloud.google.com/gke-gpu":         "true",
-				"cloud.google.com/gke-accelerator": "nvidia-tesla-a100",
-				"topology.kubernetes.io/region":    "us-central1",
-			},
-		},
-		"us-central1,e2medium,ondemand": &gcpKey{
-			Labels: map[string]string{
-				"node.kubernetes.io/instance-type": "e2-medium",
-				"topology.kubernetes.io/region":    "us-central1",
-			},
-		},
-		"us-central1,e2standard,ondemand": &gcpKey{
-			Labels: map[string]string{
-				"node.kubernetes.io/instance-type": "e2-standard",
-				"topology.kubernetes.io/region":    "us-central1",
-			},
-		},
-		"asia-southeast1,t2dstandard,ondemand": &gcpKey{
-			Labels: map[string]string{
-				"node.kubernetes.io/instance-type": "t2d-standard-1",
-				"topology.kubernetes.io/region":    "asia-southeast1",
-			},
+	testCases := map[string]struct {
+		inputFile      string
+		inputKeys      map[string]models.Key
+		pvKeys         map[string]models.PVKey
+		expectedPrices map[string]*GCPPricing
+		expectedToken  string
+		expectError    bool
+	}{
+		"Error Response": {
+			inputFile:      "./test/error.json",
+			inputKeys:      nil,
+			pvKeys:         nil,
+			expectedPrices: nil,
+			expectError:    true,
 		},
-	}
-
-	pvKeys := map[string]models.PVKey{}
-
-	actualPrices, token, err := testGcp.parsePage(reader, inputKeys, pvKeys)
-	if err != nil {
-		t.Fatalf("got error parsing page: %v", err)
-	}
-
-	const expectedToken = "APKCS1HVa0YpwgyTFbqbJ1eGwzKZmsPwLqzMZPTSNia5ck1Hc54Tx_Kz3oBxwSnRIdGVxXoSPdf-XlDpyNBf4QuxKcIEgtrQ1LDLWAgZowI0ns7HjrGta2s="
-	if token != expectedToken {
-		t.Fatalf("error parsing GCP next page token, parsed %s but expected %s", token, expectedToken)
-	}
-
-	expectedActualPrices := map[string]*GCPPricing{
-		"us-central1,a2highgpu,ondemand,gpu": {
-			Name:        "services/6F81-5844-456A/skus/039F-D0DA-4055",
-			SKUID:       "039F-D0DA-4055",
-			Description: "Nvidia Tesla A100 GPU running in Americas",
-			Category: &GCPResourceInfo{
-				ServiceDisplayName: "Compute Engine",
-				ResourceFamily:     "Compute",
-				ResourceGroup:      "GPU",
-				UsageType:          "OnDemand",
+		"SKU file": {
+			// NOTE: SKUs here are copied directly from GCP Billing API. Some of them
+			// are in currency IDR, which relates directly to ticket GTM-52, for which
+			// some of this work was done. So if the prices look huge... don't panic.
+			// The only thing we're testing here is that, given these instance types
+			// and regions and prices, those same prices get set appropriately into
+			// the returned pricing map.
+			inputFile: "./test/skus.json",
+			inputKeys: map[string]models.Key{
+				"us-central1,a2highgpu,ondemand,gpu": &gcpKey{
+					Labels: map[string]string{
+						"node.kubernetes.io/instance-type": "a2-highgpu-1g",
+						"cloud.google.com/gke-gpu":         "true",
+						"cloud.google.com/gke-accelerator": "nvidia-tesla-a100",
+						"topology.kubernetes.io/region":    "us-central1",
+					},
+				},
+				"us-central1,e2medium,ondemand": &gcpKey{
+					Labels: map[string]string{
+						"node.kubernetes.io/instance-type": "e2-medium",
+						"topology.kubernetes.io/region":    "us-central1",
+					},
+				},
+				"us-central1,e2standard,ondemand": &gcpKey{
+					Labels: map[string]string{
+						"node.kubernetes.io/instance-type": "e2-standard",
+						"topology.kubernetes.io/region":    "us-central1",
+					},
+				},
+				"asia-southeast1,t2dstandard,ondemand": &gcpKey{
+					Labels: map[string]string{
+						"node.kubernetes.io/instance-type": "t2d-standard-1",
+						"topology.kubernetes.io/region":    "asia-southeast1",
+					},
+				},
 			},
-			ServiceRegions: []string{"us-central1", "us-east1", "us-west1"},
-			PricingInfo: []*PricingInfo{
-				{
-					Summary: "",
-					PricingExpression: &PricingExpression{
-						UsageUnit:                "h",
-						UsageUnitDescription:     "hour",
-						BaseUnit:                 "s",
-						BaseUnitConversionFactor: 0,
-						DisplayQuantity:          1,
-						TieredRates: []*TieredRates{
-							{
-								StartUsageAmount: 0,
-								UnitPrice: &UnitPriceInfo{
-									CurrencyCode: "USD",
-									Units:        "2",
-									Nanos:        933908000,
+			pvKeys: map[string]models.PVKey{},
+			expectedPrices: map[string]*GCPPricing{
+				"us-central1,a2highgpu,ondemand,gpu": {
+					Name:        "services/6F81-5844-456A/skus/039F-D0DA-4055",
+					SKUID:       "039F-D0DA-4055",
+					Description: "Nvidia Tesla A100 GPU running in Americas",
+					Category: &GCPResourceInfo{
+						ServiceDisplayName: "Compute Engine",
+						ResourceFamily:     "Compute",
+						ResourceGroup:      "GPU",
+						UsageType:          "OnDemand",
+					},
+					ServiceRegions: []string{"us-central1", "us-east1", "us-west1"},
+					PricingInfo: []*PricingInfo{
+						{
+							Summary: "",
+							PricingExpression: &PricingExpression{
+								UsageUnit:                "h",
+								UsageUnitDescription:     "hour",
+								BaseUnit:                 "s",
+								BaseUnitConversionFactor: 0,
+								DisplayQuantity:          1,
+								TieredRates: []*TieredRates{
+									{
+										StartUsageAmount: 0,
+										UnitPrice: &UnitPriceInfo{
+											CurrencyCode: "USD",
+											Units:        "2",
+											Nanos:        933908000,
+										},
+									},
 								},
 							},
+							CurrencyConversionRate: 1,
+							EffectiveTime:          "2023-03-24T10:52:50.681Z",
 						},
 					},
-					CurrencyConversionRate: 1,
-					EffectiveTime:          "2023-03-24T10:52:50.681Z",
+					ServiceProviderName: "Google",
+					Node: &models.Node{
+						VCPUCost:         "0.031611",
+						RAMCost:          "0.004237",
+						UsesBaseCPUPrice: false,
+						GPU:              "1",
+						GPUName:          "nvidia-tesla-a100",
+						GPUCost:          "2.933908",
+					},
+				},
+				"us-central1,a2highgpu,ondemand": {
+					Node: &models.Node{
+						VCPUCost:         "0.031611",
+						RAMCost:          "0.004237",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"us-central1,e2medium,ondemand": {
+					Node: &models.Node{
+						VCPU:             "1.000000",
+						VCPUCost:         "327.173848364",
+						RAMCost:          "43.85294978",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"us-central1,e2medium,ondemand,gpu": {
+					Node: &models.Node{
+						VCPU:             "1.000000",
+						VCPUCost:         "327.173848364",
+						RAMCost:          "43.85294978",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"us-central1,e2standard,ondemand": {
+					Node: &models.Node{
+						VCPUCost:         "327.173848364",
+						RAMCost:          "43.85294978",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"us-central1,e2standard,ondemand,gpu": {
+					Node: &models.Node{
+						VCPUCost:         "327.173848364",
+						RAMCost:          "43.85294978",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"asia-southeast1,t2dstandard,ondemand": {
+					Node: &models.Node{
+						VCPUCost:         "508.934997455",
+						RAMCost:          "68.204999658",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
+				},
+				"asia-southeast1,t2dstandard,ondemand,gpu": {
+					Node: &models.Node{
+						VCPUCost:         "508.934997455",
+						RAMCost:          "68.204999658",
+						UsesBaseCPUPrice: false,
+						UsageType:        "ondemand",
+					},
 				},
 			},
-			ServiceProviderName: "Google",
-			Node: &models.Node{
-				VCPUCost:         "0.031611",
-				RAMCost:          "0.004237",
-				UsesBaseCPUPrice: false,
-				GPU:              "1",
-				GPUName:          "nvidia-tesla-a100",
-				GPUCost:          "2.933908",
-			},
-		},
-		"us-central1,a2highgpu,ondemand": {
-			Node: &models.Node{
-				VCPUCost:         "0.031611",
-				RAMCost:          "0.004237",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"us-central1,e2medium,ondemand": {
-			Node: &models.Node{
-				VCPU:             "1.000000",
-				VCPUCost:         "327.173848364",
-				RAMCost:          "43.85294978",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"us-central1,e2medium,ondemand,gpu": {
-			Node: &models.Node{
-				VCPU:             "1.000000",
-				VCPUCost:         "327.173848364",
-				RAMCost:          "43.85294978",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"us-central1,e2standard,ondemand": {
-			Node: &models.Node{
-				VCPUCost:         "327.173848364",
-				RAMCost:          "43.85294978",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"us-central1,e2standard,ondemand,gpu": {
-			Node: &models.Node{
-				VCPUCost:         "327.173848364",
-				RAMCost:          "43.85294978",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"asia-southeast1,t2dstandard,ondemand": {
-			Node: &models.Node{
-				VCPUCost:         "508.934997455",
-				RAMCost:          "68.204999658",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
-		},
-		"asia-southeast1,t2dstandard,ondemand,gpu": {
-			Node: &models.Node{
-				VCPUCost:         "508.934997455",
-				RAMCost:          "68.204999658",
-				UsesBaseCPUPrice: false,
-				UsageType:        "ondemand",
-			},
+			expectedToken: "APKCS1HVa0YpwgyTFbqbJ1eGwzKZmsPwLqzMZPTSNia5ck1Hc54Tx_Kz3oBxwSnRIdGVxXoSPdf-XlDpyNBf4QuxKcIEgtrQ1LDLWAgZowI0ns7HjrGta2s=",
+			expectError:   false,
 		},
 	}
 
-	if !reflect.DeepEqual(actualPrices, expectedActualPrices) {
-		act, _ := json.Marshal(actualPrices)
-		exp, _ := json.Marshal(expectedActualPrices)
-		t.Errorf("error parsing GCP prices: parsed \n%s\n expected \n%s\n", string(act), string(exp))
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			fileBytes, err := os.ReadFile(tc.inputFile)
+			if err != nil {
+				t.Fatalf("failed to open file '%s': %s", tc.inputFile, err)
+			}
+			reader := bytes.NewReader(fileBytes)
+
+			testGcp := &GCP{}
+			actualPrices, token, err := testGcp.parsePage(reader, tc.inputKeys, tc.pvKeys)
+			if err != nil {
+				log.Errorf("got error parsing page: %v", err)
+			}
+			if tc.expectError != (err != nil) {
+				t.Fatalf("Error from result was not as expected. Expected: %v, Actual: %v", tc.expectError, err != nil)
+			}
+
+			if token != tc.expectedToken {
+				t.Fatalf("error parsing GCP next page token, parsed %s but expected %s", token, tc.expectedToken)
+			}
+
+			if !reflect.DeepEqual(actualPrices, tc.expectedPrices) {
+				act, _ := json.Marshal(actualPrices)
+				exp, _ := json.Marshal(tc.expectedPrices)
+				t.Errorf("error parsing GCP prices: parsed \n%s\n expected \n%s\n", string(act), string(exp))
+			}
+		})
 	}
+
 }

+ 17 - 0
pkg/cloud/gcp/test/error.json

@@ -0,0 +1,17 @@
+{
+  "error": {
+    "code": 400,
+    "message": "API key not valid. Please pass a valid API key.",
+    "status": "INVALID_ARGUMENT",
+    "details": [
+      {
+        "@type": "type.googleapis.com/google.rpc.ErrorInfo",
+        "reason": "API_KEY_INVALID",
+        "domain": "googleapis.com",
+        "metadata": {
+          "service": "cloudbilling.googleapis.com"
+        }
+      }
+    ]
+  }
+}

+ 130 - 0
pkg/cloud/oracle/authorizer.go

@@ -0,0 +1,130 @@
+package oracle
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/oracle/oci-go-sdk/v65/common"
+)
+
+// RawConfigProviderAuthorizerType is the identifier stored in serialized
+// configurations to select the RawConfigProvider Authorizer implementation.
+const RawConfigProviderAuthorizerType = "OCIRawConfigProvider"
+
+// Authorizer is the credential-provider interface used when creating clients
+// in the OCI SDK. It extends cloud.Authorizer with the ability to produce an
+// OCI common.ConfigurationProvider.
+type Authorizer interface {
+	cloud.Authorizer
+	CreateOCIConfig() (common.ConfigurationProvider, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts
+// as a register for Authorizer types: it maps a serialized type identifier to
+// a fresh instance of the matching Authorizer implementation.
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	if typeStr == RawConfigProviderAuthorizerType {
+		return &RawConfigProvider{}, nil
+	}
+	return nil, fmt.Errorf("OCI: provider authorizer type '%s' is not valid", typeStr)
+}
+
+// RawConfigProvider holds OCI credentials and fulfils the common.ConfigurationProvider interface
+type RawConfigProvider struct {
+	TenancyID            string  `json:"tenancyID"`
+	UserID               string  `json:"userID"`
+	Region               string  `json:"region"`
+	Fingerprint          string  `json:"fingerprint"`
+	PrivateKey           string  `json:"privateKey"`
+	PrivateKeyPassphrase *string `json:"privateKeyPassphrase"` // nil when the private key is not encrypted
+}
+
+// MarshalJSON implements custom JSON marshalling: it emits the credential
+// properties as tagged in the struct plus the authorizer type property so the
+// concrete type can be recovered on unmarshal.
+func (ak *RawConfigProvider) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		cloud.AuthorizerTypeProperty: RawConfigProviderAuthorizerType,
+		"tenancyId":                  ak.TenancyID,
+		"userId":                     ak.UserID,
+		"region":                     ak.Region,
+		"fingerprint":                ak.Fingerprint,
+		"privateKey":                 ak.PrivateKey,
+		"privateKeyPassphrase":       ak.PrivateKeyPassphrase,
+	})
+}
+
+// Validate checks that every required credential field is populated; a nil
+// passphrase is legal, but a present-and-empty one is not.
+func (ak *RawConfigProvider) Validate() error {
+	switch {
+	case ak.TenancyID == "":
+		return fmt.Errorf("RawConfigProvider: missing tenancy ID")
+	case ak.UserID == "":
+		return fmt.Errorf("RawConfigProvider: missing user ID")
+	case ak.Fingerprint == "":
+		return fmt.Errorf("RawConfigProvider: missing key fingerprint")
+	case ak.Region == "":
+		return fmt.Errorf("RawConfigProvider: missing region")
+	case ak.PrivateKey == "":
+		return fmt.Errorf("RawConfigProvider: missing private key")
+	case ak.PrivateKeyPassphrase != nil && *ak.PrivateKeyPassphrase == "":
+		return fmt.Errorf("RawConfigProvider: missing private key passphrase")
+	}
+	return nil
+}
+
+// Equals reports whether config is a *RawConfigProvider with identical
+// credential fields, comparing passphrases by pointed-to value.
+func (ak *RawConfigProvider) Equals(config cloud.Config) bool {
+	that, ok := config.(*RawConfigProvider)
+	if !ok {
+		return false
+	}
+
+	if ak.TenancyID != that.TenancyID ||
+		ak.UserID != that.UserID ||
+		ak.Fingerprint != that.Fingerprint ||
+		ak.Region != that.Region ||
+		ak.PrivateKey != that.PrivateKey {
+		return false
+	}
+
+	// Passphrases are equal when both are nil or both point to equal strings.
+	switch {
+	case ak.PrivateKeyPassphrase == nil && that.PrivateKeyPassphrase == nil:
+		return true
+	case ak.PrivateKeyPassphrase == nil || that.PrivateKeyPassphrase == nil:
+		return false
+	default:
+		return *ak.PrivateKeyPassphrase == *that.PrivateKeyPassphrase
+	}
+}
+
+// Sanitize returns a copy of the provider safe for display/logging: the
+// private key and passphrase are replaced with the redaction placeholder.
+// NOTE(review): a nil passphrase is also replaced with the redacted value, so
+// a sanitized copy never Equals the original — confirm this is intended.
+func (ak *RawConfigProvider) Sanitize() cloud.Config {
+	redacted := cloud.Redacted
+	return &RawConfigProvider{
+		TenancyID:            ak.TenancyID,
+		UserID:               ak.UserID,
+		Fingerprint:          ak.Fingerprint,
+		Region:               ak.Region,
+		PrivateKey:           cloud.Redacted,
+		PrivateKeyPassphrase: &redacted,
+	}
+}
+
+// CreateOCIConfig builds an OCI SDK ConfigurationProvider directly from the
+// raw credential fields held by this provider; it never returns an error.
+func (ak *RawConfigProvider) CreateOCIConfig() (common.ConfigurationProvider, error) {
+	return common.NewRawConfigurationProvider(ak.TenancyID, ak.UserID, ak.Region, ak.Fingerprint, ak.PrivateKey, ak.PrivateKeyPassphrase), nil
+}

+ 131 - 0
pkg/cloud/oracle/usageapiconfiguration.go

@@ -0,0 +1,131 @@
+package oracle
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/oracle/oci-go-sdk/v65/usageapi"
+)
+
+// UsageApiConfiguration holds the settings needed to query the OCI Usage API
+// for a single tenancy/region, including the credential Authorizer.
+type UsageApiConfiguration struct {
+	TenancyID  string     `json:"tenancyID"`
+	Region     string     `json:"region"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+// Validate ensures an Authorizer is present and valid, then checks the base
+// tenancy/region properties. Check order is significant for error messages.
+func (uac *UsageApiConfiguration) Validate() error {
+	if uac.Authorizer == nil {
+		return fmt.Errorf("UsageApiConfiguration: missing Authorizer")
+	}
+	if err := uac.Authorizer.Validate(); err != nil {
+		return fmt.Errorf("UsageApiConfiguration: %s", err)
+	}
+	if uac.TenancyID == "" {
+		return fmt.Errorf("UsageApiConfiguration: missing tenancyID")
+	}
+	if uac.Region == "" {
+		return fmt.Errorf("UsageApiConfiguration: missing region")
+	}
+	return nil
+}
+
+// Equals reports whether config is a *UsageApiConfiguration with the same
+// tenancy, region, and an equal (or equally absent) Authorizer.
+func (uac *UsageApiConfiguration) Equals(config cloud.Config) bool {
+	that, ok := config.(*UsageApiConfiguration)
+	if !ok {
+		return false
+	}
+
+	sameAuthorizer := false
+	if uac.Authorizer == nil {
+		sameAuthorizer = that.Authorizer == nil
+	} else {
+		sameAuthorizer = uac.Authorizer.Equals(that.Authorizer)
+	}
+
+	return sameAuthorizer &&
+		uac.TenancyID == that.TenancyID &&
+		uac.Region == that.Region
+}
+
+func (uac *UsageApiConfiguration) Sanitize() cloud.Config {
+	return &UsageApiConfiguration{
+		TenancyID:  uac.TenancyID,
+		Region:     uac.Region,
+		Authorizer: uac.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+// Key returns the unique identifier for this configuration: the tenancy OCID.
+func (uac *UsageApiConfiguration) Key() string {
+	return uac.TenancyID
+}
+
+// Provider identifies this configuration as belonging to Oracle.
+func (uac *UsageApiConfiguration) Provider() string {
+	return opencost.OracleProvider
+}
+
+// GetUsageApiClient builds an OCI Usage API client from the Authorizer's
+// ConfigurationProvider.
+// NOTE(review): assumes Authorizer is non-nil (Validate enforces this);
+// calling on an unvalidated config with a nil Authorizer would panic.
+func (uac *UsageApiConfiguration) GetUsageApiClient() (*usageapi.UsageapiClient, error) {
+	configProvider, err := uac.Authorizer.CreateOCIConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create oci config: %s", err.Error())
+	}
+	client, err := usageapi.NewUsageapiClientWithConfigurationProvider(configProvider)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create usage api client: %s", err.Error())
+	}
+	return &client, nil
+}
+
+// UnmarshalJSON decodes a UsageApiConfiguration from JSON, using the embedded
+// authorizer type property to select the concrete Authorizer implementation
+// via SelectAuthorizerByType.
+func (uac *UsageApiConfiguration) UnmarshalJSON(b []byte) error {
+	// Decode into a generic map first so the authorizer payload can be
+	// re-dispatched by its type property.
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	tenancyId, err := cloud.GetInterfaceValue[string](fmap, "tenancyID")
+	if err != nil {
+		return fmt.Errorf("UsageApiConfiguration: UnmarshalJSON: %w", err)
+	}
+	uac.TenancyID = tenancyId
+
+	region, err := cloud.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("UsageApiConfiguration: UnmarshalJSON: %w", err)
+	}
+	uac.Region = region
+
+	// The authorizer key is required; its concrete type is chosen from the
+	// type property inside the nested object.
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("UsageApiConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := cloud.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("UsageApiConfiguration: UnmarshalJSON: %w", err)
+	}
+	uac.Authorizer = authorizer
+
+	return nil
+}

+ 318 - 0
pkg/cloud/oracle/usageapiconfiguration_test.go

@@ -0,0 +1,318 @@
+package oracle
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/core/pkg/log"
+	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/pkg/cloud"
+)
+
+// TestUsageApiConfiguration_Validate checks Validate across a valid config,
+// an invalid authorizer, and each missing required field, comparing error
+// text ("nil" stands in for no error).
+func TestUsageApiConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   UsageApiConfiguration
+		expected error
+	}{
+		"valid config OCI Key": {
+			config: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: nil,
+		},
+		"invalid authorizer": {
+			config: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "",
+					PrivateKey:  "",
+				},
+			},
+			expected: fmt.Errorf("UsageApiConfiguration: RawConfigProvider: missing key fingerprint"),
+		},
+		"missing authorizer": {
+			config: UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("UsageApiConfiguration: missing Authorizer"),
+		},
+		"missing tenancyID": {
+			config: UsageApiConfiguration{
+				TenancyID: "",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: fmt.Errorf("UsageApiConfiguration: missing tenancyID"),
+		},
+		"missing region": {
+			config: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: fmt.Errorf("UsageApiConfiguration: missing region"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				// FIX: closing quote was missing after the expected value.
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s'", actualString, expectedString)
+			}
+		})
+	}
+}
+
+// TestUsageApiConfiguration_Equals exercises Equals across matching configs,
+// differing authorizers (including nil on either or both sides), and
+// differing tenancy/region values.
+func TestUsageApiConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     UsageApiConfiguration
+		right    cloud.Config
+		expected bool
+	}{
+		"matching config": {
+			left: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			right: &UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: true,
+		},
+		"different configurer": {
+			left: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint2",
+					PrivateKey:  "key",
+				},
+			},
+			right: &UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: false,
+		},
+		"missing both configurer": {
+			left: UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+			right: &UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left configurer": {
+			left: UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+			right: &UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: false,
+		},
+		"missing right configurer": {
+			left: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			right: &UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different tenancyID": {
+			left: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			right: &UsageApiConfiguration{
+				TenancyID: "tenancyID2",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID2",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			right: &UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region2",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region2",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				// FIX: closing quote was missing after the expected value.
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t'", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+// TestUsageApiConfiguration_JSON round-trips each configuration through
+// Marshal/Unmarshal and asserts the result Equals the original.
+func TestUsageApiConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config UsageApiConfiguration
+	}{
+		"Empty Config": {
+			config: UsageApiConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: UsageApiConfiguration{
+				TenancyID:  "tenancyID",
+				Region:     "region",
+				Authorizer: nil,
+			},
+		},
+		"RawConfigProviderAuthorizer": {
+			config: UsageApiConfiguration{
+				TenancyID: "tenancyID",
+				Region:    "region",
+				Authorizer: &RawConfigProvider{
+					TenancyID:   "tenancyID",
+					UserID:      "userID",
+					Region:      "region2",
+					Fingerprint: "fingerprint",
+					PrivateKey:  "key",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			// Round-trip: decode the JSON back and compare with Equals.
+			unmarshalledConfig := &UsageApiConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 171 - 0
pkg/cloud/oracle/usageapiintegration.go

@@ -0,0 +1,171 @@
+package oracle
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/oracle/oci-go-sdk/v65/common"
+	"github.com/oracle/oci-go-sdk/v65/usageapi"
+)
+
+// UsageApiIntegration implements CloudCost retrieval on top of the OCI Usage
+// API, tracking the health of the connection across queries.
+type UsageApiIntegration struct {
+	UsageApiConfiguration
+	ConnectionStatus cloud.ConnectionStatus
+}
+
+// GetCloudCost queries the OCI Usage API for daily-granularity cost items in
+// [start, end) and loads them into a CloudCostSetRange accumulated by day.
+// ConnectionStatus is updated to reflect the outcome of the query.
+//
+// NOTE(review): the request sets Limit 500 and does not follow OpcNextPage;
+// tenancies returning more than one page will be truncated — confirm.
+func (uai *UsageApiIntegration) GetCloudCost(start time.Time, end time.Time) (*opencost.CloudCostSetRange, error) {
+	client, err := uai.GetUsageApiClient()
+	if err != nil {
+		uai.ConnectionStatus = cloud.FailedConnection
+		return nil, fmt.Errorf("getting oracle usage api client: %s", err.Error())
+	}
+
+	req := usageapi.RequestSummarizedUsagesRequest{
+		RequestSummarizedUsagesDetails: usageapi.RequestSummarizedUsagesDetails{
+			Granularity:       usageapi.RequestSummarizedUsagesDetailsGranularityDaily,
+			GroupBy:           []string{"resourceId", "service", "subscriptionId", "tenantName"},
+			IsAggregateByTime: common.Bool(false),
+			TimeUsageStarted:  &common.SDKTime{Time: start},
+			TimeUsageEnded:    &common.SDKTime{Time: end},
+			QueryType:         usageapi.RequestSummarizedUsagesDetailsQueryTypeCost,
+			TenantId:          common.String(uai.TenancyID),
+		},
+		Limit: common.Int(500),
+	}
+
+	resp, err := client.RequestSummarizedUsages(context.Background(), req)
+	if err != nil {
+		uai.ConnectionStatus = cloud.FailedConnection
+		return nil, fmt.Errorf("failed to query usage: %w", err)
+	}
+
+	ccsr, err := opencost.NewCloudCostSetRange(start, end, opencost.AccumulateOptionDay, uai.Key())
+	if err != nil {
+		return nil, err
+	}
+
+	// Set status to missing data if query comes back empty and the status isn't already successful
+	if len(resp.Items) == 0 && uai.ConnectionStatus != cloud.SuccessfulConnection {
+		uai.ConnectionStatus = cloud.MissingData
+		return ccsr, nil
+	}
+
+	for _, item := range resp.Items {
+		// Optional string pointers default to "" when absent.
+		resourceId := ""
+		if item.ResourceId != nil {
+			resourceId = *item.ResourceId
+		}
+
+		tenantName := ""
+		if item.TenantName != nil {
+			tenantName = *item.TenantName
+		}
+
+		subscriptionId := ""
+		if item.SubscriptionId != nil {
+			subscriptionId = *item.SubscriptionId
+		}
+
+		service := ""
+		if item.Service != nil {
+			service = *item.Service
+		}
+
+		category := SelectOCICategory(service)
+
+		// Iterate through the slice of tags, assigning
+		// keys and values to the map of labels
+		labels := opencost.CloudCostLabels{}
+		for _, tag := range item.Tags {
+			if tag.Key == nil || tag.Value == nil {
+				continue
+			}
+			labels[*tag.Key] = *tag.Value
+		}
+
+		properties := &opencost.CloudCostProperties{
+			ProviderID:      resourceId,
+			Provider:        opencost.OracleProvider,
+			AccountID:       uai.TenancyID,
+			AccountName:     tenantName,
+			InvoiceEntityID: subscriptionId,
+			RegionID:        uai.Region,
+			Service:         service,
+			Category:        category,
+			Labels:          labels,
+		}
+
+		// Each item covers a single usage day. Guard against a missing start
+		// time rather than dereferencing a nil SDKTime pointer.
+		winStart := start
+		if item.TimeUsageStarted != nil {
+			winStart = item.TimeUsageStarted.Time
+		}
+		// FIX: the window must end one day after the item's own start, not
+		// one day after the overall query start as before.
+		winEnd := winStart.AddDate(0, 0, 1)
+
+		listRate := 0.0
+		if item.ListRate != nil {
+			listRate = float64(*item.ListRate)
+		}
+
+		// FIX: a nil AttributedCost previously produced ParseFloat("") and
+		// failed the entire query; treat missing/empty values as zero.
+		attrCost := 0.0
+		if item.AttributedCost != nil && *item.AttributedCost != "" {
+			attrCost, err = strconv.ParseFloat(*item.AttributedCost, 64)
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse float '%s': %s", *item.AttributedCost, err.Error())
+			}
+		}
+
+		computedAmt := 0.0
+		if item.ComputedAmount != nil {
+			computedAmt = float64(*item.ComputedAmount)
+		}
+
+		cc := &opencost.CloudCost{
+			Properties: properties,
+			Window:     opencost.NewWindow(&winStart, &winEnd),
+			//todo: which returned costs go where?
+			ListCost: opencost.CostMetric{
+				Cost: listRate,
+			},
+			NetCost: opencost.CostMetric{
+				Cost: computedAmt,
+			},
+			AmortizedNetCost: opencost.CostMetric{
+				Cost: attrCost,
+			},
+			AmortizedCost: opencost.CostMetric{
+				Cost: attrCost,
+			},
+			InvoicedCost: opencost.CostMetric{
+				Cost: computedAmt,
+			},
+		}
+
+		ccsr.LoadCloudCost(cc)
+	}
+
+	uai.ConnectionStatus = cloud.SuccessfulConnection
+	return ccsr, nil
+}
+
+// GetStatus returns the current connection status, lazily initializing it for
+// integrations that have never run.
+func (uai *UsageApiIntegration) GetStatus() cloud.ConnectionStatus {
+	// initialize status if it has not done so; this can happen if the integration is inactive
+	if uai.ConnectionStatus.String() == "" {
+		uai.ConnectionStatus = cloud.InitialStatus
+	}
+	return uai.ConnectionStatus
+}
+
+// SelectOCICategory maps an OCI service name to an OpenCost cost category;
+// unrecognized services fall into the "other" category. A switch replaces the
+// original if/else-if chain (idiomatic Go for multi-way string dispatch).
+func SelectOCICategory(service string) string {
+	switch service {
+	case "Compute":
+		return opencost.ComputeCategory
+	case "Block Storage", "Object Storage":
+		return opencost.StorageCategory
+	case "Load Balancer", "Virtual Cloud Network":
+		return opencost.NetworkCategory
+	default:
+		return opencost.OtherCategory
+	}
+}

+ 61 - 0
pkg/cloud/oracle/usageapiintegration_test.go

@@ -0,0 +1,61 @@
+package oracle
+
+import (
+	"encoding/json"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/opencost/opencost/core/pkg/util/timeutil"
+)
+
+// TestUsageAPIIntegration_GetCloudCost is an opt-in integration test against
+// a live OCI tenancy: it is skipped unless USAGEAPI_CONFIGURATION points at a
+// JSON-encoded UsageApiConfiguration, then checks result emptiness for a
+// too-recent window versus a week-old window.
+func TestUsageAPIIntegration_GetCloudCost(t *testing.T) {
+	usageApiConfigPath := os.Getenv("USAGEAPI_CONFIGURATION")
+	if usageApiConfigPath == "" {
+		t.Skip("skipping integration test, set environment variable USAGEAPI_CONFIGURATION")
+	}
+	usageApiConfigBin, err := os.ReadFile(usageApiConfigPath)
+	if err != nil {
+		t.Fatalf("failed to read config file: %s", err.Error())
+	}
+	var usageApiConfig UsageApiConfiguration
+	err = json.Unmarshal(usageApiConfigBin, &usageApiConfig)
+	if err != nil {
+		t.Fatalf("failed to unmarshal config from JSON: %s", err.Error())
+	}
+	testCases := map[string]struct {
+		integration *UsageApiIntegration
+		start       time.Time
+		end         time.Time
+		expected    bool // expected IsEmpty() of the returned set
+	}{
+		// No CUR data is expected within 2 days of now
+		"too_recent_window": {
+			integration: &UsageApiIntegration{
+				UsageApiConfiguration: usageApiConfig,
+			},
+			end:      time.Now(),
+			start:    time.Now().Add(-timeutil.Day),
+			expected: true,
+		},
+		// CUR data should be available
+		"last week window": {
+			integration: &UsageApiIntegration{
+				UsageApiConfiguration: usageApiConfig,
+			},
+			end:      time.Now().Add(-7 * timeutil.Day),
+			start:    time.Now().Add(-8 * timeutil.Day),
+			expected: false,
+		},
+	}
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := testCase.integration.GetCloudCost(testCase.start, testCase.end)
+			if err != nil {
+				t.Errorf("Other error during testing %s", err)
+			} else if actual.IsEmpty() != testCase.expected {
+				t.Errorf("Incorrect result, actual emptiness: %t, expected: %t", actual.IsEmpty(), testCase.expected)
+			}
+		})
+	}
+}

+ 1 - 1
pkg/cloud/provider/csvprovider.go

@@ -417,7 +417,7 @@ func (c *CSVProvider) PVPricing(pvk models.PVKey) (*models.PV, error) {
 	defer c.DownloadPricingDataLock.RUnlock()
 	pricing, ok := c.PricingPV[pvk.Features()]
 	if !ok {
-		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &models.PV{}, nil
 	}
 	return &models.PV{

+ 43 - 0
pkg/cloud/provider/provider.go

@@ -2,9 +2,11 @@ package provider
 
 import (
 	"errors"
+	"fmt"
 	"net"
 	"net/http"
 	"regexp"
+	"strconv"
 	"strings"
 	"time"
 
@@ -276,6 +278,16 @@ func getClusterProperties(node *clustercache.Node) clusterProperties {
 		accountID:      "",
 		projectID:      "",
 	}
+
+	// Check for custom provider settings
+	if env.IsUseCustomProvider() {
+		// Use CSV provider if set
+		if env.IsUseCSVProvider() {
+			cp.provider = opencost.CSVProvider
+		}
+		return cp
+	}
+
 	// The second conditional is mainly if you're running opencost outside of GCE, say in a local environment.
 	if metadata.OnGCE() || strings.HasPrefix(providerID, "gce") {
 		cp.provider = opencost.GCPProvider
@@ -301,6 +313,7 @@ func getClusterProperties(node *clustercache.Node) clusterProperties {
 		cp.provider = opencost.OracleProvider
 		cp.configFileName = "oracle.json"
 	}
+	// Override provider to CSV if CSVProvider is used and custom provider is not set
 	if env.IsUseCSVProvider() {
 		cp.provider = opencost.CSVProvider
 	}
@@ -314,6 +327,7 @@ var (
 	// gce://guestbook-227502/us-central1-a/gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
 	//  => gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
 	providerGCERegex = regexp.MustCompile("gce://[^/]*/[^/]*/([^/]+)")
+
 	// Capture "vol-0fc54c5e83b8d2b76" from "aws://us-east-2a/vol-0fc54c5e83b8d2b76"
 	persistentVolumeAWSRegex = regexp.MustCompile("aws:/[^/]*/[^/]*/([^/]+)")
 	// Capture "ad9d88195b52a47c89b5055120f28c58" from "ad9d88195b52a47c89b5055120f28c58-1037804914.us-east-2.elb.amazonaws.com"
@@ -360,3 +374,32 @@ func ParseLBID(id string) string {
 	// Return id for GCP Provider, Azure Provider, CSV Provider and Custom Provider
 	return id
 }
+
+// ParseLocalDiskID attempts to parse a ProviderID from the ProviderID of the node that the local disk is running on
+func ParseLocalDiskID(id string) string {
+	// Parse like node
+	id = ParseID(id)
+
+	if strings.HasPrefix(id, "azure://") {
+
+		// handle vmss ProviderID of type azure:///subscriptions/ae337b64-e7ba-3387-b043-187289efe4e3/resourceGroups/mc_test_eastus2/providers/Microsoft.Compute/virtualMachineScaleSets/aks-userpool-12345678-vmss/virtualMachines/11
+		if strings.Contains(id, "virtualMachineScaleSets") {
+			split := strings.Split(id, "/virtualMachineScaleSets/")
+			// combine vmss name and number into a single string ending in a 6 character base 32 number
+			vmSplit := strings.Split(split[1], "/")
+			if len(vmSplit) != 3 {
+				// unexpected shape after the scale-set segment; return as parsed
+				return id
+			}
+			vmNum, err := strconv.ParseInt(vmSplit[2], 10, 64)
+			if err != nil {
+				// instance suffix is not numeric; return as parsed
+				return id
+			}
+
+			// %06s with fmt's '0' flag zero-pads the string, so instance 11
+			// becomes FormatInt(11, 32) = "b" -> "00000b", matching Azure's
+			// VMSS OS-disk naming convention.
+			id = fmt.Sprintf("%s/disks/%s%06s", split[0], vmSplit[0], strconv.FormatInt(vmNum, 32))
+		}
+		// Non-VMSS VMs: swap the virtualMachines segment for disks. (No-op on
+		// the VMSS path, which already produced a /disks/ segment above.)
+		id = strings.Replace(id, "/virtualMachines/", "/disks/", -1)
+		id = strings.ToLower(id)
+		return fmt.Sprintf("%s_osdisk", id)
+	}
+	return id
+}

+ 44 - 0
pkg/cloud/provider/provider_test.go

@@ -0,0 +1,44 @@
+package provider
+
+import (
+	"testing"
+)
+
+// TestParseLocalDiskID covers pass-through inputs, AWS/GCP node IDs, and both
+// Azure VMSS and plain-VM provider IDs mapped to their OS-disk IDs.
+func TestParseLocalDiskID(t *testing.T) {
+	tests := map[string]struct {
+		input string
+		want  string
+	}{
+		"empty string": {
+			input: "",
+			want:  "",
+		},
+		"generic string": {
+			input: "test",
+			want:  "test",
+		},
+		"AWS node provider id": {
+			input: "aws:///us-east-2a/i-0fea4fd46592d050b",
+			want:  "i-0fea4fd46592d050b",
+		},
+		"GCP node provider id": {
+			input: "gce://guestbook-11111/us-central1-a/gke-niko-n1-standard-2-wlkla-8d48e58a-hfy7",
+			want:  "gke-niko-n1-standard-2-wlkla-8d48e58a-hfy7",
+		},
+		"Azure vmss provider id": {
+			input: "azure:///subscriptions/ae337b64-e7ba-3387-b043-187289efe4e3/resourceGroups/mc_test_eastus2/providers/Microsoft.Compute/virtualMachineScaleSets/aks-userpool-12345678-vmss/virtualMachines/11",
+			want:  "azure:///subscriptions/ae337b64-e7ba-3387-b043-187289efe4e3/resourcegroups/mc_test_eastus2/providers/microsoft.compute/disks/aks-userpool-12345678-vmss00000b_osdisk",
+		},
+		"Azure vm provider id": {
+			input: "azure:///subscriptions/ae337b64-e7ba-3387-b043-187289efe4e3/resourceGroups/mc_test_eastus2/providers/Microsoft.Compute/virtualMachines/master-0",
+			want:  "azure:///subscriptions/ae337b64-e7ba-3387-b043-187289efe4e3/resourcegroups/mc_test_eastus2/providers/microsoft.compute/disks/master-0_osdisk",
+		},
+	}
+	for name, tt := range tests {
+		t.Run(name, func(t *testing.T) {
+			if got := ParseLocalDiskID(tt.input); got != tt.want {
+				t.Errorf("ParseLocalDiskID() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

+ 1 - 1
pkg/cloud/scaleway/provider.go

@@ -230,7 +230,7 @@ func (c *Scaleway) PVPricing(pvk models.PVKey) (*models.PV, error) {
 
 	pricing, ok := c.Pricing[pvk.Features()]
 	if !ok {
-		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
 		return &models.PV{}, nil
 	}
 	return &models.PV{

+ 17 - 3
pkg/cloudcost/integration.go

@@ -9,6 +9,7 @@ import (
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/oracle"
 )
 
 // CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
@@ -57,15 +58,24 @@ func GetIntegrationFromConfig(kc cloud.KeyedConfig) CloudCostIntegration {
 	case *azure.StorageConnection:
 		return &azure.AzureStorageIntegration{
 			AzureStorageBillingParser: azure.AzureStorageBillingParser{
-				StorageConnection: *keyedConfig,
+				StorageConnection: azure.StorageConnection{
+					StorageConfiguration: keyedConfig.StorageConfiguration},
 			},
 		}
 	case *azure.AzureStorageBillingParser:
 		return &azure.AzureStorageIntegration{
-			AzureStorageBillingParser: *keyedConfig,
+			AzureStorageBillingParser: azure.AzureStorageBillingParser{
+				StorageConnection: azure.StorageConnection{
+					StorageConfiguration: keyedConfig.StorageConfiguration},
+			},
 		}
 	case *azure.AzureStorageIntegration:
-		return keyedConfig
+		return &azure.AzureStorageIntegration{
+			AzureStorageBillingParser: azure.AzureStorageBillingParser{
+				StorageConnection: azure.StorageConnection{
+					StorageConfiguration: keyedConfig.StorageConfiguration},
+			},
+		}
 	// S3SelectIntegration
 	case *aws.S3Configuration:
 		return &aws.S3SelectIntegration{
@@ -90,6 +100,10 @@ func GetIntegrationFromConfig(kc cloud.KeyedConfig) CloudCostIntegration {
 	// Alibaba BOA Integration
 	case *alibaba.BOAConfiguration:
 		return nil
+	case *oracle.UsageApiConfiguration:
+		return &oracle.UsageApiIntegration{
+			UsageApiConfiguration: *keyedConfig,
+		}
 	default:
 		return nil
 	}

+ 1 - 13
pkg/clustercache/watchcontroller.go

@@ -88,19 +88,7 @@ func NewCachingWatcher(restClient rest.Interface, resource string, resourceType
 }
 
 func (c *CachingWatchController) GetAll() []interface{} {
-	list := c.indexer.List()
-
-	// since the indexer returns the as-is pointer to the resource,
-	// we deep copy the resources such that callers don't corrupt the
-	// index
-	cloneList := make([]interface{}, 0, len(list))
-	for _, v := range list {
-		if deepCopyable, ok := v.(rt.Object); ok {
-			cloneList = append(cloneList, deepCopyable.DeepCopyObject())
-		}
-	}
-
-	return cloneList
+	return c.indexer.List()
 }
 
 func (c *CachingWatchController) SetUpdateHandler(handler WatchHandler) WatchController {

+ 84 - 35
pkg/cmd/costmodel/costmodel.go

@@ -8,7 +8,10 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
-	"github.com/opencost/opencost/pkg/cloudcost"
+	"github.com/opencost/opencost/core/pkg/util/json"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
+	"github.com/opencost/opencost/pkg/customcost"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/rs/cors"
 
@@ -36,59 +39,64 @@ func Execute(opts *CostModelOpts) error {
 	log.Infof("Starting cost-model version %s", version.FriendlyVersion())
 	log.Infof("Kubernetes enabled: %t", env.IsKubernetesEnabled())
 
+	router := httprouter.New()
 	var a *costmodel.Accesses
-
+	var cp models.Provider
 	if env.IsKubernetesEnabled() {
-		a = costmodel.Initialize()
+		a = costmodel.Initialize(router)
 		err := StartExportWorker(context.Background(), a.Model)
 		if err != nil {
 			log.Errorf("couldn't start CSV export worker: %v", err)
 		}
-	} else {
-		a = costmodel.InitializeWithoutKubernetes()
-		log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())
+
+		// Register OpenCost Specific Endpoints
+		router.GET("/allocation", a.ComputeAllocationHandler)
+		router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
+		router.GET("/assets", a.ComputeAssetsHandler)
+		if env.IsCarbonEstimatesEnabled() {
+			router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
+		}
+
+		// set cloud provider for cloud cost
+		cp = a.CloudProvider
 	}
 
 	log.Infof("Cloud Costs enabled: %t", env.IsCloudCostEnabled())
 	if env.IsCloudCostEnabled() {
-		repo := cloudcost.NewMemoryRepository()
-		a.CloudCostPipelineService = cloudcost.NewPipelineService(repo, a.CloudConfigController, cloudcost.DefaultIngestorConfiguration())
-		repoQuerier := cloudcost.NewRepositoryQuerier(repo)
-		a.CloudCostQueryService = cloudcost.NewQueryService(repoQuerier, repoQuerier)
+		var providerConfig models.ProviderConfig
+		if cp != nil {
+			providerConfig = provider.ExtractConfigFromProviders(cp)
+		}
+		costmodel.InitializeCloudCost(router, providerConfig)
 	}
 
-	rootMux := http.NewServeMux()
-	a.Router.GET("/healthz", Healthz)
-
-	if env.IsKubernetesEnabled() {
-		a.Router.GET("/allocation", a.ComputeAllocationHandler)
-		a.Router.GET("/allocation/summary", a.ComputeAllocationHandlerSummary)
-		a.Router.GET("/assets", a.ComputeAssetsHandler)
-		if env.IsCarbonEstimatesEnabled() {
-			a.Router.GET("/assets/carbon", a.ComputeAssetsCarbonHandler)
-		}
+	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
+	var customCostPipelineService *customcost.PipelineService
+	if env.IsCustomCostEnabled() {
+		customCostPipelineService = costmodel.InitializeCustomCost(router)
 	}
 
-	a.Router.GET("/cloudCost", a.CloudCostQueryService.GetCloudCostHandler())
-	a.Router.GET("/cloudCost/view/graph", a.CloudCostQueryService.GetCloudCostViewGraphHandler())
-	a.Router.GET("/cloudCost/view/totals", a.CloudCostQueryService.GetCloudCostViewTotalsHandler())
-	a.Router.GET("/cloudCost/view/table", a.CloudCostQueryService.GetCloudCostViewTableHandler())
+	// this endpoint is intentionally left out of the "if env.IsCustomCostEnabled()" conditional; in the handler, it is
+	// valid for CustomCostPipelineService to be nil
+	router.GET("/customCost/status", customCostPipelineService.GetCustomCostStatusHandler())
+
+	router.GET("/healthz", Healthz)
 
-	a.Router.GET("/cloudCost/status", a.CloudCostPipelineService.GetCloudCostStatusHandler())
-	a.Router.GET("/cloudCost/rebuild", a.CloudCostPipelineService.GetCloudCostRebuildHandler())
-	a.Router.GET("/cloudCost/repair", a.CloudCostPipelineService.GetCloudCostRepairHandler())
+	router.GET("/logs/level", GetLogLevel)
+	router.POST("/logs/level", SetLogLevel)
 
 	if env.IsPProfEnabled() {
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
-		a.Router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
-		a.Router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
-		a.Router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/", pprof.Index)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/cmdline", pprof.Cmdline)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/profile", pprof.Profile)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/symbol", pprof.Symbol)
+		router.HandlerFunc(http.MethodGet, "/debug/pprof/trace", pprof.Trace)
+		router.Handler(http.MethodGet, "/debug/pprof/goroutine", pprof.Handler("goroutine"))
+		router.Handler(http.MethodGet, "/debug/pprof/heap", pprof.Handler("heap"))
 	}
 
-	rootMux.Handle("/", a.Router)
+	rootMux := http.NewServeMux()
+	rootMux.Handle("/", router)
 	rootMux.Handle("/metrics", promhttp.Handler())
 	telemetryHandler := metrics.ResponseMetricMiddleware(rootMux)
 	handler := cors.AllowAll().Handler(telemetryHandler)
@@ -130,3 +138,44 @@ func StartExportWorker(ctx context.Context, model costmodel.AllocationModel) err
 	}()
 	return nil
 }
+
+// LogLevelRequestResponse is the JSON body shared by GET and POST /logs/level:
+// {"level": "<zerolog level name>"}.
+type LogLevelRequestResponse struct {
+	Level string `json:"level"`
+}
+
+// GetLogLevel handles GET /logs/level. It returns the current log level as
+// JSON ({"level": "..."}) with permissive CORS headers.
+func GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	level := log.GetLogLevel()
+	llrr := LogLevelRequestResponse{
+		Level: level,
+	}
+
+	body, err := json.Marshal(llrr)
+	if err != nil {
+		// Fixed typo ("retrive") and dropped the no-op fmt.Sprintf on a
+		// constant string (staticcheck S1039).
+		http.Error(w, "unable to retrieve log level", http.StatusInternalServerError)
+		return
+	}
+	_, err = w.Write(body)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
+		return
+	}
+}
+
+// SetLogLevel handles POST /logs/level. It decodes a {"level": "..."} JSON
+// body and applies it via log.SetLogLevel; an undecodable body or a level
+// zerolog rejects yields 400, success yields an empty 200.
+func SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	params := LogLevelRequestResponse{}
+	err := json.NewDecoder(r.Body).Decode(&params)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
+		return
+	}
+
+	err = log.SetLogLevel(params.Level)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}

+ 3 - 1
pkg/costmodel/aggregation.go

@@ -2284,7 +2284,9 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 	// include aggregated labels/annotations if true
 	includeAggregatedMetadata := qp.GetBool("includeAggregatedMetadata", false)
 
-	asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer, accumulateBy)
+	shareIdle := qp.GetBool("shareIdle", false)
+
+	asr, err := a.Model.QueryAllocation(window, resolution, step, aggregateBy, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer, accumulateBy, shareIdle)
 	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "bad request") {
 			WriteError(w, BadRequest(err.Error()))

+ 6 - 0
pkg/costmodel/allocation.go

@@ -28,6 +28,7 @@ const (
 	queryFmtCPURequests                 = `avg(avg_over_time(kube_pod_container_resource_requests{resource="cpu", unit="core", container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
 	queryFmtCPUUsageAvg                 = `avg(rate(container_cpu_usage_seconds_total{container!="", container_name!="POD", container!="POD", %s}[%s])) by (container_name, container, pod_name, pod, namespace, instance, %s)`
 	queryFmtGPUsRequested               = `avg(avg_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
+	queryFmtGPUsUsageAvg                = `avg(avg_over_time(DCGM_FI_PROF_GR_ENGINE_ACTIVE{container!=""}[%s])) by (container, pod, namespace, %s)`
 	queryFmtGPUsAllocated               = `avg(avg_over_time(container_gpu_allocation{container!="", container!="POD", node!="", %s}[%s])) by (container, pod, namespace, node, %s)`
 	queryFmtNodeCostPerCPUHr            = `avg(avg_over_time(node_cpu_hourly_cost{%s}[%s])) by (node, %s, instance_type, provider_id)`
 	queryFmtNodeCostPerRAMGiBHr         = `avg(avg_over_time(node_ram_hourly_cost{%s}[%s])) by (node, %s, instance_type, provider_id)`
@@ -428,6 +429,9 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	queryGPUsRequested := fmt.Sprintf(queryFmtGPUsRequested, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	resChGPUsRequested := ctx.QueryAtTime(queryGPUsRequested, end)
 
+	queryGPUsUsageAvg := fmt.Sprintf(queryFmtGPUsUsageAvg, durStr, env.GetPromClusterLabel())
+	resChGPUsUsageAvg := ctx.Query(queryGPUsUsageAvg)
+
 	queryGPUsAllocated := fmt.Sprintf(queryFmtGPUsAllocated, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	resChGPUsAllocated := ctx.QueryAtTime(queryGPUsAllocated, end)
 
@@ -544,6 +548,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	resRAMUsageAvg, _ := resChRAMUsageAvg.Await()
 	resRAMUsageMax, _ := resChRAMUsageMax.Await()
 	resGPUsRequested, _ := resChGPUsRequested.Await()
+	resGPUsUsageAvg, _ := resChGPUsUsageAvg.Await()
 	resGPUsAllocated, _ := resChGPUsAllocated.Await()
 
 	resNodeCostPerCPUHr, _ := resChNodeCostPerCPUHr.Await()
@@ -610,6 +615,7 @@ func (cm *CostModel) computeAllocation(start, end time.Time, resolution time.Dur
 	applyRAMBytesRequested(podMap, resRAMRequests, podUIDKeyMap)
 	applyRAMBytesUsedAvg(podMap, resRAMUsageAvg, podUIDKeyMap)
 	applyRAMBytesUsedMax(podMap, resRAMUsageMax, podUIDKeyMap)
+	applyGPUUsageAvg(podMap, resGPUsUsageAvg, podUIDKeyMap)
 	applyGPUsAllocated(podMap, resGPUsRequested, resGPUsAllocated, podUIDKeyMap)
 	applyNetworkTotals(podMap, resNetTransferBytes, resNetReceiveBytes, podUIDKeyMap)
 	applyNetworkAllocation(podMap, resNetZoneGiB, resNetZoneCostPerGiB, podUIDKeyMap, networkCrossZoneCost)

+ 49 - 1
pkg/costmodel/allocation_helpers.go

@@ -614,6 +614,47 @@ func applyRAMBytesUsedMax(podMap map[podKey]*pod, resRAMBytesUsedMax []*prom.Que
 	}
 }
 
+// applyGPUUsageAvg writes average GPU utilization (from the DCGM
+// DCGM_FI_PROF_GR_ENGINE_ACTIVE query results) onto the matching container
+// allocations in podMap. Results are matched to pods first by direct pod key,
+// then via podUIDKeyMap for pods tracked by UID; unmatched results are skipped.
+func applyGPUUsageAvg(podMap map[podKey]*pod, resGPUUsageAvg []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
+	// Example PromQueryResult: {container="dcgmproftester12", namespace="gpu", pod="dcgmproftester3-deployment-fc89c8dd6-ph7z5"} 0.997307
+	for _, res := range resGPUUsageAvg {
+		key, err := resultPodKey(res, env.GetPromClusterLabel(), "namespace")
+		if err != nil {
+			log.DedupedWarningf(10, "CostModel.ComputeAllocation: GPU usage avg result missing field: %s", err)
+			continue
+		}
+
+		// Resolve the result to one or more pods: a direct key hit maps to a
+		// single pod; otherwise fall back to the UID-keyed entries (one result
+		// may fan out to several UID-tracked pods). No match at all: skip.
+		var pods []*pod
+		if thisPod, ok := podMap[key]; !ok {
+			if uidKeys, ok := podUIDKeyMap[key]; ok {
+				for _, uidKey := range uidKeys {
+					thisPod, ok = podMap[uidKey]
+					if ok {
+						pods = append(pods, thisPod)
+					}
+				}
+			} else {
+				continue
+			}
+		} else {
+			pods = []*pod{thisPod}
+		}
+
+		for _, thisPod := range pods {
+			container, err := res.GetString("container")
+			if err != nil {
+				log.DedupedWarningf(10, "CostModel.ComputeAllocation: GPU usage avg query result missing 'container': %s", key)
+				continue
+			}
+			// Ensure an allocation exists for this container before writing.
+			if _, ok := thisPod.Allocations[container]; !ok {
+				thisPod.appendContainer(container)
+			}
+
+			// DCGM_FI_PROF_GR_ENGINE_ACTIVE metric is a float between 0-1.
+			thisPod.Allocations[container].GPUUsageAverage = res.Values[0].Value
+		}
+	}
+}
+
 func applyGPUsAllocated(podMap map[podKey]*pod, resGPUsRequested []*prom.QueryResult, resGPUsAllocated []*prom.QueryResult, podUIDKeyMap map[podKey][]podKey) {
 	if len(resGPUsAllocated) > 0 { // Use the new query, when it's become available in a window
 		resGPUsRequested = resGPUsAllocated
@@ -655,6 +696,13 @@ func applyGPUsAllocated(podMap map[podKey]*pod, resGPUsRequested []*prom.QueryRe
 
 			hrs := thisPod.Allocations[container].Minutes() / 60.0
 			thisPod.Allocations[container].GPUHours = res.Values[0].Value * hrs
+
+			// For now, it will always be the case that Request==Allocation. If
+			// you would like to use a GPU you need to request the full GPU.
+			// Therefore max(usage,request) will always equal request. In the
+			// future this may need to be refactored when building support for
+			// GPU Time Slicing.
+			thisPod.Allocations[container].GPURequestAverage = res.Values[0].Value
 		}
 	}
 }
@@ -1864,7 +1912,7 @@ func applyPVBytes(pvMap map[pvKey]*pv, resPVBytes []*prom.QueryResult) {
 		}
 
 		if _, ok := pvMap[key]; !ok {
-			log.Warnf("CostModel.ComputeAllocation: pv bytes result for missing pv: %s", err)
+			log.Warnf("CostModel.ComputeAllocation: pv bytes result for missing pv: %s", key)
 			continue
 		}
 

+ 192 - 90
pkg/costmodel/cluster.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/opencost/opencost/pkg/cloud/provider"
@@ -42,7 +43,16 @@ const (
 	queryNodes = `sum(avg(node_total_hourly_cost{%s}) by (node, %s)) * 730 %s`
 )
 
-const maxLocalDiskSize = 200 // AWS limits root disks to 100 Gi, and occasional metric errors in filesystem size should not contribute to large costs.
+const MAX_LOCAL_STORAGE_SIZE = 1024 * 1024 * 1024 * 1024
+
+// When ASSET_INCLUDE_LOCAL_DISK_COST is set to false, local storage
+// provisioned by sig-storage-local-static-provisioner is excluded
+// by checking if the volume is prefixed by "local-pv-".
+//
+// This is based on the sig-storage-local-static-provisioner implementation,
+// which creates all PVs with the "local-pv-" prefix. For reference, see:
+// https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/blob/b6f465027bd059e92c0032c81dd1e1d90e35c909/pkg/discovery/discovery.go#L410-L417
+const SIG_STORAGE_LOCAL_PROVISIONER_PREFIX = "local-pv-"
 
 // Costs represents cumulative and monthly cluster costs over a given duration. Costs
 // are broken down by cores, memory, and storage.
@@ -142,7 +152,7 @@ type DiskIdentifier struct {
 	Name    string
 }
 
-func ClusterDisks(client prometheus.Client, provider models.Provider, start, end time.Time) (map[DiskIdentifier]*Disk, error) {
+func ClusterDisks(client prometheus.Client, cp models.Provider, start, end time.Time) (map[DiskIdentifier]*Disk, error) {
 	// Start from the time "end", querying backwards
 	t := end
 
@@ -161,13 +171,6 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	if durStr == "" {
 		return nil, fmt.Errorf("illegal duration value for %s", opencost.NewClosedWindow(start, end))
 	}
-	// hourlyToCumulative is a scaling factor that, when multiplied by an hourly
-	// value, converts it to a cumulative value; i.e.
-	// [$/hr] * [min/res]*[hr/min] = [$/res]
-	hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
-
-	// TODO niko/assets how do we not hard-code this price?
-	costPerGBHr := 0.04 / 730.0
 
 	ctx := prom.NewNamedContext(client, prom.ClusterContextName)
 	queryPVCost := fmt.Sprintf(`avg(avg_over_time(pv_hourly_cost{%s}[%s])) by (%s, persistentvolume,provider_id)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
@@ -177,12 +180,6 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	queryPVUsedAvg := fmt.Sprintf(`avg(avg_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryPVUsedMax := fmt.Sprintf(`max(max_over_time(kubelet_volume_stats_used_bytes{%s}[%s])) by (%s, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
 	queryPVCInfo := fmt.Sprintf(`avg(avg_over_time(kube_persistentvolumeclaim_info{%s}[%s])) by (%s, volumename, persistentvolumeclaim, namespace)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel())
-	queryLocalStorageCost := fmt.Sprintf(`sum_over_time(sum(container_fs_limit_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-	queryLocalStorageUsedCost := fmt.Sprintf(`sum_over_time(sum(container_fs_usage_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
-	queryLocalStorageUsedAvg := fmt.Sprintf(`avg(sum(avg_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s, job)) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	queryLocalStorageUsedMax := fmt.Sprintf(`max(sum(max_over_time(container_fs_usage_bytes{device!="tmpfs", id="/", %s}[%s])) by (instance, %s, job)) by (instance, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
-	queryLocalStorageBytes := fmt.Sprintf(`avg_over_time(sum(container_fs_limit_bytes{device!="tmpfs", id="/", %s}) by (instance, %s)[%s:%dm])`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
-	queryLocalActiveMins := fmt.Sprintf(`count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
 
 	resChPVCost := ctx.QueryAtTime(queryPVCost, t)
 	resChPVSize := ctx.QueryAtTime(queryPVSize, t)
@@ -191,12 +188,6 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	resChPVUsedAvg := ctx.QueryAtTime(queryPVUsedAvg, t)
 	resChPVUsedMax := ctx.QueryAtTime(queryPVUsedMax, t)
 	resChPVCInfo := ctx.QueryAtTime(queryPVCInfo, t)
-	resChLocalStorageCost := ctx.QueryAtTime(queryLocalStorageCost, t)
-	resChLocalStorageUsedCost := ctx.QueryAtTime(queryLocalStorageUsedCost, t)
-	resChLocalStoreageUsedAvg := ctx.QueryAtTime(queryLocalStorageUsedAvg, t)
-	resChLocalStoreageUsedMax := ctx.QueryAtTime(queryLocalStorageUsedMax, t)
-	resChLocalStorageBytes := ctx.QueryAtTime(queryLocalStorageBytes, t)
-	resChLocalActiveMins := ctx.QueryAtTime(queryLocalActiveMins, t)
 
 	resPVCost, _ := resChPVCost.Await()
 	resPVSize, _ := resChPVSize.Await()
@@ -205,12 +196,54 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	resPVUsedAvg, _ := resChPVUsedAvg.Await()
 	resPVUsedMax, _ := resChPVUsedMax.Await()
 	resPVCInfo, _ := resChPVCInfo.Await()
-	resLocalStorageCost, _ := resChLocalStorageCost.Await()
-	resLocalStorageUsedCost, _ := resChLocalStorageUsedCost.Await()
-	resLocalStorageUsedAvg, _ := resChLocalStoreageUsedAvg.Await()
-	resLocalStorageUsedMax, _ := resChLocalStoreageUsedMax.Await()
-	resLocalStorageBytes, _ := resChLocalStorageBytes.Await()
-	resLocalActiveMins, _ := resChLocalActiveMins.Await()
+
+	// Cloud providers do not always charge for a node's local disk costs (i.e.
+	// ephemeral storage). Provide an option to opt out of calculating &
+	// allocating local disk costs. Note, that this does not affect
+	// PersistentVolume costs.
+	//
+	// Ref:
+	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/RootDeviceStorage.html
+	// https://learn.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview#temporary-disk
+	// https://cloud.google.com/compute/docs/disks/local-ssd
+	resLocalStorageCost := []*prom.QueryResult{}
+	resLocalStorageUsedCost := []*prom.QueryResult{}
+	resLocalStorageUsedAvg := []*prom.QueryResult{}
+	resLocalStorageUsedMax := []*prom.QueryResult{}
+	resLocalStorageBytes := []*prom.QueryResult{}
+	resLocalActiveMins := []*prom.QueryResult{}
+	if env.GetAssetIncludeLocalDiskCost() {
+		// hourlyToCumulative is a scaling factor that, when multiplied by an
+		// hourly value, converts it to a cumulative value; i.e. [$/hr] *
+		// [min/res]*[hr/min] = [$/res]
+		hourlyToCumulative := float64(minsPerResolution) * (1.0 / 60.0)
+		costPerGBHr := 0.04 / 730.0
+
+		// container_fs metrics contains metrics for disks that are not local storage of the node. While not perfect to
+		// attempt to identify the correct device which is being used as local storage we first filter for devices mounted
+		// at paths `/dev/nvme.*` or `/dev/sda.*`. There still may be multiple devices mounted at paths matching the regex
+		// so later on we will select the device with the highest `container_fs_limit_bytes` per instance to create a local disk asset
+		queryLocalStorageCost := fmt.Sprintf(`sum_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+		queryLocalStorageUsedCost := fmt.Sprintf(`sum_over_time(sum(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm]) / 1024 / 1024 / 1024 * %f * %f`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution, hourlyToCumulative, costPerGBHr)
+		queryLocalStorageUsedAvg := fmt.Sprintf(`avg(sum(avg_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+		queryLocalStorageUsedMax := fmt.Sprintf(`max(sum(max_over_time(container_fs_usage_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}[%s])) by (instance, device, %s, job)) by (instance, device, %s)`, env.GetPromClusterFilter(), durStr, env.GetPromClusterLabel(), env.GetPromClusterLabel())
+		queryLocalStorageBytes := fmt.Sprintf(`avg_over_time(sum(container_fs_limit_bytes{device=~"/dev/(nvme|sda).*", id="/", %s}) by (instance, device, %s)[%s:%dm])`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+		queryLocalActiveMins := fmt.Sprintf(`count(node_total_hourly_cost{%s}) by (%s, node)[%s:%dm]`, env.GetPromClusterFilter(), env.GetPromClusterLabel(), durStr, minsPerResolution)
+
+		resChLocalStorageCost := ctx.QueryAtTime(queryLocalStorageCost, t)
+		resChLocalStorageUsedCost := ctx.QueryAtTime(queryLocalStorageUsedCost, t)
+		resChLocalStoreageUsedAvg := ctx.QueryAtTime(queryLocalStorageUsedAvg, t)
+		resChLocalStoreageUsedMax := ctx.QueryAtTime(queryLocalStorageUsedMax, t)
+		resChLocalStorageBytes := ctx.QueryAtTime(queryLocalStorageBytes, t)
+		resChLocalActiveMins := ctx.QueryAtTime(queryLocalActiveMins, t)
+
+		resLocalStorageCost, _ = resChLocalStorageCost.Await()
+		resLocalStorageUsedCost, _ = resChLocalStorageUsedCost.Await()
+		resLocalStorageUsedAvg, _ = resChLocalStoreageUsedAvg.Await()
+		resLocalStorageUsedMax, _ = resChLocalStoreageUsedMax.Await()
+		resLocalStorageBytes, _ = resChLocalStorageBytes.Await()
+		resLocalActiveMins, _ = resChLocalActiveMins.Await()
+	}
 
 	if ctx.HasErrors() {
 		return nil, ctx.ErrorCollection()
@@ -254,9 +287,18 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 		diskMap[key].ClaimNamespace = claimNamespace
 	}
 
-	pvCosts(diskMap, resolution, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo, provider, opencost.NewClosedWindow(start, end))
+	pvCosts(diskMap, resolution, resActiveMins, resPVSize, resPVCost, resPVUsedAvg, resPVUsedMax, resPVCInfo, cp, opencost.NewClosedWindow(start, end))
 
-	for _, result := range resLocalStorageCost {
+	type localStorage struct {
+		device string
+		disk   *Disk
+	}
+
+	localStorageDisks := map[DiskIdentifier]localStorage{}
+
+	// Start with local storage bytes so that the device with the largest size which has passed the
+	// query filters can be determined
+	for _, result := range resLocalStorageBytes {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
 			cluster = env.GetClusterID()
@@ -268,23 +310,37 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 			continue
 		}
 
-		cost := result.Values[0].Value
+		device, err := result.GetString("device")
+		if err != nil {
+			log.Warnf("ClusterDisks: local storage data missing device")
+			continue
+		}
+
+		bytes := result.Values[0].Value
+		// Ignore disks that are larger than the max size
+		if bytes > MAX_LOCAL_STORAGE_SIZE {
+			continue
+		}
+
 		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			diskMap[key] = &Disk{
-				Cluster:   cluster,
-				Name:      name,
-				Breakdown: &ClusterCostsBreakdown{},
-				Local:     true,
+
+		// only keep the device with the most bytes per instance
+		if current, ok := localStorageDisks[key]; !ok || current.disk.Bytes < bytes {
+			localStorageDisks[key] = localStorage{
+				device: device,
+				disk: &Disk{
+					Cluster:      cluster,
+					Name:         name,
+					Breakdown:    &ClusterCostsBreakdown{},
+					Local:        true,
+					StorageClass: opencost.LocalStorageClass,
+					Bytes:        bytes,
+				},
 			}
 		}
-		diskMap[key].Cost += cost
-
-		//Assigning explicitly the storage class of local storage to local
-		diskMap[key].StorageClass = opencost.LocalStorageClass
 	}
 
-	for _, result := range resLocalStorageUsedCost {
+	for _, result := range resLocalStorageCost {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
 			cluster = env.GetClusterID()
@@ -292,24 +348,27 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warnf("ClusterDisks: local storage usage data missing instance")
+			log.Warnf("ClusterDisks: local storage data missing instance")
+			continue
+		}
+
+		device, err := result.GetString("device")
+		if err != nil {
+			log.Warnf("ClusterDisks: local storage data missing device")
 			continue
 		}
 
 		cost := result.Values[0].Value
 		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			diskMap[key] = &Disk{
-				Cluster:   cluster,
-				Name:      name,
-				Breakdown: &ClusterCostsBreakdown{},
-				Local:     true,
-			}
+		ls, ok := localStorageDisks[key]
+		if !ok || ls.device != device {
+			continue
 		}
-		diskMap[key].Breakdown.System = cost / diskMap[key].Cost
+		ls.disk.Cost = cost
+
 	}
 
-	for _, result := range resLocalStorageUsedAvg {
+	for _, result := range resLocalStorageUsedCost {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
 			cluster = env.GetClusterID()
@@ -317,24 +376,26 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 
 		name, err := result.GetString("instance")
 		if err != nil {
-			log.Warnf("ClusterDisks: local storage data missing instance")
+			log.Warnf("ClusterDisks: local storage usage data missing instance")
 			continue
 		}
 
-		bytesAvg := result.Values[0].Value
+		device, err := result.GetString("device")
+		if err != nil {
+			log.Warnf("ClusterDisks: local storage data missing device")
+			continue
+		}
+
+		cost := result.Values[0].Value
 		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			diskMap[key] = &Disk{
-				Cluster:   cluster,
-				Name:      name,
-				Breakdown: &ClusterCostsBreakdown{},
-				Local:     true,
-			}
+		ls, ok := localStorageDisks[key]
+		if !ok || ls.device != device {
+			continue
 		}
-		diskMap[key].BytesUsedAvgPtr = &bytesAvg
+		ls.disk.Breakdown.System = cost / ls.disk.Cost
 	}
 
-	for _, result := range resLocalStorageUsedMax {
+	for _, result := range resLocalStorageUsedAvg {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
 			cluster = env.GetClusterID()
@@ -346,20 +407,22 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 			continue
 		}
 
-		bytesMax := result.Values[0].Value
+		device, err := result.GetString("device")
+		if err != nil {
+			log.Warnf("ClusterDisks: local storage data missing device")
+			continue
+		}
+
+		bytesAvg := result.Values[0].Value
 		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			diskMap[key] = &Disk{
-				Cluster:   cluster,
-				Name:      name,
-				Breakdown: &ClusterCostsBreakdown{},
-				Local:     true,
-			}
+		ls, ok := localStorageDisks[key]
+		if !ok || ls.device != device {
+			continue
 		}
-		diskMap[key].BytesUsedMaxPtr = &bytesMax
+		ls.disk.BytesUsedAvgPtr = &bytesAvg
 	}
 
-	for _, result := range resLocalStorageBytes {
+	for _, result := range resLocalStorageUsedMax {
 		cluster, err := result.GetString(env.GetPromClusterLabel())
 		if err != nil {
 			cluster = env.GetClusterID()
@@ -371,21 +434,19 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 			continue
 		}
 
-		bytes := result.Values[0].Value
-		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			diskMap[key] = &Disk{
-				Cluster:   cluster,
-				Name:      name,
-				Breakdown: &ClusterCostsBreakdown{},
-				Local:     true,
-			}
+		device, err := result.GetString("device")
+		if err != nil {
+			log.Warnf("ClusterDisks: local storage data missing device")
+			continue
 		}
-		diskMap[key].Bytes = bytes
-		if bytes/1024/1024/1024 > maxLocalDiskSize {
-			log.DedupedWarningf(5, "Deleting large root disk/localstorage disk from analysis")
-			delete(diskMap, key)
+
+		bytesMax := result.Values[0].Value
+		key := DiskIdentifier{cluster, name}
+		ls, ok := localStorageDisks[key]
+		if !ok || ls.device != device {
+			continue
 		}
+		ls.disk.BytesUsedMaxPtr = &bytesMax
 	}
 
 	for _, result := range resLocalActiveMins {
@@ -400,12 +461,20 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 			continue
 		}
 
+		providerID, err := result.GetString("provider_id")
+		if err != nil {
+			log.DedupedWarningf(5, "ClusterDisks: local active mins data missing provider_id")
+			continue
+		}
+
 		key := DiskIdentifier{cluster, name}
-		if _, ok := diskMap[key]; !ok {
-			log.DedupedWarningf(5, "ClusterDisks: local active mins for unidentified disk or disk deleted from analysis")
+		ls, ok := localStorageDisks[key]
+		if !ok {
 			continue
 		}
 
+		ls.disk.ProviderID = provider.ParseLocalDiskID(providerID)
+
 		if len(result.Values) == 0 {
 			continue
 		}
@@ -416,9 +485,14 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 
 		// TODO niko/assets if mins >= threshold, interpolate for missing data?
 
-		diskMap[key].End = e
-		diskMap[key].Start = s
-		diskMap[key].Minutes = mins
+		ls.disk.End = e
+		ls.disk.Start = s
+		ls.disk.Minutes = mins
+	}
+
+	// move local storage disks to main disk map
+	for key, ls := range localStorageDisks {
+		diskMap[key] = ls.disk
 	}
 
 	var unTracedDiskLogData []DiskIdentifier
@@ -468,6 +542,10 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 		}
 	}
 
+	if !env.GetAssetIncludeLocalDiskCost() {
+		return filterOutLocalPVs(diskMap), nil
+	}
+
 	return diskMap, nil
 }
 
@@ -1484,11 +1562,13 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				log.Debugf("ClusterDisks: pv claim data missing volumename")
 				continue
 			}
+
 			thatClaimName, err := thatRes.GetString("persistentvolumeclaim")
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 				continue
 			}
+
 			thatClaimNamespace, err := thatRes.GetString("namespace")
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
@@ -1525,6 +1605,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 			log.Debugf("ClusterDisks: pv usage data missing persistentvolumeclaim")
 			continue
 		}
+
 		claimNamespace, err := result.GetString("namespace")
 		if err != nil {
 			log.Debugf("ClusterDisks: pv usage data missing namespace")
@@ -1545,11 +1626,13 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 				log.Debugf("ClusterDisks: pv claim data missing volumename")
 				continue
 			}
+
 			thatClaimName, err := thatRes.GetString("persistentvolumeclaim")
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing persistentvolumeclaim")
 				continue
 			}
+
 			thatClaimNamespace, err := thatRes.GetString("namespace")
 			if err != nil {
 				log.Debugf("ClusterDisks: pv claim data missing namespace")
@@ -1575,3 +1658,22 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].BytesUsedMaxPtr = &usage
 	}
 }
+
+// filterOutLocalPVs removes local Persistent Volumes (PVs) from the given disk map.
+// Local PVs are identified by the prefix "local-pv-" in their names, which is the
+// convention used by sig-storage-local-static-provisioner.
+//
+// Parameters:
+//   - diskMap: A map of DiskIdentifier to Disk pointers, representing all PVs.
+//
+// Returns:
+//   - A new map of DiskIdentifier to Disk pointers, containing only non-local PVs.
+func filterOutLocalPVs(diskMap map[DiskIdentifier]*Disk) map[DiskIdentifier]*Disk {
+	nonLocalPVDiskMap := map[DiskIdentifier]*Disk{}
+	for key, val := range diskMap {
+		if !strings.HasPrefix(key.Name, SIG_STORAGE_LOCAL_PROVISIONER_PREFIX) {
+			nonLocalPVDiskMap[key] = val
+		}
+	}
+	return nonLocalPVDiskMap
+}

+ 54 - 0
pkg/costmodel/cluster_test.go

@@ -0,0 +1,54 @@
+package costmodel
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_filterOutLocalPVs(t *testing.T) {
+	testCases := []struct {
+		name     string
+		input    map[DiskIdentifier]*Disk
+		expected map[DiskIdentifier]*Disk
+	}{
+		{
+			name: "Filter out local PVs",
+			input: map[DiskIdentifier]*Disk{
+				{Cluster: "cluster1", Name: "pv1"}:              &Disk{Name: "pv1"},
+				{Cluster: "cluster1", Name: "local-pv-123"}:     &Disk{Name: "local-pv-123"},
+				{Cluster: "cluster2", Name: "pv2"}:              &Disk{Name: "pv2"},
+				{Cluster: "cluster2", Name: "local-pv-456"}:     &Disk{Name: "local-pv-456"},
+				{Cluster: "cluster3", Name: "not-local-pv-789"}: &Disk{Name: "not-local-pv-789"},
+			},
+			expected: map[DiskIdentifier]*Disk{
+				{Cluster: "cluster1", Name: "pv1"}:              &Disk{Name: "pv1"},
+				{Cluster: "cluster2", Name: "pv2"}:              &Disk{Name: "pv2"},
+				{Cluster: "cluster3", Name: "not-local-pv-789"}: &Disk{Name: "not-local-pv-789"},
+			},
+		},
+		{
+			name: "No local PVs to filter",
+			input: map[DiskIdentifier]*Disk{
+				{Cluster: "cluster1", Name: "pv1"}: &Disk{Name: "pv1"},
+				{Cluster: "cluster2", Name: "pv2"}: &Disk{Name: "pv2"},
+			},
+			expected: map[DiskIdentifier]*Disk{
+				{Cluster: "cluster1", Name: "pv1"}: &Disk{Name: "pv1"},
+				{Cluster: "cluster2", Name: "pv2"}: &Disk{Name: "pv2"},
+			},
+		},
+		{
+			name:     "Empty input",
+			input:    map[DiskIdentifier]*Disk{},
+			expected: map[DiskIdentifier]*Disk{},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := filterOutLocalPVs(tc.input)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}

+ 103 - 59
pkg/costmodel/costmodel.go

@@ -182,7 +182,7 @@ const (
 	queryGPURequestsStr = `avg(
 		label_replace(
 			label_replace(
-				sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s), 
+				sum_over_time(kube_pod_container_resource_requests{resource="nvidia_com_gpu", container!="",container!="POD", node!="", %s}[%s] %s),
 				"container_name","$1","container","(.+)"
 			), "pod_name","$1","pod","(.+)"
 		)
@@ -988,15 +988,6 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 	nodeList := cm.Cache.GetAllNodes()
 	nodes := make(map[string]*costAnalyzerCloud.Node)
 
-	vgpuCount, err := getAllocatableVGPUs(cm.Cache)
-	if err != nil {
-		return nil, err
-	}
-	vgpuCoeff := 10.0
-	if vgpuCount > 0.0 {
-		vgpuCoeff = vgpuCount
-	}
-
 	pmd := &costAnalyzerCloud.PricingMatchMetadata{
 		TotalNodes:        0,
 		PricingTypeCounts: make(map[costAnalyzerCloud.PricingType]int),
@@ -1028,6 +1019,8 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			pmd.PricingTypeCounts[cnode.PricingType] = 1
 		}
 
+		// newCnode builds upon cnode but populates/overrides certain fields.
+		// cnode was populated leveraging cloud provider public pricing APIs.
 		newCnode := *cnode
 		if newCnode.InstanceType == "" {
 			it, _ := util.GetInstanceType(n.Labels)
@@ -1070,48 +1063,24 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 
 		newCnode.RAMBytes = fmt.Sprintf("%f", ram)
 
-		// Azure does not seem to provide a GPU count in its pricing API. GKE supports attaching multiple GPUs
-		// So the k8s api will often report more accurate results for GPU count under status > capacity > nvidia.com/gpu than the cloud providers billing data
-		// not all providers are guaranteed to use this, so don't overwrite a Provider assignment if we can't find something under that capacity exists
-		gpuc := 0.0
-		q, ok := n.Status.Capacity["nvidia.com/gpu"]
-		_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
-
-		if ok && !hasReplicas {
-			gpuCount := q.Value()
-			if gpuCount != 0 {
-				newCnode.GPU = fmt.Sprintf("%d", gpuCount)
-				newCnode.VGPU = newCnode.GPU
-				gpuc = float64(gpuCount)
-			}
-		} else if hasReplicas { // See https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html
-			if q.Value() == 0 {
-				q = n.Status.Capacity["nvidia.com/gpu.shared"]
-			}
-			g, ok := n.Labels["nvidia.com/gpu.count"]
-			if ok {
-				newCnode.GPU = g
-			} else {
-				newCnode.GPU = fmt.Sprintf("%d", 0)
-			}
-			newCnode.VGPU = fmt.Sprintf("%d", q.Value())
+		gpuc, err := strconv.ParseFloat(newCnode.GPU, 64)
+		if err != nil {
+			gpuc = 0.0
+		}
 
-		} else if g, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
-			gpuCount := g.Value()
-			if gpuCount != 0 {
-				newCnode.GPU = fmt.Sprintf("%d", int(float64(gpuCount)/vgpuCoeff))
-				newCnode.VGPU = fmt.Sprintf("%d", gpuCount)
-				gpuc = float64(gpuCount) / vgpuCoeff
-			}
-		} else {
-			gpuc, err = strconv.ParseFloat(newCnode.GPU, 64)
-			if err != nil {
-				gpuc = 0.0
-			}
+		// The k8s API will often report more accurate results for GPU count
+		// than cloud provider public pricing APIs. If found, override the
+		// original value.
+		gpuOverride, vgpuOverride, err := getGPUCount(cm.Cache, n)
+		if err != nil {
+			log.Warnf("Unable to get GPUCount for node %s: %s", n.Name, err.Error())
 		}
-		if math.IsNaN(gpuc) {
-			log.Warnf("gpu count parsed as NaN. Setting to 0.")
-			gpuc = 0.0
+		if gpuOverride > 0 {
+			newCnode.GPU = fmt.Sprintf("%f", gpuOverride)
+			gpuc = gpuOverride
+		}
+		if vgpuOverride > 0 {
+			newCnode.VGPU = fmt.Sprintf("%f", vgpuOverride)
 		}
 
 		// Special case for SUSE rancher, since it won't behave with normal
@@ -1159,17 +1128,20 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			nodeCost := cpuCost + gpuCost + ramCost
 
 			newCnode.Cost = fmt.Sprintf("%f", nodeCost)
-			newCnode.VCPUCost = fmt.Sprintf("%f", cpuCost)
-			newCnode.GPUCost = fmt.Sprintf("%f", gpuCost)
-			newCnode.RAMCost = fmt.Sprintf("%f", ramCost)
+			newCnode.VCPUCost = fmt.Sprintf("%f", defaultCPUCorePrice)
+			newCnode.GPUCost = fmt.Sprintf("%f", defaultGPUPrice)
+			newCnode.RAMCost = fmt.Sprintf("%f", defaultRAMPrice)
 			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
 
 		} else if newCnode.GPU != "" && newCnode.GPUCost == "" {
 			// was the big thing to investigate. All the funky ratio math
 			// we were doing was messing with their default pricing. for SUSE Rancher.
 
-			// We couldn't find a gpu cost, so fix cpu and ram, then accordingly
-			log.Infof("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
+			// We reach this when a GPU is detected on a node, but no cost for
+			// the GPU is defined in the OnDemand pricing. Calculate ratios of
+			// CPU to RAM and GPU to RAM costs, then distribute the total node
+			// cost among the CPU, RAM, and GPU.
+			log.Tracef("GPU without cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
 
 			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
 			if err != nil {
@@ -1261,8 +1233,10 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
 			newCnode.GPUCost = fmt.Sprintf("%f", gpuPrice)
 		} else if newCnode.RAMCost == "" {
-			// We couldn't find a ramcost, so fix cpu and allocate ram accordingly
-			log.Debugf("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
+			// We reach this when no RAM cost is defined in the OnDemand
+			// pricing. It calculates a cpuToRAMRatio and ramMultiple to
+			// distribute the total node cost among CPU and RAM costs.
+			log.Tracef("No RAM cost found for %s, calculating...", cp.GetKey(nodeLabels, n).Features())
 
 			defaultCPU, err := strconv.ParseFloat(cfg.CPU, 64)
 			if err != nil {
@@ -1352,7 +1326,7 @@ func (cm *CostModel) GetNodeCost(cp costAnalyzerCloud.Provider) (map[string]*cos
 			}
 			newCnode.RAMBytes = fmt.Sprintf("%f", ram)
 
-			log.Debugf("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
+			log.Tracef("Computed \"%s\" RAM Cost := %v", name, newCnode.RAMCost)
 		}
 
 		nodes[name] = &newCnode
@@ -2353,6 +2327,68 @@ func getStatefulSetsOfPod(pod clustercache.Pod) []string {
 	return []string{}
 }
 
+// getGPUCount reads the node's Status and Labels (via the k8s API) to identify
+// the number of GPUs and vGPUs are equipped on the node. If unable to identify
+// a GPU count, it will return -1.
+func getGPUCount(cache clustercache.ClusterCache, n *v1.Node) (float64, float64, error) {
+	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
+	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
+
+	// Case 1: Standard NVIDIA GPU
+	if hasGpu && g.Value() != 0 && !hasReplicas {
+		return float64(g.Value()), float64(g.Value()), nil
+	}
+
+	// Case 2: NVIDIA GPU with GPU Feature Discovery (GFD) Pod enabled.
+	// Ref: https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/gpu-sharing.html#verifying-the-gpu-time-slicing-configuration
+	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L44-L45
+	// Ref: https://github.com/NVIDIA/k8s-device-plugin/blob/d899752a424818428f744a946d32b132ea2c0cf1/internal/lm/resource_test.go#L103-L118
+	if hasReplicas {
+		resultGPU := 0.0
+		resultVGPU := 0.0
+
+		if c, ok := n.Labels["nvidia.com/gpu.count"]; ok {
+			var err error
+			resultGPU, err = strconv.ParseFloat(c, 64)
+			if err != nil {
+				return -1, -1, fmt.Errorf("could not parse label \"nvidia.com/gpu.count\": %v", err)
+			}
+		}
+
+		if s, ok := n.Status.Capacity["nvidia.com/gpu.shared"]; ok { // GFD configured `renameByDefault=true`
+			resultVGPU = float64(s.Value())
+		} else if g, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // GFD configured `renameByDefault=false`
+			resultVGPU = float64(g.Value())
+		} else {
+			resultVGPU = resultGPU
+		}
+
+		return resultGPU, resultVGPU, nil
+	}
+
+	// Case 3: AWS vGPU
+	if vgpu, ok := n.Status.Capacity["k8s.amazonaws.com/vgpu"]; ok {
+		vgpuCount, err := getAllocatableVGPUs(cache)
+		if err != nil {
+			return -1, -1, err
+		}
+
+		vgpuCoeff := 10.0
+		if vgpuCount > 0.0 {
+			vgpuCoeff = vgpuCount
+		}
+
+		if vgpu.Value() != 0 {
+			resultGPU := float64(vgpu.Value()) / vgpuCoeff
+			resultVGPU := float64(vgpu.Value())
+			return resultGPU, resultVGPU, nil
+		}
+	}
+
+	// No GPU found
+	return -1, -1, nil
+}
+
 func getAllocatableVGPUs(cache clustercache.ClusterCache) (float64, error) {
 	daemonsets := cache.GetAllDaemonSets()
 	vgpuCount := 0.0
@@ -2403,7 +2439,7 @@ func measureTimeAsync(start time.Time, threshold time.Duration, name string, ch
 	}
 }
 
-func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption) (*opencost.AllocationSetRange, error) {
+func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step time.Duration, aggregate []string, includeIdle, idleByNode, includeProportionalAssetResourceCosts, includeAggregatedMetadata, sharedLoadBalancer bool, accumulateBy opencost.AccumulateOption, shareIdle bool) (*opencost.AllocationSetRange, error) {
 	// Validate window is legal
 	if window.IsOpen() || window.IsNegative() {
 		return nil, fmt.Errorf("illegal window: %s", window)
@@ -2474,10 +2510,18 @@ func (cm *CostModel) QueryAllocation(window opencost.Window, resolution, step ti
 	}
 
 	// Set aggregation options and aggregate
+	var shareIdleOpt string
+	if shareIdle {
+		shareIdleOpt = opencost.ShareWeighted
+	} else {
+		shareIdleOpt = opencost.ShareNone
+	}
+
 	opts := &opencost.AllocationAggregationOptions{
 		IncludeProportionalAssetResourceCosts: includeProportionalAssetResourceCosts,
 		IdleByNode:                            idleByNode,
 		IncludeAggregatedMetadata:             includeAggregatedMetadata,
+		ShareIdle:                             shareIdleOpt,
 	}
 
 	// Aggregate

+ 87 - 0
pkg/costmodel/costmodel_test.go

@@ -4,8 +4,95 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/core/pkg/util"
+	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+func TestGetGPUCount(t *testing.T) {
+	tests := []struct {
+		name          string
+		node          *v1.Node
+		expectedGPU   float64
+		expectedVGPU  float64
+		expectedError bool
+	}{
+		{
+			name: "Standard NVIDIA GPU",
+			node: &v1.Node{
+				Status: v1.NodeStatus{
+					Capacity: v1.ResourceList{
+						"nvidia.com/gpu": resource.MustParse("2"),
+					},
+				},
+			},
+			expectedGPU:  2.0,
+			expectedVGPU: 2.0,
+		},
+		{
+			name: "NVIDIA GPU with GFD - renameByDefault=true",
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"nvidia.com/gpu.replicas": "4",
+						"nvidia.com/gpu.count":    "1",
+					},
+				},
+				Status: v1.NodeStatus{
+					Capacity: v1.ResourceList{
+						"nvidia.com/gpu.shared": resource.MustParse("4"),
+					},
+				},
+			},
+			expectedGPU:  1.0,
+			expectedVGPU: 4.0,
+		},
+		{
+			name: "NVIDIA GPU with GFD - renameByDefault=false",
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"nvidia.com/gpu.replicas": "4",
+						"nvidia.com/gpu.count":    "1",
+					},
+				},
+				Status: v1.NodeStatus{
+					Capacity: v1.ResourceList{
+						"nvidia.com/gpu": resource.MustParse("4"),
+					},
+				},
+			},
+			expectedGPU:  1.0,
+			expectedVGPU: 4.0,
+		},
+		{
+			name: "No GPU",
+			node: &v1.Node{
+				Status: v1.NodeStatus{
+					Capacity: v1.ResourceList{},
+				},
+			},
+			expectedGPU:  -1.0,
+			expectedVGPU: -1.0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gpu, vgpu, err := getGPUCount(nil, tt.node)
+
+			if tt.expectedError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expectedGPU, gpu)
+				assert.Equal(t, tt.expectedVGPU, vgpu)
+			}
+		})
+	}
+}
+
 func Test_CostData_GetController_CronJob(t *testing.T) {
 	cases := []struct {
 		name string

+ 13 - 12
pkg/costmodel/promparsers.go

@@ -50,7 +50,7 @@ func GetPVInfo(qrs []*prom.QueryResult, defaultClusterID string) (map[string]*Pe
 	toReturn := make(map[string]*PersistentVolumeClaimData)
 
 	for _, val := range qrs {
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -96,7 +96,7 @@ func GetPVAllocationMetrics(qrs []*prom.QueryResult, defaultClusterID string) (m
 	toReturn := make(map[string][]*PersistentVolumeClaimData)
 
 	for _, val := range qrs {
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -142,7 +142,7 @@ func GetPVCostMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[str
 	toReturn := make(map[string]*costAnalyzerCloud.PV)
 
 	for _, val := range qrs {
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -171,7 +171,7 @@ func GetNamespaceLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string)
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -203,7 +203,7 @@ func GetPodLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID string) (map[
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -232,7 +232,7 @@ func GetNamespaceAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID st
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -264,7 +264,8 @@ func GetPodAnnotationsMetrics(qrs []*prom.QueryResult, defaultClusterID string)
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
+
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -297,7 +298,7 @@ func GetStatefulsetMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -323,7 +324,7 @@ func GetPodDaemonsetsWithMetrics(qrs []*prom.QueryResult, defaultClusterID strin
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -354,7 +355,7 @@ func GetPodJobsWithMetrics(qrs []*prom.QueryResult, defaultClusterID string) (ma
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -386,7 +387,7 @@ func GetDeploymentMatchLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}
@@ -413,7 +414,7 @@ func GetServiceSelectorLabelsMetrics(qrs []*prom.QueryResult, defaultClusterID s
 			return toReturn, err
 		}
 
-		clusterID, err := val.GetString(env.GetPromClusterLabel())
+		clusterID, _ := val.GetString(env.GetPromClusterLabel())
 		if clusterID == "" {
 			clusterID = defaultClusterID
 		}

+ 113 - 178
pkg/costmodel/router.go

@@ -85,28 +85,22 @@ var (
 // Accesses defines a singleton application instance, providing access to
 // Prometheus, Kubernetes, the cloud provider, and caches.
 type Accesses struct {
-	Router                    *httprouter.Router
-	PrometheusClient          prometheus.Client
-	ThanosClient              prometheus.Client
-	KubeClientSet             kubernetes.Interface
-	ClusterCache              clustercache.ClusterCache
-	ClusterMap                clusters.ClusterMap
-	CloudProvider             models.Provider
-	ConfigFileManager         *config.ConfigFileManager
-	CloudConfigController     *cloudconfig.Controller
-	CloudCostPipelineService  *cloudcost.PipelineService
-	CloudCostQueryService     *cloudcost.QueryService
-	CustomCostQueryService    *customcost.QueryService
-	CustomCostPipelineService *customcost.PipelineService
-	ClusterInfoProvider       clusters.ClusterInfoProvider
-	Model                     *CostModel
-	MetricsEmitter            *CostModelMetricsEmitter
-	OutOfClusterCache         *cache.Cache
-	AggregateCache            *cache.Cache
-	CostDataCache             *cache.Cache
-	ClusterCostsCache         *cache.Cache
-	CacheExpiration           map[time.Duration]time.Duration
-	AggAPI                    Aggregator
+	PrometheusClient    prometheus.Client
+	ThanosClient        prometheus.Client
+	KubeClientSet       kubernetes.Interface
+	ClusterCache        clustercache.ClusterCache
+	ClusterMap          clusters.ClusterMap
+	CloudProvider       models.Provider
+	ConfigFileManager   *config.ConfigFileManager
+	ClusterInfoProvider clusters.ClusterInfoProvider
+	Model               *CostModel
+	MetricsEmitter      *CostModelMetricsEmitter
+	OutOfClusterCache   *cache.Cache
+	AggregateCache      *cache.Cache
+	CostDataCache       *cache.Cache
+	ClusterCostsCache   *cache.Cache
+	CacheExpiration     map[time.Duration]time.Duration
+	AggAPI              Aggregator
 	// SettingsCache stores current state of app settings
 	SettingsCache *cache.Cache
 	// settingsSubscribers tracks channels through which changes to different
@@ -1426,47 +1420,6 @@ func (a *Accesses) Status(w http.ResponseWriter, r *http.Request, _ httprouter.P
 	}
 }
 
-type LogLevelRequestResponse struct {
-	Level string `json:"level"`
-}
-
-func (a *Accesses) GetLogLevel(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
-	w.Header().Set("Content-Type", "application/json")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-
-	level := log.GetLogLevel()
-	llrr := LogLevelRequestResponse{
-		Level: level,
-	}
-
-	body, err := json.Marshal(llrr)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to retrive log level"), http.StatusInternalServerError)
-		return
-	}
-	_, err = w.Write(body)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to write response: %s", body), http.StatusInternalServerError)
-		return
-	}
-}
-
-func (a *Accesses) SetLogLevel(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
-	params := LogLevelRequestResponse{}
-	err := json.NewDecoder(r.Body).Decode(&params)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("unable to decode request body, error: %s", err), http.StatusBadRequest)
-		return
-	}
-
-	err = log.SetLogLevel(params.Level)
-	if err != nil {
-		http.Error(w, fmt.Sprintf("level must be a valid log level according to zerolog; level given: %s, error: %s", params.Level, err), http.StatusBadRequest)
-		return
-	}
-	w.WriteHeader(http.StatusOK)
-}
-
 // captures the panic event in sentry
 func capturePanicEvent(err string, stack string) {
 	msg := fmt.Sprintf("Panic: %s\nStackTrace: %s\n", err, stack)
@@ -1497,7 +1450,7 @@ func handlePanic(p errors.Panic) bool {
 	return p.Type == errors.PanicTypeHTTP
 }
 
-func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
+func Initialize(router *httprouter.Router, additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses {
 	configWatchers := watcher.NewConfigMapWatchers(additionalConfigWatchers...)
 
 	var err error
@@ -1723,25 +1676,23 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	metricsEmitter := NewCostModelMetricsEmitter(promCli, k8sCache, cloudProvider, clusterInfoProvider, costModel)
 
 	a := &Accesses{
-		Router:                httprouter.New(),
-		PrometheusClient:      promCli,
-		ThanosClient:          thanosClient,
-		KubeClientSet:         kubeClientset,
-		ClusterCache:          k8sCache,
-		ClusterMap:            clusterMap,
-		CloudProvider:         cloudProvider,
-		CloudConfigController: cloudconfig.NewController(cloudProvider),
-		ConfigFileManager:     confManager,
-		ClusterInfoProvider:   clusterInfoProvider,
-		Model:                 costModel,
-		MetricsEmitter:        metricsEmitter,
-		AggregateCache:        aggregateCache,
-		CostDataCache:         costDataCache,
-		ClusterCostsCache:     clusterCostsCache,
-		OutOfClusterCache:     outOfClusterCache,
-		SettingsCache:         settingsCache,
-		CacheExpiration:       cacheExpiration,
-		httpServices:          services.NewCostModelServices(),
+		httpServices:        services.NewCostModelServices(),
+		PrometheusClient:    promCli,
+		ThanosClient:        thanosClient,
+		KubeClientSet:       kubeClientset,
+		ClusterCache:        k8sCache,
+		ClusterMap:          clusterMap,
+		CloudProvider:       cloudProvider,
+		ConfigFileManager:   confManager,
+		ClusterInfoProvider: clusterInfoProvider,
+		Model:               costModel,
+		MetricsEmitter:      metricsEmitter,
+		AggregateCache:      aggregateCache,
+		CostDataCache:       costDataCache,
+		ClusterCostsCache:   clusterCostsCache,
+		OutOfClusterCache:   outOfClusterCache,
+		SettingsCache:       settingsCache,
+		CacheExpiration:     cacheExpiration,
 	}
 
 	// Use the Accesses instance, itself, as the CostModelAggregator. This is
@@ -1769,120 +1720,104 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 		a.MetricsEmitter.Start()
 	}
 
-	log.Infof("Custom Costs enabled: %t", env.IsCustomCostEnabled())
-	if env.IsCustomCostEnabled() {
-		hourlyRepo := customcost.NewMemoryRepository()
-		dailyRepo := customcost.NewMemoryRepository()
-		ingConfig := customcost.DefaultIngestorConfiguration()
-		var err error
-		a.CustomCostPipelineService, err = customcost.NewPipelineService(hourlyRepo, dailyRepo, ingConfig)
-		if err != nil {
-			log.Errorf("error instantiating custom cost pipeline service: %v", err)
-			return nil
-		}
-
-		customCostQuerier := customcost.NewRepositoryQuerier(hourlyRepo, dailyRepo, ingConfig.HourlyDuration, ingConfig.DailyDuration)
-		a.CustomCostQueryService = customcost.NewQueryService(customCostQuerier)
-	}
-
-	a.Router.GET("/costDataModel", a.CostDataModel)
-	a.Router.GET("/costDataModelRange", a.CostDataModelRange)
-	a.Router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
-	a.Router.GET("/allocation/compute", a.ComputeAllocationHandler)
-	a.Router.GET("/allocation/compute/summary", a.ComputeAllocationHandlerSummary)
-	a.Router.GET("/allNodePricing", a.GetAllNodePricing)
-	a.Router.POST("/refreshPricing", a.RefreshPricingData)
-	a.Router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
-	a.Router.GET("/clusterCosts", a.ClusterCosts)
-	a.Router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
-	a.Router.GET("/validatePrometheus", a.GetPrometheusMetadata)
-	a.Router.GET("/managementPlatform", a.ManagementPlatform)
-	a.Router.GET("/clusterInfo", a.ClusterInfo)
-	a.Router.GET("/clusterInfoMap", a.GetClusterInfoMap)
-	a.Router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
-	a.Router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
-	a.Router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
-	a.Router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)
+	a.httpServices.RegisterAll(router)
+
+	router.GET("/costDataModel", a.CostDataModel)
+	router.GET("/costDataModelRange", a.CostDataModelRange)
+	router.GET("/aggregatedCostModel", a.AggregateCostModelHandler)
+	router.GET("/allocation/compute", a.ComputeAllocationHandler)
+	router.GET("/allocation/compute/summary", a.ComputeAllocationHandlerSummary)
+	router.GET("/allNodePricing", a.GetAllNodePricing)
+	router.POST("/refreshPricing", a.RefreshPricingData)
+	router.GET("/clusterCostsOverTime", a.ClusterCostsOverTime)
+	router.GET("/clusterCosts", a.ClusterCosts)
+	router.GET("/clusterCostsFromCache", a.ClusterCostsFromCacheHandler)
+	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
+	router.GET("/managementPlatform", a.ManagementPlatform)
+	router.GET("/clusterInfo", a.ClusterInfo)
+	router.GET("/clusterInfoMap", a.GetClusterInfoMap)
+	router.GET("/serviceAccountStatus", a.GetServiceAccountStatus)
+	router.GET("/pricingSourceStatus", a.GetPricingSourceStatus)
+	router.GET("/pricingSourceSummary", a.GetPricingSourceSummary)
+	router.GET("/pricingSourceCounts", a.GetPricingSourceCounts)
 
 	// endpoints migrated from server
-	a.Router.GET("/allPersistentVolumes", a.GetAllPersistentVolumes)
-	a.Router.GET("/allDeployments", a.GetAllDeployments)
-	a.Router.GET("/allStorageClasses", a.GetAllStorageClasses)
-	a.Router.GET("/allStatefulSets", a.GetAllStatefulSets)
-	a.Router.GET("/allNodes", a.GetAllNodes)
-	a.Router.GET("/allPods", a.GetAllPods)
-	a.Router.GET("/allNamespaces", a.GetAllNamespaces)
-	a.Router.GET("/allDaemonSets", a.GetAllDaemonSets)
-	a.Router.GET("/pod/:namespace/:name", a.GetPod)
-	a.Router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
-	a.Router.GET("/prometheusConfig", a.PrometheusConfig)
-	a.Router.GET("/prometheusTargets", a.PrometheusTargets)
-	a.Router.GET("/orphanedPods", a.GetOrphanedPods)
-	a.Router.GET("/installNamespace", a.GetInstallNamespace)
-	a.Router.GET("/installInfo", a.GetInstallInfo)
-	a.Router.GET("/podLogs", a.GetPodLogs)
-	a.Router.POST("/serviceKey", a.AddServiceKey)
-	a.Router.GET("/helmValues", a.GetHelmValues)
-	a.Router.GET("/status", a.Status)
+	router.GET("/allPersistentVolumes", a.GetAllPersistentVolumes)
+	router.GET("/allDeployments", a.GetAllDeployments)
+	router.GET("/allStorageClasses", a.GetAllStorageClasses)
+	router.GET("/allStatefulSets", a.GetAllStatefulSets)
+	router.GET("/allNodes", a.GetAllNodes)
+	router.GET("/allPods", a.GetAllPods)
+	router.GET("/allNamespaces", a.GetAllNamespaces)
+	router.GET("/allDaemonSets", a.GetAllDaemonSets)
+	router.GET("/pod/:namespace/:name", a.GetPod)
+	router.GET("/prometheusRecordingRules", a.PrometheusRecordingRules)
+	router.GET("/prometheusConfig", a.PrometheusConfig)
+	router.GET("/prometheusTargets", a.PrometheusTargets)
+	router.GET("/orphanedPods", a.GetOrphanedPods)
+	router.GET("/installNamespace", a.GetInstallNamespace)
+	router.GET("/installInfo", a.GetInstallInfo)
+	router.GET("/podLogs", a.GetPodLogs)
+	router.POST("/serviceKey", a.AddServiceKey)
+	router.GET("/helmValues", a.GetHelmValues)
+	router.GET("/status", a.Status)
 
 	// prom query proxies
-	a.Router.GET("/prometheusQuery", a.PrometheusQuery)
-	a.Router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
-	a.Router.GET("/thanosQuery", a.ThanosQuery)
-	a.Router.GET("/thanosQueryRange", a.ThanosQueryRange)
+	router.GET("/prometheusQuery", a.PrometheusQuery)
+	router.GET("/prometheusQueryRange", a.PrometheusQueryRange)
+	router.GET("/thanosQuery", a.ThanosQuery)
+	router.GET("/thanosQueryRange", a.ThanosQueryRange)
 
 	// diagnostics
-	a.Router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
-	a.Router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)
+	router.GET("/diagnostics/requestQueue", a.GetPrometheusQueueState)
+	router.GET("/diagnostics/prometheusMetrics", a.GetPrometheusMetrics)
 
-	a.Router.GET("/logs/level", a.GetLogLevel)
-	a.Router.POST("/logs/level", a.SetLogLevel)
+	return a
+}
 
-	a.Router.GET("/cloud/config/export", a.CloudConfigController.GetExportConfigHandler())
-	a.Router.GET("/cloud/config/enable", a.CloudConfigController.GetEnableConfigHandler())
-	a.Router.GET("/cloud/config/disable", a.CloudConfigController.GetDisableConfigHandler())
-	a.Router.GET("/cloud/config/delete", a.CloudConfigController.GetDeleteConfigHandler())
+// InitializeCloudCost Initializes Cloud Cost pipeline and querier and registers endpoints
+func InitializeCloudCost(router *httprouter.Router, providerConfig models.ProviderConfig) {
+	log.Debugf("Cloud Cost config path: %s", env.GetCloudCostConfigPath())
+	cloudConfigController := cloudconfig.NewMemoryController(providerConfig)
 
-	if env.IsCustomCostEnabled() {
-		a.Router.GET("/customCost/total", a.CustomCostQueryService.GetCustomCostTotalHandler())
-		a.Router.GET("/customCost/timeseries", a.CustomCostQueryService.GetCustomCostTimeseriesHandler())
-	}
+	repo := cloudcost.NewMemoryRepository()
+	cloudCostPipelineService := cloudcost.NewPipelineService(repo, cloudConfigController, cloudcost.DefaultIngestorConfiguration())
+	repoQuerier := cloudcost.NewRepositoryQuerier(repo)
+	cloudCostQueryService := cloudcost.NewQueryService(repoQuerier, repoQuerier)
 
-	// this endpoint is intentionally left out of the "if env.IsCustomCostEnabled()" conditional; in the handler, it is
-	// valid for CustomCostPipelineService to be nil
-	a.Router.GET("/customCost/status", a.CustomCostPipelineService.GetCustomCostStatusHandler())
+	router.GET("/cloud/config/export", cloudConfigController.GetExportConfigHandler())
+	router.GET("/cloud/config/enable", cloudConfigController.GetEnableConfigHandler())
+	router.GET("/cloud/config/disable", cloudConfigController.GetDisableConfigHandler())
+	router.GET("/cloud/config/delete", cloudConfigController.GetDeleteConfigHandler())
 
-	a.httpServices.RegisterAll(a.Router)
+	router.GET("/cloudCost", cloudCostQueryService.GetCloudCostHandler())
+	router.GET("/cloudCost/view/graph", cloudCostQueryService.GetCloudCostViewGraphHandler())
+	router.GET("/cloudCost/view/totals", cloudCostQueryService.GetCloudCostViewTotalsHandler())
+	router.GET("/cloudCost/view/table", cloudCostQueryService.GetCloudCostViewTableHandler())
 
-	return a
+	router.GET("/cloudCost/status", cloudCostPipelineService.GetCloudCostStatusHandler())
+	router.GET("/cloudCost/rebuild", cloudCostPipelineService.GetCloudCostRebuildHandler())
+	router.GET("/cloudCost/repair", cloudCostPipelineService.GetCloudCostRepairHandler())
 }
 
-func InitializeWithoutKubernetes() *Accesses {
+func InitializeCustomCost(router *httprouter.Router) *customcost.PipelineService {
+	hourlyRepo := customcost.NewMemoryRepository()
+	dailyRepo := customcost.NewMemoryRepository()
+	ingConfig := customcost.DefaultIngestorConfiguration()
 	var err error
-	if errorReportingEnabled {
-		err = sentry.Init(sentry.ClientOptions{Release: version.FriendlyVersion()})
-		if err != nil {
-			log.Infof("Failed to initialize sentry for error reporting")
-		} else {
-			err = errors.SetPanicHandler(handlePanic)
-			if err != nil {
-				log.Infof("Failed to set panic handler: %s", err)
-			}
-		}
-	}
-
-	a := &Accesses{
-		Router:                httprouter.New(),
-		CloudConfigController: cloudconfig.NewController(nil),
-		httpServices:          services.NewCostModelServices(),
+	customCostPipelineService, err := customcost.NewPipelineService(hourlyRepo, dailyRepo, ingConfig)
+	if err != nil {
+		log.Errorf("error instantiating custom cost pipeline service: %v", err)
+		return nil
 	}
 
-	a.Router.GET("/logs/level", a.GetLogLevel)
-	a.Router.POST("/logs/level", a.SetLogLevel)
+	customCostQuerier := customcost.NewRepositoryQuerier(hourlyRepo, dailyRepo, ingConfig.HourlyDuration, ingConfig.DailyDuration)
+	customCostQueryService := customcost.NewQueryService(customCostQuerier)
 
-	a.httpServices.RegisterAll(a.Router)
+	router.GET("/customCost/total", customCostQueryService.GetCustomCostTotalHandler())
+	router.GET("/customCost/timeseries", customCostQueryService.GetCustomCostTimeseriesHandler())
 
-	return a
+	return customCostPipelineService
 }
 
 func writeErrorResponse(w http.ResponseWriter, code int, message string) {

+ 7 - 4
pkg/customcost/queryservice_helper.go

@@ -5,10 +5,10 @@ import (
 
 	"github.com/opencost/opencost/core/pkg/filter"
 	"github.com/opencost/opencost/core/pkg/opencost"
-	"github.com/opencost/opencost/core/pkg/util/httputil"
+	"github.com/opencost/opencost/core/pkg/util/mapper"
 )
 
-func ParseCustomCostTotalRequest(qp httputil.QueryParams) (*CostTotalRequest, error) {
+func ParseCustomCostTotalRequest(qp mapper.PrimitiveMap) (*CostTotalRequest, error) {
 	windowStr := qp.Get("window", "")
 	if windowStr == "" {
 		return nil, fmt.Errorf("missing require window param")
@@ -28,6 +28,8 @@ func ParseCustomCostTotalRequest(qp httputil.QueryParams) (*CostTotalRequest, er
 		return nil, err
 	}
 
+	accumulate := opencost.ParseAccumulate(qp.Get("accumulate", "day"))
+
 	var filter filter.Filter
 	filterString := qp.Get("filter", "")
 	if filterString != "" {
@@ -42,13 +44,14 @@ func ParseCustomCostTotalRequest(qp httputil.QueryParams) (*CostTotalRequest, er
 		Start:       *window.Start(),
 		End:         *window.End(),
 		AggregateBy: aggregateBy,
+		Accumulate:  accumulate,
 		Filter:      filter,
 	}
 
 	return opts, nil
 }
 
-func ParseCustomCostTimeseriesRequest(qp httputil.QueryParams) (*CostTimeseriesRequest, error) {
+func ParseCustomCostTimeseriesRequest(qp mapper.PrimitiveMap) (*CostTimeseriesRequest, error) {
 	windowStr := qp.Get("window", "")
 	if windowStr == "" {
 		return nil, fmt.Errorf("missing require window param")
@@ -68,7 +71,7 @@ func ParseCustomCostTimeseriesRequest(qp httputil.QueryParams) (*CostTimeseriesR
 		return nil, err
 	}
 
-	accumulate := opencost.ParseAccumulate(qp.Get("accumulate", ""))
+	accumulate := opencost.ParseAccumulate(qp.Get("accumulate", "day"))
 
 	var filter filter.Filter
 	filterString := qp.Get("filter", "")

+ 53 - 0
pkg/customcost/queryservice_helper_test.go

@@ -0,0 +1,53 @@
+package customcost
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/core/pkg/opencost"
+	"github.com/opencost/opencost/core/pkg/util/httputil"
+)
+
+// Test_ParseCustomCostRequest_Accumulate focuses on testing that both Custom Cost request parsing functions properly
+// set the `accumulate` field, inspired by a desire to prevent a regression of https://kubecost.atlassian.net/browse/ENG-2212
+func Test_ParseCustomCostRequest_Accumulate(t *testing.T) {
+	testCases := map[string]struct {
+		accumulateString   string
+		expectedAccumulate opencost.AccumulateOption
+	}{
+		"no accumulate": {
+			accumulateString:   "",
+			expectedAccumulate: opencost.AccumulateOptionDay,
+		},
+		"hour accumulate": {
+			accumulateString:   "hour",
+			expectedAccumulate: opencost.AccumulateOptionHour,
+		},
+		"day accumulate": {
+			accumulateString:   "day",
+			expectedAccumulate: opencost.AccumulateOptionDay,
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			qp := httputil.NewQueryParams(map[string][]string{})
+			qp.Set("window", "7d")
+			if len(tc.accumulateString) > 0 {
+				qp.Set("accumulate", tc.accumulateString)
+			}
+
+			totalRequest, err := ParseCustomCostTotalRequest(qp)
+			if err != nil {
+				t.Fatalf("expected no error, got: %v", err)
+			} else if totalRequest.Accumulate != tc.expectedAccumulate {
+				t.Fatalf("expected %v, got %v", tc.expectedAccumulate, totalRequest.Accumulate)
+			}
+
+			timeseriesRequest, err := ParseCustomCostTimeseriesRequest(qp)
+			if err != nil {
+				t.Fatalf("expected no error, got: %v", err)
+			} else if timeseriesRequest.Accumulate != tc.expectedAccumulate {
+				t.Fatalf("expected %v, got %v", tc.expectedAccumulate, timeseriesRequest.Accumulate)
+			}
+		})
+	}
+}

+ 13 - 0
pkg/env/costmodelenv.go

@@ -36,6 +36,7 @@ const (
 	RemotePWEnvVar                 = "REMOTE_WRITE_PASSWORD"
 	SQLAddressEnvVar               = "SQL_ADDRESS"
 	UseCSVProviderEnvVar           = "USE_CSV_PROVIDER"
+	UseCustomProviderEnvVar        = "USE_CUSTOM_PROVIDER"
 	CSVRegionEnvVar                = "CSV_REGION"
 	CSVEndpointEnvVar              = "CSV_ENDPOINT"
 	CSVPathEnvVar                  = "CSV_PATH"
@@ -107,6 +108,8 @@ const (
 	AllocationNodeLabelsEnabled     = "ALLOCATION_NODE_LABELS_ENABLED"
 	AllocationNodeLabelsIncludeList = "ALLOCATION_NODE_LABELS_INCLUDE_LIST"
 
+	AssetIncludeLocalDiskCostEnvVar = "ASSET_INCLUDE_LOCAL_DISK_COST"
+
 	regionOverrideList = "REGION_OVERRIDE_LIST"
 
 	ExportCSVFile       = "EXPORT_CSV_FILE"
@@ -409,6 +412,12 @@ func IsUseCSVProvider() bool {
 	return env.GetBool(UseCSVProviderEnvVar, false)
 }
 
+// IsUseCustomProvider returns the environment variable value for UseCustomProviderEnvVar which represents
+// whether or not the use of a custom cost provider is enabled.
+func IsUseCustomProvider() bool {
+	return env.GetBool(UseCustomProviderEnvVar, false)
+}
+
 // GetCSVRegion returns the environment variable value for CSVRegionEnvVar which represents the
 // region configured for a CSV provider.
 func GetCSVRegion() string {
@@ -633,6 +642,10 @@ func GetAllocationNodeLabelsIncludeList() []string {
 	return list
 }
 
+func GetAssetIncludeLocalDiskCost() bool {
+	return env.GetBool(AssetIncludeLocalDiskCostEnvVar, true)
+}
+
 func GetRegionOverrideList() []string {
 	regionList := env.GetList(regionOverrideList, ",")
 

+ 3 - 1
pkg/metrics/metricsconfig.go

@@ -17,7 +17,9 @@ var (
 )
 
 type MetricsConfig struct {
-	DisabledMetrics []string `json:"disabledMetrics"`
+	DisabledMetrics    []string        `json:"disabledMetrics"`
+	UseLabelsWhitelist bool            `json:"useLabelsWhitelist,omitempty"`
+	LabelsWhitelist    map[string]bool `json:"labelsWhiteList,omitempty"`
 }
 
 // Gets map of disabled metrics to empty structs

+ 53 - 1
pkg/metrics/podlabelmetrics.go

@@ -14,6 +14,14 @@ import (
 type KubePodLabelsCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	metricsConfig    MetricsConfig
+	labelsWhitelist  map[string]bool
+}
+
+func (kpmc *KubePodLabelsCollector) SetLabelsWhiteList() {
+	kpmc.labelsWhitelist = make(map[string]bool)
+	for k, v := range kpmc.metricsConfig.LabelsWhitelist {
+		kpmc.labelsWhitelist[k] = v
+	}
 }
 
 // Describe sends the super-set of all possible descriptors of pod labels only
@@ -29,6 +37,40 @@ func (kpmc KubePodLabelsCollector) Describe(ch chan<- *prometheus.Desc) {
 	}
 }
 
+func (kpmc *KubePodLabelsCollector) UpdateControllerSelectorsCache() {
+	for _, r := range kpmc.KubeClusterCache.GetAllReplicaSets() {
+		for k := range r.Spec.Selector.MatchLabels {
+			kpmc.labelsWhitelist[k] = true
+		}
+		for _, v := range r.Spec.Selector.MatchExpressions {
+			kpmc.labelsWhitelist[v.Key] = true
+		}
+	}
+	for _, ss := range kpmc.KubeClusterCache.GetAllStatefulSets() {
+		for k := range ss.Spec.Selector.MatchLabels {
+			kpmc.labelsWhitelist[k] = true
+		}
+		for _, v := range ss.Spec.Selector.MatchExpressions {
+			kpmc.labelsWhitelist[v.Key] = true
+		}
+	}
+}
+
+func (kpmc *KubePodLabelsCollector) UpdateServiceLabels() {
+	for _, service := range kpmc.KubeClusterCache.GetAllServices() {
+		// Just unroll the selector and keep all labels whose keys could match a service selector
+		for k := range service.Spec.Selector {
+			kpmc.labelsWhitelist[k] = true
+		}
+	}
+}
+
+func (kpmc *KubePodLabelsCollector) UpdateWhitelist() {
+	kpmc.SetLabelsWhiteList()
+	kpmc.UpdateControllerSelectorsCache()
+	kpmc.UpdateServiceLabels()
+}
+
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpmc KubePodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 	pods := kpmc.KubeClusterCache.GetAllPods()
@@ -41,7 +83,17 @@ func (kpmc KubePodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 
 		// Pod Labels
 		if _, disabled := disabledMetrics["kube_pod_labels"]; !disabled {
-			labelNames, labelValues := promutil.KubePrependQualifierToLabels(promutil.SanitizeLabels(pod.Labels), "label_")
+			podLabels := pod.Labels
+			if kpmc.metricsConfig.UseLabelsWhitelist {
+				kpmc.UpdateWhitelist()
+				for lname := range pod.Labels {
+					if _, ok := kpmc.labelsWhitelist[lname]; !ok {
+						delete(podLabels, lname)
+					}
+				}
+			}
+
+			labelNames, labelValues := promutil.KubePrependQualifierToLabels(promutil.SanitizeLabels(podLabels), "label_")
 			ch <- newKubePodLabelsMetric("kube_pod_labels", podNS, podName, podUID, labelNames, labelValues)
 		}
 

+ 77 - 0
pkg/metrics/podlabelmetrics_test.go

@@ -0,0 +1,77 @@
+package metrics
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestWhitelist(t *testing.T) {
+	sampleServices := []*v1.Service{&v1.Service{
+		Spec: v1.ServiceSpec{
+			Selector: map[string]string{"servicewhitelistlabel": "foo"},
+		},
+	}}
+	replicaSetLabelSelector := metav1.LabelSelector{
+		MatchLabels: map[string]string{"replicasetwhitelistlabel1": "bar"},
+	}
+	sampleReplicaSets := []*appsv1.ReplicaSet{{
+		Spec: appsv1.ReplicaSetSpec{
+			Selector: &replicaSetLabelSelector,
+		},
+	}}
+
+	sampleStatefulSets := []*appsv1.StatefulSet{}
+
+	kc := NewFakeCache(sampleReplicaSets, sampleStatefulSets, sampleServices)
+	wl := map[string]bool{
+		"whitelistedlabel": true,
+	}
+	mc := MetricsConfig{
+		DisabledMetrics:    []string{},
+		UseLabelsWhitelist: true,
+		LabelsWhitelist:    wl,
+	}
+	kplc := KubePodLabelsCollector{
+		KubeClusterCache: kc,
+		metricsConfig:    mc,
+	}
+	kplc.UpdateWhitelist()
+	if !kplc.labelsWhitelist["servicewhitelistlabel"] {
+		t.Errorf("Missing expected label %s", "servicewhitelistlabel")
+	}
+	if !kplc.labelsWhitelist["replicasetwhitelistlabel1"] {
+		t.Errorf("Missing expected label %s", "servicewhitelistlabel1")
+	}
+
+}
+
+type FakeCache struct {
+	clustercache.ClusterCache
+	replicasets  []*appsv1.ReplicaSet
+	statefulsets []*appsv1.StatefulSet
+	services     []*v1.Service
+}
+
+func (f FakeCache) GetAllReplicaSets() []*appsv1.ReplicaSet {
+	return f.replicasets
+}
+
+func (f FakeCache) GetAllStatefulSets() []*appsv1.StatefulSet {
+	return f.statefulsets
+}
+
+func (f FakeCache) GetAllServices() []*v1.Service {
+	return f.services
+}
+
+func NewFakeCache(replicasets []*appsv1.ReplicaSet, statefulsets []*appsv1.StatefulSet, services []*v1.Service) FakeCache {
+	return FakeCache{
+		replicasets:  replicasets,
+		statefulsets: statefulsets,
+		services:     services,
+	}
+}

+ 1 - 1
pkg/storage/prefixedbucketstorage.go

@@ -32,7 +32,7 @@ func validPrefix(prefix string) bool {
 }
 
 func conditionalPrefix(prefix, name string) string {
-	if len(name) > 0 {
+	if len(name) > 0 && !strings.HasPrefix(name, prefix) {
 		return withPrefix(prefix, name)
 	}
 

+ 7 - 7
pkg/storage/s3storage.go

@@ -347,7 +347,7 @@ func (s3 *S3Storage) FullPath(name string) string {
 func (s3 *S3Storage) Read(name string) ([]byte, error) {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Read(%s)", name)
+	log.Tracef("S3Storage::Read(%s)", name)
 	ctx := context.Background()
 
 	return s3.getRange(ctx, name, 0, -1)
@@ -357,7 +357,7 @@ func (s3 *S3Storage) Read(name string) ([]byte, error) {
 // Exists checks if the given object exists.
 func (s3 *S3Storage) Exists(name string) (bool, error) {
 	name = trimLeading(name)
-	//log.Debugf("S3Storage::Exists(%s)", name)
+	log.Tracef("S3Storage::Exists(%s)", name)
 
 	ctx := context.Background()
 
@@ -376,7 +376,7 @@ func (s3 *S3Storage) Exists(name string) (bool, error) {
 func (s3 *S3Storage) Write(name string, data []byte) error {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Write(%s)", name)
+	log.Tracef("S3Storage::Write(%s)", name)
 
 	ctx := context.Background()
 	sse, err := s3.getServerSideEncryption(ctx)
@@ -410,7 +410,7 @@ func (s3 *S3Storage) Write(name string, data []byte) error {
 func (s3 *S3Storage) Stat(name string) (*StorageInfo, error) {
 	name = trimLeading(name)
 
-	//log.Debugf("S3Storage::Stat(%s)", name)
+	log.Tracef("S3Storage::Stat(%s)", name)
 	ctx := context.Background()
 
 	objInfo, err := s3.client.StatObject(ctx, s3.name, name, minio.StatObjectOptions{})
@@ -432,7 +432,7 @@ func (s3 *S3Storage) Stat(name string) (*StorageInfo, error) {
 func (s3 *S3Storage) Remove(name string) error {
 	name = trimLeading(name)
 
-	log.Debugf("S3Storage::Remove(%s)", name)
+	log.Tracef("S3Storage::Remove(%s)", name)
 	ctx := context.Background()
 
 	return s3.client.RemoveObject(ctx, s3.name, name, minio.RemoveObjectOptions{})
@@ -441,7 +441,7 @@ func (s3 *S3Storage) Remove(name string) error {
 func (s3 *S3Storage) List(path string) ([]*StorageInfo, error) {
 	path = trimLeading(path)
 
-	log.Debugf("S3Storage::List(%s)", path)
+	log.Tracef("S3Storage::List(%s)", path)
 	ctx := context.Background()
 
 	// Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the
@@ -488,7 +488,7 @@ func (s3 *S3Storage) List(path string) ([]*StorageInfo, error) {
 func (s3 *S3Storage) ListDirectories(path string) ([]*StorageInfo, error) {
 	path = trimLeading(path)
 
-	log.Debugf("S3Storage::List(%s)", path)
+	log.Tracef("S3Storage::List(%s)", path)
 	ctx := context.Background()
 
 	if path != "" {

+ 3 - 0
pkg/storage/storage.go

@@ -4,6 +4,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/opencost/opencost/core/pkg/log"
 	"github.com/pkg/errors"
 )
 
@@ -63,6 +64,8 @@ func Validate(storage Storage) error {
 	const testPath = "tmp/test.txt"
 	const testContent = "test"
 
+	log.Debug("validating storage")
+
 	// attempt to read a path
 	_, err := storage.Exists(testPath)
 	if err != nil {

+ 0 - 3
ui/.babelrc

@@ -1,3 +0,0 @@
-{
-  "plugins": ["@babel/plugin-transform-runtime", "@babel/plugin-proposal-class-properties"]
-}

+ 0 - 2
ui/.dockerignore

@@ -1,2 +0,0 @@
-.parcel-cache/
-node_modules/

+ 0 - 1
ui/.nvmrc

@@ -1 +0,0 @@
-18.3.0

+ 0 - 48
ui/Dockerfile

@@ -1,48 +0,0 @@
-FROM node:18.3.0 as builder
-ADD package*.json /opt/ui/
-WORKDIR /opt/ui
-RUN npm install
-ADD src /opt/ui/src
-RUN npx parcel build src/index.html
-
-FROM nginx:alpine
-
-LABEL org.opencontainers.image.description="Cross-cloud cost allocation models for Kubernetes workloads"
-LABEL org.opencontainers.image.documentation=https://opencost.io/docs/
-LABEL org.opencontainers.image.licenses=Apache-2.0
-LABEL org.opencontainers.image.source=https://github.com/opencost/opencost
-LABEL org.opencontainers.image.title=opencost-ui
-LABEL org.opencontainers.image.url=https://opencost.io
-
-ARG version=dev
-ARG	commit=HEAD
-ENV VERSION=${version}
-ENV HEAD=${commit}
-
-ENV API_PORT=9003
-ENV API_SERVER=0.0.0.0
-ENV UI_PORT=9090
-
-COPY --from=builder /opt/ui/dist /opt/ui/dist
-RUN mkdir -p /var/www
-
-COPY THIRD_PARTY_LICENSES.txt /THIRD_PARTY_LICENSES.txt
-COPY --from=builder /opt/ui/dist /var/www
-
-COPY default.nginx.conf.template /etc/nginx/conf.d/default.nginx.conf.template
-COPY nginx.conf /etc/nginx/
-COPY ./docker-entrypoint.sh /usr/local/bin/
-
-RUN rm -rf /etc/nginx/conf.d/default.conf
-
-RUN adduser 1001 -g 1000 -D
-RUN chown 1001:1000 -R /var/www
-RUN chown 1001:1000 -R /etc/nginx
-RUN chown 1001:1000 -R /usr/local/bin/docker-entrypoint.sh
-
-ENV BASE_URL=/model
-
-USER 1001
-
-ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
-CMD ["nginx", "-g", "daemon off;"]

+ 0 - 38
ui/Dockerfile.cross

@@ -1,38 +0,0 @@
-FROM nginx:alpine
-
-LABEL org.opencontainers.image.description="Cross-cloud cost allocation models for Kubernetes workloads"
-LABEL org.opencontainers.image.documentation=https://opencost.io/docs/
-LABEL org.opencontainers.image.licenses=Apache-2.0
-LABEL org.opencontainers.image.source=https://github.com/opencost/opencost
-LABEL org.opencontainers.image.title=opencost-ui
-LABEL org.opencontainers.image.url=https://opencost.io
-
-ARG version=dev
-ARG	commit=HEAD
-ENV VERSION=${version}
-ENV HEAD=${commit}
-
-ENV API_PORT=9003
-ENV API_SERVER=0.0.0.0
-ENV UI_PORT=9090
-
-COPY ./dist /opt/ui/dist
-COPY THIRD_PARTY_LICENSES.txt /THIRD_PARTY_LICENSES.txt
-COPY default.nginx.conf.template /etc/nginx/conf.d/default.nginx.conf.template
-COPY nginx.conf /etc/nginx/
-COPY ./docker-entrypoint.sh /usr/local/bin/
-RUN mkdir -p /var/www
-
-RUN rm -rf /etc/nginx/conf.d/default.conf
-
-RUN adduser 1001 -g 1000 -D
-RUN chown 1001:1000 -R /var/www
-RUN chown 1001:1000 -R /etc/nginx
-RUN chown 1001:1000 -R /usr/local/bin/docker-entrypoint.sh
-
-ENV BASE_URL=/model
-
-USER 1001
-
-ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
-CMD ["nginx", "-g", "daemon off;"]

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio