Procházet zdrojové kódy

Merge branch 'develop' into governance

Matt Ray před 3 roky
rodič
revize
4e7caaf875
76 změnil soubory, kde provedl 7295 přidání a 297 odebrání
  1. 87 0
      pkg/cloud/alibaba/authorizer.go
  2. 130 0
      pkg/cloud/alibaba/boaconfiguration.go
  3. 289 0
      pkg/cloud/alibaba/boaconfiguration_test.go
  4. 127 0
      pkg/cloud/alibaba/boaquerier.go
  5. 16 15
      pkg/cloud/alibaba/provider.go
  6. 2 2
      pkg/cloud/alibaba/provider_test.go
  7. 223 0
      pkg/cloud/aws/athenaconfiguration.go
  8. 594 0
      pkg/cloud/aws/athenaconfiguration_test.go
  9. 521 0
      pkg/cloud/aws/athenaintegration.go
  10. 65 0
      pkg/cloud/aws/athenaintegration_test.go
  11. 259 0
      pkg/cloud/aws/athenaquerier.go
  12. 251 0
      pkg/cloud/aws/authorizer.go
  13. 67 0
      pkg/cloud/aws/authorizer_test.go
  14. 13 23
      pkg/cloud/aws/provider.go
  15. 0 0
      pkg/cloud/aws/provider_test.go
  16. 134 0
      pkg/cloud/aws/s3configuration.go
  17. 40 0
      pkg/cloud/aws/s3connection.go
  18. 387 0
      pkg/cloud/aws/s3connection_test.go
  19. 181 0
      pkg/cloud/aws/s3selectquerier.go
  20. 80 0
      pkg/cloud/azure/authorizer.go
  21. 322 0
      pkg/cloud/azure/billingexportparser.go
  22. 194 0
      pkg/cloud/azure/billingexportparser_test.go
  23. 0 0
      pkg/cloud/azure/pricesheetclient.go
  24. 0 0
      pkg/cloud/azure/pricesheetdownloader.go
  25. 0 0
      pkg/cloud/azure/pricesheetdownloader_test.go
  26. 3 1
      pkg/cloud/azure/provider.go
  27. 0 0
      pkg/cloud/azure/provider_test.go
  28. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/BOM.csv
  29. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv
  30. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv
  31. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/German.csv
  32. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv
  33. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/YA.csv
  34. 2 0
      pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv
  35. 88 0
      pkg/cloud/azure/resources/billingexports/values/Template.csv
  36. 2 0
      pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv
  37. 170 0
      pkg/cloud/azure/storagebillingparser.go
  38. 204 0
      pkg/cloud/azure/storagebillingparser_test.go
  39. 179 0
      pkg/cloud/azure/storageconfiguration.go
  40. 446 0
      pkg/cloud/azure/storageconfiguration_test.go
  41. 77 0
      pkg/cloud/azure/storageconnection.go
  42. 12 0
      pkg/cloud/cloudcostintegration.go
  43. 53 0
      pkg/cloud/config/authorizer.go
  44. 37 0
      pkg/cloud/config/config.go
  45. 47 0
      pkg/cloud/connectionstatus.go
  46. 132 0
      pkg/cloud/gcp/authorizer.go
  47. 172 0
      pkg/cloud/gcp/bigqueryconfiguration.go
  48. 388 0
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  49. 110 0
      pkg/cloud/gcp/bigqueryquerier.go
  50. 1 0
      pkg/cloud/gcp/provider.go
  51. 0 0
      pkg/cloud/gcp/provider_test.go
  52. 1 1
      pkg/cloud/provider/csvprovider.go
  53. 6 6
      pkg/cloud/provider/customprovider.go
  54. 14 12
      pkg/cloud/provider/provider.go
  55. 2 3
      pkg/cloud/provider/providerconfig.go
  56. 6 6
      pkg/cloud/scaleway/provider.go
  57. 3 3
      pkg/cmd/agent/agent.go
  58. 29 6
      pkg/costmodel/aggregation.go
  59. 5 5
      pkg/costmodel/allocation_helpers.go
  60. 11 0
      pkg/costmodel/assets.go
  61. 31 15
      pkg/costmodel/cluster.go
  62. 58 10
      pkg/costmodel/cluster_helpers.go
  63. 29 5
      pkg/costmodel/cluster_helpers_test.go
  64. 3 3
      pkg/costmodel/router.go
  65. 294 100
      pkg/kubecost/allocation.go
  66. 2 0
      pkg/kubecost/allocation_json.go
  67. 398 32
      pkg/kubecost/allocation_test.go
  68. 16 0
      pkg/kubecost/asset.go
  69. 4 0
      pkg/kubecost/asset_json.go
  70. 3 2
      pkg/kubecost/bingen.go
  71. 15 3
      pkg/kubecost/cloudcost.go
  72. 12 0
      pkg/kubecost/cloudcost_test.go
  73. 168 2
      pkg/kubecost/kubecost_codecs.go
  74. 4 4
      pkg/kubecost/status.go
  75. 26 0
      pkg/util/allocationfilterutil/queryfilters.go
  76. 38 38
      test/cloud_test.go

+ 87 - 0
pkg/cloud/alibaba/authorizer.go

@@ -0,0 +1,87 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+const AccessKeyAuthorizerType = "AlibabaAccessKey"
+
+// Authorizer provide *bssopenapi.Client for Alibaba cloud BOS for Billing related SDK calls
+type Authorizer interface {
+	config.Authorizer
+	GetCredentials() (auth.Credential, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("alibaba: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
+// AccessKey holds Alibaba credentials parsing from the service-key.json file.
+type AccessKey struct {
+	AccessKeyID     string `json:"accessKeyID"`
+	AccessKeySecret string `json:"accessKeySecret"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKeyID"] = ak.AccessKeyID
+	fmap["accessKeySecret"] = ak.AccessKeySecret
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKeyID == "" {
+		return fmt.Errorf("AccessKey: missing Access key ID")
+	}
+	if ak.AccessKeySecret == "" {
+		return fmt.Errorf("AccessKey: missing Access Key secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKeyID != thatConfig.AccessKeyID {
+		return false
+	}
+	if ak.AccessKeySecret != thatConfig.AccessKeySecret {
+		return false
+	}
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		AccessKeyID:     ak.AccessKeyID,
+		AccessKeySecret: config.Redacted,
+	}
+}
+
+// GetCredentials creates a credentials object to authorize the use of service sdk calls
+func (ak *AccessKey) GetCredentials() (auth.Credential, error) {
+	err := ak.Validate()
+	if err != nil {
+		return nil, err
+	}
+	return &credentials.AccessKeyCredential{AccessKeyId: ak.AccessKeyID, AccessKeySecret: ak.AccessKeySecret}, nil
+}

+ 130 - 0
pkg/cloud/alibaba/boaconfiguration.go

@@ -0,0 +1,130 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// BOAConfiguration is the BSS open API configuration for Alibaba's Billing information
+type BOAConfiguration struct {
+	Account    string     `json:"account"`
+	Region     string     `json:"region"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (bc *BOAConfiguration) Validate() error {
+	// Validate Authorizer
+	if bc.Authorizer == nil {
+		return fmt.Errorf("BOAConfiguration: missing authorizer")
+	}
+
+	err := bc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	// Validate base properties
+	if bc.Region == "" {
+		return fmt.Errorf("BOAConfiguration: missing region")
+	}
+
+	if bc.Account == "" {
+		return fmt.Errorf("BOAConfiguration: missing account")
+	}
+	return nil
+}
+
+func (bc *BOAConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BOAConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bc.Authorizer != nil {
+		if !bc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bc.Account != thatConfig.Account {
+		return false
+	}
+
+	if bc.Region != thatConfig.Region {
+		return false
+	}
+	return true
+}
+
+func (bc *BOAConfiguration) Sanitize() config.Config {
+	return &BOAConfiguration{
+		Account:    bc.Account,
+		Region:     bc.Region,
+		Authorizer: bc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (bc *BOAConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
+}
+
+func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Account = account
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Region = region
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+	if acc.IsEmpty() {
+		return nil
+	}
+	var configurer Authorizer
+
+	configurer = &AccessKey{
+		AccessKeyID:     acc.AlibabaServiceKeyName,
+		AccessKeySecret: acc.AlibabaServiceKeySecret,
+	}
+
+	return &BOAConfiguration{
+		Account:    acc.AlibabaAccountID,
+		Region:     acc.AlibabaClusterRegion,
+		Authorizer: configurer,
+	}
+}

+ 289 - 0
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -0,0 +1,289 @@
+package alibaba
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBoaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BOAConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access key ID"),
+		},
+		"access secret invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID: "accessKeyId",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access Key secret"),
+		},
+		"missing authorizer": {
+			config: BOAConfiguration{
+				Account:    "Account Number",
+				Region:     "Region",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing authorizer"),
+		},
+		"missing Account": {
+			config: BOAConfiguration{
+				Account: "",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing account"),
+		},
+		"missing Region": {
+			config: BOAConfiguration{
+				Account: "Account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing region"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BOAConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id2",
+					AccessKeySecret: "secret2",
+				},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different region": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &AccessKey{
+				AccessKeyID:     "id",
+				AccessKeySecret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BOAConfiguration
+	}{
+		"Empty Config": {
+			config: BOAConfiguration{},
+		},
+		"AccessKey": {
+			config: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BOAConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 127 - 0
pkg/cloud/alibaba/boaquerier.go

@@ -0,0 +1,127 @@
+package alibaba
+
+import (
+	"fmt"
+	"strings"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+const (
+	boaIsNode    = "i-"    // isNode if prefix of instance_id is i-
+	boaIsDisk    = "d-"    // isDisk if prefix is disk is d-
+	boaIsNetwork = "piece" //usage unit of network resource in Alibaba is Piece
+)
+
+type BoaQuerier struct {
+	BOAConfiguration
+}
+
+func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BoaQuerier)
+	if !ok {
+		return false
+	}
+
+	return bq.BOAConfiguration.Equals(&thatConfig.BOAConfiguration)
+}
+
+// QueryInstanceBill performs the request to the BSS client and get the response for the current page number
+func (bq *BoaQuerier) QueryInstanceBill(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, pageNum int) (*bssopenapi.QueryInstanceBillResponse, error) {
+	log.Debugf("QueryInstanceBill: query for BSS Open API for billing date: %s with pageNum: %d ", billingDate, pageNum)
+	request := bssopenapi.CreateQueryInstanceBillRequest()
+	request.Scheme = invocationScheme
+	request.BillingCycle = billingCycle
+	request.IsBillingItem = requests.NewBoolean(true)
+	request.Granularity = granularity
+	request.BillingDate = billingDate
+	request.PageNum = requests.NewInteger(pageNum)
+	response, err := client.QueryInstanceBill(request)
+	if err != nil {
+		return nil, fmt.Errorf("QueryInstanceBill: Failed to hit the BSS Open API with error for page num %d: %v", pageNum, err)
+	}
+	log.Debugf("QueryInstanceBill: Total Number of total items for billing Date: %s pageNum: %d is %d", billingDate, pageNum, response.Data.TotalCount)
+	return response, nil
+}
+
+// QueryBoaPaginated Calls the API in a paginated fashion. There's no paramter in API that can distinguish if it hasMorePages
+// hence the logic of processedItem <= TotalItem.
+func (bq *BoaQuerier) QueryBoaPaginated(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, fn func(*bssopenapi.QueryInstanceBillResponse) bool) error {
+	pageNum := 1
+	processedItem := 0 // setting default here to hit the API for the first time
+	totalItem := 1
+	for processedItem < totalItem {
+		log.Debugf("QueryBoaPaginated: query for BSS Open API for billing date: %s with pageNum: %d", billingDate, pageNum)
+		response, err := bq.QueryInstanceBill(client, isBillingItem, invocationScheme, granularity, billingCycle, billingDate, pageNum)
+		if err != nil {
+			return fmt.Errorf("QueryBoaPaginated for billing cycle : %s, billing date: %s, page num %d: %v", billingCycle, billingDate, pageNum, err)
+		}
+		fn(response)
+		totalItem = response.Data.TotalCount
+		processedItem += response.Data.PageSize
+		pageNum += 1
+	}
+	return nil
+}
+
+// GetBoaQueryInstanceBillFunc gives the item to the handler function in boaIntegration.go to process
+// computeItem, topNItem and aggregatedItem
+func GetBoaQueryInstanceBillFunc(fn func(bssopenapi.Item) error, billingDate string) func(output *bssopenapi.QueryInstanceBillResponse) bool {
+	processBOAItems := func(output *bssopenapi.QueryInstanceBillResponse) bool {
+		// This could be connection error were unable to fetch response output from Client
+		if output == nil {
+			log.Errorf("BoaQuerier: No Response from the ALibaba BSS Open API client for billing Date: %s", billingDate)
+			return false
+		}
+
+		// These infer that the rest call was successful but the Cloud Usage resource for those days were 0
+		if output.Data.TotalCount == 0 {
+			log.Warnf("BoaQuerier: Total Item Count is 0 for billing Date: %s ", billingDate)
+			return false
+		}
+
+		for _, item := range output.Data.Items.Item {
+			fn(item)
+		}
+		return true
+	}
+	return processBOAItems
+}
+
+// SelectAlibabaCategory processes the Alibaba service to associated Kubecost category
+func SelectAlibabaCategory(item bssopenapi.Item) string {
+	if (item != bssopenapi.Item{}) {
+		// Provider ID has prefix "i-" for node in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsNode) {
+			return kubecost.ComputeCategory
+		}
+		// Provider ID for disk start with "d-" for storage type in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsDisk) {
+			return kubecost.StorageCategory
+		}
+		// Network has the highest priority and is based on the usage type of "piece" in Alibaba
+		if item.UsageUnit == boaIsNetwork {
+			return kubecost.NetworkCategory
+		}
+	}
+
+	// Alibaba CUR integration report has service lower case mostly unlike AWS
+	// TO-DO: Can investigate further product codes but bare minimal differentiation for start
+	switch strings.ToLower(item.ProductCode) {
+	case "slb", "eip", "nis", "gtm":
+		return kubecost.NetworkCategory
+	case "ecs", "eds", "sas":
+		return kubecost.ComputeCategory
+	case "ack":
+		return kubecost.ManagementCategory
+	case "ebs", "oss", "scu":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}

+ 16 - 15
pkg/cloud/aliyunprovider.go → pkg/cloud/alibaba/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"errors"
@@ -122,8 +122,9 @@ var alibabaInstanceFamilies = []string{
 }
 
 // AlibabaInfo contains configuration for Alibaba's CUR integration
+// Deprecated: v1.104 Use BOAConfiguration instead
 type AlibabaInfo struct {
-	AlibabaClusterRegion    string `json:"clusterRegion"`
+	AlibabaClusterRegion    string `json:"ClusterRegion"`
 	AlibabaServiceKeyName   string `json:"serviceKeyName"`
 	AlibabaServiceKeySecret string `json:"serviceKeySecret"`
 	AlibabaAccountID        string `json:"accountID"`
@@ -138,6 +139,7 @@ func (ai *AlibabaInfo) IsEmpty() bool {
 }
 
 // AlibabaAccessKey holds Alibaba credentials parsing from the service-key.json file.
+// Deprecated: v1.104 Use AccessKey instead
 type AlibabaAccessKey struct {
 	AccessKeyID     string `json:"alibaba_access_key_id"`
 	SecretAccessKey string `json:"alibaba_secret_access_key"`
@@ -323,15 +325,14 @@ type Alibaba struct {
 	// Lock Needed to provide thread safe
 	DownloadPricingDataLock sync.RWMutex
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
-	*CustomProvider
+	Config                  models.ProviderConfig
+	ServiceAccountChecks    *models.ServiceAccountChecks
+	ClusterAccountId        string
+	ClusterRegion           string
 
 	// The following fields are unexported because of avoiding any leak of secrets of these keys.
 	// Alibaba Access key used specifically in signer interface used to sign API calls
-	serviceAccountChecks *models.ServiceAccountChecks
-	clusterAccountId     string
-	clusterRegion        string
-	accessKey            *credentials.AccessKeyCredential
+	accessKey *credentials.AccessKeyCredential
 	// Map of regionID to sdk.client to call API for that region
 	clients map[string]*sdk.Client
 }
@@ -461,10 +462,10 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	}
 
 	// set the first occurrence of region from the node
-	if alibaba.clusterRegion == "" {
+	if alibaba.ClusterRegion == "" {
 		for _, node := range nodeList {
 			if regionID, ok := node.Labels["topology.kubernetes.io/region"]; ok {
-				alibaba.clusterRegion = regionID
+				alibaba.ClusterRegion = regionID
 				break
 			}
 		}
@@ -478,7 +479,7 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	for _, pv := range pvList {
 		pvRegion := determinePVRegion(pv)
 		if pvRegion == "" {
-			pvRegion = alibaba.clusterRegion
+			pvRegion = alibaba.ClusterRegion
 		}
 		pricingObj := &AlibabaPricing{}
 		slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, pvRegion)
@@ -685,8 +686,8 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 	m := make(map[string]string)
 	m["name"] = clusterName
 	m["provider"] = kubecost.AlibabaProvider
-	m["project"] = alibaba.clusterAccountId
-	m["region"] = alibaba.clusterRegion
+	m["project"] = alibaba.ClusterAccountId
+	m["region"] = alibaba.ClusterRegion
 	m["id"] = env.GetClusterID()
 	return m, nil
 }
@@ -912,7 +913,7 @@ func (alibaba *Alibaba) GetPVKey(pv *v1.PersistentVolume, parameters map[string]
 	regionID := defaultRegion
 	// If default Region is not passed default it to cluster region ID.
 	if defaultRegion == "" {
-		regionID = alibaba.clusterRegion
+		regionID = alibaba.ClusterRegion
 	}
 	slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, defaultRegion)
 	return &AlibabaPVKey{
@@ -1356,7 +1357,7 @@ func determinePVRegion(pv *v1.PersistentVolume) string {
 
 	if pvZone == "" {
 		// zone and regionID labels are optional in Alibaba PV creation, while PV through UI creation put's a zone PV is associated with and the region
-		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to clusterRegion.
+		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to ClusterRegion.
 		if pv.Spec.NodeAffinity != nil {
 			nodeAffinity := pv.Spec.NodeAffinity
 			if nodeAffinity.Required != nil && nodeAffinity.Required.NodeSelectorTerms != nil {

+ 2 - 2
pkg/cloud/aliyunprovider_test.go → pkg/cloud/alibaba/provider_test.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"fmt"
@@ -9,7 +9,7 @@ import (
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	v1 "k8s.io/api/core/v1"
-	resource "k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/api/resource"
 )
 
 func TestCreateDescribePriceACSRequest(t *testing.T) {

+ 223 - 0
pkg/cloud/aws/athenaconfiguration.go

@@ -0,0 +1,223 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// AthenaConfiguration
+type AthenaConfiguration struct {
+	Bucket     string     `json:"bucket"`
+	Region     string     `json:"region"`
+	Database   string     `json:"database"`
+	Table      string     `json:"table"`
+	Workgroup  string     `json:"workgroup"`
+	Account    string     `json:"account"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (ac *AthenaConfiguration) Validate() error {
+
+	// Validate Authorizer
+	if ac.Authorizer == nil {
+		return fmt.Errorf("AthenaConfiguration: missing Authorizer")
+	}
+
+	err := ac.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: %s", err)
+	}
+
+	// Validate base properties
+	if ac.Bucket == "" {
+		return fmt.Errorf("AthenaConfiguration: missing bucket")
+	}
+
+	if ac.Region == "" {
+		return fmt.Errorf("AthenaConfiguration: missing region")
+	}
+
+	if ac.Database == "" {
+		return fmt.Errorf("AthenaConfiguration: missing database")
+	}
+
+	if ac.Table == "" {
+		return fmt.Errorf("AthenaConfiguration: missing table")
+	}
+
+	if ac.Account == "" {
+		return fmt.Errorf("AthenaConfiguration: missing account")
+	}
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AthenaConfiguration)
+	if !ok {
+		return false
+	}
+
+	if ac.Authorizer != nil {
+		if !ac.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ac.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if ac.Region != thatConfig.Region {
+		return false
+	}
+
+	if ac.Database != thatConfig.Database {
+		return false
+	}
+
+	if ac.Table != thatConfig.Table {
+		return false
+	}
+
+	if ac.Workgroup != thatConfig.Workgroup {
+		return false
+	}
+
+	if ac.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ac *AthenaConfiguration) Sanitize() config.Config {
+	return &AthenaConfiguration{
+		Bucket:     ac.Bucket,
+		Region:     ac.Region,
+		Database:   ac.Database,
+		Table:      ac.Table,
+		Workgroup:  ac.Workgroup,
+		Account:    ac.Account,
+		Authorizer: ac.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (ac *AthenaConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
+}
+
+func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Bucket = bucket
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Region = region
+
+	database, err := config.GetInterfaceValue[string](fmap, "database")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Database = database
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Table = table
+
+	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Workgroup = workgroup
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Account = account
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Authorizer = authorizer
+
+	return nil
+}
+
+// ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
+// legacy behavior
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+	if aai.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	if aai.ServiceKeyName == "" && aai.ServiceKeySecret == "" {
+		authorizer = &ServiceAccount{}
+	} else {
+		authorizer = &AccessKey{
+			ID:     aai.ServiceKeyName,
+			Secret: aai.ServiceKeySecret,
+		}
+	}
+
+	// Wrap Authorizer with AssumeRole if MasterPayerArn is set
+	if aai.MasterPayerARN != "" {
+		authorizer = &AssumeRole{
+			Authorizer: authorizer,
+			RoleARN:    aai.MasterPayerARN,
+		}
+	}
+
+	var config config.KeyedConfig
+	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
+		config = &AthenaConfiguration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Database:   aai.AthenaDatabase,
+			Table:      aai.AthenaTable,
+			Workgroup:  aai.AthenaWorkgroup,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	} else {
+		config = &S3Configuration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	}
+
+	return config
+}

+ 594 - 0
pkg/cloud/aws/athenaconfiguration_test.go

@@ -0,0 +1,594 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestAthenaConfiguration_Validate(t *testing.T) {
+	// base builds a fully-populated, valid configuration (ServiceAccount
+	// authorizer) and applies a single mutation, so each case below isolates
+	// exactly one validation rule.
+	base := func(mutate func(*AthenaConfiguration)) AthenaConfiguration {
+		c := AthenaConfiguration{
+			Bucket:     "bucket",
+			Region:     "region",
+			Database:   "database",
+			Table:      "table",
+			Workgroup:  "workgroup",
+			Account:    "account",
+			Authorizer: &ServiceAccount{},
+		}
+		mutate(&c)
+		return c
+	}
+
+	testCases := map[string]struct {
+		config   AthenaConfiguration
+		expected error
+	}{
+		"valid config access key": {
+			config: base(func(c *AthenaConfiguration) {
+				c.Authorizer = &AccessKey{ID: "id", Secret: "secret"}
+			}),
+			expected: nil,
+		},
+		"valid config service account": {
+			config:   base(func(c *AthenaConfiguration) {}),
+			expected: nil,
+		},
+		"access key invalid": {
+			config: base(func(c *AthenaConfiguration) {
+				c.Authorizer = &AccessKey{ID: "id"}
+			}),
+			expected: fmt.Errorf("AthenaConfiguration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config:   base(func(c *AthenaConfiguration) { c.Authorizer = nil }),
+			expected: fmt.Errorf("AthenaConfiguration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config:   base(func(c *AthenaConfiguration) { c.Bucket = "" }),
+			expected: fmt.Errorf("AthenaConfiguration: missing bucket"),
+		},
+		"missing region": {
+			config:   base(func(c *AthenaConfiguration) { c.Region = "" }),
+			expected: fmt.Errorf("AthenaConfiguration: missing region"),
+		},
+		"missing database": {
+			config:   base(func(c *AthenaConfiguration) { c.Database = "" }),
+			expected: fmt.Errorf("AthenaConfiguration: missing database"),
+		},
+		"missing table": {
+			config:   base(func(c *AthenaConfiguration) { c.Table = "" }),
+			expected: fmt.Errorf("AthenaConfiguration: missing table"),
+		},
+		// Workgroup is optional, so an empty value still validates.
+		"missing workgroup": {
+			config:   base(func(c *AthenaConfiguration) { c.Workgroup = "" }),
+			expected: nil,
+		},
+		"missing account": {
+			config:   base(func(c *AthenaConfiguration) { c.Account = "" }),
+			expected: fmt.Errorf("AthenaConfiguration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				// Fixed: the Expected placeholder previously lacked its closing quote.
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s'", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_Equals(t *testing.T) {
+	// baseConfig returns a fully-populated configuration with an AccessKey
+	// authorizer; withChange applies one mutation to a fresh copy, so each
+	// case below differs from the base in exactly one field.
+	baseConfig := func() AthenaConfiguration {
+		return AthenaConfiguration{
+			Bucket:    "bucket",
+			Region:    "region",
+			Database:  "database",
+			Table:     "table",
+			Workgroup: "workgroup",
+			Account:   "account",
+			Authorizer: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+		}
+	}
+	withChange := func(mutate func(*AthenaConfiguration)) *AthenaConfiguration {
+		c := baseConfig()
+		mutate(&c)
+		return &c
+	}
+
+	testCases := map[string]struct {
+		left     AthenaConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) {}),
+			expected: true,
+		},
+		"different Authorizer": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Authorizer = &ServiceAccount{} }),
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left:     *withChange(func(c *AthenaConfiguration) { c.Authorizer = nil }),
+			right:    withChange(func(c *AthenaConfiguration) { c.Authorizer = nil }),
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left:     *withChange(func(c *AthenaConfiguration) { c.Authorizer = nil }),
+			right:    withChange(func(c *AthenaConfiguration) { c.Authorizer = &ServiceAccount{} }),
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Authorizer = nil }),
+			expected: false,
+		},
+		"different bucket": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Bucket = "bucket2" }),
+			expected: false,
+		},
+		"different region": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Region = "region2" }),
+			expected: false,
+		},
+		"different database": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Database = "database2" }),
+			expected: false,
+		},
+		"different table": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Table = "table2" }),
+			expected: false,
+		},
+		"different workgroup": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Workgroup = "workgroup2" }),
+			expected: false,
+		},
+		"different account": {
+			left:     baseConfig(),
+			right:    withChange(func(c *AthenaConfiguration) { c.Account = "account2" }),
+			expected: false,
+		},
+		// A config of a different concrete type is never equal.
+		"different config": {
+			left: baseConfig(),
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				// Fixed: the Expected placeholder previously lacked its closing quote.
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t'", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_JSON(t *testing.T) {
+	// withAuth builds a fully-populated configuration carrying the given
+	// authorizer, so each case only states the part that varies.
+	withAuth := func(authorizer Authorizer) AthenaConfiguration {
+		return AthenaConfiguration{
+			Bucket:     "bucket",
+			Region:     "region",
+			Database:   "database",
+			Table:      "table",
+			Workgroup:  "workgroup",
+			Account:    "account",
+			Authorizer: authorizer,
+		}
+	}
+
+	testCases := map[string]struct {
+		config AthenaConfiguration
+	}{
+		"Empty Config": {
+			config: AthenaConfiguration{},
+		},
+		"AccessKey": {
+			config: withAuth(&AccessKey{ID: "id", Secret: "secret"}),
+		},
+		"ServiceAccount": {
+			config: withAuth(&ServiceAccount{}),
+		},
+		"AssumeRole with AccessKey": {
+			config: withAuth(&AssumeRole{
+				Authorizer: &AccessKey{ID: "id", Secret: "secret"},
+				RoleARN:    "12345",
+			}),
+		},
+		"AssumeRole with ServiceAccount": {
+			config: withAuth(&AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "12345",
+			}),
+		},
+		"RoleArnNil": {
+			config: withAuth(&AssumeRole{
+				Authorizer: nil,
+				RoleARN:    "12345",
+			}),
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: withAuth(&AssumeRole{
+				Authorizer: &AssumeRole{
+					RoleARN:    "12345",
+					Authorizer: &ServiceAccount{},
+				},
+				RoleARN: "12345",
+			}),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Round-trip each configuration through JSON and require semantic
+			// equality between the original and the unmarshalled copy.
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &AthenaConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 521 - 0
pkg/cloud/aws/athenaintegration.go

@@ -0,0 +1,521 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+// LabelColumnPrefix is the prefix on CUR columns generated from user-defined
+// resource tags; it is stripped to recover the label name.
+const LabelColumnPrefix = "resource_tags_user_"
+
+// AthenaDateLayout is the default AWS date format
+const AthenaDateLayout = "2006-01-02 15:04:05.000"
+
+// Cost Columns
+const AthenaPricingColumn = "line_item_unblended_cost"
+
+// Amortized Cost Columns (Reserved Instance and Savings Plan effective cost)
+const AthenaRIPricingColumn = "reservation_effective_cost"
+const AthenaSPPricingColumn = "savings_plan_savings_plan_effective_cost"
+
+// Net Cost Columns
+const AthenaNetPricingColumn = "line_item_net_unblended_cost"
+
+// Amortized Net Cost Columns
+const AthenaNetRIPricingColumn = "reservation_net_effective_cost"
+const AthenaNetSPPricingColumn = "savings_plan_net_savings_plan_effective_cost"
+
+// Category Columns: boolean SQL expressions classifying a line item by its
+// resource id prefix or usage type.
+const AthenaIsNode = "SUBSTRING(line_item_resource_id,1,2) = 'i-'"
+const AthenaIsVol = "SUBSTRING(line_item_resource_id, 1, 4) = 'vol-'"
+const AthenaIsNetwork = "line_item_usage_type LIKE '%Bytes'"
+
+// AthenaDateTruncColumn aggregates line items from the hourly level to daily. "line_item_usage_start_date" is used
+// because at all time values 00:00-23:00 it will truncate to the correct date.
+const AthenaDateColumn = "line_item_usage_start_date"
+const AthenaDateTruncColumn = "DATE_TRUNC('day'," + AthenaDateColumn + ") as usage_date"
+
+// AthenaWhereDateFmt bounds the query window; AthenaWhereUsage restricts rows
+// to the listed usage/discount line item types.
+const AthenaWhereDateFmt = `line_item_usage_start_date >= date '%s' AND line_item_usage_start_date < date '%s'`
+const AthenaWhereUsage = "(line_item_line_item_type = 'Usage' OR line_item_line_item_type = 'DiscountedUsage' OR line_item_line_item_type = 'SavingsPlanCoveredUsage' OR line_item_line_item_type = 'EdpDiscount' OR line_item_line_item_type = 'PrivateRateDiscount')"
+
+// AthenaQueryIndexes is a struct for holding the context of a query: the query
+// text, the mapping from select expression to its index in each result row,
+// the user-tag columns included, and the full aliased select expression for
+// each cost metric so its value can be located by name when parsing rows.
+type AthenaQueryIndexes struct {
+	Query                     string
+	ColumnIndexes             map[string]int
+	TagColumns                []string
+	ListCostColumn            string
+	ListK8sCostColumn         string
+	NetCostColumn             string
+	NetK8sCostColumn          string
+	AmortizedNetCostColumn    string
+	AmortizedNetK8sCostColumn string
+	AmortizedCostColumn       string
+	AmortizedK8sCostColumn    string
+	InvoicedCostColumn        string
+	InvoicedK8sCostColumn     string
+}
+
+// AthenaIntegration builds CloudCost data from an AWS CUR queried through
+// Athena; it embeds AthenaQuerier for configuration and query execution.
+type AthenaIntegration struct {
+	AthenaQuerier
+}
+
+// GetCloudCost queries Athena for CUR data in [start, end) and builds a new
+// CloudCostSetRange, bucketed by day, holding list, net, amortized,
+// amortized-net, and invoiced costs plus the kubernetes share of each.
+func (ai *AthenaIntegration) GetCloudCost(start, end time.Time) (*kubecost.CloudCostSetRange, error) {
+	log.Infof("AthenaIntegration[%s]: StoreCloudCost: %s", ai.Key(), kubecost.NewWindow(&start, &end).String())
+	// Query for all column names so optional columns (net pricing, RI/SP
+	// pricing, user-tag columns) can be detected before building the query.
+	allColumns, err := ai.GetColumns()
+	if err != nil {
+		return nil, fmt.Errorf("GetCloudCost: error getting Athena columns: %w", err)
+	}
+
+	// List known, hard-coded columns to query
+	groupByColumns := []string{
+		AthenaDateTruncColumn,
+		"line_item_resource_id",
+		"bill_payer_account_id",
+		"line_item_usage_account_id",
+		"line_item_product_code",
+		"line_item_usage_type",
+		AthenaIsNode,
+		AthenaIsVol,
+		AthenaIsNetwork,
+	}
+
+	// Create query indices
+	aqi := AthenaQueryIndexes{}
+
+	// Determine which columns are user-defined tags and add those to the list
+	// of columns to query.
+	for column := range allColumns {
+		if strings.HasPrefix(column, LabelColumnPrefix) {
+			groupByColumns = append(groupByColumns, column)
+			aqi.TagColumns = append(aqi.TagColumns, column)
+		}
+	}
+	var selectColumns []string
+
+	// Duplicate GroupBy Columns into select columns
+	selectColumns = append(selectColumns, groupByColumns...)
+
+	// Clean up group by columns: strip " as ..." aliases so the GROUP BY list
+	// references the raw expressions.
+	ai.RemoveColumnAliases(groupByColumns)
+
+	// Build list cost column and add it to the select columns
+	listCostColumn := fmt.Sprintf("SUM(%s) as list_cost", ai.GetListCostColumn())
+	selectColumns = append(selectColumns, listCostColumn)
+	aqi.ListCostColumn = listCostColumn
+	listK8sCostColumn := fmt.Sprintf(
+		"SUM(%s) as list_kubernetes_cost",
+		ai.GetKubernetesCostColumn(allColumns, ai.GetListCostColumn()),
+	)
+	selectColumns = append(selectColumns, listK8sCostColumn)
+	aqi.ListK8sCostColumn = listK8sCostColumn
+
+	// Build net cost column and add it to select columns
+	netCostColumn := fmt.Sprintf("SUM(%s) as net_cost", ai.GetNetCostColumn(allColumns))
+	selectColumns = append(selectColumns, netCostColumn)
+	aqi.NetCostColumn = netCostColumn
+	netK8sCostColumn := fmt.Sprintf(
+		"SUM(%s) as net_kubernetes_cost",
+		ai.GetKubernetesCostColumn(allColumns, ai.GetNetCostColumn(allColumns)),
+	)
+	selectColumns = append(selectColumns, netK8sCostColumn)
+	aqi.NetK8sCostColumn = netK8sCostColumn
+
+	// Build amortized net cost column and add it to select columns
+	amortizedNetCostColumn := fmt.Sprintf("SUM(%s) as amortized_net_cost", ai.GetAmortizedNetCostColumn(allColumns))
+	selectColumns = append(selectColumns, amortizedNetCostColumn)
+	aqi.AmortizedNetCostColumn = amortizedNetCostColumn
+	amortizedNetK8sCostColumn := fmt.Sprintf(
+		"SUM(%s) as amortized_net_kubernetes_cost",
+		// Fixed: previously used GetNetCostColumn here, so the kubernetes
+		// share was computed against a different base than amortized_net_cost.
+		ai.GetKubernetesCostColumn(allColumns, ai.GetAmortizedNetCostColumn(allColumns)),
+	)
+	selectColumns = append(selectColumns, amortizedNetK8sCostColumn)
+	aqi.AmortizedNetK8sCostColumn = amortizedNetK8sCostColumn
+
+	// Build Amortized cost column and add it to select columns
+	amortizedCostColumn := fmt.Sprintf("SUM(%s) as amortized_cost", ai.GetAmortizedCostCase(allColumns))
+	selectColumns = append(selectColumns, amortizedCostColumn)
+	aqi.AmortizedCostColumn = amortizedCostColumn
+	amortizedK8sCostColumn := fmt.Sprintf(
+		"SUM(%s) as amortized_kubernetes_cost",
+		ai.GetKubernetesCostColumn(allColumns, ai.GetAmortizedCostCase(allColumns)),
+	)
+	selectColumns = append(selectColumns, amortizedK8sCostColumn)
+	aqi.AmortizedK8sCostColumn = amortizedK8sCostColumn
+
+	// We are using Net Cost for Invoiced Cost for now as it is the closest
+	// approximation. The duplicated select expressions collapse to a single
+	// key in ColumnIndexes below, which is benign since the values match.
+	invoicedCostColumn := netCostColumn
+	selectColumns = append(selectColumns, invoicedCostColumn)
+	aqi.InvoicedCostColumn = invoicedCostColumn
+	invoicedK8sCostColumn := netK8sCostColumn
+	selectColumns = append(selectColumns, invoicedK8sCostColumn)
+	aqi.InvoicedK8sCostColumn = invoicedK8sCostColumn
+
+	// Build map of query columns to use for parsing query
+	aqi.ColumnIndexes = map[string]int{}
+	for i, column := range selectColumns {
+		aqi.ColumnIndexes[column] = i
+	}
+	athenaWhereDate := fmt.Sprintf(AthenaWhereDateFmt, start.Format("2006-01-02"), end.Format("2006-01-02"))
+
+	// Query for all line items with a resource_id or from AWS Marketplace, which did not end before
+	// the range or start after it. This captures all costs with any amount of
+	// overlap with the range, for which we will only extract the relevant costs
+	whereConjuncts := []string{
+		athenaWhereDate,
+		AthenaWhereUsage,
+	}
+	columnStr := strings.Join(selectColumns, ", ")
+	whereClause := strings.Join(whereConjuncts, " AND ")
+	groupByStr := strings.Join(groupByColumns, ", ")
+	queryStr := `
+		SELECT %s
+		FROM %s
+		WHERE %s
+		GROUP BY %s
+	`
+	aqi.Query = fmt.Sprintf(queryStr, columnStr, ai.Table, whereClause, groupByStr)
+
+	CCSR, err := kubecost.NewCloudCostSetRange(start, end, timeutil.Day, ai.Key())
+	if err != nil {
+		return nil, err
+	}
+
+	// Generate row handling function; row-level parse failures are logged and
+	// skipped rather than aborting the whole query.
+	rowHandler := func(row types.Row) {
+		err2 := ai.RowToCloudCost(row, aqi, CCSR)
+		if err2 != nil {
+			log.Errorf("AthenaIntegration: queryCloudCostCompute: error while parsing row: %s", err2.Error())
+		}
+	}
+	log.Debugf("AthenaIntegration[%s]: queryCloudCostCompute: querying: %s", ai.Key(), aqi.Query)
+	// Query CUR data and fill out CCSR
+	err = ai.Query(context.TODO(), aqi.Query, GetAthenaQueryFunc(rowHandler))
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: May not be needed anymore?
+	for _, ccs := range CCSR.CloudCostSets {
+		log.Debugf("AthenaIntegration[%s]: queryCloudCostCompute: writing compute items for window %s: %d", ai.Key(), ccs.Window, len(ccs.CloudCosts))
+		ai.ConnectionStatus = ai.GetConnectionStatusFromResult(ccs, ai.ConnectionStatus)
+	}
+	return CCSR, nil
+}
+
+func (ai *AthenaIntegration) GetListCostColumn() string {
+	var listCostBuilder strings.Builder
+	listCostBuilder.WriteString("CASE line_item_line_item_type")
+	listCostBuilder.WriteString(" WHEN 'EdpDiscount' THEN 0")
+	listCostBuilder.WriteString(" WHEN 'PrivateRateDiscount' THEN 0")
+	listCostBuilder.WriteString(" ELSE ")
+	listCostBuilder.WriteString(AthenaPricingColumn)
+	listCostBuilder.WriteString(" END")
+	return listCostBuilder.String()
+}
+
+// GetNetCostColumn returns the net pricing column when the CUR exposes one,
+// falling back to the unblended cost column otherwise.
+func (ai *AthenaIntegration) GetNetCostColumn(allColumns map[string]bool) string {
+	if allColumns[AthenaNetPricingColumn] {
+		return AthenaNetPricingColumn
+	}
+	return AthenaPricingColumn
+}
+
+// GetAmortizedNetCostColumn returns the amortized-net cost CASE when net
+// pricing columns exist, otherwise the plain amortized cost CASE.
+func (ai *AthenaIntegration) GetAmortizedNetCostColumn(allColumns map[string]bool) string {
+	if allColumns[AthenaNetPricingColumn] {
+		return ai.GetAmortizedNetCostCase(allColumns)
+	}
+	return ai.GetAmortizedCostCase(allColumns)
+}
+
+// GetIsKubernetesColumn generates a boolean column which determines whether a
+// line item is from kubernetes; it delegates to GetIsKubernetesCase.
+func (ai *AthenaIntegration) GetIsKubernetesColumn(allColumns map[string]bool) string {
+	isK8sCase := ai.GetIsKubernetesCase(allColumns)
+	return isK8sCase
+}
+
+// GetKubernetesCostColumn generates a double-typed expression: the given
+// pricing expression when the line item is kubernetes, and 0 otherwise
+// (the boolean CASE is cast to double and used as a multiplier).
+func (ai *AthenaIntegration) GetKubernetesCostColumn(allColumns map[string]bool, pricingCase string) string {
+	isK8s := ai.GetIsKubernetesCase(allColumns)
+	return fmt.Sprintf("CAST((%s) as double) * (%s)", isK8s, pricingCase)
+}
+
+// RemoveColumnAliases strips " as alias" suffixes in place, leaving only the
+// underlying expression; columns without an alias are left untouched.
+func (ai *AthenaIntegration) RemoveColumnAliases(columns []string) {
+	for i, column := range columns {
+		// strings.Cut avoids the slice allocation of Contains+Split and makes
+		// the keep-everything-before-the-first-" as " intent explicit.
+		if expr, _, found := strings.Cut(column, " as "); found {
+			columns[i] = expr
+		}
+	}
+}
+
+func (ai *AthenaIntegration) ConvertLabelToAWSTag(label string) string {
+	// if the label already has the column prefix assume that it is in the correct format
+	if strings.HasPrefix(label, LabelColumnPrefix) {
+		return label
+	}
+	// replace characters with underscore
+	tag := label
+	tag = strings.ReplaceAll(tag, ".", "_")
+	tag = strings.ReplaceAll(tag, "/", "_")
+	tag = strings.ReplaceAll(tag, ":", "_")
+	tag = strings.ReplaceAll(tag, "-", "_")
+	// add prefix and return
+	return LabelColumnPrefix + tag
+}
+
+func (ai *AthenaIntegration) GetAmortizedCostCase(allColumns map[string]bool) string {
+	// Use unblended costs if Reserved Instances/Savings Plans aren't in use
+	if !allColumns[AthenaRIPricingColumn] && !allColumns[AthenaSPPricingColumn] {
+		return AthenaPricingColumn
+	}
+
+	var costBuilder strings.Builder
+	costBuilder.WriteString("CASE line_item_line_item_type")
+	if allColumns[AthenaRIPricingColumn] {
+		costBuilder.WriteString(" WHEN 'DiscountedUsage' THEN ")
+		costBuilder.WriteString(AthenaRIPricingColumn)
+	}
+
+	if allColumns[AthenaSPPricingColumn] {
+		costBuilder.WriteString(" WHEN 'SavingsPlanCoveredUsage' THEN ")
+		costBuilder.WriteString(AthenaSPPricingColumn)
+	}
+
+	costBuilder.WriteString(" ELSE ")
+	costBuilder.WriteString(AthenaPricingColumn)
+	costBuilder.WriteString(" END")
+	return costBuilder.String()
+}
+
+func (ai *AthenaIntegration) GetAmortizedNetCostCase(allColumns map[string]bool) string {
+	// Use net unblended costs if Reserved Instances/Savings Plans aren't in use
+	if !allColumns[AthenaNetRIPricingColumn] && !allColumns[AthenaNetSPPricingColumn] {
+		return AthenaNetPricingColumn
+	}
+
+	var costBuilder strings.Builder
+	costBuilder.WriteString("CASE line_item_line_item_type")
+	if allColumns[AthenaNetRIPricingColumn] {
+		costBuilder.WriteString(" WHEN 'DiscountedUsage' THEN ")
+		costBuilder.WriteString(AthenaNetRIPricingColumn)
+	}
+
+	if allColumns[AthenaNetSPPricingColumn] {
+		costBuilder.WriteString(" WHEN 'SavingsPlanCoveredUsage' THEN ")
+		costBuilder.WriteString(AthenaNetSPPricingColumn)
+	}
+
+	costBuilder.WriteString(" ELSE ")
+	costBuilder.WriteString(AthenaNetPricingColumn)
+	costBuilder.WriteString(" END")
+	return costBuilder.String()
+}
+
+// GetIsKubernetesCase builds a "CASE" clause which attempts to determine if a line item is kubernetes based on labels
+// that may be available in the CUR
+func (ai *AthenaIntegration) GetIsKubernetesCase(allColumns map[string]bool) string {
+	// k8sColumns is a list of columns where the presence of a value indicates that a resource is part of a kubernetes cluster
+	k8sColumns := []string{
+		"resource_tags_aws_eks_cluster_name",
+		"resource_tags_user_eks_cluster_name",
+		"resource_tags_user_alpha_eksctl_io_cluster_name",
+		"resource_tags_user_kubernetes_io_service_name",
+		"resource_tags_user_kubernetes_io_created_for_pvc_name",
+		"resource_tags_user_kubernetes_io_created_for_pv_name",
+	}
+	var k8sBuilder strings.Builder
+
+	k8sBuilder.WriteString("CASE ")
+	// EKS is always kubernetes
+	k8sBuilder.WriteString("WHEN line_item_product_code = 'AmazonEKS' THEN TRUE ")
+	for _, k8sColumn := range k8sColumns {
+		if _, ok := allColumns[k8sColumn]; ok {
+			k8sBuilder.WriteString("WHEN ")
+			k8sBuilder.WriteString(k8sColumn)
+			k8sBuilder.WriteString(" <> '' THEN TRUE ")
+		}
+	}
+
+	k8sBuilder.WriteString("ELSE FALSE END")
+	return k8sBuilder.String()
+}
+
+// RowToCloudCost parses a single Athena result row into a CloudCost and loads
+// it into the given CloudCostSetRange. Values are located by name via
+// aqi.ColumnIndexes, which was built from the same select list as the query.
+func (ai *AthenaIntegration) RowToCloudCost(row types.Row, aqi AthenaQueryIndexes, ccsr *kubecost.CloudCostSetRange) error {
+	if len(row.Data) < len(aqi.ColumnIndexes) {
+		return fmt.Errorf("rowToCloudCost: row with fewer than %d columns (has only %d)", len(aqi.ColumnIndexes), len(row.Data))
+	}
+
+	// Iterate through the slice of tag columns, assigning
+	// values to the column names, minus the tag prefix.
+	// Fixed: removed the unused labelValues accumulator that was built here
+	// but never read.
+	labels := kubecost.CloudCostLabels{}
+	for _, tagColumnName := range aqi.TagColumns {
+		labelName := strings.TrimPrefix(tagColumnName, LabelColumnPrefix)
+		value := GetAthenaRowValue(row, aqi.ColumnIndexes, tagColumnName)
+		if value != "" {
+			labels[labelName] = value
+		}
+	}
+
+	invoiceEntityID := GetAthenaRowValue(row, aqi.ColumnIndexes, "bill_payer_account_id")
+	accountID := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_usage_account_id")
+	startStr := GetAthenaRowValue(row, aqi.ColumnIndexes, AthenaDateTruncColumn)
+	providerID := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_resource_id")
+	productCode := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_product_code")
+	usageType := GetAthenaRowValue(row, aqi.ColumnIndexes, "line_item_usage_type")
+	// The category flags come back as boolean strings; parse failures fall
+	// back to false.
+	isNode, _ := strconv.ParseBool(GetAthenaRowValue(row, aqi.ColumnIndexes, AthenaIsNode))
+	isVol, _ := strconv.ParseBool(GetAthenaRowValue(row, aqi.ColumnIndexes, AthenaIsVol))
+	isNetwork, _ := strconv.ParseBool(GetAthenaRowValue(row, aqi.ColumnIndexes, AthenaIsNetwork))
+
+	listCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.ListCostColumn)
+	if err != nil {
+		return err
+	}
+
+	listK8sCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.ListK8sCostColumn)
+	if err != nil {
+		return err
+	}
+
+	netCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.NetCostColumn)
+	if err != nil {
+		return err
+	}
+
+	netK8sCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.NetK8sCostColumn)
+	if err != nil {
+		return err
+	}
+
+	amortizedNetCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.AmortizedNetCostColumn)
+	if err != nil {
+		return err
+	}
+
+	amortizedNetK8sCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.AmortizedNetK8sCostColumn)
+	if err != nil {
+		return err
+	}
+	amortizedCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.AmortizedCostColumn)
+	if err != nil {
+		return err
+	}
+
+	amortizedK8sCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.AmortizedK8sCostColumn)
+	if err != nil {
+		return err
+	}
+
+	invoicedCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.InvoicedCostColumn)
+	if err != nil {
+		return err
+	}
+
+	invoicedK8sCost, err := GetAthenaRowValueFloat(row, aqi.ColumnIndexes, aqi.InvoicedK8sCostColumn)
+	if err != nil {
+		return err
+	}
+
+	// Identify resource category in the CUR
+	category := SelectAWSCategory(isNode, isVol, isNetwork, providerID, productCode)
+
+	// Retrieve final stanza of product code for ProviderID
+	if productCode == "AWSELB" || productCode == "AmazonFSx" {
+		providerID = ParseARN(providerID)
+	}
+
+	// EKS compute line items are split into CPU and RAM components so they can
+	// be matched separately; the usage type distinguishes which this row is.
+	if productCode == "AmazonEKS" && category == kubecost.ComputeCategory {
+		if strings.Contains(usageType, "CPU") {
+			providerID = fmt.Sprintf("%s/CPU", providerID)
+		} else if strings.Contains(usageType, "GB") {
+			providerID = fmt.Sprintf("%s/RAM", providerID)
+		}
+	}
+
+	properties := kubecost.CloudCostProperties{
+		ProviderID:      providerID,
+		Provider:        kubecost.AWSProvider,
+		AccountID:       accountID,
+		InvoiceEntityID: invoiceEntityID,
+		Service:         productCode,
+		Category:        category,
+		Labels:          labels,
+	}
+
+	// Rows are day-truncated by the query, so each covers a one-day window.
+	start, err := time.Parse(AthenaDateLayout, startStr)
+	if err != nil {
+		return fmt.Errorf("unable to parse %s: '%s'", AthenaDateTruncColumn, err.Error())
+	}
+	end := start.AddDate(0, 0, 1)
+
+	cc := &kubecost.CloudCost{
+		Properties: &properties,
+		Window:     kubecost.NewWindow(&start, &end),
+		ListCost: kubecost.CostMetric{
+			Cost:              listCost,
+			KubernetesPercent: ai.CalculateK8sPercent(listCost, listK8sCost),
+		},
+		NetCost: kubecost.CostMetric{
+			Cost:              netCost,
+			KubernetesPercent: ai.CalculateK8sPercent(netCost, netK8sCost),
+		},
+		AmortizedNetCost: kubecost.CostMetric{
+			Cost:              amortizedNetCost,
+			KubernetesPercent: ai.CalculateK8sPercent(amortizedNetCost, amortizedNetK8sCost),
+		},
+		AmortizedCost: kubecost.CostMetric{
+			Cost:              amortizedCost,
+			KubernetesPercent: ai.CalculateK8sPercent(amortizedCost, amortizedK8sCost),
+		},
+		InvoicedCost: kubecost.CostMetric{
+			Cost:              invoicedCost,
+			KubernetesPercent: ai.CalculateK8sPercent(invoicedCost, invoicedK8sCost),
+		},
+	}
+
+	ccsr.LoadCloudCost(cc)
+	return nil
+}
+
+// CalculateK8sPercent returns the fraction of cost attributable to kubernetes.
+// Either operand being zero yields 0, which also avoids division by zero.
+func (ai *AthenaIntegration) CalculateK8sPercent(cost, k8sCost float64) float64 {
+	if k8sCost == 0.0 || cost == 0.0 {
+		return 0.0
+	}
+	return k8sCost / cost
+}
+
+// GetConnectionStatusFromResult reports SuccessfulConnection when the result
+// has data; an empty result downgrades to MissingData unless the connection
+// was already marked successful.
+func (ai *AthenaIntegration) GetConnectionStatusFromResult(result cloud.EmptyChecker, currentStatus cloud.ConnectionStatus) cloud.ConnectionStatus {
+	if !result.IsEmpty() {
+		return cloud.SuccessfulConnection
+	}
+	if currentStatus == cloud.SuccessfulConnection {
+		return cloud.SuccessfulConnection
+	}
+	return cloud.MissingData
+}
+
+// GetConnectionStatus returns the current connection status string, lazily
+// defaulting to InitialStatus when unset (e.g. when the integration is inactive).
+func (ai *AthenaIntegration) GetConnectionStatus() string {
+	status := ai.ConnectionStatus.String()
+	if status == "" {
+		ai.ConnectionStatus = cloud.InitialStatus
+		status = ai.ConnectionStatus.String()
+	}
+	return status
+}

+ 65 - 0
pkg/cloud/aws/athenaintegration_test.go

@@ -0,0 +1,65 @@
+package aws
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+)
+
+// TestGetCloudCost is an integration test for AthenaIntegration.GetCloudCost.
+// It is skipped unless ATHENA_CONFIGURATION points at a JSON-serialized
+// AthenaConfiguration.
+// BUG FIX: the function was named "GetCloudCost_Test", which does not match
+// the required "TestXxx" prefix, so `go test` silently never ran it.
+func TestGetCloudCost(t *testing.T) {
+	athenaConfigPath := os.Getenv("ATHENA_CONFIGURATION")
+	if athenaConfigPath == "" {
+		t.Skip("skipping integration test, set environment variable ATHENA_CONFIGURATION")
+	}
+	athenaConfigBin, err := os.ReadFile(athenaConfigPath)
+	if err != nil {
+		t.Fatalf("failed to read config file: %s", err.Error())
+	}
+	var athenaConfig AthenaConfiguration
+	err = json.Unmarshal(athenaConfigBin, &athenaConfig)
+	if err != nil {
+		t.Fatalf("failed to unmarshal config from JSON: %s", err.Error())
+	}
+	testCases := map[string]struct {
+		integration *AthenaIntegration
+		start       time.Time
+		end         time.Time
+		expected    bool
+	}{
+		// No CUR data is expected within 2 days of now
+		"too_recent_window": {
+			integration: &AthenaIntegration{
+				AthenaQuerier: AthenaQuerier{
+					AthenaConfiguration: athenaConfig,
+				},
+			},
+			end:      time.Now(),
+			start:    time.Now().Add(-timeutil.Day),
+			expected: true,
+		},
+		// CUR data should be available
+		"last week window": {
+			integration: &AthenaIntegration{
+				AthenaQuerier: AthenaQuerier{
+					AthenaConfiguration: athenaConfig,
+				},
+			},
+			end:      time.Now().Add(-7 * timeutil.Day),
+			start:    time.Now().Add(-8 * timeutil.Day),
+			expected: false,
+		},
+	}
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual, err := testCase.integration.GetCloudCost(testCase.start, testCase.end)
+			if err != nil {
+				t.Errorf("Other error during testing %s", err)
+			} else if actual.IsEmpty() != testCase.expected {
+				t.Errorf("Incorrect result, actual emptiness: %t, expected: %t", actual.IsEmpty(), testCase.expected)
+			}
+		})
+	}
+}

+ 259 - 0
pkg/cloud/aws/athenaquerier.go

@@ -0,0 +1,259 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
+// AthenaQuerier executes queries against the configured AWS Athena
+// database/table and records the resulting cloud connection status.
+type AthenaQuerier struct {
+	AthenaConfiguration
+	ConnectionStatus cloud.ConnectionStatus
+}
+
+// Equals reports whether config is an *AthenaQuerier with an identical
+// AthenaConfiguration; ConnectionStatus is intentionally not compared.
+func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AthenaQuerier)
+	if !ok {
+		return false
+	}
+
+	return aq.AthenaConfiguration.Equals(&thatConfig.AthenaConfiguration)
+}
+
+// GetColumns returns the set of column names present in the configured
+// Athena table, as a map from column name to true. An empty (non-nil) map
+// with a nil error means the query succeeded but returned no columns.
+func (aq *AthenaQuerier) GetColumns() (map[string]bool, error) {
+	columnSet := map[string]bool{}
+
+	// This Query is supported by Athena tables and views
+	q := `SELECT column_name FROM information_schema.columns WHERE table_schema = '%s' AND table_name = '%s'`
+	query := fmt.Sprintf(q, aq.Database, aq.Table)
+
+	athenaErr := aq.Query(context.TODO(), query, GetAthenaQueryFunc(func(row types.Row) {
+		// Each result row has a single field: the column name.
+		columnSet[*row.Data[0].VarCharValue] = true
+	}))
+
+	if athenaErr != nil {
+		return columnSet, athenaErr
+	}
+
+	if len(columnSet) == 0 {
+		log.Infof("No columns retrieved from Athena")
+	}
+
+	return columnSet, nil
+}
+
+// Query validates the configuration, then runs the given Athena query,
+// invoking fn for each page of results. The ConnectionStatus is updated to
+// InvalidConfiguration or FailedConnection on the respective failure modes.
+func (aq *AthenaQuerier) Query(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+	err := aq.Validate()
+	if err != nil {
+		aq.ConnectionStatus = cloud.InvalidConfiguration
+		return err
+	}
+
+	log.Debugf("AthenaQuerier[%s]: Performing Query: %s", aq.Key(), query)
+	err = aq.queryAthenaPaginated(ctx, query, fn)
+	if err != nil {
+		aq.ConnectionStatus = cloud.FailedConnection
+		return err
+	}
+	return nil
+}
+
+// GetAthenaClient builds an Athena SDK client from the configured Authorizer
+// and region.
+func (aq *AthenaQuerier) GetAthenaClient() (*athena.Client, error) {
+	cfg, err := aq.Authorizer.CreateAWSConfig(aq.Region)
+	if err != nil {
+		return nil, err
+	}
+	cli := athena.NewFromConfig(cfg)
+	return cli, nil
+}
+
+// queryAthenaPaginated executes an Athena query and processes each page of
+// results with fn. An error from this method indicates a FAILED_CONNECTION
+// CloudConnectionStatus and should immediately stop the caller to maintain
+// the correct CloudConnectionStatus.
+func (aq *AthenaQuerier) queryAthenaPaginated(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+	queryExecutionCtx := &types.QueryExecutionContext{
+		Database: aws.String(aq.Database),
+	}
+
+	resultConfiguration := &types.ResultConfiguration{
+		OutputLocation: aws.String(aq.Bucket),
+	}
+	startQueryExecutionInput := &athena.StartQueryExecutionInput{
+		QueryString:           aws.String(query),
+		QueryExecutionContext: queryExecutionCtx,
+		ResultConfiguration:   resultConfiguration,
+	}
+
+	// Only set if there is a value, the default input is nil
+	if aq.Workgroup != "" {
+		startQueryExecutionInput.WorkGroup = aws.String(aq.Workgroup)
+	}
+
+	// Create Athena Client
+	cli, err := aq.GetAthenaClient()
+	if err != nil {
+		// BUG FIX: this error was previously ignored, which caused a nil
+		// pointer dereference on cli below whenever client creation failed.
+		return fmt.Errorf("QueryAthenaPaginated: client creation error: %s", err.Error())
+	}
+
+	// Query Athena
+	startQueryExecutionOutput, err := cli.StartQueryExecution(ctx, startQueryExecutionInput)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: start query error: %s", err.Error())
+	}
+	err = waitForQueryToComplete(ctx, cli, startQueryExecutionOutput.QueryExecutionId)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: query execution error: %s", err.Error())
+	}
+	queryResultsInput := &athena.GetQueryResultsInput{
+		QueryExecutionId: startQueryExecutionOutput.QueryExecutionId,
+	}
+	getQueryResultsPaginator := athena.NewGetQueryResultsPaginator(cli, queryResultsInput)
+	for getQueryResultsPaginator.HasMorePages() {
+		pg, err := getQueryResultsPaginator.NextPage(ctx)
+		if err != nil {
+			// BUG FIX: the previous "log and continue" could loop forever,
+			// because HasMorePages may never become false after a failed
+			// NextPage call. Surface the error to the caller instead.
+			return fmt.Errorf("QueryAthenaPaginated: NextPage error: %s", err.Error())
+		}
+		fn(pg)
+	}
+	return nil
+}
+
+// waitForQueryToComplete polls the Athena query execution state every two
+// seconds until it reaches SUCCEEDED, returning an error for any terminal
+// non-success state (e.g. FAILED or CANCELLED).
+func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
+	inp := &athena.GetQueryExecutionInput{
+		QueryExecutionId: queryExecutionID,
+	}
+	isQueryStillRunning := true
+	for isQueryStillRunning {
+		qe, err := client.GetQueryExecution(ctx, inp)
+		if err != nil {
+			return err
+		}
+		if qe.QueryExecution.Status.State == "SUCCEEDED" {
+			isQueryStillRunning = false
+			continue
+		}
+		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
+			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
+		}
+		// IMPROVEMENT: respect context cancellation while waiting; the
+		// previous bare time.Sleep could block a cancelled caller for the
+		// full polling interval and keep polling indefinitely.
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(2 * time.Second):
+		}
+	}
+	return nil
+}
+
+// GetAthenaRowValue retrieves a value from an Athena row by column name,
+// using stringutil.Bank() to avoid duplicate allocation of identical strings.
+// It returns "" when the column is not in the index map or the value is nil.
+func GetAthenaRowValue(row types.Row, queryColumnIndexes map[string]int, columnName string) string {
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return ""
+	}
+	return stringutil.Bank(*valuePointer)
+}
+
+// GetAthenaRowValueFloat retrieves a value from an Athena row by column name
+// and parses it as a float64, returning an error for a missing column, a nil
+// field, or an unparseable value.
+func GetAthenaRowValueFloat(row types.Row, queryColumnIndexes map[string]int, columnName string) (float64, error) {
+
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: missing column index: %s", columnName)
+	}
+
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: nil field")
+	}
+
+	cost, err := strconv.ParseFloat(*valuePointer, 64)
+	if err != nil {
+		return cost, fmt.Errorf("getAthenaRowValueFloat: failed to parse %s: '%s': %s", columnName, *valuePointer, err.Error())
+	}
+	return cost, nil
+}
+
+// SelectAWSCategory maps an AWS line item to a kubecost cost category.
+// Priority order: network usage flag, then node/volume provider-ID flags
+// (mutually exclusive), then a default derived from the AWS service name.
+func SelectAWSCategory(isNode, isVol, isNetwork bool, providerID, service string) string {
+	// Network has the highest priority and is based on the usage type ending in "Bytes"
+	if isNetwork {
+		return kubecost.NetworkCategory
+	}
+	// The node and volume conditions are mutually exclusive.
+	// Provider ID has prefix "i-"
+	if isNode {
+		return kubecost.ComputeCategory
+	}
+	// Provider ID has prefix "vol-"
+	if isVol {
+		return kubecost.StorageCategory
+	}
+
+	// Default categories based on service
+	switch strings.ToUpper(service) {
+	case "AWSELB", "AWSGLUE", "AMAZONROUTE53":
+		return kubecost.NetworkCategory
+	case "AMAZONEC2", "AWSLAMBDA", "AMAZONELASTICACHE":
+		return kubecost.ComputeCategory
+	case "AMAZONEKS":
+		// Check if line item is a fargate pod
+		if strings.Contains(providerID, ":pod/") {
+			return kubecost.ComputeCategory
+		}
+		return kubecost.ManagementCategory
+	case "AMAZONS3", "AMAZONATHENA", "AMAZONRDS", "AMAZONDYNAMODB", "AWSSECRETSMANAGER", "AMAZONFSX":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}
+
+var parseARNRx = regexp.MustCompile("^.+\\/(.+)?") // Capture "a406f7761142e4ef58a8f2ba478d2db2" from "arn:aws:elasticloadbalancing:us-east-1:297945954695:loadbalancer/a406f7761142e4ef58a8f2ba478d2db2"
+
+// ParseARN extracts the resource identifier after the last "/" in an ARN
+// (e.g. the load balancer hash from an elasticloadbalancing ARN). If the
+// pattern does not match, the input is returned unchanged and a deduplicated
+// info message is logged for non-empty inputs.
+func ParseARN(id string) string {
+	match := parseARNRx.FindStringSubmatch(id)
+	if len(match) == 0 {
+		if id != "" {
+			log.DedupedInfof(10, "aws.parseARN: failed to parse %s", id)
+		}
+		return id
+	}
+	return match[len(match)-1]
+}
+
+// GetAthenaQueryFunc adapts a per-row callback into the page-level callback
+// expected by the Athena paginator. The first row of the first page is the
+// CSV-style header row and is skipped; the closure tracks the page number to
+// do so.
+func GetAthenaQueryFunc(fn func(types.Row)) func(*athena.GetQueryResultsOutput) bool {
+	pageNum := 0
+	processItemQueryResults := func(page *athena.GetQueryResultsOutput) bool {
+		if page == nil {
+			log.Errorf("AthenaQuerier: Athena page is nil")
+			return false
+		} else if page.ResultSet == nil {
+			log.Errorf("AthenaQuerier: Athena page.ResultSet is nil")
+			return false
+		}
+		rows := page.ResultSet.Rows
+		// Skip the header row, which only appears on the first page.
+		if pageNum == 0 {
+			rows = page.ResultSet.Rows[1:len(page.ResultSet.Rows)]
+		}
+
+		for _, row := range rows {
+			fn(row)
+		}
+		pageNum++
+		return true
+	}
+	return processItemQueryResults
+}

+ 251 - 0
pkg/cloud/aws/authorizer.go

@@ -0,0 +1,251 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+const AccessKeyAuthorizerType = "AWSAccessKey"
+const ServiceAccountAuthorizerType = "AWSServiceAccount"
+const AssumeRoleAuthorizerType = "AWSAssumeRole"
+
+// Authorizer implementations provide aws.Config for AWS SDK calls
+type Authorizer interface {
+	config.Authorizer
+	CreateAWSConfig(string) (aws.Config, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	case ServiceAccountAuthorizerType:
+		return &ServiceAccount{}, nil
+	case AssumeRoleAuthorizerType:
+		return &AssumeRole{}, nil
+	default:
+		return nil, fmt.Errorf("AWS: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
+// AccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+type AccessKey struct {
+	ID     string `json:"id"`
+	Secret string `json:"secret"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["id"] = ak.ID
+	fmap["secret"] = ak.Secret
+	return json.Marshal(fmap)
+}
+
+// Retrieve returns a set of awsV2 credentials using the AccessKey's key and secret.
+// This fulfils the awsV2.CredentialsProvider interface contract.
+func (ak *AccessKey) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	return aws.Credentials{
+		AccessKeyID:     ak.ID,
+		SecretAccessKey: ak.Secret,
+	}, nil
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.ID == "" {
+		return fmt.Errorf("AccessKey: missing ID")
+	}
+	if ak.Secret == "" {
+		return fmt.Errorf("AccessKey: missing Secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.ID != thatConfig.ID {
+		return false
+	}
+	if ak.Secret != thatConfig.Secret {
+		return false
+	}
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		ID:     ak.ID,
+		Secret: config.Redacted,
+	}
+}
+
+// CreateAWSConfig creates an AWS SDK V2 Config for the credentials that it contains for the provided region
+func (ak *AccessKey) CreateAWSConfig(region string) (cfg aws.Config, err error) {
+	err = ak.Validate()
+	if err != nil {
+		return cfg, err
+	}
+	// The AWS SDK v2 requires an object fulfilling the CredentialsProvider interface, which cloud.AccessKey does
+	cfg, err = awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithCredentialsProvider(ak), awsconfig.WithRegion(region))
+	if err != nil {
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region %s: %s", region, err)
+	}
+	return cfg, nil
+}
+
+// ServiceAccount uses pod annotations along with a service account to authenticate integrations
+type ServiceAccount struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	return json.Marshal(fmap)
+}
+
+// Check has nothing to check at this level, connection will fail if Pod Annotation and Service Account are not configured correctly
+func (sa *ServiceAccount) Validate() error {
+	return nil
+}
+
+func (sa *ServiceAccount) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*ServiceAccount)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+func (sa *ServiceAccount) Sanitize() config.Config {
+	return &ServiceAccount{}
+}
+
+func (sa *ServiceAccount) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(region))
+	if err != nil {
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region from annotation %s: %s", region, err)
+	}
+	return cfg, nil
+}
+
+// AssumeRole is a wrapper for another Authorizer which adds an assumed role to the configuration
+type AssumeRole struct {
+	Authorizer Authorizer `json:"authorizer"`
+	RoleARN    string     `json:"roleARN"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap["roleARN"] = ara.RoleARN
+	fmap["authorizer"] = ara.Authorizer
+	return json.Marshal(fmap)
+}
+
+// UnmarshalJSON is required for AssumeRole because it needs to unmarshal an
+// Authorizer interface, whose concrete type is resolved via
+// SelectAuthorizerByType from the authorizer-type property.
+func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	if err != nil {
+		// BUG FIX: the error previously said "StorageConfiguration", a
+		// copy-paste leftover that misattributed the failure.
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.RoleARN = roleARN
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.Authorizer = authorizer
+
+	return nil
+}
+
+// CreateAWSConfig builds an aws.Config using the wrapped Authorizer's
+// credentials and layers an STS AssumeRole credential provider for RoleARN
+// on top of it.
+func (ara *AssumeRole) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := ara.Authorizer.CreateAWSConfig(region)
+	if err != nil {
+		// BUG FIX: the base Authorizer's error was previously discarded
+		// with "_", so a broken base configuration went undetected here.
+		return cfg, err
+	}
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the RoleARN.
+	stsSvc := sts.NewFromConfig(cfg)
+	creds := stscreds.NewAssumeRoleProvider(stsSvc, ara.RoleARN)
+	cfg.Credentials = aws.NewCredentialsCache(creds)
+	return cfg, nil
+}
+
+// Validate ensures that both the wrapped base Authorizer and the RoleARN are
+// present, and that the base Authorizer itself is valid.
+// FIX: corrected the "misisng" typos in both error messages.
+func (ara *AssumeRole) Validate() error {
+	if ara.Authorizer == nil {
+		return fmt.Errorf("AssumeRole: missing base Authorizer")
+	}
+	err := ara.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if ara.RoleARN == "" {
+		return fmt.Errorf("AssumeRole: missing RoleARN configuration")
+	}
+
+	return nil
+}
+
+func (ara *AssumeRole) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AssumeRole)
+	if !ok {
+		return false
+	}
+	if ara.Authorizer != nil {
+		if !ara.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ara.RoleARN != thatConfig.RoleARN {
+		return false
+	}
+
+	return true
+}
+
+// Sanitize returns a copy of the AssumeRole configuration with any secret
+// material in the wrapped Authorizer redacted.
+func (ara *AssumeRole) Sanitize() config.Config {
+	sanitized := &AssumeRole{
+		RoleARN: ara.RoleARN,
+	}
+	// ROBUSTNESS FIX: guard against a nil wrapped Authorizer (possible after
+	// partial unmarshalling), which previously caused a nil pointer panic.
+	if ara.Authorizer != nil {
+		sanitized.Authorizer = ara.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}

+ 67 - 0
pkg/cloud/aws/authorizer_test.go

@@ -0,0 +1,67 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+func TestAuthorizerJSON_Sanitize(t *testing.T) {
+
+	testCases := map[string]struct {
+		input    Authorizer
+		expected Authorizer
+	}{
+		"Access Key": {
+			input: &AccessKey{
+				ID:     "ID",
+				Secret: "Secret",
+			},
+			expected: &AccessKey{
+				ID:     "ID",
+				Secret: config.Redacted,
+			},
+		},
+		"Service Account": {
+			input:    &ServiceAccount{},
+			expected: &ServiceAccount{},
+		},
+		"Master Payer Access Key": {
+			input: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: "Secret",
+				},
+				RoleARN: "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: config.Redacted,
+				},
+				RoleARN: "role arn",
+			},
+		},
+		"Master Payer Service Account": {
+			input: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Convert to AuthorizerJSON for sanitization
+			sanitizedAuthorizer := tc.input.Sanitize()
+
+			if !tc.expected.Equals(sanitizedAuthorizer) {
+				t.Error("Authorizer was not as expected after Sanitization")
+			}
+
+		})
+	}
+}

+ 13 - 23
pkg/cloud/aws/awsprovider.go → pkg/cloud/aws/provider.go

@@ -187,6 +187,7 @@ type AWS struct {
 }
 
 // AWSAccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+// Deprecated: v1.104 Use AccessKey instead
 type AWSAccessKey struct {
 	AccessKeyID     string `json:"aws_access_key_id"`
 	SecretAccessKey string `json:"aws_secret_access_key"`
@@ -393,6 +394,7 @@ type AwsSpotFeedInfo struct {
 }
 
 // AwsAthenaInfo contains configuration for CUR integration
+// Deprecated: v1.104 Use AthenaConfiguration instead
 type AwsAthenaInfo struct {
 	AthenaBucketName string `json:"athenaBucketName"`
 	AthenaRegion     string `json:"athenaRegion"`
@@ -1777,7 +1779,17 @@ func (aws *AWS) findCostForDisk(disk *ec2Types.Volume) (*float64, error) {
 
 	key := "us-east-2" + "," + class
 
-	priceStr := aws.Pricing[key].PV.Cost
+	pricing, ok := aws.Pricing[key]
+	if !ok {
+		return nil, fmt.Errorf("no pricing data for key '%s'", key)
+	}
+	if pricing == nil {
+		return nil, fmt.Errorf("nil pricing data for key '%s'", key)
+	}
+	if pricing.PV == nil {
+		return nil, fmt.Errorf("pricing for key '%s' has nil PV", key)
+	}
+	priceStr := pricing.PV.Cost
 
 	price, err := strconv.ParseFloat(priceStr, 64)
 	if err != nil {
@@ -1848,28 +1860,6 @@ func (aws *AWS) QueryAthenaPaginated(ctx context.Context, query string, fn func(
 	return nil
 }
 
-func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
-	inp := &athena.GetQueryExecutionInput{
-		QueryExecutionId: queryExecutionID,
-	}
-	isQueryStillRunning := true
-	for isQueryStillRunning {
-		qe, err := client.GetQueryExecution(ctx, inp)
-		if err != nil {
-			return err
-		}
-		if qe.QueryExecution.Status.State == "SUCCEEDED" {
-			isQueryStillRunning = false
-			continue
-		}
-		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
-			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
-		}
-		time.Sleep(2 * time.Second)
-	}
-	return nil
-}
-
 type SavingsPlanData struct {
 	ResourceID     string
 	EffectiveCost  float64

+ 0 - 0
pkg/cloud/aws/awsprovider_test.go → pkg/cloud/aws/provider_test.go


+ 134 - 0
pkg/cloud/aws/s3configuration.go

@@ -0,0 +1,134 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+type S3Configuration struct {
+	Bucket     string     `json:"bucket"`
+	Region     string     `json:"region"`
+	Account    string     `json:"account"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (s3c *S3Configuration) Validate() error {
+	// Validate Authorizer
+	if s3c.Authorizer == nil {
+		return fmt.Errorf("S3Configuration: missing Authorizer")
+	}
+
+	err := s3c.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("S3Configuration: %s", err)
+	}
+
+	// Validate base properties
+	if s3c.Bucket == "" {
+		return fmt.Errorf("S3Configuration: missing bucket")
+	}
+
+	if s3c.Region == "" {
+		return fmt.Errorf("S3Configuration: missing region")
+	}
+
+	if s3c.Account == "" {
+		return fmt.Errorf("S3Configuration: missing account")
+	}
+
+	return nil
+}
+
+func (s3c *S3Configuration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*S3Configuration)
+	if !ok {
+		return false
+	}
+
+	if s3c.Authorizer != nil {
+		if !s3c.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if s3c.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if s3c.Region != thatConfig.Region {
+		return false
+	}
+
+	if s3c.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+// Sanitize returns a copy of the S3Configuration with any secret material in
+// the Authorizer redacted.
+func (s3c *S3Configuration) Sanitize() config.Config {
+	sanitized := &S3Configuration{
+		Bucket:  s3c.Bucket,
+		Region:  s3c.Region,
+		Account: s3c.Account,
+	}
+	// ROBUSTNESS FIX: a nil Authorizer (rejected later by Validate) previously
+	// caused a nil pointer panic here.
+	if s3c.Authorizer != nil {
+		sanitized.Authorizer = s3c.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}
+
+func (s3c *S3Configuration) Key() string {
+	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
+}
+
+func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Bucket = bucket
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Region = region
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Account = account
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Authorizer = authorizer
+
+	return nil
+}
+
+func (s3c *S3Configuration) CreateAWSConfig() (aws.Config, error) {
+	return s3c.Authorizer.CreateAWSConfig(s3c.Region)
+}

+ 40 - 0
pkg/cloud/aws/s3connection.go

@@ -0,0 +1,40 @@
+package aws
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+type S3Connection struct {
+	S3Configuration
+}
+
+func (s3c *S3Connection) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3Connection)
+	if !ok {
+		return false
+	}
+
+	return s3c.S3Configuration.Equals(&thatConfig.S3Configuration)
+}
+
+func (s3c *S3Connection) GetS3Client() (*s3.Client, error) {
+	cfg, err := s3c.CreateAWSConfig()
+	if err != nil {
+		return nil, err
+	}
+	return s3.NewFromConfig(cfg), nil
+}
+
+// ListObjects lists the objects in the configured bucket.
+// NOTE(review): the non-paginated ListObjects API returns at most 1000 keys
+// per call; buckets with more objects will be truncated here — confirm
+// whether pagination (ListObjectsV2 + continuation token) is needed.
+func (s3c *S3Connection) ListObjects(cli *s3.Client) (*s3.ListObjectsOutput, error) {
+	objs, err := cli.ListObjects(context.TODO(), &s3.ListObjectsInput{
+		Bucket: aws.String(s3c.Bucket),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return objs, err
+}

+ 387 - 0
pkg/cloud/aws/s3connection_test.go

@@ -0,0 +1,387 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestS3Configuration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   S3Configuration
+		expected error
+	}{
+		"valid config access key": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("S3Configuration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("S3Configuration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: S3Configuration{
+				Bucket:     "",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing bucket"),
+		},
+		"missing region": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing region"),
+		},
+		"missing account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     S3Configuration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket2",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config S3Configuration
+	}{
+		"Empty Config": {
+			config: S3Configuration{},
+		},
+		"AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &S3Configuration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 181 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -0,0 +1,181 @@
+package aws
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
// S3SelectQuerier runs S3 Select (SQL-over-object) queries against billing
// report objects in the bucket described by its embedded S3Connection.
type S3SelectQuerier struct {
	S3Connection
}
+
+func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3SelectQuerier)
+	if !ok {
+		return false
+	}
+
+	return s3sq.S3Connection.Equals(&thatConfig.S3Connection)
+}
+
+func (s3sq *S3SelectQuerier) Query(query string, queryKeys []string, cli *s3.Client, fn func(*csv.Reader) error) error {
+	for _, queryKey := range queryKeys {
+		reader, err2 := s3sq.fetchCSVReader(query, queryKey, cli, s3Types.FileHeaderInfoUse)
+		if err2 != nil {
+			return err2
+		}
+		err2 = fn(reader)
+		if err2 != nil {
+			return err2
+		}
+	}
+
+	return nil
+}
+
+// GetQueryKeys returns a list of s3 object names, where the there are 1 object for each month within the range between
+// start and end
+func (s3sq *S3SelectQuerier) GetQueryKeys(start, end time.Time, client *s3.Client) ([]string, error) {
+	objs, err := s3sq.ListObjects(client)
+	if err != nil {
+		return nil, err
+	}
+
+	monthStrings, err := getMonthStrings(start, end)
+	if err != err {
+		return nil, err
+	}
+
+	var queryKeys []string
+	// Find all matching "csv.gz" files per monthString
+	for _, monthStr := range monthStrings {
+		for _, obj := range objs.Contents {
+			if strings.Contains(*obj.Key, monthStr) && strings.HasSuffix(*obj.Key, ".csv.gz") {
+				queryKeys = append(queryKeys, *obj.Key)
+			}
+		}
+	}
+
+	if len(queryKeys) == 0 {
+		return nil, fmt.Errorf("no CUR files for given time range")
+	}
+
+	return queryKeys, nil
+}
+
+func (s3sq *S3SelectQuerier) fetchCSVReader(query string, queryKey string, client *s3.Client, fileHeaderInfo s3Types.FileHeaderInfo) (*csv.Reader, error) {
+	input := &s3.SelectObjectContentInput{
+		Bucket:         aws.String(s3sq.Bucket),
+		Key:            aws.String(queryKey),
+		Expression:     aws.String(query),
+		ExpressionType: s3Types.ExpressionTypeSql,
+		InputSerialization: &s3Types.InputSerialization{
+			CompressionType: s3Types.CompressionTypeGzip,
+			CSV: &s3Types.CSVInput{
+				FileHeaderInfo: fileHeaderInfo,
+			},
+		},
+		OutputSerialization: &s3Types.OutputSerialization{
+			CSV: &s3Types.CSVOutput{},
+		},
+	}
+
+	res, err := client.SelectObjectContent(context.TODO(), input)
+	if err != nil {
+		return nil, err
+	}
+	resStream := res.GetStream()
+	// todo: this needs work
+	results, resultWriter := io.Pipe()
+	go func() {
+		defer resultWriter.Close()
+		defer resStream.Close()
+		resStream.Events()
+		for event := range resStream.Events() {
+			switch e := event.(type) {
+			case *s3Types.SelectObjectContentEventStreamMemberRecords:
+				resultWriter.Write(e.Value.Payload)
+			case *s3Types.SelectObjectContentEventStreamMemberEnd:
+				break
+			}
+
+		}
+	}()
+
+	if err := resStream.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read from SelectObjectContent EventStream, %v", err)
+	}
+
+	return csv.NewReader(results), nil
+}
+
+func getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	dateTemplate := "%d%02d01-%d%02d01/"
+	// set to first of the month
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	nextMonth := currMonth.AddDate(0, 1, 0)
+	monthStr := fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+
+	// Create string for end condition
+	endMonth := end.AddDate(0, 0, -end.Day()+1)
+	endNextMonth := endMonth.AddDate(0, 1, 0)
+	endStr := fmt.Sprintf(dateTemplate, endMonth.Year(), int(endMonth.Month()), endNextMonth.Year(), int(endNextMonth.Month()))
+
+	var monthStrs []string
+	monthStrs = append(monthStrs, monthStr)
+
+	for monthStr != endStr {
+		currMonth = nextMonth
+		nextMonth = nextMonth.AddDate(0, 1, 0)
+		monthStr = fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+// GetCSVRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetCSVRowValue(row []string, queryColumnIndexes map[string]int, columnName string) string {
+	if row == nil {
+		return ""
+	}
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	return stringutil.Bank(row[columnIndex])
+}
+
// GetCSVRowValueFloat retrieves a value from a CSV row based on column name
// and converts it to a float64. An error is returned when the row is nil, the
// column is unknown, the column index is out of range, or the value cannot be
// parsed as a float.
func GetCSVRowValueFloat(row []string, queryColumnIndexes map[string]int, columnName string) (float64, error) {
	if row == nil {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: nil row")
	}
	columnIndex, ok := queryColumnIndexes[columnName]
	if !ok {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: missing column index: %s", columnName)
	}
	// guard against a short row, which would otherwise panic
	if columnIndex < 0 || columnIndex >= len(row) {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: column index %d out of range for row of length %d", columnIndex, len(row))
	}
	cost, err := strconv.ParseFloat(row[columnIndex], 64)
	if err != nil {
		return cost, fmt.Errorf("getCSVRowValueFloat: failed to parse %s: '%s': %s", columnName, row[columnIndex], err.Error())
	}
	return cost, nil
}

+ 80 - 0
pkg/cloud/azure/authorizer.go

@@ -0,0 +1,80 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// AccessKeyAuthorizerType identifies the AccessKey Authorizer implementation
// in serialized configurations.
const AccessKeyAuthorizerType = "AzureAccessKey"

// Authorizer is a config.Authorizer that can additionally produce Azure Blob
// storage credentials.
type Authorizer interface {
	config.Authorizer
	GetBlobCredentials() (azblob.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("azure: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey is an Authorizer that authenticates with an Azure storage account
// name and its shared access key.
type AccessKey struct {
	AccessKey string `json:"accessKey"`
	Account   string `json:"account"`
}
+
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKey"] = ak.AccessKey
+	fmap["account"] = ak.Account
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKey == "" {
+		return fmt.Errorf("AccessKey: missing access key")
+	}
+	if ak.Account == "" {
+		return fmt.Errorf("AccessKey: missing account")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKey != thatConfig.AccessKey {
+		return false
+	}
+	if ak.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		AccessKey: config.Redacted,
+		Account:   ak.Account,
+	}
+}
+
// GetBlobCredentials returns shared-key Azure Blob storage credentials built
// from the account name and access key.
func (ak *AccessKey) GetBlobCredentials() (azblob.Credential, error) {
	// Create a default request pipeline using your storage account name and account key.
	return azblob.NewSharedKeyCredential(ak.Account, ak.AccessKey)
}

+ 322 - 0
pkg/cloud/azure/billingexportparser.go

@@ -0,0 +1,322 @@
+package azure
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// Date layouts found in Azure billing exports: "usagedatetime" columns use the
// ISO-style layout, "date" columns use the US-style enterprise layout.
const azureDateLayout = "2006-01-02"
const AzureEnterpriseDateLayout = "01/02/2006"

// groupRegex captures each "/"-delimited segment of an Azure resource ID.
var groupRegex = regexp.MustCompile("(/[^/]+)")

// BillingRowValues holder for Azure Billing Values
type BillingRowValues struct {
	Date            time.Time
	MeterCategory   string
	SubscriptionID  string
	InvoiceEntityID string // billing account; set to the subscription ID when the export has no billing account column
	InstanceID      string // Azure resource ID of the billed resource
	Service         string // consumed service, e.g. "Microsoft.Compute"
	Tags            map[string]string
	AdditionalInfo  map[string]any
	Cost            float64 // pay-as-you-go cost column when present; otherwise equals NetCost
	NetCost         float64 // cost-in-billing-currency / pre-tax cost column
}
+
+func (brv *BillingRowValues) IsCompute(category string) bool {
+	if category == kubecost.ComputeCategory {
+		return true
+	}
+
+	if category == kubecost.StorageCategory || category == kubecost.NetworkCategory {
+		if brv.Service == "Microsoft.Compute" {
+			return true
+		}
+	}
+	if category == kubecost.NetworkCategory && brv.MeterCategory == "Virtual Network" {
+		return true
+	}
+	return false
+}
+
+// BillingExportParser holds indexes of relevent fields in Azure Billing CSV in addition to the correct data format
+type BillingExportParser struct {
+	Date            int
+	MeterCategory   int
+	InvoiceEntityID int
+	SubscriptionID  int
+	InstanceID      int
+	Service         int
+	Tags            int
+	AdditionalInfo  int
+	Cost            int
+	NetCost         int
+	DateFormat      string
+}
+
+// match "SubscriptionGuid" in "Abonnement-GUID (SubscriptionGuid)"
+var getParenContentRegEx = regexp.MustCompile("\\((.*?)\\)")
+
+func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
+	// clear BOM from headers
+	if len(headers) != 0 {
+		headers[0] = strings.TrimPrefix(headers[0], "\xEF\xBB\xBF")
+	}
+
+	headerIndexes := map[string]int{}
+	for i, header := range headers {
+		// Azure Headers in different regions will have english headers in parentheses
+		match := getParenContentRegEx.FindStringSubmatch(header)
+		if len(match) != 0 {
+			header = match[len(match)-1]
+		}
+		headerIndexes[strings.ToLower(header)] = i
+	}
+
+	abp := &BillingExportParser{}
+
+	// Set Date Column and Date Format
+	if i, ok := headerIndexes["usagedatetime"]; ok {
+		abp.Date = i
+		abp.DateFormat = azureDateLayout
+	} else if j, ok2 := headerIndexes["date"]; ok2 {
+		abp.Date = j
+		abp.DateFormat = AzureEnterpriseDateLayout
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Date field")
+	}
+
+	// set Subscription ID
+	if i, ok := headerIndexes["subscriptionid"]; ok {
+		abp.SubscriptionID = i
+	} else if j, ok2 := headerIndexes["subscriptionguid"]; ok2 {
+		abp.SubscriptionID = j
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Subscription ID field")
+	}
+
+	// Set Billing ID
+	if i, ok := headerIndexes["billingaccountid"]; ok {
+		abp.InvoiceEntityID = i
+	} else if j, ok2 := headerIndexes["billingaccountname"]; ok2 {
+		abp.InvoiceEntityID = j
+	} else {
+		// if no billing ID column is present use subscription ID
+		abp.InvoiceEntityID = abp.SubscriptionID
+	}
+
+	// Set Instance ID
+	if i, ok := headerIndexes["instanceid"]; ok {
+		abp.InstanceID = i
+	} else if j, ok2 := headerIndexes["instancename"]; ok2 {
+		abp.InstanceID = j
+	} else if k, ok3 := headerIndexes["resourceid"]; ok3 {
+		abp.InstanceID = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Instance ID field")
+	}
+
+	// Set Meter Category
+	if i, ok := headerIndexes["metercategory"]; ok {
+		abp.MeterCategory = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Meter Category field")
+	}
+
+	// Set Tags
+	if i, ok := headerIndexes["tags"]; ok {
+		abp.Tags = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Tags field")
+	}
+
+	// Set Additional Info
+	if i, ok := headerIndexes["additionalinfo"]; ok {
+		abp.AdditionalInfo = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Additional Info field")
+	}
+
+	// Set Service
+	if i, ok := headerIndexes["consumedservice"]; ok {
+		abp.Service = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Service field")
+	}
+
+	// Set Net Cost
+	if i, ok := headerIndexes["costinbillingcurrency"]; ok {
+		abp.NetCost = i
+	} else if j, ok2 := headerIndexes["pretaxcost"]; ok2 {
+		abp.NetCost = j
+	} else if k, ok3 := headerIndexes["cost"]; ok3 {
+		abp.NetCost = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Net Cost field")
+	}
+
+	// Set Cost
+	if i, ok := headerIndexes["paygcostinbillingcurrency"]; ok {
+		abp.Cost = i
+	} else {
+		// if no Cost column is present use Net Cost column
+		abp.Cost = abp.NetCost
+	}
+
+	return abp, nil
+}
+
+func (bep *BillingExportParser) ParseRow(start, end time.Time, record []string) *BillingRowValues {
+	usageDate, err := time.Parse(bep.DateFormat, record[bep.Date])
+	if err != nil {
+		// try other format, and switch if successful
+		if bep.DateFormat == azureDateLayout {
+			bep.DateFormat = AzureEnterpriseDateLayout
+		} else {
+			bep.DateFormat = azureDateLayout
+		}
+		usageDate, err = time.Parse(bep.DateFormat, record[bep.Date])
+		// If parse still fails then return line
+		if err != nil {
+			log.Errorf("failed to parse usage date: '%s'", record[bep.Date])
+			return nil
+		}
+	}
+
+	// skip if usage data isn't in subject window
+	if usageDate.Before(start) || !usageDate.Before(end) {
+		return nil
+	}
+
+	cost, err := strconv.ParseFloat(record[bep.Cost], 64)
+	if err != nil {
+		log.Errorf("failed to parse cost: '%s'", record[bep.Cost])
+		return nil
+	}
+
+	netCost, err := strconv.ParseFloat(record[bep.NetCost], 64)
+	if err != nil {
+		log.Errorf("failed to parse net cost: '%s'", record[bep.NetCost])
+		return nil
+	}
+
+	additionalInfo := make(map[string]any)
+	additionalInfoJson := encloseInBrackets(record[bep.AdditionalInfo])
+	if additionalInfoJson != "" {
+		err = json.Unmarshal([]byte(additionalInfoJson), &additionalInfo)
+		if err != nil {
+			log.Errorf("Could not parse additional information %s, with Error: %s", additionalInfoJson, err.Error())
+		}
+	}
+
+	tags := make(map[string]string)
+	tagJson := encloseInBrackets(record[bep.Tags])
+	if tagJson != "" {
+		tagsAny := make(map[string]any)
+		err = json.Unmarshal([]byte(tagJson), &tagsAny)
+		if err != nil {
+			log.Errorf("Could not parse tags: %v, with Error: %s", tagJson, err.Error())
+		}
+
+		for name, value := range tagsAny {
+			if valueStr, ok := value.(string); ok && valueStr != "" {
+				tags[name] = valueStr
+			}
+		}
+	}
+
+	return &BillingRowValues{
+		Date:            usageDate,
+		MeterCategory:   record[bep.MeterCategory],
+		SubscriptionID:  record[bep.SubscriptionID],
+		InvoiceEntityID: record[bep.InvoiceEntityID],
+		InstanceID:      record[bep.InstanceID],
+		Service:         record[bep.Service],
+		Tags:            tags,
+		AdditionalInfo:  additionalInfo,
+		Cost:            cost,
+		NetCost:         netCost,
+	}
+}
+
// encloseInBrackets wraps a JSON fragment in curly braces unless it is empty
// or already enclosed by them.
func encloseInBrackets(jsonString string) string {
	switch {
	case jsonString == "":
		return jsonString
	case jsonString[0] == '{' && jsonString[len(jsonString)-1] == '}':
		return jsonString
	default:
		return fmt.Sprintf("{%s}", jsonString)
	}
}
+
+func AzureSetProviderID(abv *BillingRowValues) string {
+	category := SelectAzureCategory(abv.MeterCategory)
+	if value, ok := abv.AdditionalInfo["VMName"]; ok {
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+	} else if value, ok := abv.AdditionalInfo["VmName"]; ok {
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+	} else if value2, ook := abv.AdditionalInfo["IpAddress"]; ook && abv.MeterCategory == "Virtual Network" {
+		return fmt.Sprintf("%v", value2)
+	}
+
+	if category == kubecost.StorageCategory {
+		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
+			creationSource := fmt.Sprintf("%v", value2)
+			return strings.TrimPrefix(creationSource, "aks-")
+		} else if value2, ok2 := abv.Tags["aks-managed-creationSource"]; ok2 {
+			creationSource := fmt.Sprintf("%v", value2)
+			return strings.TrimPrefix(creationSource, "vmssclient-")
+		} else {
+			return getSubStringAfterFinalSlash(abv.InstanceID)
+		}
+	}
+	return "azure://" + resourceGroupToLowerCase(abv.InstanceID)
+}
+
+func SelectAzureCategory(meterCategory string) string {
+	if meterCategory == "Virtual Machines" {
+		return kubecost.ComputeCategory
+	} else if meterCategory == "Storage" {
+		return kubecost.StorageCategory
+	} else if meterCategory == "Load Balancer" || meterCategory == "Bandwidth" || meterCategory == "Virtual Network" {
+		return kubecost.NetworkCategory
+	} else {
+		return kubecost.OtherCategory
+	}
+}
+
+func resourceGroupToLowerCase(providerID string) string {
+	var sb strings.Builder
+	for matchNum, group := range groupRegex.FindAllString(providerID, -1) {
+		if matchNum == 3 {
+			sb.WriteString(strings.ToLower(group))
+		} else {
+			sb.WriteString(group)
+		}
+	}
+	return sb.String()
+}
+
+// Returns the substring after the final "/" in a string
+func getSubStringAfterFinalSlash(id string) string {
+	index := strings.LastIndex(id, "/")
+	if index == -1 {
+		log.DedupedInfof(5, "azure.getSubStringAfterFinalSlash: failed to parse %s", id)
+		return id
+	}
+	return id[index+1:]
+}
+
// getVMNumberForVMSS extracts the instance number from a scale-set VM name
// such as "aks-pool-12345678-vmss_2" and returns it as a
// "/virtualMachines/<n>" suffix; names without an underscore yield "".
func getVMNumberForVMSS(vmName string) string {
	parts := strings.SplitN(vmName, "_", 3)
	if len(parts) < 2 {
		return ""
	}
	return "/virtualMachines/" + parts[1]
}

+ 194 - 0
pkg/cloud/azure/billingexportparser_test.go

@@ -0,0 +1,194 @@
+package azure
+
+import (
+	"encoding/csv"
+	"os"
+	"testing"
+	"time"
+)
+
// Fixture directories holding Azure billing export CSVs used by these tests.
const billingExportPath = "./resources/billingexports/"
const headerSetPath = billingExportPath + "headersets/"
const valueCasesPath = billingExportPath + "values/"
+
// TestCSVRetriever is a CSV retriever test double that reads from a local
// fixture file instead of cloud storage.
type TestCSVRetriever struct {
	CSVName string
}

// getCSVReaders opens the fixture file and returns a single CSV reader over
// it; the start/end window is ignored. The file handle is intentionally not
// closed here because the returned reader consumes it lazily for the
// remainder of the test.
func (tcr TestCSVRetriever) getCSVReaders(start, end time.Time) ([]*csv.Reader, error) {
	csvFile, err := os.Open(tcr.CSVName)
	if err != nil {
		return nil, err
	}
	// slice literal instead of the previous append-to-empty-slice construction
	return []*csv.Reader{csv.NewReader(csvFile)}, nil
}
+
+func Test_NewBillingExportParser(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected BillingExportParser
+	}{
+		"English Headers": {
+			input: "PayAsYouGo.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"Enterprise Camel Headers": {
+			input: "EnterpriseCamel.csv",
+			expected: BillingExportParser{
+				Date:            11,
+				MeterCategory:   18,
+				InvoiceEntityID: 0,
+				SubscriptionID:  23,
+				InstanceID:      29,
+				Service:         15,
+				Tags:            45,
+				AdditionalInfo:  44,
+				Cost:            38,
+				NetCost:         38,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"Enterprise Headers": {
+			input: "Enterprise.csv",
+			expected: BillingExportParser{
+				Date:            7,
+				MeterCategory:   9,
+				InvoiceEntityID: 39,
+				SubscriptionID:  3,
+				InstanceID:      20,
+				Service:         19,
+				Tags:            21,
+				AdditionalInfo:  23,
+				Cost:            17,
+				NetCost:         17,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"German Headers": {
+			input: "German.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"YA Headers": {
+			input: "YA.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"BOM Prefixed Headers": {
+			input: "BOM.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := TestCSVRetriever{
+				CSVName: headerSetPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+			headers, _ := reader.Read()
+			abp, err := NewBillingParseSchema(headers)
+			if err != nil {
+				t.Errorf("failed to create Azure Billing Parser from headers with error: %s", err.Error())
+			}
+
+			if abp.DateFormat != tc.expected.DateFormat {
+				t.Errorf("Azure Billing Parser does not have expected DateFormat index. Expected: %s, Actual: %s", tc.expected.DateFormat, abp.DateFormat)
+			}
+
+			if abp.Date != tc.expected.Date {
+				t.Errorf("Azure Billing Parser does not have expected Date index. Expected: %d, Actual: %d", tc.expected.Date, abp.Date)
+			}
+
+			if abp.MeterCategory != tc.expected.MeterCategory {
+				t.Errorf("Azure Billing Parser does not have expected MeterCategory index. Expected: %d, Actual: %d", tc.expected.MeterCategory, abp.MeterCategory)
+			}
+
+			if abp.InvoiceEntityID != tc.expected.InvoiceEntityID {
+				t.Errorf("Azure Billing Parser does not have expected InvoiceEntityID index. Expected: %d, Actual: %d", tc.expected.InvoiceEntityID, abp.InvoiceEntityID)
+			}
+
+			if abp.SubscriptionID != tc.expected.SubscriptionID {
+				t.Errorf("Azure Billing Parser does not have expected SubscriptionID index. Expected: %d, Actual: %d", tc.expected.SubscriptionID, abp.SubscriptionID)
+			}
+
+			if abp.InstanceID != tc.expected.InstanceID {
+				t.Errorf("Azure Billing Parser does not have expected InstanceID index. Expected: %d, Actual: %d", tc.expected.InstanceID, abp.InstanceID)
+			}
+
+			if abp.Service != tc.expected.Service {
+				t.Errorf("Azure Billing Parser does not have expected Service index. Expected: %d, Actual: %d", tc.expected.Service, abp.Service)
+			}
+
+			if abp.Tags != tc.expected.Tags {
+				t.Errorf("Azure Billing Parser does not have expected Tags index. Expected: %d, Actual: %d", tc.expected.Tags, abp.Tags)
+			}
+
+			if abp.AdditionalInfo != tc.expected.AdditionalInfo {
+				t.Errorf("Azure Billing Parser does not have expected AdditionalInfo index. Expected: %d, Actual: %d", tc.expected.AdditionalInfo, abp.AdditionalInfo)
+			}
+
+			if abp.Cost != tc.expected.Cost {
+				t.Errorf("Azure Billing Parser does not have expected Cost index. Expected: %d, Actual: %d", tc.expected.Cost, abp.Cost)
+			}
+
+			if abp.NetCost != tc.expected.NetCost {
+				t.Errorf("Azure Billing Parser does not have expected NetCost index. Expected: %d, Actual: %d", tc.expected.NetCost, abp.NetCost)
+			}
+		})
+	}
+}

+ 0 - 0
pkg/cloud/azure/client.go → pkg/cloud/azure/pricesheetclient.go


+ 0 - 0
pkg/cloud/azure/downloader.go → pkg/cloud/azure/pricesheetdownloader.go


+ 0 - 0
pkg/cloud/azure/downloader_test.go → pkg/cloud/azure/pricesheetdownloader_test.go


+ 3 - 1
pkg/cloud/azure/azureprovider.go → pkg/cloud/azure/provider.go

@@ -489,6 +489,7 @@ func (k *azureKey) GetGPUCount() string {
 }
 
 // AzureStorageConfig Represents an azure storage config
+// Deprecated: v1.104 Use StorageConfiguration instead
 type AzureStorageConfig struct {
 	SubscriptionId string `json:"azureSubscriptionID"`
 	AccountName    string `json:"azureStorageAccount"`
@@ -517,7 +518,8 @@ type AzureAppKey struct {
 	Tenant      string `json:"tenant"`
 }
 
-// Azure service key for a specific subscription
+// AzureServiceKey service key for a specific subscription
+// Deprecated: v1.104 Use ServiceKey instead
 type AzureServiceKey struct {
 	SubscriptionID string       `json:"subscriptionId"`
 	ServiceKey     *AzureAppKey `json:"serviceKey"`

+ 0 - 0
pkg/cloud/azure/azureprovider_test.go → pkg/cloud/azure/provider_test.go


+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/BOM.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+,,,2022-11-03,,,,,,,,,,,,,,,,,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv

@@ -0,0 +1,2 @@
+InvoiceSectionName,AccountName,AccountOwnerId,SubscriptionId,SubscriptionName,ResourceGroup,ResourceLocation,Date,ProductName,MeterCategory,MeterSubCategory,MeterId,MeterName,MeterRegion,UnitOfMeasure,Quantity,EffectivePrice,CostInBillingCurrency,CostCenter,ConsumedService,ResourceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ResourceName,ReservationId,ReservationName,UnitPrice,ProductOrderId,ProductOrderName,Term,PublisherType,PublisherName,ChargeType,Frequency,PricingModel,AvailabilityZone,BillingAccountId,BillingAccountName,BillingCurrencyCode,BillingPeriodStartDate,BillingPeriodEndDate,BillingProfileId,BillingProfileName,InvoiceSectionId,IsAzureCreditEligible,PartNumber,PayGPrice,PlanName,ServiceFamily,CostAllocationRuleName
+Unassigned,Azure Service,email@email.com,11111111-12ab-34dc-56ef-123456abcdef,Example-Subscription,Example-Resource-Group,canadacentral,02/02/2021,Virtual Machines Ev3/ESv3 Series - E4 v3/E4s v3 - CA Central,Virtual Machines,Ev3/ESv3 Series,3dbc3a0c-32b6-4c4d-adbb-3ee577aaba4d,E4 v3/E4s v3,CA Central,10 Hours,10,1.2,0,,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-defaultpool-12345678-vmss,"""createOperationID"": ""11111111-12ab-34dc-56ef-123456abcdef"",""creationSource"": ""vmssclient-aks-defaultpool-12345678-vmss"",""orchestrator"": ""Kubernetes:1.19.9"",""poolName"": ""defaultpool"",""resourceNameSuffix"": ""12345678""",MS-AZR-0017P,"{""UsageType"":""ComputeHR"",""ImageType"":""Canonical"",""ServiceType"":""Standard_E4s_v3"",""VMName"":""aks-defaultpool-12345678-vmss_2"",""VMProperties"":null,""VCPUs"":4,""CPUs"":0,""ReservationOrderId"":""11111111-12ab-34dc-56ef-123456abcdef"",""ReservationId"":""4f18e7c9-9ae8-4251-886b-8bd942a41bdf"",""ConsumptionMeter"":""11111111-12ab-34dc-56ef-123456abcdef"",""RINormalizationRatio"":2.0}",,Canonical,aks-defaultpool-12345678-vmss,11111111-12ab-34dc-56ef-123456abcdef,ExampleReservationName,0.1,b13f2808-a13e-49a3-a899-06d83b8f5d32,"Reserved VM Instance, Standard_E2s_v3, CA Central, 3 Years",36,Azure,,Usage,UsageBased,,,12345678,Example Company,CAD,05/01/2021,05/31/2021,12345678,Example Company,,TRUE,ABC-12345,0,,Compute,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv

@@ -0,0 +1,2 @@
+billingAccountName,partnerName,resellerName,resellerMpnId,customerTenantId,customerName,costCenter,billingPeriodEndDate,billingPeriodStartDate,servicePeriodEndDate,servicePeriodStartDate,date,serviceFamily,productOrderId,productOrderName,consumedService,meterId,meterName,meterCategory,meterSubCategory,meterRegion,ProductId,ProductName,SubscriptionId,subscriptionName,publisherType,publisherId,publisherName,resourceGroupName,ResourceId,resourceLocation,location,effectivePrice,quantity,unitOfMeasure,chargeType,billingCurrency,pricingCurrency,costInBillingCurrency,costInUsd,exchangeRatePricingToBilling,exchangeRateDate,serviceInfo1,serviceInfo2,additionalInfo,tags,PayGPrice,frequency,term,reservationId,reservationName,pricingModel
+,PartnerName,,,11111111-1111-1111-1111-123456789012,Customer Name,,,,02/01/2021,02/01/2021,02/02/2021,Networking,11111111-1111-1111-1111-123456789012,Azure plan,Microsoft.Network,11111111-1111-1111-1111-123456789012,Dynamic Public IP,Virtual Network,IP Addresses,,DZH318Z0BNXN0032,IP Addresses - Basic,11111111-1111-1111-1111-123456789012,Microsoft Azure,Azure,,Microsoft,databricks,/subscriptions/11111111-1111-1111-1111-123456789012/resourceGroups/testspot/providers/Microsoft.Storage/storageAccounts/storename,WESTUS,US West,0.004,3,1 Hour,Usage,USD,USD,0.012,0.012,1,3/1/21,,,,"{  ""ClusterId"": ""0103-212455-stash756"",  ""ServiceType"": ""DataAnalysis"",  ""ClusterName"": ""SrgExtractsPartDeux"",  ""databricks-instance-name"": ""0c1ef59764casdf0c0e094e1cc"",  ""Creator"": ""email@email.com"",  ""Vendor"": ""Databricks"",  ""DatabricksEnvironment"": ""workerenv-6448504491843616""}",0.004,UsageBased,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/German.csv

@@ -0,0 +1,2 @@
+Abonnement-GUID (SubscriptionGuid),Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),UsageDateTime (UsageDateTime),Kategorie der Verbrauchseinheit (MeterCategory),MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),PreTaxCost (PreTaxCost),Genutzter Dienst (ConsumedService),ResourceType (ResourceType),InstanceId (InstanceId),Tags (Tags),OfferId (OfferId),Zusätzliche Informationen (AdditionalInfo),Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/YA.csv

@@ -0,0 +1,2 @@
+subscriptionId,Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),date,meterCategory,MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),costInBillingCurrency,consumedService,ResourceType (ResourceType),InstanceName,tags,OfferId (OfferId),additionalInfo,Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,02/02/2021,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2"

+ 88 - 0
pkg/cloud/azure/resources/billingexports/values/Template.csv

@@ -0,0 +1,88 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001301934407093,0.001301934407093,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.0828,0.0828,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.09,0.09,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0015896,0.0015896,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0000362,0.0000362,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.01236783717759,0.01236783717759,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000204,0.00000204,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0821376,0.0821376,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000060000000000000000000,0.000000060000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000140000000000000000,0.000000140000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000020000000000000000000,0.000000020000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0013522,0.0013522,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000160000000000000000,0.000000160000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000100000000000000000,0.000000100000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001686412831768,0.001686412831768,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.125,0.125,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0004494,0.0004494,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000154,0.00000154,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.15,0.15,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000040000000000000000000,0.000000040000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0002082,0.0002082,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000177,0.000177,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0107136,0.0107136,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0032604,0.0032604,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0072,0.0072,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.00000445,0.00000445,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.00992768780794,0.00992768780794,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.14,0.14,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000191,0.000191,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0014714,0.0014714,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0144,0.0144,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.015,0.015,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000280000000000000000,0.000000280000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }"

+ 170 - 0
pkg/cloud/azure/storagebillingparser.go

@@ -0,0 +1,170 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
// AzureStorageBillingParser accesses billing data stored in CSV files in Azure Storage.
// It embeds StorageConnection, which supplies the storage account, container,
// and path configuration plus the blob download helpers used below.
type AzureStorageBillingParser struct {
	StorageConnection
}
+
+func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AzureStorageBillingParser)
+	if !ok {
+		return false
+	}
+	return asbp.StorageConnection.Equals(&thatConfig.StorageConnection)
+}
+
// AzureBillingResultFunc is invoked once per parsed billing row; returning a
// non-nil error aborts the parse.
type AzureBillingResultFunc func(*BillingRowValues) error
+
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+	err := asbp.Validate()
+	if err != nil {
+		return cloud.InvalidConfiguration, err
+	}
+
+	containerURL, err := asbp.getContainer()
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+	ctx := context.Background()
+	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+	for _, blobName := range blobNames {
+		blobBytes, err2 := asbp.DownloadBlob(blobName, containerURL, ctx)
+		if err2 != nil {
+			return cloud.FailedConnection, err2
+		}
+		err2 = asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn)
+		if err2 != nil {
+			return cloud.ParseError, err2
+		}
+
+	}
+	return cloud.SuccessfulConnection, nil
+}
+
+func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {
+	headers, err := reader.Read()
+	if err != nil {
+		return err
+	}
+	abp, err := NewBillingParseSchema(headers)
+	if err != nil {
+		return err
+	}
+	for {
+		var record, err = reader.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		abv := abp.ParseRow(start, end, record)
+		if abv == nil {
+			continue
+		}
+
+		err = resultFn(abv)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, containerURL *azblob.ContainerURL, ctx context.Context) ([]string, error) {
+	log.Infof("Azure Storage: retrieving most recent reports from: %v - %v", start, end)
+
+	// Get list of month substrings for months contained in the start to end range
+	monthStrs, err := asbp.getMonthStrings(start, end)
+	if err != nil {
+		return nil, err
+	}
+	mostResentBlobs := make(map[string]azblob.BlobItemInternal)
+	for marker := (azblob.Marker{}); marker.NotDone(); {
+		// Get a result segment starting with the blob indicated by the current Marker.
+		listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+		if err != nil {
+			return nil, err
+		}
+
+		// ListBlobs returns the start of the next segment; you MUST use this to get
+		// the next segment (after processing the current result segment).
+		marker = listBlob.NextMarker
+
+		// Using the list of months strings find the most resent blob for each month in the range
+		for _, blobInfo := range listBlob.Segment.BlobItems {
+			for _, month := range monthStrs {
+				if strings.Contains(blobInfo.Name, month) {
+					// If Container Path configuration exists, check if it is in the blobs name
+					if asbp.Path != "" && !strings.Contains(blobInfo.Name, asbp.Path) {
+						continue
+					}
+
+					if prevBlob, ok := mostResentBlobs[month]; ok {
+						if prevBlob.Properties.CreationTime.After(*blobInfo.Properties.CreationTime) {
+							continue
+						}
+					}
+					mostResentBlobs[month] = blobInfo
+				}
+			}
+		}
+	}
+
+	// convert blob names into blob urls and move from map into ordered list of blob names
+	var blobNames []string
+	for _, month := range monthStrs {
+		if blob, ok := mostResentBlobs[month]; ok {
+			blobNames = append(blobNames, blob.Name)
+		}
+	}
+
+	return blobNames, nil
+}
+
+func (asbp *AzureStorageBillingParser) getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	var monthStrs []string
+	monthStr := asbp.timeToMonthString(start)
+	endStr := asbp.timeToMonthString(end)
+	monthStrs = append(monthStrs, monthStr)
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	for monthStr != endStr {
+		currMonth = currMonth.AddDate(0, 1, 0)
+		monthStr = asbp.timeToMonthString(currMonth)
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+func (asbp *AzureStorageBillingParser) timeToMonthString(input time.Time) string {
+	format := "20060102"
+	startOfMonth := input.AddDate(0, 0, -input.Day()+1)
+	endOfMonth := input.AddDate(0, 1, -input.Day())
+	return startOfMonth.Format(format) + "-" + endOfMonth.Format(format)
+}

+ 204 - 0
pkg/cloud/azure/storagebillingparser_test.go

@@ -0,0 +1,204 @@
+package azure
+
+import (
+	"testing"
+	"time"
+)
+
+func TestAzureStorageBillingParser_getMonthStrings(t *testing.T) {
+	asbp := AzureStorageBillingParser{}
+	loc, _ := time.LoadLocation("UTC")
+	testCases := map[string]struct {
+		start    time.Time
+		end      time.Time
+		expected []string
+	}{
+		"Single Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 2, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+			},
+		},
+		"Two Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 3, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+				"20210301-20210331",
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			months, err := asbp.getMonthStrings(tc.start, tc.end)
+			if err != nil {
+				t.Errorf("Could not retrieve month strings %v", err)
+			}
+
+			if len(months) != len(tc.expected) {
+				t.Errorf("Did not create the expected number of month strings. Expected: %d, Actual: %d", len(tc.expected), len(months))
+			}
+
+			for i, monthStr := range months {
+				if monthStr != tc.expected[i] {
+					t.Errorf("Incorrect month string at index %d. Expected: %s, Actual: %s", i, tc.expected[i], monthStr)
+				}
+			}
+		})
+	}
+}
+
+func TestAzureStorageBillingParser_parseCSV(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected []BillingRowValues
+	}{
+		"Virtual Machine": {
+			input: "VirtualMachine.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456billing",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+		"Missing Brackets": {
+			input: "MissingBrackets.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456abcdef",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+	}
+	asbp := &AzureStorageBillingParser{}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := &TestCSVRetriever{
+				CSVName: valueCasesPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+
+			var actual []*BillingRowValues
+			resultFn := func(abv *BillingRowValues) error {
+				actual = append(actual, abv)
+				return nil
+			}
+
+			err = asbp.parseCSV(start, end, reader, resultFn)
+			if err != nil {
+				t.Errorf("Error generating BillingRowValues: %s", err.Error())
+			}
+
+			if len(actual) != len(tc.expected) {
+				t.Errorf("Actual output length did not match expected. Expected: %d, Actual: %d", len(tc.expected), len(actual))
+			}
+
+			for i, this := range actual {
+				that := tc.expected[i]
+
+				if !this.Date.Equal(that.Date) {
+					t.Errorf("Parsed data at index %d has incorrect Date value. Expected: %s, Actual: %s", i, this.Date.String(), that.Date.String())
+				}
+
+				if this.MeterCategory != that.MeterCategory {
+					t.Errorf("Parsed data at index %d has incorrect MeterCategroy value. Expected: %s, Actual: %s", i, this.MeterCategory, that.MeterCategory)
+				}
+
+				if this.SubscriptionID != that.SubscriptionID {
+					t.Errorf("Parsed data at index %d has incorrect SubscriptionID value. Expected: %s, Actual: %s", i, this.SubscriptionID, that.SubscriptionID)
+				}
+
+				if this.InvoiceEntityID != that.InvoiceEntityID {
+					t.Errorf("Parsed data at index %d has incorrect InvoiceEntityID value. Expected: %s, Actual: %s", i, this.InvoiceEntityID, that.InvoiceEntityID)
+				}
+
+				if this.InstanceID != that.InstanceID {
+					t.Errorf("Parsed data at index %d has incorrect InstanceID value. Expected: %s, Actual: %s", i, this.InstanceID, that.InstanceID)
+				}
+
+				if this.Service != that.Service {
+					t.Errorf("Parsed data at index %d has incorrect Service value. Expected: %s, Actual: %s", i, this.Service, that.Service)
+				}
+
+				if this.Cost != that.Cost {
+					t.Errorf("Parsed data at index %d has incorrect Cost value. Expected: %f, Actual: %f", i, this.Cost, that.Cost)
+				}
+
+				if this.NetCost != that.NetCost {
+					t.Errorf("Parsed data at index %d has incorrect NetCost value. Expected: %f, Actual: %f", i, this.NetCost, that.NetCost)
+				}
+
+				if len(this.Tags) != len(that.Tags) {
+					t.Errorf("Parsed data at index %d did not have the expected number of tags. Expected: %d, Actual: %d", i, len(that.Tags), len(this.Tags))
+				}
+
+				for key, thisTag := range this.Tags {
+					thatTag, ok := that.Tags[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d is has unexpected entry in Tags with key: %s", i, key)
+					}
+
+					if thisTag != thatTag {
+						t.Errorf("Parsed data at index %d is has unexpected value in Tags for key: %s. Expected: %s, Actual: %s", i, key, thatTag, thisTag)
+					}
+				}
+
+				for key, thisAI := range this.AdditionalInfo {
+					thatAI, ok := that.AdditionalInfo[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d is has unexpected entry in Additional Inforamation with key: %s", i, key)
+					}
+
+					if thisAI != thatAI {
+						t.Errorf("Parsed data at index %d is has unexpected value in Tags for key: %s. Expected: %v, Actual: %v", i, key, thisAI, thatAI)
+					}
+				}
+			}
+
+		})
+
+	}
+}

+ 179 - 0
pkg/cloud/azure/storageconfiguration.go

@@ -0,0 +1,179 @@
+package azure
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+type StorageConfiguration struct {
+	SubscriptionID string     `json:"subscriptionID"`
+	Account        string     `json:"account"`
+	Container      string     `json:"container"`
+	Path           string     `json:"path"`
+	Cloud          string     `json:"cloud"`
+	Authorizer     Authorizer `json:"authorizer"`
+}
+
+// Validate ensures that all required fields are set, returning an error if any are missing.
+func (sc *StorageConfiguration) Validate() error {
+
+	if sc.Authorizer == nil {
+		return fmt.Errorf("StorageConfiguration: missing authorizer")
+	}
+
+	err := sc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if sc.SubscriptionID == "" {
+		return fmt.Errorf("StorageConfiguration: missing Subcription ID")
+	}
+
+	if sc.Account == "" {
+		return fmt.Errorf("StorageConfiguration: missing Account")
+	}
+
+	if sc.Container == "" {
+		return fmt.Errorf("StorageConfiguration: missing Container")
+	}
+
+	return nil
+}
+
+func (sc *StorageConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*StorageConfiguration)
+	if !ok {
+		return false
+	}
+
+	if sc.Authorizer != nil {
+		if !sc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if sc.SubscriptionID != thatConfig.SubscriptionID {
+		return false
+	}
+
+	if sc.Account != thatConfig.Account {
+		return false
+	}
+
+	if sc.Container != thatConfig.Container {
+		return false
+	}
+
+	if sc.Path != thatConfig.Path {
+		return false
+	}
+
+	if sc.Cloud != thatConfig.Cloud {
+		return false
+	}
+
+	return true
+}
+
+func (sc *StorageConfiguration) Sanitize() config.Config {
+	return &StorageConfiguration{
+		SubscriptionID: sc.SubscriptionID,
+		Account:        sc.Account,
+		Container:      sc.Container,
+		Path:           sc.Path,
+		Cloud:          sc.Cloud,
+		Authorizer:     sc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (sc *StorageConfiguration) Key() string {
+	key := fmt.Sprintf("%s/%s", sc.SubscriptionID, sc.Container)
+	// append container path to key if it exists
+	if sc.Path != "" {
+		key = fmt.Sprintf("%s/%s", key, sc.Path)
+	}
+	return key
+}
+
+func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.SubscriptionID = subscriptionID
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Account = account
+
+	container, err := config.GetInterfaceValue[string](fmap, "container")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Container = container
+
+	path, err := config.GetInterfaceValue[string](fmap, "path")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Path = path
+
+	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Cloud = cloud
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+	if asc.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	authorizer = &AccessKey{
+		AccessKey: asc.AccessKey,
+		Account:   asc.AccountName,
+	}
+
+	return &StorageConfiguration{
+		SubscriptionID: asc.SubscriptionId,
+		Account:        asc.AccountName,
+		Container:      asc.ContainerName,
+		Path:           asc.ContainerPath,
+		Cloud:          asc.AzureCloud,
+		Authorizer:     authorizer,
+	}
+}

+ 446 - 0
pkg/cloud/azure/storageconfiguration_test.go

@@ -0,0 +1,446 @@
+package azure
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestStorageConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   StorageConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					Account: "account",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing access key"),
+		},
+		"missing authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing authorizer"),
+		},
+		"missing subscriptionID": {
+			config: StorageConfiguration{
+				SubscriptionID: "",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Subcription ID"),
+		},
+		"missing account": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Account"),
+		},
+		"missing container": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Container"),
+		},
+		"missing path": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+		"missing cloud": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s'", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestStorageConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     StorageConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: true,
+		},
+
+		"missing both authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: true,
+		},
+		"missing left authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"missing right authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: false,
+		},
+		"different subscriptionID": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID2",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account2",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different container": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container2",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different path": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path2",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different cloud": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud2",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &AccessKey{
+				AccessKey: "accessKey",
+				Account:   "account",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t'", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestStorageConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config StorageConfiguration
+	}{
+		"Empty Config": {
+			config: StorageConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+		},
+		"AccessKey Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &StorageConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 77 - 0
pkg/cloud/azure/storageconnection.go

@@ -0,0 +1,77 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// StorageConnection provides access to Azure Storage
+type StorageConnection struct {
+	StorageConfiguration
+}
+
+func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*StorageConnection)
+	if !ok {
+		return false
+	}
+
+	return sc.StorageConfiguration.Equals(&thatConfig.StorageConfiguration)
+}
+
+func (sc *StorageConnection) getContainer() (*azblob.ContainerURL, error) {
+
+	credential, err := sc.Authorizer.GetBlobCredentials()
+	if err != nil {
+		return nil, err
+	}
+
+	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+
+	// From the Azure portal, get your storage account blob service URL endpoint.
+	URL, _ := url.Parse(
+		fmt.Sprintf(sc.getBlobURLTemplate(), sc.Account, sc.Container))
+
+	// Create a ContainerURL object that wraps the container URL and a request
+	// pipeline to make requests.
+	containerURL := azblob.NewContainerURL(*URL, p)
+	return &containerURL, nil
+}
+
+// getBlobURLTemplate returns the correct BlobUrl for whichever Cloud storage account is specified by the AzureCloud configuration
+// defaults to the Public Cloud template
+func (sc *StorageConnection) getBlobURLTemplate() string {
+	// Use gov cloud blob url if gov is detected in AzureCloud
+	if strings.Contains(strings.ToLower(sc.Cloud), "gov") {
+		return "https://%s.blob.core.usgovcloudapi.net/%s"
+	}
+	// default to Public Cloud template
+	return "https://%s.blob.core.windows.net/%s"
+}
+
+func (sc *StorageConnection) DownloadBlob(blobName string, containerURL *azblob.ContainerURL, ctx context.Context) ([]byte, error) {
+	log.Infof("Azure Storage: retrieving blob: %v", blobName)
+
+	blobURL := containerURL.NewBlobURL(blobName)
+	downloadResponse, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
+	if err != nil {
+		return nil, err
+	}
+	// NOTE: automatically retries are performed if the connection fails
+	bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
+
+	// read the body into a buffer
+	downloadedData := bytes.Buffer{}
+	_, err = downloadedData.ReadFrom(bodyStream)
+	if err != nil {
+		return nil, err
+	}
+	return downloadedData.Bytes(), nil
+}

+ 12 - 0
pkg/cloud/cloudcostintegration.go

@@ -0,0 +1,12 @@
+package cloud
+
+import (
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+// CloudCostIntegration is an interface for retrieving daily granularity CloudCost data for a given range
+type CloudCostIntegration interface {
+	GetCloudCost(time.Time, time.Time) (*kubecost.CloudCostSetRange, error)
+}

+ 53 - 0
pkg/cloud/config/authorizer.go

@@ -0,0 +1,53 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// AuthorizerTypeProperty is the property where the id of an Authorizer should be placed in its custom MarshalJSON function
+const AuthorizerTypeProperty = "authorizerType"
+
+type Authorizer interface {
+	Config
+	json.Marshaler
+}
+
+// AuthorizerSelectorFn implementations of this function should be a simple switch
+// and acts as a register for the Authorizer types, returned Authorizer should be empty
+// except for its default type property and will have other values marshalled into it
+type AuthorizerSelectorFn[T Authorizer] func(string) (T, error)
+
+// AuthorizerFromInterface this generic function provides Authorizer unmarshalling for all providers
+func AuthorizerFromInterface[T Authorizer](f any, authSelectFn AuthorizerSelectorFn[T]) (T, error) {
+	var emptyAuth T
+	if f == nil {
+		return emptyAuth, nil
+	}
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not cast interface as map")
+	}
+
+	authType, err := GetInterfaceValue[string](fmap, AuthorizerTypeProperty)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not retrieve type property: %w", err)
+	}
+	authorizer, err := authSelectFn(authType)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: %w", err)
+	}
+
+	// convert the interface back to a []Byte so that it can be unmarshalled into the correct type
+	fBin, err := json.Marshal(f)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not marshal value %v: %w", f, err)
+	}
+
+	err = json.Unmarshal(fBin, authorizer)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: failed to unmarshal into Authorizer type %T from value %v: %w", authorizer, f, err)
+	}
+	return authorizer, nil
+}

+ 37 - 0
pkg/cloud/config/config.go

@@ -0,0 +1,37 @@
+package config
+
+import (
+	"fmt"
+)
+
+const Redacted = "REDACTED"
+
+// Config allows for nested configurations which encapsulate their functionality to be validated and compared easily
+type Config interface {
+	Validate() error
+	Sanitize() Config
+	Equals(Config) bool
+}
+
+// KeyedConfig is a top level Config which uses its public values as a unique identifier allowing duplicates to be identified
+type KeyedConfig interface {
+	Config
+	Key() string
+}
+
+type KeyedConfigWatcher interface {
+	GetConfigs() []KeyedConfig
+}
+
+func GetInterfaceValue[T any](fmap map[string]interface{}, key string) (T, error) {
+	var value T
+	interfaceValue, ok := fmap[key]
+	if !ok {
+		return value, fmt.Errorf("FromInterface: missing '%s' property", key)
+	}
+	typedValue, ok := interfaceValue.(T)
+	if !ok {
+		return value, fmt.Errorf("GetInterfaceValue: property '%s' had expected type '%T' but did not match", key, value)
+	}
+	return typedValue, nil
+}

+ 47 - 0
pkg/cloud/connectionstatus.go

@@ -0,0 +1,47 @@
+package cloud
+
+// ConnectionStatus communicates the status of a cloud connection in a way that is general enough to apply to each
+// Cloud Provider, but still give actionable information on how to troubleshoot one of the four failing statuses.
+type ConnectionStatus string
+
+const (
+	// InitialStatus is the zero value of CloudConnectionStatus and means that cloud connection is untested. Once
+	// CloudConnection Status has been changed it should not return to this value. This status is assigned on creation
+	// to the cloud provider
+	InitialStatus ConnectionStatus = "No Connection"
+
+	// InvalidConfiguration means that Cloud Configuration is missing required values to connect to cloud provider.
+	// This status is assigned during failures in the provider implementation of getCloudConfig()
+	InvalidConfiguration = "Invalid Configuration"
+
+	// FailedConnection means that all required Cloud Configuration values are filled in, but a connection with the
+	// Cloud Provider cannot be established. This is indicative of a typo in one of the Cloud Configuration values or an
+	// issue in how the connection was set up in the Cloud Provider's Console. The assignment of this status varies
+	// between implementations, but should happen if an error is thrown when an interaction with an object from
+	// the Cloud Service Provider's sdk occurs.
+	FailedConnection = "Failed Connection"
+
+	// ParseError indicates an issue with our functions which parse responses
+	ParseError = "Parse Error"
+
+	// MissingData means that the Cloud Integration is properly configured, but the cloud provider is not returning
+	// billing/cost and usage data. This status is indicative of the billing/cost and usage data export of the Cloud Provider
+	// being incorrectly set up or the export being set up in the last 48 hours and not having started populating data yet.
+	// This status is set when a query has been successfully made but the results come back empty. If the cloud provider,
+	// already has a SUCCESSFUL_CONNECTION status then this status should not be set, because this indicates that the specific
+	// query made may have been empty.
+	MissingData = "Data Missing"
+
+	// SuccessfulConnection means that the Cloud Integration is properly configured and returning data. This status is
+	// set on any successful query where data is returned
+	SuccessfulConnection = "Connection Successful"
+)
+
+func (cs ConnectionStatus) String() string {
+	return string(cs)
+}
+
+// EmptyChecker provides an interface to check whether a result is empty, which can be useful for setting a MissingData status
+type EmptyChecker interface {
+	IsEmpty() bool
+}

+ 132 - 0
pkg/cloud/gcp/authorizer.go

@@ -0,0 +1,132 @@
+package gcp
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"google.golang.org/api/option"
+)
+
+const ServiceAccountKeyAuthorizerType = "GCPServiceAccountKey"
+const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"
+
+// Authorizer provides a []option.ClientOption which is used when creating clients in the GCP SDK
+type Authorizer interface {
+	config.Authorizer
+	CreateGCPClientOptions() ([]option.ClientOption, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case ServiceAccountKeyAuthorizerType:
+		return &ServiceAccountKey{}, nil
+	case WorkloadIdentityAuthorizerType:
+		return &WorkloadIdentity{}, nil
+	default:
+		return nil, fmt.Errorf("GCP: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
+type ServiceAccountKey struct {
+	Key map[string]string `json:"key"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 2)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap["key"] = gkc.Key
+	return json.Marshal(fmap)
+}
+
+func (gkc *ServiceAccountKey) Validate() error {
+	if gkc.Key == nil || len(gkc.Key) == 0 {
+		return fmt.Errorf("ServiceAccountKey: missing Key")
+	}
+
+	return nil
+}
+
+func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*ServiceAccountKey)
+	if !ok {
+		return false
+	}
+
+	if len(gkc.Key) != len(thatConfig.Key) {
+		return false
+	}
+
+	for k, v := range gkc.Key {
+		if thatConfig.Key[k] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (gkc *ServiceAccountKey) Sanitize() config.Config {
+	redactedMap := make(map[string]string, len(gkc.Key))
+	for key, _ := range gkc.Key {
+		redactedMap[key] = config.Redacted
+	}
+	return &ServiceAccountKey{
+		Key: redactedMap,
+	}
+}
+
+func (gkc *ServiceAccountKey) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	err := gkc.Validate()
+	if err != nil {
+		return nil, err
+	}
+
+	b, err := json.Marshal(gkc.Key)
+	if err != nil {
+		return nil, fmt.Errorf("Key: failed to marshal Key: %s", err.Error())
+	}
+	clientOption := option.WithCredentialsJSON(b)
+
+	// The creation of the BigQuery Client is where FAILED_CONNECTION CloudConnectionStatus is recorded for GCP
+	return []option.ClientOption{clientOption}, nil
+}
+
+// WorkloadIdentity passes an empty slice of client options which causes the GCP SDK to check for the workload identity in the environment
+type WorkloadIdentity struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
+	return json.Marshal(fmap)
+}
+
+func (wi *WorkloadIdentity) Validate() error {
+	return nil
+}
+
+func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*WorkloadIdentity)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+func (wi *WorkloadIdentity) Sanitize() config.Config {
+	return &WorkloadIdentity{}
+}
+
+func (wi *WorkloadIdentity) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	return []option.ClientOption{}, nil
+}

+ 172 - 0
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -0,0 +1,172 @@
+package gcp
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+type BigQueryConfiguration struct {
+	ProjectID  string     `json:"projectID"`
+	Dataset    string     `json:"dataset"`
+	Table      string     `json:"table"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (bqc *BigQueryConfiguration) Validate() error {
+
+	if bqc.Authorizer == nil {
+		return fmt.Errorf("BigQueryConfig: missing configurer")
+	}
+
+	err := bqc.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: %s", err.Error())
+	}
+
+	if bqc.ProjectID == "" {
+		return fmt.Errorf("BigQueryConfig: missing ProjectID")
+	}
+
+	if bqc.Dataset == "" {
+		return fmt.Errorf("BigQueryConfig: missing Dataset")
+	}
+
+	if bqc.Table == "" {
+		return fmt.Errorf("BigQueryConfig: missing Table")
+	}
+
+	return nil
+}
+
+func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BigQueryConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bqc.Authorizer != nil {
+		if !bqc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bqc.ProjectID != thatConfig.ProjectID {
+		return false
+	}
+
+	if bqc.Dataset != thatConfig.Dataset {
+		return false
+	}
+
+	if bqc.Table != thatConfig.Table {
+		return false
+	}
+
+	return true
+}
+
+func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+	return &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    bqc.Dataset,
+		Table:      bqc.Table,
+		Authorizer: bqc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+// Key uses the Usage Project Id as the Provider Key for GCP
+func (bqc *BigQueryConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
+}
+
+func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
+	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
+}
+
+func (bqc *BigQueryConfiguration) GetBigQueryClient(ctx context.Context) (*bigquery.Client, error) {
+	clientOpts, err := bqc.Authorizer.CreateGCPClientOptions()
+	if err != nil {
+		return nil, err
+	}
+	return bigquery.NewClient(ctx, bqc.ProjectID, clientOpts...)
+}
+
+// UnmarshalJSON assumes data is saved as a BigQueryConfigurationDTO
+func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.ProjectID = projectID
+
+	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Dataset = dataset
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Table = table
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Authorizer = authorizer
+	return nil
+}
+
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+	if bqc.IsEmpty() {
+		return nil
+	}
+
+	BillingDataDataset := strings.Split(bqc.BillingDataDataset, ".")
+	dataset := BillingDataDataset[0]
+	var table string
+	if len(BillingDataDataset) > 1 {
+		table = BillingDataDataset[1]
+	}
+
+	bigQueryConfiguration := &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    dataset,
+		Table:      table,
+		Authorizer: &WorkloadIdentity{}, // Default to WorkloadIdentity
+	}
+
+	if len(bqc.Key) != 0 {
+		bigQueryConfiguration.Authorizer = &ServiceAccountKey{
+			Key: bqc.Key,
+		}
+	}
+
+	return bigQueryConfiguration
+}

+ 388 - 0
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -0,0 +1,388 @@
+package gcp
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBigQueryConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BigQueryConfiguration
+		expected error
+	}{
+		"valid config GCP Key": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: nil,
+		},
+		"valid config WorkloadIdentity": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: nil,
+		},
+		"access Key invalid": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: nil,
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: ServiceAccountKey: missing Key"),
+		},
+		"missing configurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing configurer"),
+		},
+		"missing projectID": {
+			config: BigQueryConfiguration{
+				ProjectID: "",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing ProjectID"),
+		},
+		"missing dataset": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Dataset"),
+		},
+		"missing table": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Table"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BigQueryConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: true,
+		},
+		"different configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing both configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing right configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different projectID": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID2",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different dataset": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset2",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table2",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &ServiceAccountKey{
+
+				Key: map[string]string{
+					"Key":  "Key",
+					"key1": "key2",
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BigQueryConfiguration
+	}{
+		"Empty Config": {
+			config: BigQueryConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+		},
+		"ServiceAccountKeyConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+		},
+		"WorkLoadIdentityConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BigQueryConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 110 - 0
pkg/cloud/gcp/bigqueryquerier.go

@@ -0,0 +1,110 @@
+package gcp
+
+import (
+	"context"
+	"regexp"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
// BigQueryQuerier executes queries against BigQuery using the connection
// details held in its embedded BigQueryConfiguration.
type BigQueryQuerier struct {
	BigQueryConfiguration
}

// Equals returns true only when the given config is a *BigQueryQuerier whose
// embedded BigQueryConfiguration matches this one; any other concrete type
// compares unequal.
func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
	thatConfig, ok := config.(*BigQueryQuerier)
	if !ok {
		return false
	}

	return bqq.BigQueryConfiguration.Equals(&thatConfig.BigQueryConfiguration)
}
+
// QueryBigQuery runs queryStr against BigQuery using a client built from the
// embedded configuration and returns the resulting row iterator.
// NOTE(review): the client created here is never closed; the returned
// iterator pages through it lazily, so closing it here would break reads —
// confirm the client's lifecycle is handled acceptably by callers.
func (bqq *BigQueryQuerier) QueryBigQuery(ctx context.Context, queryStr string) (*bigquery.RowIterator, error) {
	client, err := bqq.GetBigQueryClient(ctx)
	if err != nil {
		return nil, err
	}

	query := client.Query(queryStr)
	return query.Read(ctx)
}
+
+func GCPSelectCategory(service, description string) string {
+	s := strings.ToLower(service)
+	d := strings.ToLower(description)
+
+	// Network descriptions
+	if strings.Contains(d, "download") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "network") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "ingress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "egress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "static ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "external ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "load balanced") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "licensing fee") {
+		return kubecost.OtherCategory
+	}
+
+	// Storage Descriptions
+	if strings.Contains(d, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd capacity") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd iops") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd snapshot") {
+		return kubecost.StorageCategory
+	}
+
+	// Service Defaults
+	if strings.Contains(s, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "compute") {
+		return kubecost.ComputeCategory
+	}
+	if strings.Contains(s, "sql") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "bigquery") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "kubernetes") {
+		return kubecost.ManagementCategory
+	} else if strings.Contains(s, "pub/sub") {
+		return kubecost.NetworkCategory
+	}
+
+	return kubecost.OtherCategory
+}
+
// parseProviderIDRx captures the final path component of a provider ID, e.g.
// "gke-cluster-3-default-pool-xxxx-yy" from
// "projects/###/instances/gke-cluster-3-default-pool-xxxx-yy".
// Raw string literal avoids double-escaping the pattern.
var parseProviderIDRx = regexp.MustCompile(`^.+/(.+)?`)

// GCPParseProviderID returns the last path segment of the given provider ID,
// or the input unchanged when it contains no slash-separated path.
func GCPParseProviderID(id string) string {
	match := parseProviderIDRx.FindStringSubmatch(id)
	if len(match) == 0 {
		return id
	}
	return match[len(match)-1]
}

+ 1 - 0
pkg/cloud/gcp/gcpprovider.go → pkg/cloud/gcp/provider.go

@@ -176,6 +176,7 @@ func (gcp *GCP) GetConfig() (*models.CustomPricing, error) {
 }
 
 // BigQueryConfig contain the required config and credentials to access OOC resources for GCP
+// Deprecated: v1.104 Use BigQueryConfiguration instead
 type BigQueryConfig struct {
 	ProjectID          string            `json:"projectID"`
 	BillingDataDataset string            `json:"billingDataDataset"`

+ 0 - 0
pkg/cloud/gcp/gcpprovider_test.go → pkg/cloud/gcp/provider_test.go


+ 1 - 1
pkg/cloud/csvprovider.go → pkg/cloud/provider/csvprovider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"encoding/csv"

+ 6 - 6
pkg/cloud/customprovider.go → pkg/cloud/provider/customprovider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -33,10 +33,10 @@ type CustomProvider struct {
 	SpotLabelValue          string
 	GPULabel                string
 	GPULabelValue           string
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 }
 
 var volTypes = map[string]string{
@@ -147,8 +147,8 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 		m["name"] = conf.ClusterName
 	}
 	m["provider"] = kubecost.CustomProvider
-	m["region"] = cp.clusterRegion
-	m["account"] = cp.clusterAccountID
+	m["region"] = cp.ClusterRegion
+	m["account"] = cp.ClusterAccountID
 	m["id"] = env.GetClusterID()
 	return m, nil
 }

+ 14 - 12
pkg/cloud/provider.go → pkg/cloud/provider/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -8,10 +8,12 @@ import (
 	"strings"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/scaleway"
 	"github.com/opencost/opencost/pkg/kubecost"
 
 	"github.com/opencost/opencost/pkg/util"
@@ -167,8 +169,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			CSVLocation: env.GetCSVPath(),
 			CustomProvider: &CustomProvider{
 				Clientset:        cache,
-				clusterRegion:    cp.region,
-				clusterAccountID: cp.accountID,
+				ClusterRegion:    cp.region,
+				ClusterAccountID: cp.accountID,
 				Config:           NewProviderConfig(config, cp.configFileName),
 			},
 		}, nil
@@ -215,19 +217,19 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		}, nil
 	case kubecost.AlibabaProvider:
 		log.Info("Found ProviderID starting with \"alibaba\", using Alibaba Cloud Provider")
-		return &Alibaba{
+		return &alibaba.Alibaba{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
-			clusterRegion:        cp.region,
-			clusterAccountId:     cp.accountID,
-			serviceAccountChecks: models.NewServiceAccountChecks(),
+			ClusterRegion:        cp.region,
+			ClusterAccountId:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
 		}, nil
 	case kubecost.ScalewayProvider:
 		log.Info("Found ProviderID starting with \"scaleway\", using Scaleway Provider")
-		return &Scaleway{
+		return &scaleway.Scaleway{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 
@@ -235,8 +237,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		log.Info("Unsupported provider, falling back to default")
 		return &CustomProvider{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 	}

+ 2 - 3
pkg/cloud/providerconfig.go → pkg/cloud/provider/providerconfig.go

@@ -1,8 +1,7 @@
-package cloud
+package provider
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	gopath "path"
 	"strconv"
@@ -277,7 +276,7 @@ func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
 	if _, err := os.Stat(providerConfigFile); err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to find file %s with err: %v", providerConfigFile, err)
 	}
-	configFile, err := ioutil.ReadFile(providerConfigFile)
+	configFile, err := os.ReadFile(providerConfigFile)
 	if err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to open file %s with err: %v", providerConfigFile, err)
 	}

+ 6 - 6
pkg/cloud/scalewayprovider.go → pkg/cloud/scaleway/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package scaleway
 
 import (
 	"errors"
@@ -36,10 +36,10 @@ type ScalewayPricing struct {
 
 type Scaleway struct {
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 	Pricing                 map[string]*ScalewayPricing
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
 }
 
@@ -288,8 +288,8 @@ func (scw *Scaleway) ClusterInfo() (map[string]string, error) {
 		m["name"] = c.ClusterName
 	}
 	m["provider"] = kubecost.ScalewayProvider
-	m["region"] = scw.clusterRegion
-	m["account"] = scw.clusterAccountID
+	m["region"] = scw.ClusterRegion
+	m["account"] = scw.ClusterAccountID
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
 	m["id"] = env.GetClusterID()
 	return m, nil

+ 3 - 3
pkg/cmd/agent/agent.go

@@ -7,7 +7,7 @@ import (
 	"path"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -157,13 +157,13 @@ func Execute(opts *AgentOpts) error {
 	})
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(clusterCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(clusterCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	watchConfigFunc := configWatchers.ToWatchFunc()
 	watchedConfigs := configWatchers.GetWatchedConfigs()
 

+ 29 - 6
pkg/costmodel/aggregation.go

@@ -11,10 +11,10 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/patrickmn/go-cache"
 	prometheusClient "github.com/prometheus/client_golang/api"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/errors"
@@ -761,7 +761,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 	if err != nil {
 		log.Errorf("failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cp) && err == nil {
+	if provider.CustomPricesEnabled(cp) && err == nil {
 		var cpuCostStr string
 		var ramCostStr string
 		var gpuCostStr string
@@ -839,7 +839,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 			cost, _ := strconv.ParseFloat(pvcData.Volume.Cost, 64)
 
 			// override with custom pricing if enabled
-			if cloud.CustomPricesEnabled(cp) {
+			if provider.CustomPricesEnabled(cp) {
 				cost = pvCost
 			}
 
@@ -1768,10 +1768,10 @@ func (a *Accesses) warmAggregateCostModelCache() {
 		aggOpts.NoExpireCache = false
 		aggOpts.ShareSplit = SplitTypeWeighted
 		aggOpts.RemoteEnabled = env.IsRemoteEnabled()
-		aggOpts.AllocateIdle = cloud.AllocateIdleByDefault(a.CloudProvider)
+		aggOpts.AllocateIdle = provider.AllocateIdleByDefault(a.CloudProvider)
 
-		sharedNamespaces := cloud.SharedNamespaces(a.CloudProvider)
-		sharedLabelNames, sharedLabelValues := cloud.SharedLabels(a.CloudProvider)
+		sharedNamespaces := provider.SharedNamespaces(a.CloudProvider)
+		sharedLabelNames, sharedLabelValues := provider.SharedLabels(a.CloudProvider)
 
 		if len(sharedNamespaces) > 0 || len(sharedLabelNames) > 0 {
 			aggOpts.SharedResources = NewSharedResourceInfo(true, sharedNamespaces, sharedLabelNames, sharedLabelValues)
@@ -2242,6 +2242,19 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 
 	// IncludeIdle, if true, uses Asset data to incorporate Idle Allocation
 	includeIdle := qp.GetBool("includeIdle", false)
+	// Accumulate is an optional parameter, defaulting to false, which if true
+	// sums each Set in the Range, producing one Set.
+	accumulate := qp.GetBool("accumulate", false)
+
+	// Accumulate is an optional parameter that accumulates an AllocationSetRange
+	// by the resolution of the given time duration.
+	// Defaults to 0. If a value is not passed then the parameter is not used.
+	accumulateBy := kubecost.AccumulateOption(qp.Get("accumulateBy", ""))
+
+	// if accumulateBy is not explicitly set, and accumulate is true, ensure result is accumulated
+	if accumulateBy == kubecost.AccumulateOptionNone && accumulate {
+		accumulateBy = kubecost.AccumulateOptionAll
+	}
 
 	// IdleByNode, if true, computes idle allocations at the node level.
 	// Otherwise it is computed at the cluster level. (Not relevant if idle
@@ -2265,6 +2278,16 @@ func (a *Accesses) ComputeAllocationHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
+	// Accumulate, if requested
+	if accumulateBy != kubecost.AccumulateOptionNone {
+		asr, err = asr.Accumulate(accumulateBy)
+		if err != nil {
+			log.Errorf("error accumulating by %v: %s", accumulateBy, err)
+			WriteError(w, InternalServerError(fmt.Errorf("error accumulating by %v: %s", accumulateBy, err).Error()))
+			return
+		}
+	}
+
 	w.Write(WrapData(asr, nil))
 }
 

+ 5 - 5
pkg/costmodel/allocation_helpers.go

@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
@@ -1432,7 +1432,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1470,7 +1470,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1508,7 +1508,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1654,7 +1654,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*nodePricing, nodeKey no
 	if err != nil {
 		log.Warnf("CostModel: failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
+	if provider.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
 		return cm.getCustomNodePricing(node.Preemptible, node.ProviderID)
 	}
 

+ 11 - 0
pkg/costmodel/assets.go

@@ -133,6 +133,17 @@ func (cm *CostModel) ComputeAssets(start, end time.Time) (*kubecost.AssetSet, er
 		node.GPUCost = n.GPUCost
 		node.GPUCount = n.GPUCount
 		node.RAMCost = n.RAMCost
+
+		if n.Overhead != nil {
+			node.Overhead = &kubecost.NodeOverhead{
+				RamOverheadFraction: n.Overhead.RamOverheadFraction,
+				CpuOverheadFraction: n.Overhead.CpuOverheadFraction,
+				OverheadCostFraction: ((n.Overhead.CpuOverheadFraction * n.CPUCost) +
+					(n.Overhead.RamOverheadFraction * n.RAMCost)) / node.TotalCost(),
+			}
+		} else {
+			node.Overhead = &kubecost.NodeOverhead{}
+		}
 		node.Discount = n.Discount
 		if n.Preemptible {
 			node.Preemptible = 1.0

+ 31 - 15
pkg/costmodel/cluster.go

@@ -5,10 +5,10 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	prometheus "github.com/prometheus/client_golang/api"
 	"golang.org/x/exp/slices"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
@@ -472,6 +472,10 @@ func ClusterDisks(client prometheus.Client, provider models.Provider, start, end
 	return diskMap, nil
 }
 
+type NodeOverhead struct {
+	CpuOverheadFraction float64
+	RamOverheadFraction float64
+}
 type Node struct {
 	Cluster         string
 	Name            string
@@ -494,6 +498,7 @@ type Node struct {
 	CostPerCPUHr    float64
 	CostPerRAMGiBHr float64
 	CostPerGPUHr    float64
+	Overhead        *NodeOverhead
 }
 
 // GKE lies about the number of cores e2 nodes have. This table
@@ -567,9 +572,11 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	optionalCtx := prom.NewNamedContext(client, prom.ClusterOptionalContextName)
 
 	queryNodeCPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_cpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
-	queryNodeCPUCores := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeCPUCoresCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeCPUCoresAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_cpu_cores[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
 	queryNodeRAMHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_ram_hourly_cost[%s])) by (%s, node, instance_type, provider_id) / 1024 / 1024 / 1024`, durStr, env.GetPromClusterLabel())
-	queryNodeRAMBytes := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeRAMBytesCapacity := fmt.Sprintf(`avg(avg_over_time(kube_node_status_capacity_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
+	queryNodeRAMBytesAllocatable := fmt.Sprintf(`avg(avg_over_time(kube_node_status_allocatable_memory_bytes[%s])) by (%s, node)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUCount := fmt.Sprintf(`avg(avg_over_time(node_gpu_count[%s])) by (%s, node, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeGPUHourlyCost := fmt.Sprintf(`avg(avg_over_time(node_gpu_hourly_cost[%s])) by (%s, node, instance_type, provider_id)`, durStr, env.GetPromClusterLabel())
 	queryNodeCPUModeTotal := fmt.Sprintf(`sum(rate(node_cpu_seconds_total[%s:%dm])) by (kubernetes_node, %s, mode)`, durStr, minsPerResolution, env.GetPromClusterLabel())
@@ -581,9 +588,11 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 
 	// Return errors if these fail
 	resChNodeCPUHourlyCost := requiredCtx.QueryAtTime(queryNodeCPUHourlyCost, t)
-	resChNodeCPUCores := requiredCtx.QueryAtTime(queryNodeCPUCores, t)
+	resChNodeCPUCoresCapacity := requiredCtx.QueryAtTime(queryNodeCPUCoresCapacity, t)
+	resChNodeCPUCoresAllocatable := requiredCtx.QueryAtTime(queryNodeCPUCoresAllocatable, t)
 	resChNodeRAMHourlyCost := requiredCtx.QueryAtTime(queryNodeRAMHourlyCost, t)
-	resChNodeRAMBytes := requiredCtx.QueryAtTime(queryNodeRAMBytes, t)
+	resChNodeRAMBytesCapacity := requiredCtx.QueryAtTime(queryNodeRAMBytesCapacity, t)
+	resChNodeRAMBytesAllocatable := requiredCtx.QueryAtTime(queryNodeRAMBytesAllocatable, t)
 	resChNodeGPUCount := requiredCtx.QueryAtTime(queryNodeGPUCount, t)
 	resChNodeGPUHourlyCost := requiredCtx.QueryAtTime(queryNodeGPUHourlyCost, t)
 	resChActiveMins := requiredCtx.QueryAtTime(queryActiveMins, t)
@@ -596,11 +605,13 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	resChLabels := optionalCtx.QueryAtTime(queryLabels, t)
 
 	resNodeCPUHourlyCost, _ := resChNodeCPUHourlyCost.Await()
-	resNodeCPUCores, _ := resChNodeCPUCores.Await()
+	resNodeCPUCoresCapacity, _ := resChNodeCPUCoresCapacity.Await()
+	resNodeCPUCoresAllocatable, _ := resChNodeCPUCoresAllocatable.Await()
 	resNodeGPUCount, _ := resChNodeGPUCount.Await()
 	resNodeGPUHourlyCost, _ := resChNodeGPUHourlyCost.Await()
 	resNodeRAMHourlyCost, _ := resChNodeRAMHourlyCost.Await()
-	resNodeRAMBytes, _ := resChNodeRAMBytes.Await()
+	resNodeRAMBytesCapacity, _ := resChNodeRAMBytesCapacity.Await()
+	resNodeRAMBytesAllocatable, _ := resChNodeRAMBytesAllocatable.Await()
 	resIsSpot, _ := resChIsSpot.Await()
 	resNodeCPUModeTotal, _ := resChNodeCPUModeTotal.Await()
 	resNodeRAMSystemPct, _ := resChNodeRAMSystemPct.Await()
@@ -633,8 +644,12 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 	clusterAndNameToTypeIntermediate := mergeTypeMaps(clusterAndNameToType1, clusterAndNameToType2)
 	clusterAndNameToType := mergeTypeMaps(clusterAndNameToTypeIntermediate, clusterAndNameToType3)
 
-	cpuCoresMap := buildCPUCoresMap(resNodeCPUCores)
-	ramBytesMap := buildRAMBytesMap(resNodeRAMBytes)
+	cpuCoresCapacityMap := buildCPUCoresMap(resNodeCPUCoresCapacity)
+	ramBytesCapacityMap := buildRAMBytesMap(resNodeRAMBytesCapacity)
+
+	cpuCoresAllocatableMap := buildCPUCoresMap(resNodeCPUCoresAllocatable)
+	ramBytesAllocatableMap := buildRAMBytesMap(resNodeRAMBytesAllocatable)
+	overheadMap := buildOverheadMap(ramBytesCapacityMap, ramBytesAllocatableMap, cpuCoresCapacityMap, cpuCoresAllocatableMap)
 
 	ramUserPctMap := buildRAMUserPctMap(resNodeRAMUserPct)
 	ramSystemPctMap := buildRAMSystemPctMap(resNodeRAMSystemPct)
@@ -643,13 +658,13 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 
 	labelsMap := buildLabelsMap(resLabels)
 
-	costTimesMinuteAndCount(activeDataMap, cpuCostMap, cpuCoresMap)
-	costTimesMinuteAndCount(activeDataMap, ramCostMap, ramBytesMap)
+	costTimesMinuteAndCount(activeDataMap, cpuCostMap, cpuCoresCapacityMap)
+	costTimesMinuteAndCount(activeDataMap, ramCostMap, ramBytesCapacityMap)
 	costTimesMinute(activeDataMap, gpuCostMap) // there's no need to do a weird "nodeIdentifierNoProviderID" type match since gpuCounts have a providerID
 
 	nodeMap := buildNodeMap(
 		cpuCostMap, ramCostMap, gpuCostMap, gpuCountMap,
-		cpuCoresMap, ramBytesMap, ramUserPctMap,
+		cpuCoresCapacityMap, ramBytesCapacityMap, ramUserPctMap,
 		ramSystemPctMap,
 		cpuBreakdownMap,
 		activeDataMap,
@@ -657,6 +672,7 @@ func ClusterNodes(cp models.Provider, client prometheus.Client, start, end time.
 		labelsMap,
 		clusterAndNameToType,
 		resolution,
+		overheadMap,
 	)
 
 	c, err := cp.GetConfig()
@@ -779,7 +795,7 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 				Cluster:    cluster,
 				Namespace:  namespace,
 				Name:       fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good
-				ProviderID: cloud.ParseLBID(providerID),
+				ProviderID: provider.ParseLBID(providerID),
 			}
 		}
 
@@ -1360,7 +1376,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Bytes = bytes
 	}
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterDisks: failed to load custom pricing: %s", err)
@@ -1405,7 +1421,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
 		providerID, _ := result.GetString("provider_id") // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {
-			diskMap[key].ProviderID = cloud.ParsePVID(providerID)
+			diskMap[key].ProviderID = provider.ParsePVID(providerID)
 		}
 	}
 

+ 58 - 10
pkg/costmodel/cluster_helpers.go

@@ -4,8 +4,8 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/log"
@@ -41,7 +41,7 @@ func buildCPUCostMap(
 	cpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -65,7 +65,7 @@ func buildCPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -115,7 +115,7 @@ func buildRAMCostMap(
 	ramCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -139,7 +139,7 @@ func buildRAMCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -190,7 +190,7 @@ func buildGPUCostMap(
 	gpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -214,7 +214,7 @@ func buildGPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -282,7 +282,7 @@ func buildGPUCountMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		gpuCountMap[key] = gpuCount
 	}
@@ -426,6 +426,43 @@ func buildCPUBreakdownMap(resNodeCPUModeTotal []*prom.QueryResult) map[nodeIdent
 	return cpuBreakdownMap
 }
 
+func buildOverheadMap(capRam, allocRam, capCPU, allocCPU map[nodeIdentifierNoProviderID]float64) map[nodeIdentifierNoProviderID]*NodeOverhead {
+	m := make(map[nodeIdentifierNoProviderID]*NodeOverhead, len(capRam))
+
+	for identifier, ramCapacity := range capRam {
+		allocatableRam, ok := allocRam[identifier]
+		if !ok {
+			log.Warnf("Could not find allocatable ram for node %s", identifier.Name)
+			continue
+		}
+		overheadBytes := ramCapacity - allocatableRam
+		m[identifier] = &NodeOverhead{
+			RamOverheadFraction: overheadBytes / ramCapacity,
+		}
+	}
+
+	for identifier, cpuCapacity := range capCPU {
+		allocatableCPU, ok := allocCPU[identifier]
+		if !ok {
+			log.Warnf("Could not find allocatable cpu for node %s", identifier.Name)
+			continue
+		}
+
+		overhead := cpuCapacity - allocatableCPU
+
+		if _, found := m[identifier]; found {
+			m[identifier].CpuOverheadFraction = overhead / cpuCapacity
+		} else {
+			m[identifier] = &NodeOverhead{
+				CpuOverheadFraction: overhead / cpuCapacity,
+			}
+		}
+
+	}
+
+	return m
+}
+
 func buildRAMUserPctMap(resNodeRAMUserPct []*prom.QueryResult) map[nodeIdentifierNoProviderID]float64 {
 
 	m := make(map[nodeIdentifierNoProviderID]float64)
@@ -511,7 +548,7 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		if len(result.Values) == 0 {
@@ -560,7 +597,7 @@ func buildPreemptibleMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       nodeName,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		// TODO(michaelmdresser): check this condition at merge time?
@@ -707,6 +744,7 @@ func buildNodeMap(
 	labelsMap map[nodeIdentifierNoProviderID]map[string]string,
 	clusterAndNameToType map[nodeIdentifierNoProviderID]string,
 	res time.Duration,
+	overheadMap map[nodeIdentifierNoProviderID]*NodeOverhead,
 ) map[NodeIdentifier]*Node {
 
 	nodeMap := make(map[NodeIdentifier]*Node)
@@ -784,6 +822,16 @@ func buildNodeMap(
 		if labels, ok := labelsMap[clusterAndNameID]; ok {
 			nodePtr.Labels = labels
 		}
+
+		if overhead, ok := overheadMap[clusterAndNameID]; ok {
+			nodePtr.Overhead = overhead
+		} else {
+			// we were unable to compute overhead for this node
+			// assume default case of no overhead
+			nodePtr.Overhead = &NodeOverhead{}
+			log.Warnf("unable to compute overhead for node %s - defaulting to no overhead", clusterAndNameID.Name)
+		}
+
 	}
 
 	return nodeMap

+ 29 - 5
pkg/costmodel/cluster_helpers_test.go

@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/util"
@@ -150,6 +150,7 @@ func TestBuildNodeMap(t *testing.T) {
 		labelsMap            map[nodeIdentifierNoProviderID]map[string]string
 		clusterAndNameToType map[nodeIdentifierNoProviderID]string
 		expected             map[NodeIdentifier]*Node
+		overheadMap          map[nodeIdentifierNoProviderID]*NodeOverhead
 	}{
 		{
 			name:     "empty",
@@ -183,6 +184,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCost:      0.048,
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 			},
 		},
@@ -211,6 +213,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCost:      0.048,
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 			},
 		},
@@ -247,6 +250,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCost:      0.048,
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 				{
 					Cluster:    "cluster1",
@@ -260,6 +264,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCost:      0.087,
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 			},
 		},
@@ -489,6 +494,7 @@ func TestBuildNodeMap(t *testing.T) {
 					End:         time.Date(2020, 6, 16, 9, 20, 39, 0, time.UTC),
 					Minutes:     5*60 + 35 + (11.0 / 60.0),
 					Preemptible: true,
+					Overhead:    &NodeOverhead{},
 					Labels: map[string]string{
 						"labelname1_A": "labelvalue1_A",
 						"labelname1_B": "labelvalue1_B",
@@ -525,6 +531,7 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname1_A": "labelvalue1_A",
 						"labelname1_B": "labelvalue1_B",
 					},
+					Overhead: &NodeOverhead{},
 				},
 				{
 					Cluster:    "cluster1",
@@ -557,6 +564,7 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname2_A": "labelvalue2_A",
 						"labelname2_B": "labelvalue2_B",
 					},
+					Overhead: &NodeOverhead{},
 				},
 			},
 		},
@@ -595,6 +603,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCores:     partialCPUMap["e2-micro"],
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 			},
 		},
@@ -633,6 +642,7 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCores:     partialCPUMap["e2-small"],
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead:     &NodeOverhead{},
 				},
 			},
 		},
@@ -657,6 +667,15 @@ func TestBuildNodeMap(t *testing.T) {
 					Name:    "node1",
 				}: "e2-medium", // for this node type
 			},
+			overheadMap: map[nodeIdentifierNoProviderID]*NodeOverhead{
+				{
+					Cluster: "cluster1",
+					Name:    "node1",
+				}: {
+					CpuOverheadFraction: 0.5,
+					RamOverheadFraction: 0.25,
+				}, // for this node type
+			},
 			expected: map[NodeIdentifier]*Node{
 				{
 					Cluster:    "cluster1",
@@ -671,6 +690,10 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUCores:     partialCPUMap["e2-medium"],
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
+					Overhead: &NodeOverhead{
+						CpuOverheadFraction: 0.5,
+						RamOverheadFraction: 0.25,
+					},
 				},
 			},
 		},
@@ -688,6 +711,7 @@ func TestBuildNodeMap(t *testing.T) {
 				testCase.labelsMap,
 				testCase.clusterAndNameToType,
 				time.Minute,
+				testCase.overheadMap,
 			)
 
 			if !reflect.DeepEqual(result, testCase.expected) {
@@ -853,8 +877,8 @@ func TestBuildGPUCostMap(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
 			}
 			testPreemptible := make(map[NodeIdentifier]bool)
 			result, _ := buildGPUCostMap(testCase.promResult, testCase.countMap, testProvider, testPreemptible)
@@ -1042,8 +1066,8 @@ func TestAssetCustompricing(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), ""),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), ""),
 			}
 			testProvider.UpdateConfigFromConfigMap(testCase.customPricingMap)
 

+ 3 - 3
pkg/costmodel/router.go

@@ -18,6 +18,7 @@ import (
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
@@ -34,7 +35,6 @@ import (
 
 	"github.com/getsentry/sentry-go"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -1592,13 +1592,13 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	k8sCache.Run()
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(k8sCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 
 	watchConfigFunc := configWatchers.ToWatchFunc()

+ 294 - 100
pkg/kubecost/allocation.go

@@ -87,6 +87,7 @@ type Allocation struct {
 	// asset on which the allocation was run. It is optionally computed
 	// and appended to an Allocation, and so by default is is nil.
 	ProportionalAssetResourceCosts ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts"` //@bingen:field[ignore]
+	SharedCostBreakdown            SharedCostBreakdowns           `json:"sharedCostBreakdown"`            //@bingen:field[ignore]
 }
 
 // RawAllocationOnlyData is information that only belong in "raw" Allocations,
@@ -254,6 +255,12 @@ type ProportionalAssetResourceCost struct {
 	GPUPercentage              float64 `json:"gpuPercentage"`
 	RAMPercentage              float64 `json:"ramPercentage"`
 	NodeResourceCostPercentage float64 `json:"nodeResourceCostPercentage"`
+	GPUTotalCost               float64 `json:"-"`
+	GPUProportionalCost        float64 `json:"-"`
+	CPUTotalCost               float64 `json:"-"`
+	CPUProportionalCost        float64 `json:"-"`
+	RAMTotalCost               float64 `json:"-"`
+	RAMProportionalCost        float64 `json:"-"`
 }
 
 func (parc ProportionalAssetResourceCost) Key(insertByNode bool) string {
@@ -267,26 +274,77 @@ func (parc ProportionalAssetResourceCost) Key(insertByNode bool) string {
 
 type ProportionalAssetResourceCosts map[string]ProportionalAssetResourceCost
 
+func (parcs ProportionalAssetResourceCosts) Clone() ProportionalAssetResourceCosts {
+	cloned := ProportionalAssetResourceCosts{}
+
+	for key, parc := range parcs {
+		cloned[key] = parc
+	}
+	return cloned
+}
+
 func (parcs ProportionalAssetResourceCosts) Insert(parc ProportionalAssetResourceCost, insertByNode bool) {
 	if !insertByNode {
 		parc.Node = ""
 		parc.ProviderID = ""
 	}
 	if curr, ok := parcs[parc.Key(insertByNode)]; ok {
-		parcs[parc.Key(insertByNode)] = ProportionalAssetResourceCost{
-			Node:                       curr.Node,
-			Cluster:                    curr.Cluster,
-			ProviderID:                 curr.ProviderID,
-			CPUPercentage:              curr.CPUPercentage + parc.CPUPercentage,
-			GPUPercentage:              curr.GPUPercentage + parc.GPUPercentage,
-			RAMPercentage:              curr.RAMPercentage + parc.RAMPercentage,
-			NodeResourceCostPercentage: curr.NodeResourceCostPercentage + parc.NodeResourceCostPercentage,
+
+		toInsert := ProportionalAssetResourceCost{
+			Node:                curr.Node,
+			Cluster:             curr.Cluster,
+			ProviderID:          curr.ProviderID,
+			CPUTotalCost:        curr.CPUTotalCost + parc.CPUTotalCost,
+			CPUProportionalCost: curr.CPUProportionalCost + parc.CPUProportionalCost,
+			RAMTotalCost:        curr.RAMTotalCost + parc.RAMTotalCost,
+			RAMProportionalCost: curr.RAMProportionalCost + parc.RAMProportionalCost,
+			GPUProportionalCost: curr.GPUProportionalCost + parc.GPUProportionalCost,
+			GPUTotalCost:        curr.GPUTotalCost + parc.GPUTotalCost,
 		}
+
+		computePercentages(&toInsert)
+		parcs[parc.Key(insertByNode)] = toInsert
 	} else {
+		computePercentages(&parc)
 		parcs[parc.Key(insertByNode)] = parc
 	}
 }
 
+func computePercentages(toInsert *ProportionalAssetResourceCost) {
+	// compute percentages
+	totalCost := toInsert.RAMTotalCost + toInsert.CPUTotalCost + toInsert.GPUTotalCost
+
+	if toInsert.CPUTotalCost > 0 {
+		toInsert.CPUPercentage = toInsert.CPUProportionalCost / toInsert.CPUTotalCost
+	}
+
+	if toInsert.GPUTotalCost > 0 {
+		toInsert.GPUPercentage = toInsert.GPUProportionalCost / toInsert.GPUTotalCost
+	}
+
+	if toInsert.RAMTotalCost > 0 {
+		toInsert.RAMPercentage = toInsert.RAMProportionalCost / toInsert.RAMTotalCost
+	}
+
+	ramFraction := toInsert.RAMTotalCost / totalCost
+	if ramFraction != ramFraction || ramFraction < 0 {
+		ramFraction = 0
+	}
+
+	cpuFraction := toInsert.CPUTotalCost / totalCost
+	if cpuFraction != cpuFraction || cpuFraction < 0 {
+		cpuFraction = 0
+	}
+
+	gpuFraction := toInsert.GPUTotalCost / totalCost
+	if gpuFraction != gpuFraction || gpuFraction < 0 {
+		gpuFraction = 0
+	}
+
+	toInsert.NodeResourceCostPercentage = (toInsert.RAMPercentage * ramFraction) +
+		(toInsert.CPUPercentage * cpuFraction) + (toInsert.GPUPercentage * gpuFraction)
+}
+
 func (parcs ProportionalAssetResourceCosts) Add(that ProportionalAssetResourceCosts) {
 
 	for _, parc := range that {
@@ -299,6 +357,53 @@ func (parcs ProportionalAssetResourceCosts) Add(that ProportionalAssetResourceCo
 	}
 }
 
+type SharedCostBreakdown struct {
+	Name         string  `json:"name"`
+	TotalCost    float64 `json:"totalCost"`
+	CPUCost      float64 `json:"cpuCost,omitempty"`
+	GPUCost      float64 `json:"gpuCost,omitempty"`
+	RAMCost      float64 `json:"ramCost,omitempty"`
+	PVCost       float64 `json:"pvCost,omitempty"`
+	NetworkCost  float64 `json:"networkCost,omitempty"`
+	LBCost       float64 `json:"loadBalancerCost,omitempty"`
+	ExternalCost float64 `json:"externalCost,omitempty"`
+}
+
+type SharedCostBreakdowns map[string]SharedCostBreakdown
+
+func (scbs SharedCostBreakdowns) Clone() SharedCostBreakdowns {
+	cloned := SharedCostBreakdowns{}
+
+	for key, scb := range scbs {
+		cloned[key] = scb
+	}
+	return cloned
+}
+
+func (scbs SharedCostBreakdowns) Insert(scb SharedCostBreakdown) {
+	if curr, ok := scbs[scb.Name]; ok {
+		scbs[scb.Name] = SharedCostBreakdown{
+			Name:         curr.Name,
+			TotalCost:    curr.TotalCost + scb.TotalCost,
+			CPUCost:      curr.CPUCost + scb.CPUCost,
+			GPUCost:      curr.GPUCost + scb.GPUCost,
+			RAMCost:      curr.RAMCost + scb.RAMCost,
+			PVCost:       curr.PVCost + scb.PVCost,
+			NetworkCost:  curr.NetworkCost + scb.NetworkCost,
+			LBCost:       curr.LBCost + scb.LBCost,
+			ExternalCost: curr.ExternalCost + scb.ExternalCost,
+		}
+	} else {
+		scbs[scb.Name] = scb
+	}
+}
+
+func (scbs SharedCostBreakdowns) Add(that SharedCostBreakdowns) {
+	for _, scb := range that {
+		scbs.Insert(scb)
+	}
+}
+
 // GetWindow returns the window of the struct
 func (a *Allocation) GetWindow() Window {
 	return a.Window
@@ -334,38 +439,40 @@ func (a *Allocation) Clone() *Allocation {
 	}
 
 	return &Allocation{
-		Name:                       a.Name,
-		Properties:                 a.Properties.Clone(),
-		Window:                     a.Window.Clone(),
-		Start:                      a.Start,
-		End:                        a.End,
-		CPUCoreHours:               a.CPUCoreHours,
-		CPUCoreRequestAverage:      a.CPUCoreRequestAverage,
-		CPUCoreUsageAverage:        a.CPUCoreUsageAverage,
-		CPUCost:                    a.CPUCost,
-		CPUCostAdjustment:          a.CPUCostAdjustment,
-		GPUHours:                   a.GPUHours,
-		GPUCost:                    a.GPUCost,
-		GPUCostAdjustment:          a.GPUCostAdjustment,
-		NetworkTransferBytes:       a.NetworkTransferBytes,
-		NetworkReceiveBytes:        a.NetworkReceiveBytes,
-		NetworkCost:                a.NetworkCost,
-		NetworkCrossZoneCost:       a.NetworkCrossZoneCost,
-		NetworkCrossRegionCost:     a.NetworkCrossRegionCost,
-		NetworkInternetCost:        a.NetworkInternetCost,
-		NetworkCostAdjustment:      a.NetworkCostAdjustment,
-		LoadBalancerCost:           a.LoadBalancerCost,
-		LoadBalancerCostAdjustment: a.LoadBalancerCostAdjustment,
-		PVs:                        a.PVs.Clone(),
-		PVCostAdjustment:           a.PVCostAdjustment,
-		RAMByteHours:               a.RAMByteHours,
-		RAMBytesRequestAverage:     a.RAMBytesRequestAverage,
-		RAMBytesUsageAverage:       a.RAMBytesUsageAverage,
-		RAMCost:                    a.RAMCost,
-		RAMCostAdjustment:          a.RAMCostAdjustment,
-		SharedCost:                 a.SharedCost,
-		ExternalCost:               a.ExternalCost,
-		RawAllocationOnly:          a.RawAllocationOnly.Clone(),
+		Name:                           a.Name,
+		Properties:                     a.Properties.Clone(),
+		Window:                         a.Window.Clone(),
+		Start:                          a.Start,
+		End:                            a.End,
+		CPUCoreHours:                   a.CPUCoreHours,
+		CPUCoreRequestAverage:          a.CPUCoreRequestAverage,
+		CPUCoreUsageAverage:            a.CPUCoreUsageAverage,
+		CPUCost:                        a.CPUCost,
+		CPUCostAdjustment:              a.CPUCostAdjustment,
+		GPUHours:                       a.GPUHours,
+		GPUCost:                        a.GPUCost,
+		GPUCostAdjustment:              a.GPUCostAdjustment,
+		NetworkTransferBytes:           a.NetworkTransferBytes,
+		NetworkReceiveBytes:            a.NetworkReceiveBytes,
+		NetworkCost:                    a.NetworkCost,
+		NetworkCrossZoneCost:           a.NetworkCrossZoneCost,
+		NetworkCrossRegionCost:         a.NetworkCrossRegionCost,
+		NetworkInternetCost:            a.NetworkInternetCost,
+		NetworkCostAdjustment:          a.NetworkCostAdjustment,
+		LoadBalancerCost:               a.LoadBalancerCost,
+		LoadBalancerCostAdjustment:     a.LoadBalancerCostAdjustment,
+		PVs:                            a.PVs.Clone(),
+		PVCostAdjustment:               a.PVCostAdjustment,
+		RAMByteHours:                   a.RAMByteHours,
+		RAMBytesRequestAverage:         a.RAMBytesRequestAverage,
+		RAMBytesUsageAverage:           a.RAMBytesUsageAverage,
+		RAMCost:                        a.RAMCost,
+		RAMCostAdjustment:              a.RAMCostAdjustment,
+		SharedCost:                     a.SharedCost,
+		ExternalCost:                   a.ExternalCost,
+		RawAllocationOnly:              a.RawAllocationOnly.Clone(),
+		ProportionalAssetResourceCosts: a.ProportionalAssetResourceCosts.Clone(),
+		SharedCostBreakdown:            a.SharedCostBreakdown.Clone(),
 	}
 }
 
@@ -775,10 +882,29 @@ func (a *Allocation) add(that *Allocation) {
 
 	// If both Allocations have ProportionalAssetResourceCosts, then
 	// add those from the given Allocation into the receiver.
-	if a.ProportionalAssetResourceCosts != nil && that.ProportionalAssetResourceCosts != nil {
+	if a.ProportionalAssetResourceCosts != nil || that.ProportionalAssetResourceCosts != nil {
+		// init empty PARCs if either operand has nil PARCs
+		if a.ProportionalAssetResourceCosts == nil {
+			a.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
+		}
+		if that.ProportionalAssetResourceCosts == nil {
+			that.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
+		}
 		a.ProportionalAssetResourceCosts.Add(that.ProportionalAssetResourceCosts)
 	}
 
+	// If both Allocations have SharedCostBreakdowns, then
+	// add those from the given Allocation into the receiver.
+	if a.SharedCostBreakdown != nil || that.SharedCostBreakdown != nil {
+		if a.SharedCostBreakdown == nil {
+			a.SharedCostBreakdown = SharedCostBreakdowns{}
+		}
+		if that.SharedCostBreakdown == nil {
+			that.SharedCostBreakdown = SharedCostBreakdowns{}
+		}
+		a.SharedCostBreakdown.Add(that.SharedCostBreakdown)
+	}
+
 	// Overwrite regular intersection logic for the controller name property in the
 	// case that the Allocation keys are the same but the controllers are not.
 	if leftKey == rightKey &&
@@ -922,6 +1048,7 @@ type AllocationAggregationOptions struct {
 	ShareIdle                             string
 	ShareSplit                            string
 	SharedHourlyCosts                     map[string]float64
+	IncludeSharedCostBreakdown            bool
 	SplitIdle                             bool
 	IncludeAggregatedMetadata             bool
 }
@@ -1144,34 +1271,13 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 		}
 	}
 
-	// (2b) If proportional asset resource costs are to be included, derive them
-	// from idle coefficients and add them to the allocations.
+	// (2b) If proportional asset resource costs are to be included, compute them
+	// and add them to the allocations.
 	if options.IncludeProportionalAssetResourceCosts {
-		var parcCoefficients map[string]map[string]map[string]float64
-		if parcSet.Length() > 0 {
-			parcCoefficients, allocatedTotalsMap, err = computeIdleCoeffs(options, as, shareSet)
-			if err != nil {
-				log.Warnf("AllocationSet.AggregateBy: compute parc idle coeff: %s", err)
-				return fmt.Errorf("error computing parc coefficients: %s", err)
-			}
-		}
-		if parcCoefficients == nil {
-			return fmt.Errorf("cannot include proportional resource costs because parc coefficients are nil")
-		}
-
-		for _, alloc := range as.Allocations {
-			// Create an empty set of proportional asset resource costs,
-			// regardless of whether or not we're successful in deriving them.
-			alloc.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
-
-			// Attempt to derive proportional asset resource costs from idle
-			// coefficients, and insert them into the set if successful.
-			parc, err := deriveProportionalAssetResourceCostsFromIdleCoefficients(parcCoefficients, allocatedTotalsMap, alloc, options)
-			if err != nil {
-				log.Debugf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients for %s: %s", alloc.Name, err)
-				continue
-			}
-			alloc.ProportionalAssetResourceCosts.Insert(parc, options.IdleByNode)
+		err := deriveProportionalAssetResourceCosts(options, as, shareSet)
+		if err != nil {
+			log.Debugf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients: %s", err)
+			return fmt.Errorf("AggregateBy: failed to derive proportional asset resource costs from idle coefficients: %s", err)
 		}
 	}
 
@@ -1429,6 +1535,31 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 					continue
 				}
 
+				if options.IncludeSharedCostBreakdown {
+					if alloc.SharedCostBreakdown == nil {
+						alloc.SharedCostBreakdown = map[string]SharedCostBreakdown{}
+					}
+					sharedCostName := sharedAlloc.generateKey(aggregateBy, options.LabelConfig)
+					// check if current allocation is a shared flat overhead cost
+					if strings.Contains(sharedAlloc.Name, SharedSuffix) {
+						sharedCostName = "overheadCost"
+					}
+
+					scb := SharedCostBreakdown{
+						Name:         sharedCostName,
+						TotalCost:    sharedAlloc.TotalCost() * shareCoefficients[alloc.Name],
+						CPUCost:      sharedAlloc.CPUTotalCost() * shareCoefficients[alloc.Name],
+						GPUCost:      sharedAlloc.GPUTotalCost() * shareCoefficients[alloc.Name],
+						RAMCost:      sharedAlloc.RAMTotalCost() * shareCoefficients[alloc.Name],
+						PVCost:       sharedAlloc.PVCost() * shareCoefficients[alloc.Name],
+						NetworkCost:  sharedAlloc.NetworkTotalCost() * shareCoefficients[alloc.Name],
+						LBCost:       sharedAlloc.LBTotalCost() * shareCoefficients[alloc.Name],
+						ExternalCost: sharedAlloc.ExternalCost * shareCoefficients[alloc.Name],
+					}
+					// fmt.Printf("shared cost: %+v", scb)
+					alloc.SharedCostBreakdown.Insert(scb)
+				}
+
 				alloc.SharedCost += sharedAlloc.TotalCost() * shareCoefficients[alloc.Name]
 			}
 		}
@@ -1741,48 +1872,111 @@ func computeIdleCoeffs(options *AllocationAggregationOptions, as *AllocationSet,
 	return coeffs, totals, nil
 }
 
-func deriveProportionalAssetResourceCostsFromIdleCoefficients(idleCoeffs map[string]map[string]map[string]float64, totals map[string]map[string]float64, allocation *Allocation, options *AllocationAggregationOptions) (ProportionalAssetResourceCost, error) {
-	idleId, err := allocation.getIdleId(options)
-	if err != nil {
-		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to get idle ID for allocation %s", allocation.Name)
-	}
+func deriveProportionalAssetResourceCosts(options *AllocationAggregationOptions, as *AllocationSet, shareSet *AllocationSet) error {
 
-	if _, ok := idleCoeffs[idleId]; !ok {
-		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to find idle coeffs for idle ID %s", idleId)
-	}
+	// Compute idle coefficients, then save them in AllocationAggregationOptions
+	// [idle_id][allocation name][resource] = [coeff]
+	coeffs := map[string]map[string]map[string]float64{}
 
-	if _, ok := idleCoeffs[idleId][allocation.Name]; !ok {
-		return ProportionalAssetResourceCost{}, fmt.Errorf("failed to find idle coeffs for allocation %s", allocation.Name)
+	// Compute totals per resource for CPU, GPU, RAM, and PV
+	// [idle_id][resource] = [total]
+	totals := map[string]map[string]float64{}
+
+	// Record allocation values first, then normalize by totals to get percentages
+	for _, alloc := range as.Allocations {
+		if alloc.IsIdle() {
+			// Skip idle allocations in coefficient calculation
+			continue
+		}
+
+		idleId, err := alloc.getIdleId(options)
+		if err != nil {
+			log.DedupedWarningf(3, "Missing Idle Key for %s", alloc.Name)
+		}
+
+		// get the name key for the allocation
+		name := alloc.Name
+
+		// Create key based tables if they don't exist
+		if _, ok := coeffs[idleId]; !ok {
+			coeffs[idleId] = map[string]map[string]float64{}
+		}
+		if _, ok := totals[idleId]; !ok {
+			totals[idleId] = map[string]float64{}
+		}
+
+		if _, ok := coeffs[idleId][name]; !ok {
+			coeffs[idleId][name] = map[string]float64{}
+		}
+
+		coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
+		coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
+		coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
+
+		totals[idleId]["cpu"] += alloc.CPUTotalCost()
+		totals[idleId]["gpu"] += alloc.GPUTotalCost()
+		totals[idleId]["ram"] += alloc.RAMTotalCost()
 	}
 
-	cpuPct := idleCoeffs[idleId][allocation.Name]["cpu"]
-	gpuPct := idleCoeffs[idleId][allocation.Name]["gpu"]
-	ramPct := idleCoeffs[idleId][allocation.Name]["ram"]
+	// Do the same for shared allocations
+	for _, alloc := range shareSet.Allocations {
+		if alloc.IsIdle() {
+			// Skip idle allocations in coefficient calculation
+			continue
+		}
 
-	// compute how much each component (cpu, gpu, ram) contributes to the overall price
-	totalCost := totals[idleId]["ram"] + totals[idleId]["gpu"] + totals[idleId]["cpu"]
+		// idleId will be providerId or cluster
+		idleId, err := alloc.getIdleId(options)
+		if err != nil {
+			log.DedupedWarningf(3, "Missing Idle Key in share set for %s", alloc.Name)
+		}
+
+		// get the name key for the allocation
+		name := alloc.Name
 
-	var ramFraction, cpuFraction, gpuFraction float64
+		// Create idleId based tables if they don't exist
+		if _, ok := coeffs[idleId]; !ok {
+			coeffs[idleId] = map[string]map[string]float64{}
+		}
+		if _, ok := totals[idleId]; !ok {
+			totals[idleId] = map[string]float64{}
+		}
 
-	// only compute fraction if totalCost is nonzero, otherwise returns in NaN
-	if totalCost > 0 {
-		ramFraction = totals[idleId]["ram"] / totalCost
-		cpuFraction = totals[idleId]["cpu"] / totalCost
-		gpuFraction = totals[idleId]["gpu"] / totalCost
+		if _, ok := coeffs[idleId][name]; !ok {
+			coeffs[idleId][name] = map[string]float64{}
+		}
+
+		coeffs[idleId][name]["cpu"] += alloc.CPUTotalCost()
+		coeffs[idleId][name]["gpu"] += alloc.GPUTotalCost()
+		coeffs[idleId][name]["ram"] += alloc.RAMTotalCost()
+
+		totals[idleId]["cpu"] += alloc.CPUTotalCost()
+		totals[idleId]["gpu"] += alloc.GPUTotalCost()
+		totals[idleId]["ram"] += alloc.RAMTotalCost()
 	}
 
-	// compute the resource usage percentage based on the weighted fractions
-	nodeResourceCostPercentage := (ramPct * ramFraction) + (cpuPct * cpuFraction) + (gpuPct * gpuFraction)
+	// after totals are computed, loop through and set parcs on allocations
+	for _, alloc := range as.Allocations {
+		idleId, err := alloc.getIdleId(options)
+		if err != nil {
+			log.DedupedWarningf(3, "Missing Idle Key in share set for %s", alloc.Name)
+		}
+
+		alloc.ProportionalAssetResourceCosts = ProportionalAssetResourceCosts{}
+		alloc.ProportionalAssetResourceCosts.Insert(ProportionalAssetResourceCost{
+			Cluster:             alloc.Properties.Cluster,
+			Node:                alloc.Properties.Node,
+			ProviderID:          alloc.Properties.ProviderID,
+			GPUTotalCost:        totals[idleId]["gpu"],
+			CPUTotalCost:        totals[idleId]["cpu"],
+			RAMTotalCost:        totals[idleId]["ram"],
+			GPUProportionalCost: coeffs[idleId][alloc.Name]["gpu"],
+			CPUProportionalCost: coeffs[idleId][alloc.Name]["cpu"],
+			RAMProportionalCost: coeffs[idleId][alloc.Name]["ram"],
+		}, options.IdleByNode)
+	}
 
-	return ProportionalAssetResourceCost{
-		Cluster:                    allocation.Properties.Cluster,
-		Node:                       allocation.Properties.Node,
-		ProviderID:                 allocation.Properties.ProviderID,
-		CPUPercentage:              cpuPct,
-		GPUPercentage:              gpuPct,
-		RAMPercentage:              ramPct,
-		NodeResourceCostPercentage: nodeResourceCostPercentage,
-	}, nil
+	return nil
 }
 
 // getIdleId returns the providerId or cluster of an Allocation depending on the IdleByNode

+ 2 - 0
pkg/kubecost/allocation_json.go

@@ -55,6 +55,7 @@ type AllocationJSON struct {
 	TotalEfficiency                *float64                        `json:"totalEfficiency"`
 	RawAllocationOnly              *RawAllocationOnlyData          `json:"rawAllocationOnly,omitempty"`
 	ProportionalAssetResourceCosts *ProportionalAssetResourceCosts `json:"proportionalAssetResourceCosts,omitempty"`
+	SharedCostBreakdown            *SharedCostBreakdowns           `json:"sharedCostBreakdown,omitempty"`
 }
 
 func (aj *AllocationJSON) BuildFromAllocation(a *Allocation) {
@@ -105,6 +106,7 @@ func (aj *AllocationJSON) BuildFromAllocation(a *Allocation) {
 	aj.TotalEfficiency = formatFloat64ForResponse(a.TotalEfficiency())
 	aj.RawAllocationOnly = a.RawAllocationOnly
 	aj.ProportionalAssetResourceCosts = &a.ProportionalAssetResourceCosts
+	aj.SharedCostBreakdown = &a.SharedCostBreakdown
 
 }
 

+ 398 - 32
pkg/kubecost/allocation_test.go

@@ -9,8 +9,10 @@ import (
 	"time"
 
 	"github.com/davecgh/go-spew/spew"
+	"github.com/opencost/opencost/pkg/log"
 	"github.com/opencost/opencost/pkg/util"
 	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
 )
 
 func TestAllocation_Add(t *testing.T) {
@@ -517,6 +519,11 @@ func assertParcResults(t *testing.T, as *AllocationSet, msg string, exps map[str
 		for key, actualParc := range a.ProportionalAssetResourceCosts {
 			expectedParcs := exps[allocKey]
 
+			// round to prevent floating point issues from failing tests at ultra high precision
+			actualParc.NodeResourceCostPercentage = roundFloat(actualParc.NodeResourceCostPercentage)
+			actualParc.CPUPercentage = roundFloat(actualParc.CPUPercentage)
+			actualParc.RAMPercentage = roundFloat(actualParc.RAMPercentage)
+			actualParc.GPUPercentage = roundFloat(actualParc.GPUPercentage)
 			if !reflect.DeepEqual(expectedParcs[key], actualParc) {
 				t.Fatalf("actual PARC %v did not match expected PARC %v", actualParc, expectedParcs[key])
 			}
@@ -524,6 +531,10 @@ func assertParcResults(t *testing.T, as *AllocationSet, msg string, exps map[str
 
 	}
 }
+func roundFloat(val float64) float64 {
+	ratio := math.Pow(10, float64(5))
+	return math.Round(val*ratio) / ratio
+}
 
 func assertAllocationTotals(t *testing.T, as *AllocationSet, msg string, exps map[string]float64) {
 	for _, a := range as.Allocations {
@@ -1079,10 +1090,16 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						Cluster:                    "cluster1",
 						Node:                       "",
 						ProviderID:                 "",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.8125,
-						NodeResourceCostPercentage: 0.6785714285714285,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.27083,
+						NodeResourceCostPercentage: 0.22619,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               48,
+						RAMProportionalCost:        13,
 					},
 				},
 				"namespace2": {
@@ -1090,19 +1107,31 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						Cluster:                    "cluster1",
 						Node:                       "",
 						ProviderID:                 "",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.1875,
-						NodeResourceCostPercentage: 0.3214285714285714,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.0625,
+						NodeResourceCostPercentage: 0.10714,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               48,
+						RAMProportionalCost:        3,
 					},
 					"cluster2": ProportionalAssetResourceCost{
 						Cluster:                    "cluster2",
 						Node:                       "",
 						ProviderID:                 "",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.16667,
+						NodeResourceCostPercentage: 0.16667,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               18,
+						RAMProportionalCost:        3,
 					},
 				},
 			},
@@ -1517,19 +1546,31 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						Cluster:                    "cluster1",
 						Node:                       "c1nodes",
 						ProviderID:                 "c1nodes",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.8125,
-						NodeResourceCostPercentage: 0.6785714285714285,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.27083,
+						NodeResourceCostPercentage: 0.22619,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               48,
+						RAMProportionalCost:        13,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
 						Cluster:                    "cluster2",
 						Node:                       "node2",
 						ProviderID:                 "node2",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.5,
-						NodeResourceCostPercentage: 0.5,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.0625,
+						NodeResourceCostPercentage: 0.10714,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               48,
+						RAMProportionalCost:        3,
 					},
 				},
 				"namespace2": {
@@ -1537,19 +1578,31 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						Cluster:                    "cluster1",
 						Node:                       "c1nodes",
 						ProviderID:                 "c1nodes",
-						CPUPercentage:              0.5,
-						GPUPercentage:              0.5,
-						RAMPercentage:              0.1875,
-						NodeResourceCostPercentage: 0.3214285714285714,
+						CPUPercentage:              0.16667,
+						GPUPercentage:              0.16667,
+						RAMPercentage:              0.0625,
+						NodeResourceCostPercentage: 0.10714,
+						GPUTotalCost:               18,
+						GPUProportionalCost:        3,
+						CPUTotalCost:               18,
+						CPUProportionalCost:        3,
+						RAMTotalCost:               48,
+						RAMProportionalCost:        3,
 					},
 					"cluster2,node1": ProportionalAssetResourceCost{
 						Cluster:                    "cluster2",
 						Node:                       "node1",
 						ProviderID:                 "node1",
-						CPUPercentage:              1,
-						GPUPercentage:              1,
-						RAMPercentage:              1,
-						NodeResourceCostPercentage: 1,
+						CPUPercentage:              0.5,
+						GPUPercentage:              0.5,
+						RAMPercentage:              0.5,
+						NodeResourceCostPercentage: 0.5,
+						GPUTotalCost:               4,
+						GPUProportionalCost:        2,
+						CPUTotalCost:               4,
+						CPUProportionalCost:        2,
+						RAMTotalCost:               4,
+						RAMProportionalCost:        2,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
 						Cluster:                    "cluster2",
@@ -1559,6 +1612,12 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						GPUPercentage:              0.5,
 						RAMPercentage:              0.5,
 						NodeResourceCostPercentage: 0.5,
+						GPUTotalCost:               2,
+						GPUProportionalCost:        1,
+						CPUTotalCost:               2,
+						CPUProportionalCost:        1,
+						RAMTotalCost:               2,
+						RAMProportionalCost:        1,
 					},
 				},
 				"namespace3": {
@@ -1566,10 +1625,16 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						Cluster:                    "cluster2",
 						Node:                       "node3",
 						ProviderID:                 "node3",
-						CPUPercentage:              1,
-						GPUPercentage:              1,
-						RAMPercentage:              1,
-						NodeResourceCostPercentage: 1,
+						CPUPercentage:              0.5,
+						GPUPercentage:              0.5,
+						RAMPercentage:              0.5,
+						NodeResourceCostPercentage: 0.5,
+						GPUTotalCost:               4,
+						GPUProportionalCost:        2,
+						CPUTotalCost:               4,
+						CPUProportionalCost:        2,
+						RAMTotalCost:               4,
+						RAMProportionalCost:        2,
 					},
 					"cluster2,node2": ProportionalAssetResourceCost{
 						Cluster:                    "cluster2",
@@ -1579,6 +1644,12 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						GPUPercentage:              0.5,
 						RAMPercentage:              0.5,
 						NodeResourceCostPercentage: 0.5,
+						GPUTotalCost:               2,
+						GPUProportionalCost:        1,
+						CPUTotalCost:               2,
+						CPUProportionalCost:        1,
+						RAMTotalCost:               2,
+						RAMProportionalCost:        1,
 					},
 				},
 			},
@@ -1653,6 +1724,126 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 	}
 }
 
+func TestAllocationSet_AggregateBy_SharedCostBreakdown(t *testing.T) {
+	// Set generated by GenerateMockAllocationSet
+	// | Hierarchy                              | Cost |  CPU |  RAM |  GPU |   PV |  Net |  LB  |
+	// +----------------------------------------+------+------+------+------+------+------+------+
+	//   cluster1:
+	//     idle:                                  20.00   5.00  15.00   0.00   0.00   0.00   0.00
+	//     namespace1:
+	//       pod1:
+	//         container1: [app1, env1]   16.00   1.00  11.00   1.00   1.00   1.00   1.00
+	//       pod-abc: (deployment1)
+	//         container2:                         6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-def: (deployment1)
+	//         container3:                         6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace2:
+	//       pod-ghi: (deployment2)
+	//         container4: [app2, env2]    6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app2, env2]    6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-jkl: (daemonset1)
+	//         container6: {service1}              6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	// +-----------------------------------------+------+------+------+------+------+------+------+
+	//   cluster1 subtotal                        66.00  11.00  31.00   6.00   6.00   6.00   6.00
+	// +-----------------------------------------+------+------+------+------+------+------+------+
+	//   cluster2:
+	//     idle:                                  10.00   5.00   5.00   0.00   0.00   0.00   0.00
+	//     namespace2:
+	//       pod-mno: (deployment2)
+	//         container4: [app2]              6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//         container5: [app2]              6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-pqr: (daemonset1)
+	//         container6: {service1}              6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//     namespace3:
+	//       pod-stu: (deployment3)
+	//         container7: an[team1]          6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//       pod-vwx: (statefulset1)
+	//         container8: an[team2]          6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	//         container9: an[team1]          6.00   1.00   1.00   1.00   1.00   1.00   1.00
+	// +----------------------------------------+------+------+------+------+------+------+------+
+	//   cluster2 subtotal                        46.00  11.00  11.00   6.00   6.00   6.00   6.00
+	// +----------------------------------------+------+------+------+------+------+------+------+
+	//   total                                   112.00  22.00  42.00  12.00  12.00  12.00  12.00
+	// +----------------------------------------+------+------+------+------+------+------+------+
+	end := time.Now().UTC().Truncate(day)
+	start := end.Add(-day)
+
+	isNamespace1 := func(a *Allocation) bool {
+		ns := a.Properties.Namespace
+		return ns == "namespace1"
+	}
+
+	isNamespace3 := func(a *Allocation) bool {
+		ns := a.Properties.Namespace
+		return ns == "namespace3"
+	}
+
+	cases := map[string]struct {
+		start   time.Time
+		aggBy   []string
+		aggOpts *AllocationAggregationOptions
+	}{
+		"agg cluster, flat shared cost": {
+			start: start,
+			aggBy: []string{"cluster"},
+			aggOpts: &AllocationAggregationOptions{
+				SharedHourlyCosts:          map[string]float64{"share_hourly": 10.0 / timeutil.HoursPerDay},
+				IncludeSharedCostBreakdown: true,
+			},
+		},
+		"agg namespace, shared namespace: namespace1": {
+			start: start,
+			aggBy: []string{"namespace"},
+			aggOpts: &AllocationAggregationOptions{
+				ShareFuncs: []AllocationMatchFunc{
+					isNamespace1,
+				},
+				IncludeSharedCostBreakdown: true,
+			},
+		},
+		"agg namespace, shared namespace: namespace3": {
+			start: start,
+			aggBy: []string{"namespace"},
+			aggOpts: &AllocationAggregationOptions{
+				ShareFuncs: []AllocationMatchFunc{
+					isNamespace3,
+				},
+				IncludeSharedCostBreakdown: true,
+			},
+		},
+	}
+
+	for name, tc := range cases {
+		t.Run(name, func(t *testing.T) {
+			as := GenerateMockAllocationSetClusterIdle(tc.start)
+			err := as.AggregateBy(tc.aggBy, tc.aggOpts)
+			if err != nil {
+				t.Fatalf("error aggregating: %s", err)
+			}
+			for _, alloc := range as.Allocations {
+				var breakdownTotal float64
+				// ignore idle since it should never have shared costs
+				if strings.Contains(alloc.Name, IdleSuffix) {
+					continue
+				}
+				for _, sharedAlloc := range alloc.SharedCostBreakdown {
+					breakdownTotal += sharedAlloc.TotalCost
+					totalInternal := sharedAlloc.CPUCost + sharedAlloc.GPUCost + sharedAlloc.RAMCost + sharedAlloc.NetworkCost + sharedAlloc.LBCost + sharedAlloc.PVCost + sharedAlloc.ExternalCost
+					// check that the total cost of a single item in the breakdown equals the sum of its parts
+					// we can ignore the overheadCost breakdown since it only has a total
+					if totalInternal != sharedAlloc.TotalCost && sharedAlloc.Name != "overheadCost" {
+						t.Errorf("expected internal total: %f; got %f", sharedAlloc.TotalCost, totalInternal)
+					}
+				}
+				// check that the totals of all shared cost breakdowns equal the allocation's SharedCost
+				if breakdownTotal != alloc.SharedCost {
+					t.Errorf("expected breakdown total: %f; got %f", alloc.SharedCost, breakdownTotal)
+				}
+			}
+		})
+	}
+}
+
 // TODO niko/etl
 //func TestAllocationSet_Clone(t *testing.T) {}
 
@@ -1708,6 +1899,181 @@ func TestAllocationSet_insertMatchingWindow(t *testing.T) {
 	}
 }
 
+// This tests PARC accumulation. Assuming Node cost is $1 per core per hour
+// From https://github.com/opencost/opencost/pull/1867#discussion_r1174109388:
+// Over the span of hour 1:
+
+//     Pod 1 runs for 30 minutes, consuming 1 CPU while alive. PARC: 12.5% (0.5 core-hours / 4 available core-hours)
+//     Pod 2 runs for 1 hour, consuming 2 CPU while alive. PARC: 50% (2 core-hours)
+//     Pod 3 runs for 1 hour, consuming 1 CPU while alive. PARC: 25% (1 core-hour)
+
+// Over the span of hour 2:
+
+//     Pod 1 does not run. PARC: 0% (0 core-hours / 4 available core-hours)
+//     Pod 2 runs for 30 minutes, consuming 2 CPU while active. PARC: 25% (1 core-hour)
+//     Pod 3 runs for 1 hour, consuming 1 CPU while active. PARC: 25% (1 core-hour)
+
+// Over the span of hour 3:
+
+//     Pod 1 does not run. PARC: 0% (0 core-hours / 4 available)
+//     Pod 2 runs for 30 minutes, consuming 3 CPU while active. PARC: 37.5% (1.5 core-hours)
+//     Pod 3 runs for 1 hour, consuming 1 CPU while active. PARC: 25% (1 core-hour)
+
+// We expect the following accumulated PARC:
+
+//     Pod 1: (0.5 + 0 + 0) core-hours used / (4 + 4 + 4) core-hours available = 0.5/12 = 4.16%
+//     Pod 2: (2 + 1 + 1.5) / (4 + 4 + 4) = 4.5/12 = 37.5%
+//     Pod 3: (1 + 1 + 1) / (4 + 4 + 4) = 3/12 = 25%
+
+func TestParcInsert(t *testing.T) {
+	pod1_hour1 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node1",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.125,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        0.5,
+	}
+
+	pod1_hour2 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node1",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+	}
+
+	pod1_hour3 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node1",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+	}
+
+	pod2_hour1 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node2",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        2,
+	}
+
+	pod2_hour2 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node2",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        1,
+	}
+
+	pod2_hour3 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node2",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        1.5,
+	}
+
+	pod3_hour1 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node3",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        1,
+	}
+
+	pod3_hour2 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node3",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        1,
+	}
+
+	pod3_hour3 := ProportionalAssetResourceCost{
+		Cluster:                    "cluster1",
+		Node:                       "node3",
+		ProviderID:                 "i-1234",
+		CPUPercentage:              0.0,
+		GPUPercentage:              0,
+		RAMPercentage:              0,
+		NodeResourceCostPercentage: 0,
+		CPUTotalCost:               4,
+		CPUProportionalCost:        1,
+	}
+
+	parcs := ProportionalAssetResourceCosts{}
+	parcs.Insert(pod1_hour1, true)
+	parcs.Insert(pod1_hour2, true)
+	parcs.Insert(pod1_hour3, true)
+	parcs.Insert(pod2_hour1, true)
+	parcs.Insert(pod2_hour2, true)
+	parcs.Insert(pod2_hour3, true)
+	parcs.Insert(pod3_hour1, true)
+	parcs.Insert(pod3_hour2, true)
+	parcs.Insert(pod3_hour3, true)
+	log.Debug("added all parcs")
+
+	expectedParcs := ProportionalAssetResourceCosts{
+		"cluster1,node1": ProportionalAssetResourceCost{
+			CPUPercentage:              0.041666666666666664,
+			NodeResourceCostPercentage: 0.041666666666666664,
+		},
+		"cluster1,node2": ProportionalAssetResourceCost{
+			CPUPercentage:              0.375,
+			NodeResourceCostPercentage: 0.375,
+		},
+		"cluster1,node3": ProportionalAssetResourceCost{
+			CPUPercentage:              0.25,
+			NodeResourceCostPercentage: 0.25,
+		},
+	}
+
+	for key, expectedParc := range expectedParcs {
+		actualParc, ok := parcs[key]
+		if !ok {
+			t.Fatalf("did not find expected PARC: %s", key)
+		}
+
+		if actualParc.CPUPercentage != expectedParc.CPUPercentage {
+			t.Fatalf("actual parc cpu percentage: %f did not match expected: %f", actualParc.CPUPercentage, expectedParc.CPUPercentage)
+		}
+		if actualParc.NodeResourceCostPercentage != expectedParc.NodeResourceCostPercentage {
+			t.Fatalf("actual parc node percentage: %f did not match expected: %f", actualParc.NodeResourceCostPercentage, expectedParc.NodeResourceCostPercentage)
+		}
+	}
+}
+
 // TODO niko/etl
 //func TestAllocationSet_IsEmpty(t *testing.T) {}
 

+ 16 - 0
pkg/kubecost/asset.go

@@ -1753,6 +1753,14 @@ func (n *Network) String() string {
 	return toString(n)
 }
 
+// NodeOverhead represents the delta between the allocatable resources
+// of the node and the node nameplate capacity
+type NodeOverhead struct {
+	CpuOverheadFraction  float64
+	RamOverheadFraction  float64
+	OverheadCostFraction float64
+}
+
 // Node is an Asset representing a single node in a cluster
 type Node struct {
 	Properties   *AssetProperties
@@ -1773,6 +1781,7 @@ type Node struct {
 	RAMCost      float64
 	Discount     float64
 	Preemptible  float64
+	Overhead     *NodeOverhead // @bingen:field[version=19]
 }
 
 // NewNode creates and returns a new Node Asset
@@ -2001,6 +2010,13 @@ func (n *Node) add(that *Node) {
 	n.GPUCost += that.GPUCost
 	n.RAMCost += that.RAMCost
 	n.Adjustment += that.Adjustment
+
+	if n.Overhead != nil && that.Overhead != nil {
+
+		n.Overhead.RamOverheadFraction = (n.Overhead.RamOverheadFraction*n.RAMCost + that.Overhead.RamOverheadFraction*that.RAMCost) / totalRAMCost
+		n.Overhead.CpuOverheadFraction = (n.Overhead.CpuOverheadFraction*n.CPUCost + that.Overhead.CpuOverheadFraction*that.CPUCost) / totalCPUCost
+		n.Overhead.OverheadCostFraction = ((n.Overhead.CpuOverheadFraction * n.CPUCost) + (n.Overhead.RamOverheadFraction * n.RAMCost)) / n.TotalCost()
+	}
 }
 
 // Clone returns a deep copy of the given Node

+ 4 - 0
pkg/kubecost/asset_json.go

@@ -494,7 +494,11 @@ func (n *Node) MarshalJSON() ([]byte, error) {
 	jsonEncodeFloat64(buffer, "gpuCount", n.GPUs(), ",")
 	jsonEncodeFloat64(buffer, "ramCost", n.RAMCost, ",")
 	jsonEncodeFloat64(buffer, "adjustment", n.Adjustment, ",")
+	if n.Overhead != nil {
+		jsonEncode(buffer, "overhead", n.Overhead, ",")
+	}
 	jsonEncodeFloat64(buffer, "totalCost", n.TotalCost(), "")
+
 	buffer.WriteString("}")
 	return buffer.Bytes(), nil
 }

+ 3 - 2
pkg/kubecost/bingen.go

@@ -26,7 +26,7 @@ package kubecost
 // @bingen:generate:CoverageSet
 
 // Asset Version Set: Includes Asset pipeline specific resources
-// @bingen:set[name=Assets,version=18]
+// @bingen:set[name=Assets,version=19]
 // @bingen:generate:Any
 // @bingen:generate:Asset
 // @bingen:generate:AssetLabels
@@ -41,6 +41,7 @@ package kubecost
 // @bingen:generate:LoadBalancer
 // @bingen:generate:Network
 // @bingen:generate:Node
+// @bingen:generate:NodeOverhead
 // @bingen:generate:SharedAsset
 // @bingen:end
 
@@ -73,7 +74,7 @@ package kubecost
 // @bingen:generate:AuditSetRange
 // @bingen:end
 
-// @bingen:set[name=CloudCost,version=1]
+// @bingen:set[name=CloudCost,version=2]
 // @bingen:generate:CloudCost
 // @bingen:generate:CostMetric
 // @bingen:generate[stringtable]:CloudCostSet

+ 15 - 3
pkg/kubecost/cloudcost.go

@@ -18,10 +18,11 @@ type CloudCost struct {
 	NetCost          CostMetric           `json:"netCost"`
 	AmortizedNetCost CostMetric           `json:"amortizedNetCost"`
 	InvoicedCost     CostMetric           `json:"invoicedCost"`
+	AmortizedCost    CostMetric           `json:"amortizedCost"`
 }
 
 // NewCloudCost instantiates a new CloudCost
-func NewCloudCost(start, end time.Time, ccProperties *CloudCostProperties, kubernetesPercent, listCost, netCost, amortizedNetCost, invoicedCost float64) *CloudCost {
+func NewCloudCost(start, end time.Time, ccProperties *CloudCostProperties, kubernetesPercent, listCost, netCost, amortizedNetCost, invoicedCost, amortizedCost float64) *CloudCost {
 	return &CloudCost{
 		Properties: ccProperties,
 		Window:     NewWindow(&start, &end),
@@ -38,7 +39,11 @@ func NewCloudCost(start, end time.Time, ccProperties *CloudCostProperties, kuber
 			KubernetesPercent: kubernetesPercent,
 		},
 		InvoicedCost: CostMetric{
-			Cost:              listCost,
+			Cost:              invoicedCost,
+			KubernetesPercent: kubernetesPercent,
+		},
+		AmortizedCost: CostMetric{
+			Cost:              amortizedCost,
 			KubernetesPercent: kubernetesPercent,
 		},
 	}
@@ -52,6 +57,7 @@ func (cc *CloudCost) Clone() *CloudCost {
 		NetCost:          cc.NetCost.Clone(),
 		AmortizedNetCost: cc.AmortizedNetCost.Clone(),
 		InvoicedCost:     cc.InvoicedCost.Clone(),
+		AmortizedCost:    cc.AmortizedCost.Clone(),
 	}
 }
 
@@ -65,7 +71,8 @@ func (cc *CloudCost) Equal(that *CloudCost) bool {
 		cc.ListCost.Equal(that.ListCost) &&
 		cc.NetCost.Equal(that.NetCost) &&
 		cc.AmortizedNetCost.Equal(that.AmortizedNetCost) &&
-		cc.InvoicedCost.Equal(that.InvoicedCost)
+		cc.InvoicedCost.Equal(that.InvoicedCost) &&
+		cc.AmortizedCost.Equal(that.AmortizedCost)
 }
 
 func (cc *CloudCost) add(that *CloudCost) {
@@ -81,6 +88,7 @@ func (cc *CloudCost) add(that *CloudCost) {
 	cc.NetCost = cc.NetCost.add(that.NetCost)
 	cc.AmortizedNetCost = cc.AmortizedNetCost.add(that.AmortizedNetCost)
 	cc.InvoicedCost = cc.InvoicedCost.add(that.InvoicedCost)
+	cc.AmortizedCost = cc.AmortizedCost.add(that.AmortizedCost)
 
 	cc.Window = cc.Window.Expand(that.Window)
 }
@@ -131,6 +139,8 @@ func (cc *CloudCost) GetCostMetric(costMetricName string) (CostMetric, error) {
 		return cc.AmortizedNetCost, nil
 	case InvoicedCostMetric:
 		return cc.InvoicedCost, nil
+	case AmortizedCostMetric:
+		return cc.AmortizedCost, nil
 	}
 	return CostMetric{}, fmt.Errorf("invalid Cost Metric: %s", costMetricName)
 }
@@ -486,6 +496,7 @@ func (ccsr *CloudCostSetRange) LoadCloudCost(cloudCost *CloudCost) {
 				NetCost:          cloudCost.NetCost.percent(pct),
 				AmortizedNetCost: cloudCost.AmortizedNetCost.percent(pct),
 				InvoicedCost:     cloudCost.InvoicedCost.percent(pct),
+				AmortizedCost:    cloudCost.AmortizedCost.percent(pct),
 			}
 		}
 
@@ -507,6 +518,7 @@ const (
 	NetCostMetric          string = "NetCost"
 	AmortizedNetCostMetric string = "AmortizedNetCost"
 	InvoicedCostMetric     string = "InvoicedCost"
+	AmortizedCostMetric    string = "AmortizedCost"
 )
 
 type CostMetric struct {

+ 12 - 0
pkg/kubecost/cloudcost_test.go

@@ -43,6 +43,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 80, KubernetesPercent: 1},
 					AmortizedNetCost: CostMetric{Cost: 90, KubernetesPercent: 1},
 					InvoicedCost:     CostMetric{Cost: 95, KubernetesPercent: 1},
+					AmortizedCost:    CostMetric{Cost: 85, KubernetesPercent: 1},
 				},
 			},
 			ccsr: emtpyCCSR.Clone(),
@@ -58,6 +59,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 80, KubernetesPercent: 1},
 							AmortizedNetCost: CostMetric{Cost: 90, KubernetesPercent: 1},
 							InvoicedCost:     CostMetric{Cost: 95, KubernetesPercent: 1},
+							AmortizedCost:    CostMetric{Cost: 85, KubernetesPercent: 1},
 						},
 					},
 				},
@@ -82,6 +84,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 80, KubernetesPercent: 1},
 					AmortizedNetCost: CostMetric{Cost: 90, KubernetesPercent: 1},
 					InvoicedCost:     CostMetric{Cost: 95, KubernetesPercent: 1},
+					AmortizedCost:    CostMetric{Cost: 85, KubernetesPercent: 1},
 				},
 			},
 			ccsr: emtpyCCSR.Clone(),
@@ -97,6 +100,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 40, KubernetesPercent: 1},
 							AmortizedNetCost: CostMetric{Cost: 45, KubernetesPercent: 1},
 							InvoicedCost:     CostMetric{Cost: 47.5, KubernetesPercent: 1},
+							AmortizedCost:    CostMetric{Cost: 42.5, KubernetesPercent: 1},
 						},
 					},
 				},
@@ -111,6 +115,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 40, KubernetesPercent: 1},
 							AmortizedNetCost: CostMetric{Cost: 45, KubernetesPercent: 1},
 							InvoicedCost:     CostMetric{Cost: 47.5, KubernetesPercent: 1},
+							AmortizedCost:    CostMetric{Cost: 42.5, KubernetesPercent: 1},
 						},
 					},
 				},
@@ -130,6 +135,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 80, KubernetesPercent: 1},
 					AmortizedNetCost: CostMetric{Cost: 90, KubernetesPercent: 1},
 					InvoicedCost:     CostMetric{Cost: 95, KubernetesPercent: 1},
+					AmortizedCost:    CostMetric{Cost: 85, KubernetesPercent: 1},
 				},
 			},
 			ccsr: emtpyCCSR.Clone(),
@@ -145,6 +151,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 40, KubernetesPercent: 1},
 							AmortizedNetCost: CostMetric{Cost: 45, KubernetesPercent: 1},
 							InvoicedCost:     CostMetric{Cost: 47.5, KubernetesPercent: 1},
+							AmortizedCost:    CostMetric{Cost: 42.5, KubernetesPercent: 1},
 						},
 					},
 				},
@@ -169,6 +176,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 80, KubernetesPercent: 1},
 					AmortizedNetCost: CostMetric{Cost: 90, KubernetesPercent: 1},
 					InvoicedCost:     CostMetric{Cost: 95, KubernetesPercent: 1},
+					AmortizedCost:    CostMetric{Cost: 85, KubernetesPercent: 1},
 				},
 			},
 			ccsr: emtpyCCSR.Clone(),
@@ -194,6 +202,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 40, KubernetesPercent: 1},
 							AmortizedNetCost: CostMetric{Cost: 45, KubernetesPercent: 1},
 							InvoicedCost:     CostMetric{Cost: 47.5, KubernetesPercent: 1},
+							AmortizedCost:    CostMetric{Cost: 42.5, KubernetesPercent: 1},
 						},
 					},
 				},
@@ -208,6 +217,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 40, KubernetesPercent: 1},
 					AmortizedNetCost: CostMetric{Cost: 60, KubernetesPercent: 1},
 					InvoicedCost:     CostMetric{Cost: 50, KubernetesPercent: 1},
+					AmortizedCost:    CostMetric{Cost: 80, KubernetesPercent: 1},
 				},
 				{
 					Properties:       ccProperties1,
@@ -216,6 +226,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 					NetCost:          CostMetric{Cost: 60, KubernetesPercent: 0},
 					AmortizedNetCost: CostMetric{Cost: 40, KubernetesPercent: 0},
 					InvoicedCost:     CostMetric{Cost: 50, KubernetesPercent: 0},
+					AmortizedCost:    CostMetric{Cost: 20, KubernetesPercent: 0},
 				},
 			},
 			ccsr: emtpyCCSR.Clone(),
@@ -236,6 +247,7 @@ func TestCloudCost_LoadCloudCost(t *testing.T) {
 							NetCost:          CostMetric{Cost: 100, KubernetesPercent: 0.4},
 							AmortizedNetCost: CostMetric{Cost: 100, KubernetesPercent: 0.6},
 							InvoicedCost:     CostMetric{Cost: 100, KubernetesPercent: 0.5},
+							AmortizedCost:    CostMetric{Cost: 100, KubernetesPercent: 0.8},
 						},
 					},
 				},

+ 168 - 2
pkg/kubecost/kubecost_codecs.go

@@ -37,7 +37,7 @@ const (
 	DefaultCodecVersion uint8 = 17
 
 	// AssetsCodecVersion is used for any resources listed in the Assets version set
-	AssetsCodecVersion uint8 = 18
+	AssetsCodecVersion uint8 = 19
 
 	// AllocationCodecVersion is used for any resources listed in the Allocation version set
 	AllocationCodecVersion uint8 = 16
@@ -46,7 +46,7 @@ const (
 	AuditCodecVersion uint8 = 1
 
 	// CloudCostCodecVersion is used for any resources listed in the CloudCost version set
-	CloudCostCodecVersion uint8 = 1
+	CloudCostCodecVersion uint8 = 2
 )
 
 //--------------------------------------------------------------------------
@@ -86,6 +86,7 @@ var typeMap map[string]reflect.Type = map[string]reflect.Type{
 	"LoadBalancer":                  reflect.TypeOf((*LoadBalancer)(nil)).Elem(),
 	"Network":                       reflect.TypeOf((*Network)(nil)).Elem(),
 	"Node":                          reflect.TypeOf((*Node)(nil)).Elem(),
+	"NodeOverhead":                  reflect.TypeOf((*NodeOverhead)(nil)).Elem(),
 	"PVAllocation":                  reflect.TypeOf((*PVAllocation)(nil)).Elem(),
 	"PVKey":                         reflect.TypeOf((*PVKey)(nil)).Elem(),
 	"RawAllocationOnlyData":         reflect.TypeOf((*RawAllocationOnlyData)(nil)).Elem(),
@@ -4753,6 +4754,14 @@ func (target *CloudCost) MarshalBinaryWithContext(ctx *EncodingContext) (err err
 	}
 	// --- [end][write][struct](CostMetric) ---
 
+	// --- [begin][write][struct](CostMetric) ---
+	buff.WriteInt(0) // [compatibility, unused]
+	errG := target.AmortizedCost.MarshalBinaryWithContext(ctx)
+	if errG != nil {
+		return errG
+	}
+	// --- [end][write][struct](CostMetric) ---
+
 	return nil
 }
 
@@ -4874,6 +4883,16 @@ func (target *CloudCost) UnmarshalBinaryWithContext(ctx *DecodingContext) (err e
 	target.InvoicedCost = *f
 	// --- [end][read][struct](CostMetric) ---
 
+	// --- [begin][read][struct](CostMetric) ---
+	g := &CostMetric{}
+	buff.ReadInt() // [compatibility, unused]
+	errG := g.UnmarshalBinaryWithContext(ctx)
+	if errG != nil {
+		return errG
+	}
+	target.AmortizedCost = *g
+	// --- [end][read][struct](CostMetric) ---
+
 	return nil
 }
 
@@ -7712,6 +7731,20 @@ func (target *Node) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
 	buff.WriteFloat64(target.RAMCost)     // write float64
 	buff.WriteFloat64(target.Discount)    // write float64
 	buff.WriteFloat64(target.Preemptible) // write float64
+	if target.Overhead == nil {
+		buff.WriteUInt8(uint8(0)) // write nil byte
+	} else {
+		buff.WriteUInt8(uint8(1)) // write non-nil byte
+
+		// --- [begin][write][struct](NodeOverhead) ---
+		buff.WriteInt(0) // [compatibility, unused]
+		errG := target.Overhead.MarshalBinaryWithContext(ctx)
+		if errG != nil {
+			return errG
+		}
+		// --- [end][write][struct](NodeOverhead) ---
+
+	}
 	return nil
 }
 
@@ -7923,6 +7956,139 @@ func (target *Node) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error)
 	ll := buff.ReadFloat64() // read float64
 	target.Preemptible = ll
 
+	// field version check
+	if uint8(19) <= version {
+		if buff.ReadUInt8() == uint8(0) {
+			target.Overhead = nil
+		} else {
+			// --- [begin][read][struct](NodeOverhead) ---
+			mm := &NodeOverhead{}
+			buff.ReadInt() // [compatibility, unused]
+			errG := mm.UnmarshalBinaryWithContext(ctx)
+			if errG != nil {
+				return errG
+			}
+			target.Overhead = mm
+			// --- [end][read][struct](NodeOverhead) ---
+
+		}
+	} else {
+		target.Overhead = nil
+
+	}
+
+	return nil
+}
+
+//--------------------------------------------------------------------------
+//  NodeOverhead
+//--------------------------------------------------------------------------
+
+// MarshalBinary serializes the internal properties of this NodeOverhead instance
+// into a byte array
+func (target *NodeOverhead) MarshalBinary() (data []byte, err error) {
+	ctx := &EncodingContext{
+		Buffer: util.NewBuffer(),
+		Table:  nil,
+	}
+
+	e := target.MarshalBinaryWithContext(ctx)
+	if e != nil {
+		return nil, e
+	}
+
+	encBytes := ctx.Buffer.Bytes()
+	return encBytes, nil
+}
+
+// MarshalBinaryWithContext serializes the internal properties of this NodeOverhead instance
+// into a byte array leveraging a predefined context.
+func (target *NodeOverhead) MarshalBinaryWithContext(ctx *EncodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	buff.WriteUInt8(AssetsCodecVersion) // version
+
+	buff.WriteFloat64(target.CpuOverheadFraction)  // write float64
+	buff.WriteFloat64(target.RamOverheadFraction)  // write float64
+	buff.WriteFloat64(target.OverheadCostFraction) // write float64
+	return nil
+}
+
+// UnmarshalBinary uses the data passed byte array to set all the internal properties of
+// the NodeOverhead type
+func (target *NodeOverhead) UnmarshalBinary(data []byte) error {
+	var table []string
+	buff := util.NewBufferFromBytes(data)
+
+	// string table header validation
+	if isBinaryTag(data, BinaryTagStringTable) {
+		buff.ReadBytes(len(BinaryTagStringTable)) // strip tag length
+		tl := buff.ReadInt()                      // table length
+		if tl > 0 {
+			table = make([]string, tl, tl)
+			for i := 0; i < tl; i++ {
+				table[i] = buff.ReadString()
+			}
+		}
+	}
+
+	ctx := &DecodingContext{
+		Buffer: buff,
+		Table:  table,
+	}
+
+	err := target.UnmarshalBinaryWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UnmarshalBinaryWithContext uses the context containing a string table and binary buffer to set all the internal properties of
+// the NodeOverhead type
+func (target *NodeOverhead) UnmarshalBinaryWithContext(ctx *DecodingContext) (err error) {
+	// panics are recovered and propagated as errors
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(error); ok {
+				err = e
+			} else if s, ok := r.(string); ok {
+				err = fmt.Errorf("Unexpected panic: %s", s)
+			} else {
+				err = fmt.Errorf("Unexpected panic: %+v", r)
+			}
+		}
+	}()
+
+	buff := ctx.Buffer
+	version := buff.ReadUInt8()
+
+	if version > AssetsCodecVersion {
+		return fmt.Errorf("Invalid Version Unmarshaling NodeOverhead. Expected %d or less, got %d", AssetsCodecVersion, version)
+	}
+
+	a := buff.ReadFloat64() // read float64
+	target.CpuOverheadFraction = a
+
+	b := buff.ReadFloat64() // read float64
+	target.RamOverheadFraction = b
+
+	c := buff.ReadFloat64() // read float64
+	target.OverheadCostFraction = c
+
 	return nil
 }
 

+ 4 - 4
pkg/kubecost/status.go

@@ -37,10 +37,10 @@ type FileStatus struct {
 
 // CloudStatus describes CloudStore metadata
 type CloudStatus struct {
-	CloudConnectionStatus string                `json:"cloudConnectionStatus"`
-	ProviderType          string                `json:"providerType"`
-	CloudUsage            *CloudAssetStatus     `json:"cloudUsage,omitempty"`
-	Reconciliation        *ReconciliationStatus `json:"reconciliation,omitempty"`
+	ConnectionStatus string                `json:"cloudConnectionStatus"`
+	ProviderType     string                `json:"providerType"`
+	CloudUsage       *CloudAssetStatus     `json:"cloudUsage,omitempty"`
+	Reconciliation   *ReconciliationStatus `json:"reconciliation,omitempty"`
 }
 
 // CloudAssetStatus describes CloudAsset metadata of a CloudStore

+ 26 - 0
pkg/util/allocationfilterutil/queryfilters.go

@@ -1,6 +1,7 @@
 package allocationfilterutil
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/opencost/opencost/pkg/costmodel/clusters"
@@ -30,6 +31,31 @@ const (
 	ParamFilterServices    = "filterServices"
 )
 
+var allocationFilterFieldMap = map[string]string{
+	kubecost.AllocationClusterProp:        ParamFilterClusters,
+	kubecost.FilterNode:                   ParamFilterNodes,
+	kubecost.AllocationNamespaceProp:      ParamFilterNamespaces,
+	kubecost.AllocationControllerKindProp: ParamFilterControllerKinds,
+	kubecost.AllocationControllerProp:     ParamFilterControllers,
+	kubecost.AllocationPodProp:            ParamFilterPods,
+	kubecost.AllocationContainerProp:      ParamFilterContainers,
+	kubecost.AllocationDepartmentProp:     ParamFilterDepartments,
+	kubecost.AllocationEnvironmentProp:    ParamFilterEnvironments,
+	kubecost.AllocationOwnerProp:          ParamFilterOwners,
+	kubecost.AllocationProductProp:        ParamFilterProducts,
+	kubecost.AllocationTeamProp:           ParamFilterTeams,
+	kubecost.AllocationAnnotationProp:     ParamFilterAnnotations,
+	kubecost.AllocationLabelProp:          ParamFilterLabels,
+	kubecost.AllocationServiceProp:        ParamFilterServices,
+}
+
+func GetAllocationFilterForTheAllocationProperty(allocationProp string) (string, error) {
+	if _, ok := allocationFilterFieldMap[allocationProp]; !ok {
+		return "", fmt.Errorf("unknown allocation property %s", allocationProp)
+	}
+	return allocationFilterFieldMap[allocationProp], nil
+}
+
 // AllHTTPParamKeys returns all HTTP GET parameters used for v1 filters. It is
 // intended to help validate HTTP queries in handlers to help avoid e.g.
 // spelling errors.

+ 38 - 38
test/cloud_test.go

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -34,7 +34,7 @@ func TestRegionValueFromMapField(t *testing.T) {
 	n.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
 	n.Labels = make(map[string]string)
 	n.Labels[v1.LabelZoneRegion] = wantRegion
-	got := cloud.NodeValueFromMapField(providerIDMap, n, true)
+	got := provider.NodeValueFromMapField(providerIDMap, n, true)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -44,7 +44,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant := "i-05445591e0d182d42"
 	n := &v1.Node{}
 	n.Spec.ProviderID = "aws:///us-east-1a/i-05445591e0d182d42"
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -52,7 +52,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant2 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0")
 	n2 := &v1.Node{}
 	n2.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
-	got2 := cloud.NodeValueFromMapField(providerIDMap, n2, false)
+	got2 := provider.NodeValueFromMapField(providerIDMap, n2, false)
 	if got2 != providerIDWant2 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant2, got2)
 	}
@@ -60,7 +60,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant3 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0")
 	n3 := &v1.Node{}
 	n3.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0"
-	got3 := cloud.NodeValueFromMapField(providerIDMap, n3, false)
+	got3 := provider.NodeValueFromMapField(providerIDMap, n3, false)
 	if got3 != providerIDWant3 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant3, got3)
 	}
@@ -77,17 +77,17 @@ func TestNodeValueFromMapField(t *testing.T) {
 	n.Labels = make(map[string]string)
 	n.Labels["foo"] = labelFooWant
 
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(nameMap, n, false)
+	got = provider.NodeValueFromMapField(nameMap, n, false)
 	if got != nameWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", nameMap, nameWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(labelMapFoo, n, false)
+	got = provider.NodeValueFromMapField(labelMapFoo, n, false)
 	if got != labelFooWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", labelMapFoo, labelFooWant, got)
 	}
@@ -104,10 +104,10 @@ func TestPVPriceFromCSV(t *testing.T) {
 	})
 
 	wantPrice := "0.1337"
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_pv.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -152,10 +152,10 @@ func TestNodePriceFromCSVWithGPU(t *testing.T) {
 	n2.Status.Capacity = v1.ResourceList{"nvidia.com/gpu": *resource.NewScaledQuantity(2, 0)}
 	wantPrice2 := "1.733700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -211,10 +211,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 
 	wantPrice := "0.133700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -241,10 +241,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node")
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k3 := c.GetKey(n.Labels, n)
@@ -287,10 +287,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 	n3.Labels[v1.LabelZoneRegion] = "fakeregion"
 	wantPrice3 := "0.1339"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_region.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -337,10 +337,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node, instead returned %+v", resN4)
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k5 := c.GetKey(n.Labels, n)
@@ -379,10 +379,10 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "invalid.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "invalid.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -413,10 +413,10 @@ func TestSourceMatchesFromCSV(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -492,10 +492,10 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -526,10 +526,10 @@ func TestNodePriceFromCSVByClass(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}