Просмотр исходного кода

Merge pull request #1823 from opencost/sean/cloud-configs

Open Source CloudConfig and Cloud Service Integrations
Sean Holcomb 3 лет назад
Родитель
Commit
c7ade1c1e1
62 измененных файлов с 5569 добавлено и 146 удалено
  1. 87 0
      pkg/cloud/alibaba/authorizer.go
  2. 130 0
      pkg/cloud/alibaba/boaconfiguration.go
  3. 289 0
      pkg/cloud/alibaba/boaconfiguration_test.go
  4. 127 0
      pkg/cloud/alibaba/boaquerier.go
  5. 16 15
      pkg/cloud/alibaba/provider.go
  6. 2 2
      pkg/cloud/alibaba/provider_test.go
  7. 233 0
      pkg/cloud/aws/athenaconfiguration.go
  8. 594 0
      pkg/cloud/aws/athenaconfiguration_test.go
  9. 208 0
      pkg/cloud/aws/athenaquerier.go
  10. 251 0
      pkg/cloud/aws/authorizer.go
  11. 67 0
      pkg/cloud/aws/authorizer_test.go
  12. 2 22
      pkg/cloud/aws/provider.go
  13. 0 0
      pkg/cloud/aws/provider_test.go
  14. 134 0
      pkg/cloud/aws/s3configuration.go
  15. 40 0
      pkg/cloud/aws/s3connection.go
  16. 387 0
      pkg/cloud/aws/s3connection_test.go
  17. 181 0
      pkg/cloud/aws/s3selectquerier.go
  18. 80 0
      pkg/cloud/azure/authorizer.go
  19. 322 0
      pkg/cloud/azure/billingexportparser.go
  20. 194 0
      pkg/cloud/azure/billingexportparser_test.go
  21. 0 0
      pkg/cloud/azure/pricesheetclient.go
  22. 0 0
      pkg/cloud/azure/pricesheetdownloader.go
  23. 0 0
      pkg/cloud/azure/pricesheetdownloader_test.go
  24. 3 1
      pkg/cloud/azure/provider.go
  25. 0 0
      pkg/cloud/azure/provider_test.go
  26. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/BOM.csv
  27. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv
  28. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv
  29. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/German.csv
  30. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv
  31. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/YA.csv
  32. 2 0
      pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv
  33. 88 0
      pkg/cloud/azure/resources/billingexports/values/Template.csv
  34. 2 0
      pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv
  35. 170 0
      pkg/cloud/azure/storagebillingparser.go
  36. 204 0
      pkg/cloud/azure/storagebillingparser_test.go
  37. 179 0
      pkg/cloud/azure/storageconfiguration.go
  38. 446 0
      pkg/cloud/azure/storageconfiguration_test.go
  39. 77 0
      pkg/cloud/azure/storageconnection.go
  40. 53 0
      pkg/cloud/config/authorizer.go
  41. 37 0
      pkg/cloud/config/config.go
  42. 42 0
      pkg/cloud/connectionstatus.go
  43. 132 0
      pkg/cloud/gcp/authorizer.go
  44. 172 0
      pkg/cloud/gcp/bigqueryconfiguration.go
  45. 388 0
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  46. 110 0
      pkg/cloud/gcp/bigqueryquerier.go
  47. 1 0
      pkg/cloud/gcp/provider.go
  48. 0 0
      pkg/cloud/gcp/provider_test.go
  49. 1 1
      pkg/cloud/provider/csvprovider.go
  50. 6 6
      pkg/cloud/provider/customprovider.go
  51. 14 12
      pkg/cloud/provider/provider.go
  52. 2 3
      pkg/cloud/provider/providerconfig.go
  53. 6 6
      pkg/cloud/scaleway/provider.go
  54. 3 3
      pkg/cmd/agent/agent.go
  55. 6 6
      pkg/costmodel/aggregation.go
  56. 5 5
      pkg/costmodel/allocation_helpers.go
  57. 4 4
      pkg/costmodel/cluster.go
  58. 10 10
      pkg/costmodel/cluster_helpers.go
  59. 5 5
      pkg/costmodel/cluster_helpers_test.go
  60. 3 3
      pkg/costmodel/router.go
  61. 4 4
      pkg/kubecost/status.go
  62. 38 38
      test/cloud_test.go

+ 87 - 0
pkg/cloud/alibaba/authorizer.go

@@ -0,0 +1,87 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+const AccessKeyAuthorizerType = "AlibabaAccessKey"
+
// Authorizer is the Alibaba-specific extension of config.Authorizer; implementations
// supply auth.Credential values used to authorize Alibaba Cloud BSS (Billing) SDK calls.
type Authorizer interface {
	config.Authorizer
	GetCredentials() (auth.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("alibaba: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey holds the Alibaba credential pair parsed from the service-key.json file.
type AccessKey struct {
	AccessKeyID     string `json:"accessKeyID"`     // public key identifier; kept in Sanitize output
	AccessKeySecret string `json:"accessKeySecret"` // secret; replaced with config.Redacted by Sanitize
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKeyID"] = ak.AccessKeyID
+	fmap["accessKeySecret"] = ak.AccessKeySecret
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKeyID == "" {
+		return fmt.Errorf("AccessKey: missing Access key ID")
+	}
+	if ak.AccessKeySecret == "" {
+		return fmt.Errorf("AccessKey: missing Access Key secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKeyID != thatConfig.AccessKeyID {
+		return false
+	}
+	if ak.AccessKeySecret != thatConfig.AccessKeySecret {
+		return false
+	}
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		AccessKeyID:     ak.AccessKeyID,
+		AccessKeySecret: config.Redacted,
+	}
+}
+
+// GetCredentials creates a credentials object to authorize the use of service sdk calls
+func (ak *AccessKey) GetCredentials() (auth.Credential, error) {
+	err := ak.Validate()
+	if err != nil {
+		return nil, err
+	}
+	return &credentials.AccessKeyCredential{AccessKeyId: ak.AccessKeyID, AccessKeySecret: ak.AccessKeySecret}, nil
+}

+ 130 - 0
pkg/cloud/alibaba/boaconfiguration.go

@@ -0,0 +1,130 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// BOAConfiguration is the BSS Open API configuration for Alibaba's billing information.
type BOAConfiguration struct {
	Account    string     `json:"account"`    // Alibaba account identifier; first half of Key()
	Region     string     `json:"region"`     // region used for BSS API access; second half of Key()
	Authorizer Authorizer `json:"authorizer"` // credential source; decoded via SelectAuthorizerByType
}
+
+func (bc *BOAConfiguration) Validate() error {
+	// Validate Authorizer
+	if bc.Authorizer == nil {
+		return fmt.Errorf("BOAConfiguration: missing authorizer")
+	}
+
+	err := bc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	// Validate base properties
+	if bc.Region == "" {
+		return fmt.Errorf("BOAConfiguration: missing region")
+	}
+
+	if bc.Account == "" {
+		return fmt.Errorf("BOAConfiguration: missing account")
+	}
+	return nil
+}
+
+func (bc *BOAConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BOAConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bc.Authorizer != nil {
+		if !bc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bc.Account != thatConfig.Account {
+		return false
+	}
+
+	if bc.Region != thatConfig.Region {
+		return false
+	}
+	return true
+}
+
+func (bc *BOAConfiguration) Sanitize() config.Config {
+	return &BOAConfiguration{
+		Account:    bc.Account,
+		Region:     bc.Region,
+		Authorizer: bc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
// Key returns the unique identifier "<account>/<region>" for this configuration.
func (bc *BOAConfiguration) Key() string {
	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
}
+
// UnmarshalJSON decodes a BOAConfiguration from JSON. A custom implementation is
// required because Authorizer is an interface whose concrete type must be selected
// at decode time (via the type discriminator handled by SelectAuthorizerByType).
func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
	var f interface{}
	err := json.Unmarshal(b, &f)
	if err != nil {
		return err
	}

	// NOTE(review): this assertion panics if the payload is not a JSON object —
	// presumably callers only pass objects; confirm.
	fmap := f.(map[string]interface{})

	account, err := config.GetInterfaceValue[string](fmap, "account")
	if err != nil {
		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
	}
	bc.Account = account

	region, err := config.GetInterfaceValue[string](fmap, "region")
	if err != nil {
		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
	}
	bc.Region = region

	// The authorizer key must be present; its concrete type is resolved by
	// SelectAuthorizerByType from the embedded type property.
	authAny, ok := fmap["authorizer"]
	if !ok {
		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
	}
	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
	if err != nil {
		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
	}
	bc.Authorizer = authorizer

	return nil
}
+
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+	if acc.IsEmpty() {
+		return nil
+	}
+	var configurer Authorizer
+
+	configurer = &AccessKey{
+		AccessKeyID:     acc.AlibabaServiceKeyName,
+		AccessKeySecret: acc.AlibabaServiceKeySecret,
+	}
+
+	return &BOAConfiguration{
+		Account:    acc.AlibabaAccountID,
+		Region:     acc.AlibabaClusterRegion,
+		Authorizer: configurer,
+	}
+}

+ 289 - 0
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -0,0 +1,289 @@
+package alibaba
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBoaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BOAConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access key ID"),
+		},
+		"access secret invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID: "accessKeyId",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access Key secret"),
+		},
+		"missing authorizer": {
+			config: BOAConfiguration{
+				Account:    "Account Number",
+				Region:     "Region",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing authorizer"),
+		},
+		"missing Account": {
+			config: BOAConfiguration{
+				Account: "",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing account"),
+		},
+		"missing Region": {
+			config: BOAConfiguration{
+				Account: "Account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing region"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BOAConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id2",
+					AccessKeySecret: "secret2",
+				},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different region": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &AccessKey{
+				AccessKeyID:     "id",
+				AccessKeySecret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
// TestBOAConfiguration_JSON round-trips BOAConfiguration values through JSON and
// asserts the decoded value equals the original via Equals. This exercises both the
// AccessKey MarshalJSON discriminator injection and the custom UnmarshalJSON.
func TestBOAConfiguration_JSON(t *testing.T) {
	testCases := map[string]struct {
		config BOAConfiguration
	}{
		"Empty Config": {
			config: BOAConfiguration{},
		},
		"AccessKey": {
			config: BOAConfiguration{
				Region:  "region",
				Account: "account",
				Authorizer: &AccessKey{
					AccessKeyID:     "id",
					AccessKeySecret: "secret",
				},
			},
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			// test JSON Marshalling
			configJSON, err := json.Marshal(testCase.config)
			if err != nil {
				t.Errorf("failed to marshal configuration: %s", err.Error())
			}
			log.Info(string(configJSON))
			// Decode back through the custom UnmarshalJSON and compare.
			unmarshalledConfig := &BOAConfiguration{}
			err = json.Unmarshal(configJSON, unmarshalledConfig)
			if err != nil {
				t.Errorf("failed to unmarshal configuration: %s", err.Error())
			}

			if !testCase.config.Equals(unmarshalledConfig) {
				t.Error("config does not equal unmarshalled config")
			}
		})
	}
}

+ 127 - 0
pkg/cloud/alibaba/boaquerier.go

@@ -0,0 +1,127 @@
+package alibaba
+
+import (
+	"fmt"
+	"strings"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
const (
	boaIsNode    = "i-"    // instance IDs prefixed "i-" identify compute nodes
	boaIsDisk    = "d-"    // instance IDs prefixed "d-" identify disks
	boaIsNetwork = "piece" // usage unit of network resources in Alibaba billing is "piece"
)

// BoaQuerier embeds a BOAConfiguration and provides paginated query helpers for
// Alibaba's BSS Open API instance-bill endpoint.
type BoaQuerier struct {
	BOAConfiguration
}
+
+func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BoaQuerier)
+	if !ok {
+		return false
+	}
+
+	return bq.BOAConfiguration.Equals(&thatConfig.BOAConfiguration)
+}
+
+// QueryInstanceBill performs the request to the BSS client and get the response for the current page number
+func (bq *BoaQuerier) QueryInstanceBill(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, pageNum int) (*bssopenapi.QueryInstanceBillResponse, error) {
+	log.Debugf("QueryInstanceBill: query for BSS Open API for billing date: %s with pageNum: %d ", billingDate, pageNum)
+	request := bssopenapi.CreateQueryInstanceBillRequest()
+	request.Scheme = invocationScheme
+	request.BillingCycle = billingCycle
+	request.IsBillingItem = requests.NewBoolean(true)
+	request.Granularity = granularity
+	request.BillingDate = billingDate
+	request.PageNum = requests.NewInteger(pageNum)
+	response, err := client.QueryInstanceBill(request)
+	if err != nil {
+		return nil, fmt.Errorf("QueryInstanceBill: Failed to hit the BSS Open API with error for page num %d: %v", pageNum, err)
+	}
+	log.Debugf("QueryInstanceBill: Total Number of total items for billing Date: %s pageNum: %d is %d", billingDate, pageNum, response.Data.TotalCount)
+	return response, nil
+}
+
+// QueryBoaPaginated Calls the API in a paginated fashion. There's no paramter in API that can distinguish if it hasMorePages
+// hence the logic of processedItem <= TotalItem.
+func (bq *BoaQuerier) QueryBoaPaginated(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, fn func(*bssopenapi.QueryInstanceBillResponse) bool) error {
+	pageNum := 1
+	processedItem := 0 // setting default here to hit the API for the first time
+	totalItem := 1
+	for processedItem < totalItem {
+		log.Debugf("QueryBoaPaginated: query for BSS Open API for billing date: %s with pageNum: %d", billingDate, pageNum)
+		response, err := bq.QueryInstanceBill(client, isBillingItem, invocationScheme, granularity, billingCycle, billingDate, pageNum)
+		if err != nil {
+			return fmt.Errorf("QueryBoaPaginated for billing cycle : %s, billing date: %s, page num %d: %v", billingCycle, billingDate, pageNum, err)
+		}
+		fn(response)
+		totalItem = response.Data.TotalCount
+		processedItem += response.Data.PageSize
+		pageNum += 1
+	}
+	return nil
+}
+
+// GetBoaQueryInstanceBillFunc gives the item to the handler function in boaIntegration.go to process
+// computeItem, topNItem and aggregatedItem
+func GetBoaQueryInstanceBillFunc(fn func(bssopenapi.Item) error, billingDate string) func(output *bssopenapi.QueryInstanceBillResponse) bool {
+	processBOAItems := func(output *bssopenapi.QueryInstanceBillResponse) bool {
+		// This could be connection error were unable to fetch response output from Client
+		if output == nil {
+			log.Errorf("BoaQuerier: No Response from the ALibaba BSS Open API client for billing Date: %s", billingDate)
+			return false
+		}
+
+		// These infer that the rest call was successful but the Cloud Usage resource for those days were 0
+		if output.Data.TotalCount == 0 {
+			log.Warnf("BoaQuerier: Total Item Count is 0 for billing Date: %s ", billingDate)
+			return false
+		}
+
+		for _, item := range output.Data.Items.Item {
+			fn(item)
+		}
+		return true
+	}
+	return processBOAItems
+}
+
+// SelectAlibabaCategory processes the Alibaba service to associated Kubecost category
+func SelectAlibabaCategory(item bssopenapi.Item) string {
+	if (item != bssopenapi.Item{}) {
+		// Provider ID has prefix "i-" for node in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsNode) {
+			return kubecost.ComputeCategory
+		}
+		// Provider ID for disk start with "d-" for storage type in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsDisk) {
+			return kubecost.StorageCategory
+		}
+		// Network has the highest priority and is based on the usage type of "piece" in Alibaba
+		if item.UsageUnit == boaIsNetwork {
+			return kubecost.NetworkCategory
+		}
+	}
+
+	// Alibaba CUR integration report has service lower case mostly unlike AWS
+	// TO-DO: Can investigate further product codes but bare minimal differentiation for start
+	switch strings.ToLower(item.ProductCode) {
+	case "slb", "eip", "nis", "gtm":
+		return kubecost.NetworkCategory
+	case "ecs", "eds", "sas":
+		return kubecost.ComputeCategory
+	case "ack":
+		return kubecost.ManagementCategory
+	case "ebs", "oss", "scu":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}

+ 16 - 15
pkg/cloud/aliyunprovider.go → pkg/cloud/alibaba/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"errors"
@@ -122,8 +122,9 @@ var alibabaInstanceFamilies = []string{
 }
 
 // AlibabaInfo contains configuration for Alibaba's CUR integration
+// Deprecated: v1.104 Use BOAConfiguration instead
 type AlibabaInfo struct {
-	AlibabaClusterRegion    string `json:"clusterRegion"`
+	AlibabaClusterRegion    string `json:"ClusterRegion"`
 	AlibabaServiceKeyName   string `json:"serviceKeyName"`
 	AlibabaServiceKeySecret string `json:"serviceKeySecret"`
 	AlibabaAccountID        string `json:"accountID"`
@@ -138,6 +139,7 @@ func (ai *AlibabaInfo) IsEmpty() bool {
 }
 
 // AlibabaAccessKey holds Alibaba credentials parsing from the service-key.json file.
+// Deprecated: v1.104 Use AccessKey instead
 type AlibabaAccessKey struct {
 	AccessKeyID     string `json:"alibaba_access_key_id"`
 	SecretAccessKey string `json:"alibaba_secret_access_key"`
@@ -323,15 +325,14 @@ type Alibaba struct {
 	// Lock Needed to provide thread safe
 	DownloadPricingDataLock sync.RWMutex
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
-	*CustomProvider
+	Config                  models.ProviderConfig
+	ServiceAccountChecks    *models.ServiceAccountChecks
+	ClusterAccountId        string
+	ClusterRegion           string
 
 	// The following fields are unexported because of avoiding any leak of secrets of these keys.
 	// Alibaba Access key used specifically in signer interface used to sign API calls
-	serviceAccountChecks *models.ServiceAccountChecks
-	clusterAccountId     string
-	clusterRegion        string
-	accessKey            *credentials.AccessKeyCredential
+	accessKey *credentials.AccessKeyCredential
 	// Map of regionID to sdk.client to call API for that region
 	clients map[string]*sdk.Client
 }
@@ -461,10 +462,10 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	}
 
 	// set the first occurrence of region from the node
-	if alibaba.clusterRegion == "" {
+	if alibaba.ClusterRegion == "" {
 		for _, node := range nodeList {
 			if regionID, ok := node.Labels["topology.kubernetes.io/region"]; ok {
-				alibaba.clusterRegion = regionID
+				alibaba.ClusterRegion = regionID
 				break
 			}
 		}
@@ -478,7 +479,7 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	for _, pv := range pvList {
 		pvRegion := determinePVRegion(pv)
 		if pvRegion == "" {
-			pvRegion = alibaba.clusterRegion
+			pvRegion = alibaba.ClusterRegion
 		}
 		pricingObj := &AlibabaPricing{}
 		slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, pvRegion)
@@ -685,8 +686,8 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 	m := make(map[string]string)
 	m["name"] = clusterName
 	m["provider"] = kubecost.AlibabaProvider
-	m["project"] = alibaba.clusterAccountId
-	m["region"] = alibaba.clusterRegion
+	m["project"] = alibaba.ClusterAccountId
+	m["region"] = alibaba.ClusterRegion
 	m["id"] = env.GetClusterID()
 	return m, nil
 }
@@ -912,7 +913,7 @@ func (alibaba *Alibaba) GetPVKey(pv *v1.PersistentVolume, parameters map[string]
 	regionID := defaultRegion
 	// If default Region is not passed default it to cluster region ID.
 	if defaultRegion == "" {
-		regionID = alibaba.clusterRegion
+		regionID = alibaba.ClusterRegion
 	}
 	slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, defaultRegion)
 	return &AlibabaPVKey{
@@ -1356,7 +1357,7 @@ func determinePVRegion(pv *v1.PersistentVolume) string {
 
 	if pvZone == "" {
 		// zone and regionID labels are optional in Alibaba PV creation, while PV through UI creation put's a zone PV is associated with and the region
-		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to clusterRegion.
+		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to ClusterRegion.
 		if pv.Spec.NodeAffinity != nil {
 			nodeAffinity := pv.Spec.NodeAffinity
 			if nodeAffinity.Required != nil && nodeAffinity.Required.NodeSelectorTerms != nil {

+ 2 - 2
pkg/cloud/aliyunprovider_test.go → pkg/cloud/alibaba/provider_test.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"fmt"
@@ -9,7 +9,7 @@ import (
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	v1 "k8s.io/api/core/v1"
-	resource "k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/api/resource"
 )
 
 func TestCreateDescribePriceACSRequest(t *testing.T) {

+ 233 - 0
pkg/cloud/aws/athenaconfiguration.go

@@ -0,0 +1,233 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AthenaConfiguration holds the settings required to query AWS cost data through
// Athena; Workgroup is the only field Validate treats as optional.
type AthenaConfiguration struct {
	Bucket     string     `json:"bucket"`     // S3 bucket backing the Athena table; second half of Key()
	Region     string     `json:"region"`     // AWS region used when building the Athena client
	Database   string     `json:"database"`   // Athena database name
	Table      string     `json:"table"`      // Athena table name
	Workgroup  string     `json:"workgroup"`  // optional Athena workgroup
	Account    string     `json:"account"`    // AWS account ID; first half of Key()
	Authorizer Authorizer `json:"authorizer"` // credential source for AWS API calls
}
+
+func (ac *AthenaConfiguration) Validate() error {
+
+	// Validate Authorizer
+	if ac.Authorizer == nil {
+		return fmt.Errorf("AthenaConfiguration: missing Authorizer")
+	}
+
+	err := ac.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: %s", err)
+	}
+
+	// Validate base properties
+	if ac.Bucket == "" {
+		return fmt.Errorf("AthenaConfiguration: missing bucket")
+	}
+
+	if ac.Region == "" {
+		return fmt.Errorf("AthenaConfiguration: missing region")
+	}
+
+	if ac.Database == "" {
+		return fmt.Errorf("AthenaConfiguration: missing database")
+	}
+
+	if ac.Table == "" {
+		return fmt.Errorf("AthenaConfiguration: missing table")
+	}
+
+	if ac.Account == "" {
+		return fmt.Errorf("AthenaConfiguration: missing account")
+	}
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AthenaConfiguration)
+	if !ok {
+		return false
+	}
+
+	if ac.Authorizer != nil {
+		if !ac.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ac.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if ac.Region != thatConfig.Region {
+		return false
+	}
+
+	if ac.Database != thatConfig.Database {
+		return false
+	}
+
+	if ac.Table != thatConfig.Table {
+		return false
+	}
+
+	if ac.Workgroup != thatConfig.Workgroup {
+		return false
+	}
+
+	if ac.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ac *AthenaConfiguration) Sanitize() config.Config {
+	return &AthenaConfiguration{
+		Bucket:     ac.Bucket,
+		Region:     ac.Region,
+		Database:   ac.Database,
+		Table:      ac.Table,
+		Workgroup:  ac.Workgroup,
+		Account:    ac.Account,
+		Authorizer: ac.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
// Key returns the unique identifier "<account>/<bucket>" for this configuration.
func (ac *AthenaConfiguration) Key() string {
	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
}
+
// UnmarshalJSON decodes an AthenaConfiguration from JSON. A custom implementation is
// required because Authorizer is an interface whose concrete type must be selected at
// decode time via SelectAuthorizerByType.
func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
	var f interface{}
	err := json.Unmarshal(b, &f)
	if err != nil {
		return err
	}

	// NOTE(review): this assertion panics if the payload is not a JSON object —
	// presumably callers only pass objects; confirm.
	fmap := f.(map[string]interface{})

	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Bucket = bucket

	region, err := config.GetInterfaceValue[string](fmap, "region")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Region = region

	database, err := config.GetInterfaceValue[string](fmap, "database")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Database = database

	table, err := config.GetInterfaceValue[string](fmap, "table")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Table = table

	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Workgroup = workgroup

	account, err := config.GetInterfaceValue[string](fmap, "account")
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Account = account

	// The authorizer key must be present; its concrete type is resolved by
	// SelectAuthorizerByType from the embedded type property.
	authAny, ok := fmap["authorizer"]
	if !ok {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
	}
	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
	if err != nil {
		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
	}
	ac.Authorizer = authorizer

	return nil
}
+
+func (ac *AthenaConfiguration) GetAthenaClient() (*athena.Client, error) {
+	cfg, err := ac.Authorizer.CreateAWSConfig(ac.Region)
+	if err != nil {
+		return nil, err
+	}
+	cli := athena.NewFromConfig(cfg)
+	return cli, nil
+}
+
+// ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
+// legacy behavior
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+	if aai.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	if aai.ServiceKeyName == "" && aai.ServiceKeySecret == "" {
+		authorizer = &ServiceAccount{}
+	} else {
+		authorizer = &AccessKey{
+			ID:     aai.ServiceKeyName,
+			Secret: aai.ServiceKeySecret,
+		}
+	}
+
+	// Wrap Authorizer with AssumeRole if MasterPayerArn is set
+	if aai.MasterPayerARN != "" {
+		authorizer = &AssumeRole{
+			Authorizer: authorizer,
+			RoleARN:    aai.MasterPayerARN,
+		}
+	}
+
+	var config config.KeyedConfig
+	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
+		config = &AthenaConfiguration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Database:   aai.AthenaDatabase,
+			Table:      aai.AthenaTable,
+			Workgroup:  aai.AthenaWorkgroup,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	} else {
+		config = &S3Configuration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	}
+
+	return config
+}

+ 594 - 0
pkg/cloud/aws/athenaconfiguration_test.go

@@ -0,0 +1,594 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestAthenaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   AthenaConfiguration
+		expected error
+	}{
+		"valid config access key": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: AthenaConfiguration{
+				Bucket:     "",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing bucket"),
+		},
+		"missing region": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing region"),
+		},
+		"missing database": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing database"),
+		},
+		"missing table": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing table"),
+		},
+		"missing workgroup": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"missing account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     AthenaConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket2",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region2",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different database": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database2",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table2",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different workgroup": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup2",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config AthenaConfiguration
+	}{
+		"Empty Config": {
+			config: AthenaConfiguration{},
+		},
+		"AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &AthenaConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 208 - 0
pkg/cloud/aws/athenaquerier.go

@@ -0,0 +1,208 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
+// AthenaQuerier wraps an AthenaConfiguration with helpers for executing and
+// paging Athena queries.
+type AthenaQuerier struct {
+	AthenaConfiguration
+}
+
+// Equals reports whether config is an *AthenaQuerier whose embedded
+// AthenaConfiguration is equal to this one's.
+func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AthenaQuerier)
+	if !ok {
+		return false
+	}
+
+	return aq.AthenaConfiguration.Equals(&thatConfig.AthenaConfiguration)
+}
+
+// QueryAthenaPaginated executes athena query and processes results. An error from this method indicates a
+// FAILED_CONNECTION CloudConnectionStatus and should immediately stop the caller to maintain the correct CloudConnectionStatus
+func (aq *AthenaQuerier) QueryAthenaPaginated(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+
+	queryExecutionCtx := &types.QueryExecutionContext{
+		Database: aws.String(aq.Database),
+	}
+
+	resultConfiguration := &types.ResultConfiguration{
+		OutputLocation: aws.String(aq.Bucket),
+	}
+	startQueryExecutionInput := &athena.StartQueryExecutionInput{
+		QueryString:           aws.String(query),
+		QueryExecutionContext: queryExecutionCtx,
+		ResultConfiguration:   resultConfiguration,
+	}
+
+	// Only set if there is a value, the default input is nil
+	if aq.Workgroup != "" {
+		startQueryExecutionInput.WorkGroup = aws.String(aq.Workgroup)
+	}
+
+	// Create Athena Client; fail fast if the Authorizer cannot produce a
+	// config, since a nil client would panic below.
+	cli, err := aq.AthenaConfiguration.GetAthenaClient()
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: failed to create Athena client: %s", err.Error())
+	}
+
+	// Query Athena
+	startQueryExecutionOutput, err := cli.StartQueryExecution(ctx, startQueryExecutionInput)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: start query error: %s", err.Error())
+	}
+	err = waitForQueryToComplete(ctx, cli, startQueryExecutionOutput.QueryExecutionId)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: query execution error: %s", err.Error())
+	}
+	queryResultsInput := &athena.GetQueryResultsInput{
+		QueryExecutionId: startQueryExecutionOutput.QueryExecutionId,
+	}
+	getQueryResultsPaginator := athena.NewGetQueryResultsPaginator(cli, queryResultsInput)
+	for getQueryResultsPaginator.HasMorePages() {
+		pg, err := getQueryResultsPaginator.NextPage(ctx)
+		if err != nil {
+			// Return (rather than log-and-continue) so a persistent paginator
+			// error cannot spin this loop forever; per the contract above,
+			// errors must stop the caller immediately.
+			return fmt.Errorf("QueryAthenaPaginated: NextPage error: %s", err.Error())
+		}
+		fn(pg)
+	}
+	return nil
+}
+
+// waitForQueryToComplete polls the query execution every 2 seconds until it
+// reaches a terminal state, honoring ctx cancellation between polls. It
+// returns nil on SUCCEEDED and an error for any other terminal state.
+func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
+	inp := &athena.GetQueryExecutionInput{
+		QueryExecutionId: queryExecutionID,
+	}
+	for {
+		qe, err := client.GetQueryExecution(ctx, inp)
+		if err != nil {
+			return err
+		}
+		state := qe.QueryExecution.Status.State
+		if state == "SUCCEEDED" {
+			return nil
+		}
+		// Anything other than RUNNING/QUEUED is terminal (FAILED, CANCELLED).
+		if state != "RUNNING" && state != "QUEUED" {
+			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
+		}
+		// Sleep, but abort promptly if the context is cancelled instead of
+		// blocking in an uninterruptible time.Sleep.
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(2 * time.Second):
+		}
+	}
+}
+
+// GetAthenaRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings. Returns "" for unknown columns, indexes out of range
+// of the row's data, or nil values.
+func GetAthenaRowValue(row types.Row, queryColumnIndexes map[string]int, columnName string) string {
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	// Guard against malformed rows with fewer columns than the header to
+	// avoid an index-out-of-range panic.
+	if columnIndex < 0 || columnIndex >= len(row.Data) {
+		return ""
+	}
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return ""
+	}
+	return stringutil.Bank(*valuePointer)
+}
+
+// GetAthenaRowValueFloat retrieves a value from an athena row based on column
+// name and converts it to a float64 if possible. It returns an error for an
+// unknown column, an out-of-range index, a nil field, or an unparseable value.
+func GetAthenaRowValueFloat(row types.Row, queryColumnIndexes map[string]int, columnName string) (float64, error) {
+
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: missing column index: %s", columnName)
+	}
+
+	// Guard against malformed rows with fewer columns than the header to
+	// avoid an index-out-of-range panic.
+	if columnIndex < 0 || columnIndex >= len(row.Data) {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: column index %d out of range for column: %s", columnIndex, columnName)
+	}
+
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: nil field")
+	}
+
+	cost, err := strconv.ParseFloat(*valuePointer, 64)
+	if err != nil {
+		return cost, fmt.Errorf("getAthenaRowValueFloat: failed to parse %s: '%s': %s", columnName, *valuePointer, err.Error())
+	}
+	return cost, nil
+}
+
+// SelectAWSCategory maps a billing line item to a kubecost cost category.
+// Resource flags are checked first (network > node > volume); otherwise the
+// category is derived from the AWS service name.
+func SelectAWSCategory(isNode, isVol, isNetwork bool, providerID, service string) string {
+	// Network has the highest priority and is based on the usage type ending in "Bytes"
+	if isNetwork {
+		return kubecost.NetworkCategory
+	}
+	// The node and volume conditions are mutually exclusive.
+	// Provider ID has prefix "i-"
+	if isNode {
+		return kubecost.ComputeCategory
+	}
+	// Provider ID has prefix "vol-"
+	if isVol {
+		return kubecost.StorageCategory
+	}
+
+	// Default categories based on service
+	switch strings.ToUpper(service) {
+	case "AWSELB", "AWSGLUE", "AMAZONROUTE53":
+		return kubecost.NetworkCategory
+	case "AMAZONEC2", "AWSLAMBDA", "AMAZONELASTICACHE":
+		return kubecost.ComputeCategory
+	case "AMAZONEKS":
+		// Check if line item is a fargate pod
+		if strings.Contains(providerID, ":pod/") {
+			return kubecost.ComputeCategory
+		}
+		return kubecost.ManagementCategory
+	case "AMAZONS3", "AMAZONATHENA", "AMAZONRDS", "AMAZONDYNAMODB", "AWSSECRETSMANAGER", "AMAZONFSX":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}
+
+// parseARNRx captures the resource suffix after the final '/' in an ARN, e.g.
+// "a406f7761142e4ef58a8f2ba478d2db2" from
+// "arn:aws:elasticloadbalancing:us-east-1:297945954695:loadbalancer/a406f7761142e4ef58a8f2ba478d2db2".
+// A raw string literal avoids the doubled escaping of the previous quoted form.
+var parseARNRx = regexp.MustCompile(`^.+/(.+)?`)
+
+// ParseARN returns the resource portion of an ARN-style ID. When the ID does
+// not match the ARN shape it is returned unchanged (with a deduped log line).
+func ParseARN(id string) string {
+	match := parseARNRx.FindStringSubmatch(id)
+	if len(match) == 0 {
+		if id != "" {
+			log.DedupedInfof(10, "aws.parseARN: failed to parse %s", id)
+		}
+		return id
+	}
+	return match[len(match)-1]
+}
+
+// GetAthenaQueryFunc returns a page-processing callback that skips the
+// column-header row on the first page and applies fn to every remaining row.
+func GetAthenaQueryFunc(fn func(types.Row)) func(*athena.GetQueryResultsOutput) bool {
+	pageNum := 0
+	processItemQueryResults := func(page *athena.GetQueryResultsOutput) bool {
+		if page == nil {
+			log.Errorf("AthenaQuerier: Athena page is nil")
+			return false
+		} else if page.ResultSet == nil {
+			log.Errorf("AthenaQuerier: Athena page.ResultSet is nil")
+			return false
+		}
+		rows := page.ResultSet.Rows
+		// The first page leads with the header row; drop it. The length guard
+		// prevents a slice-bounds panic when the first page has no rows.
+		if pageNum == 0 && len(rows) > 0 {
+			rows = rows[1:]
+		}
+
+		for _, row := range rows {
+			fn(row)
+		}
+		pageNum++
+		return true
+	}
+	return processItemQueryResults
+}

+ 251 - 0
pkg/cloud/aws/authorizer.go

@@ -0,0 +1,251 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// Type identifiers written to / read from the authorizer type JSON property
+// to round-trip concrete Authorizer implementations.
+const AccessKeyAuthorizerType = "AWSAccessKey"
+const ServiceAccountAuthorizerType = "AWSServiceAccount"
+const AssumeRoleAuthorizerType = "AWSAssumeRole"
+
+// Authorizer implementations provide aws.Config for AWS SDK calls
+type Authorizer interface {
+	config.Authorizer
+	CreateAWSConfig(string) (aws.Config, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	case ServiceAccountAuthorizerType:
+		return &ServiceAccount{}, nil
+	case AssumeRoleAuthorizerType:
+		return &AssumeRole{}, nil
+	default:
+		return nil, fmt.Errorf("AWS: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
+// AccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+type AccessKey struct {
+	ID     string `json:"id"`
+	Secret string `json:"secret"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["id"] = ak.ID
+	fmap["secret"] = ak.Secret
+	return json.Marshal(fmap)
+}
+
+// Retrieve returns a set of awsV2 credentials using the AccessKey's key and secret.
+// This fulfils the awsV2.CredentialsProvider interface contract.
+func (ak *AccessKey) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	return aws.Credentials{
+		AccessKeyID:     ak.ID,
+		SecretAccessKey: ak.Secret,
+	}, nil
+}
+
+// Validate checks that both the ID and the Secret are populated.
+func (ak *AccessKey) Validate() error {
+	if ak.ID == "" {
+		return fmt.Errorf("AccessKey: missing ID")
+	}
+	if ak.Secret == "" {
+		return fmt.Errorf("AccessKey: missing Secret")
+	}
+	return nil
+}
+
+// Equals reports whether config is an *AccessKey with identical ID and Secret.
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.ID != thatConfig.ID {
+		return false
+	}
+	if ak.Secret != thatConfig.Secret {
+		return false
+	}
+	return true
+}
+
+// Sanitize returns a copy with the Secret redacted for safe display/logging.
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		ID:     ak.ID,
+		Secret: config.Redacted,
+	}
+}
+
+// CreateAWSConfig creates an AWS SDK V2 Config for the credentials that it contains for the provided region
+func (ak *AccessKey) CreateAWSConfig(region string) (cfg aws.Config, err error) {
+	err = ak.Validate()
+	if err != nil {
+		return cfg, err
+	}
+	// The AWS SDK v2 requires an object fulfilling the CredentialsProvider interface, which AccessKey does
+	cfg, err = awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithCredentialsProvider(ak), awsconfig.WithRegion(region))
+	if err != nil {
+		// %w keeps the SDK error in the chain for errors.Is / errors.As.
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region %s: %w", region, err)
+	}
+	return cfg, nil
+}
+
+// ServiceAccount uses pod annotations along with a service account to authenticate integrations
+type ServiceAccount struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	return json.Marshal(fmap)
+}
+
+// Validate has nothing to check at this level; the connection will fail later
+// if the Pod Annotation and Service Account are not configured correctly.
+func (sa *ServiceAccount) Validate() error {
+	return nil
+}
+
+// Equals reports whether config is a *ServiceAccount; the type carries no state.
+func (sa *ServiceAccount) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*ServiceAccount)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+// Sanitize returns a fresh ServiceAccount; there are no secrets to redact.
+func (sa *ServiceAccount) Sanitize() config.Config {
+	return &ServiceAccount{}
+}
+
+// CreateAWSConfig loads the default SDK config for the region, relying on the
+// pod's ambient credentials (service account / annotations).
+func (sa *ServiceAccount) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(region))
+	if err != nil {
+		// %w keeps the SDK error in the chain for errors.Is / errors.As.
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region from annotation %s: %w", region, err)
+	}
+	return cfg, nil
+}
+
+// AssumeRole is a wrapper for another Authorizer which adds an assumed role to the configuration
+type AssumeRole struct {
+	Authorizer Authorizer `json:"authorizer"`
+	RoleARN    string     `json:"roleARN"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap["roleARN"] = ara.RoleARN
+	fmap["authorizer"] = ara.Authorizer
+	return json.Marshal(fmap)
+}
+
+// UnmarshalJSON is required for AssumeRole because it needs to unmarshal an Authorizer interface
+func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	// Checked assertion: non-object JSON previously caused a panic here.
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: JSON is not an object")
+	}
+
+	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	if err != nil {
+		// Error prefix corrected; it previously said "StorageConfiguration"
+		// (copy/paste from another type).
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.RoleARN = roleARN
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.Authorizer = authorizer
+
+	return nil
+}
+
+// CreateAWSConfig builds the base Authorizer's config and replaces its
+// credentials with ones that assume RoleARN via STS.
+func (ara *AssumeRole) CreateAWSConfig(region string) (aws.Config, error) {
+	// Propagate the base Authorizer's error instead of discarding it; an
+	// invalid base config would otherwise surface as a confusing STS failure.
+	cfg, err := ara.Authorizer.CreateAWSConfig(region)
+	if err != nil {
+		return cfg, err
+	}
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the RoleARN.
+	stsSvc := sts.NewFromConfig(cfg)
+	creds := stscreds.NewAssumeRoleProvider(stsSvc, ara.RoleARN)
+	cfg.Credentials = aws.NewCredentialsCache(creds)
+	return cfg, nil
+}
+
+// Validate checks that a base Authorizer is present and valid and that a
+// RoleARN is configured.
+func (ara *AssumeRole) Validate() error {
+	if ara.Authorizer == nil {
+		// typo fix: "misisng" -> "missing"
+		return fmt.Errorf("AssumeRole: missing base Authorizer")
+	}
+	err := ara.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if ara.RoleARN == "" {
+		return fmt.Errorf("AssumeRole: missing RoleARN configuration")
+	}
+
+	return nil
+}
+
+// Equals reports whether config is an *AssumeRole with the same RoleARN and an
+// equal wrapped Authorizer (nil Authorizers are equal only to each other).
+func (ara *AssumeRole) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*AssumeRole)
+	if !ok {
+		return false
+	}
+	if ara.RoleARN != that.RoleARN {
+		return false
+	}
+	if ara.Authorizer == nil {
+		return that.Authorizer == nil
+	}
+	return ara.Authorizer.Equals(that.Authorizer)
+}
+
+func (ara *AssumeRole) Sanitize() config.Config {
+	return &AssumeRole{
+		Authorizer: ara.Authorizer.Sanitize().(Authorizer),
+		RoleARN:    ara.RoleARN,
+	}
+}

+ 67 - 0
pkg/cloud/aws/authorizer_test.go

@@ -0,0 +1,67 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+// TestAuthorizerJSON_Sanitize verifies that Sanitize redacts secrets while
+// preserving non-sensitive fields for each Authorizer implementation,
+// including Authorizers nested inside AssumeRole.
+func TestAuthorizerJSON_Sanitize(t *testing.T) {
+
+	testCases := map[string]struct {
+		input    Authorizer
+		expected Authorizer
+	}{
+		"Access Key": {
+			input: &AccessKey{
+				ID:     "ID",
+				Secret: "Secret",
+			},
+			expected: &AccessKey{
+				ID:     "ID",
+				Secret: config.Redacted,
+			},
+		},
+		"Service Account": {
+			input:    &ServiceAccount{},
+			expected: &ServiceAccount{},
+		},
+		"Master Payer Access Key": {
+			input: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: "Secret",
+				},
+				RoleARN: "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: config.Redacted,
+				},
+				RoleARN: "role arn",
+			},
+		},
+		"Master Payer Service Account": {
+			input: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Convert to AuthorizerJSON for sanitization
+			sanitizedAuthorizer := tc.input.Sanitize()
+
+			if !tc.expected.Equals(sanitizedAuthorizer) {
+				t.Error("Authorizer was not as expected after Sanitization")
+			}
+
+		})
+	}
+}

+ 2 - 22
pkg/cloud/aws/awsprovider.go → pkg/cloud/aws/provider.go

@@ -187,6 +187,7 @@ type AWS struct {
 }
 
 // AWSAccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+// Deprecated: v1.104 Use AccessKey instead
 type AWSAccessKey struct {
 	AccessKeyID     string `json:"aws_access_key_id"`
 	SecretAccessKey string `json:"aws_secret_access_key"`
@@ -393,6 +394,7 @@ type AwsSpotFeedInfo struct {
 }
 
 // AwsAthenaInfo contains configuration for CUR integration
+// Deprecated: v1.104 Use AthenaConfiguration instead
 type AwsAthenaInfo struct {
 	AthenaBucketName string `json:"athenaBucketName"`
 	AthenaRegion     string `json:"athenaRegion"`
@@ -1848,28 +1850,6 @@ func (aws *AWS) QueryAthenaPaginated(ctx context.Context, query string, fn func(
 	return nil
 }
 
-func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
-	inp := &athena.GetQueryExecutionInput{
-		QueryExecutionId: queryExecutionID,
-	}
-	isQueryStillRunning := true
-	for isQueryStillRunning {
-		qe, err := client.GetQueryExecution(ctx, inp)
-		if err != nil {
-			return err
-		}
-		if qe.QueryExecution.Status.State == "SUCCEEDED" {
-			isQueryStillRunning = false
-			continue
-		}
-		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
-			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
-		}
-		time.Sleep(2 * time.Second)
-	}
-	return nil
-}
-
 type SavingsPlanData struct {
 	ResourceID     string
 	EffectiveCost  float64

+ 0 - 0
pkg/cloud/aws/awsprovider_test.go → pkg/cloud/aws/provider_test.go


+ 134 - 0
pkg/cloud/aws/s3configuration.go

@@ -0,0 +1,134 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// S3Configuration describes an S3 bucket holding Cost and Usage Report
// data and the Authorizer used to obtain credentials for it.
type S3Configuration struct {
	Bucket     string     `json:"bucket"`     // S3 bucket name
	Region     string     `json:"region"`     // AWS region of the bucket
	Account    string     `json:"account"`    // AWS account identifier
	Authorizer Authorizer `json:"authorizer"` // credential provider for S3 access
}
+
+func (s3c *S3Configuration) Validate() error {
+	// Validate Authorizer
+	if s3c.Authorizer == nil {
+		return fmt.Errorf("S3Configuration: missing Authorizer")
+	}
+
+	err := s3c.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("S3Configuration: %s", err)
+	}
+
+	// Validate base properties
+	if s3c.Bucket == "" {
+		return fmt.Errorf("S3Configuration: missing bucket")
+	}
+
+	if s3c.Region == "" {
+		return fmt.Errorf("S3Configuration: missing region")
+	}
+
+	if s3c.Account == "" {
+		return fmt.Errorf("S3Configuration: missing account")
+	}
+
+	return nil
+}
+
+func (s3c *S3Configuration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*S3Configuration)
+	if !ok {
+		return false
+	}
+
+	if s3c.Authorizer != nil {
+		if !s3c.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if s3c.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if s3c.Region != thatConfig.Region {
+		return false
+	}
+
+	if s3c.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (s3c *S3Configuration) Sanitize() config.Config {
+	return &S3Configuration{
+		Bucket:     s3c.Bucket,
+		Region:     s3c.Region,
+		Account:    s3c.Account,
+		Authorizer: s3c.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (s3c *S3Configuration) Key() string {
+	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
+}
+
// UnmarshalJSON deserializes an S3Configuration, resolving the concrete
// Authorizer implementation from the embedded type tag via
// SelectAuthorizerByType.
func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
	var f interface{}
	err := json.Unmarshal(b, &f)
	if err != nil {
		return err
	}

	// NOTE(review): this assertion panics (rather than returning an
	// error) when the payload is not a JSON object — confirm callers
	// only pass object payloads.
	fmap := f.(map[string]interface{})

	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Bucket = bucket

	region, err := config.GetInterfaceValue[string](fmap, "region")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Region = region

	account, err := config.GetInterfaceValue[string](fmap, "account")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Account = account

	// The authorizer is polymorphic; its concrete type is selected from
	// the type tag inside the nested object.
	authAny, ok := fmap["authorizer"]
	if !ok {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
	}
	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Authorizer = authorizer

	return nil
}
+
+func (s3c *S3Configuration) CreateAWSConfig() (aws.Config, error) {
+	return s3c.Authorizer.CreateAWSConfig(s3c.Region)
+}

+ 40 - 0
pkg/cloud/aws/s3connection.go

@@ -0,0 +1,40 @@
+package aws
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// S3Connection wraps an S3Configuration with helpers for building an S3
// client and listing the configured bucket.
type S3Connection struct {
	S3Configuration
}
+
+func (s3c *S3Connection) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3Connection)
+	if !ok {
+		return false
+	}
+
+	return s3c.S3Configuration.Equals(&thatConfig.S3Configuration)
+}
+
+func (s3c *S3Connection) GetS3Client() (*s3.Client, error) {
+	cfg, err := s3c.CreateAWSConfig()
+	if err != nil {
+		return nil, err
+	}
+	return s3.NewFromConfig(cfg), nil
+}
+
+func (s3c *S3Connection) ListObjects(cli *s3.Client) (*s3.ListObjectsOutput, error) {
+	objs, err := cli.ListObjects(context.TODO(), &s3.ListObjectsInput{
+		Bucket: aws.String(s3c.Bucket),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return objs, err
+}

+ 387 - 0
pkg/cloud/aws/s3connection_test.go

@@ -0,0 +1,387 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestS3Configuration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   S3Configuration
+		expected error
+	}{
+		"valid config access key": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("S3Configuration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("S3Configuration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: S3Configuration{
+				Bucket:     "",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing bucket"),
+		},
+		"missing region": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing region"),
+		},
+		"missing account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     S3Configuration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket2",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
// TestS3Configuration_JSON round-trips configurations through
// json.Marshal/Unmarshal and asserts equality with the original,
// covering nested and doubly-nested AssumeRole authorizers.
func TestS3Configuration_JSON(t *testing.T) {
	testCases := map[string]struct {
		config S3Configuration
	}{
		"Empty Config": {
			config: S3Configuration{},
		},
		"AccessKey": {
			config: S3Configuration{
				Bucket:  "bucket",
				Region:  "region",
				Account: "account",
				Authorizer: &AccessKey{
					ID:     "id",
					Secret: "secret",
				},
			},
		},

		"ServiceAccount": {
			config: S3Configuration{
				Bucket:     "bucket",
				Region:     "region",
				Account:    "account",
				Authorizer: &ServiceAccount{},
			},
		},
		"AssumeRole with AccessKey": {
			config: S3Configuration{
				Bucket:  "bucket",
				Region:  "region",
				Account: "account",
				Authorizer: &AssumeRole{
					Authorizer: &AccessKey{
						ID:     "id",
						Secret: "secret",
					},
					RoleARN: "12345",
				},
			},
		},
		"AssumeRole with ServiceAccount": {
			config: S3Configuration{
				Bucket:  "bucket",
				Region:  "region",
				Account: "account",
				Authorizer: &AssumeRole{
					Authorizer: &ServiceAccount{},
					RoleARN:    "12345",
				},
			},
		},
		"RoleArnNil": {
			config: S3Configuration{
				Bucket:  "bucket",
				Region:  "region",
				Account: "account",
				Authorizer: &AssumeRole{
					Authorizer: nil,
					RoleARN:    "12345",
				},
			},
		},
		"AssumeRole with AssumeRole with ServiceAccount": {
			config: S3Configuration{
				Bucket:  "bucket",
				Region:  "region",
				Account: "account",
				Authorizer: &AssumeRole{
					Authorizer: &AssumeRole{
						RoleARN:    "12345",
						Authorizer: &ServiceAccount{},
					},
					RoleARN: "12345",
				},
			},
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			// test JSON Marshalling
			configJSON, err := json.Marshal(testCase.config)
			if err != nil {
				t.Errorf("failed to marshal configuration: %s", err.Error())
			}
			log.Info(string(configJSON))
			unmarshalledConfig := &S3Configuration{}
			err = json.Unmarshal(configJSON, unmarshalledConfig)
			if err != nil {
				t.Errorf("failed to unmarshal configuration: %s", err.Error())
			}

			// Equality (not byte comparison) proves the authorizer type
			// tags were resolved back to the right concrete types.
			if !testCase.config.Equals(unmarshalledConfig) {
				t.Error("config does not equal unmarshalled config")
			}
		})
	}
}

+ 181 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -0,0 +1,181 @@
+package aws
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
// S3SelectQuerier runs S3 Select queries against CUR objects reachable
// through its embedded S3Connection.
type S3SelectQuerier struct {
	S3Connection
}
+
+func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3SelectQuerier)
+	if !ok {
+		return false
+	}
+
+	return s3sq.S3Connection.Equals(&thatConfig.S3Connection)
+}
+
+func (s3sq *S3SelectQuerier) Query(query string, queryKeys []string, cli *s3.Client, fn func(*csv.Reader) error) error {
+	for _, queryKey := range queryKeys {
+		reader, err2 := s3sq.fetchCSVReader(query, queryKey, cli, s3Types.FileHeaderInfoUse)
+		if err2 != nil {
+			return err2
+		}
+		err2 = fn(reader)
+		if err2 != nil {
+			return err2
+		}
+	}
+
+	return nil
+}
+
+// GetQueryKeys returns a list of s3 object names, where the there are 1 object for each month within the range between
+// start and end
+func (s3sq *S3SelectQuerier) GetQueryKeys(start, end time.Time, client *s3.Client) ([]string, error) {
+	objs, err := s3sq.ListObjects(client)
+	if err != nil {
+		return nil, err
+	}
+
+	monthStrings, err := getMonthStrings(start, end)
+	if err != err {
+		return nil, err
+	}
+
+	var queryKeys []string
+	// Find all matching "csv.gz" files per monthString
+	for _, monthStr := range monthStrings {
+		for _, obj := range objs.Contents {
+			if strings.Contains(*obj.Key, monthStr) && strings.HasSuffix(*obj.Key, ".csv.gz") {
+				queryKeys = append(queryKeys, *obj.Key)
+			}
+		}
+	}
+
+	if len(queryKeys) == 0 {
+		return nil, fmt.Errorf("no CUR files for given time range")
+	}
+
+	return queryKeys, nil
+}
+
+func (s3sq *S3SelectQuerier) fetchCSVReader(query string, queryKey string, client *s3.Client, fileHeaderInfo s3Types.FileHeaderInfo) (*csv.Reader, error) {
+	input := &s3.SelectObjectContentInput{
+		Bucket:         aws.String(s3sq.Bucket),
+		Key:            aws.String(queryKey),
+		Expression:     aws.String(query),
+		ExpressionType: s3Types.ExpressionTypeSql,
+		InputSerialization: &s3Types.InputSerialization{
+			CompressionType: s3Types.CompressionTypeGzip,
+			CSV: &s3Types.CSVInput{
+				FileHeaderInfo: fileHeaderInfo,
+			},
+		},
+		OutputSerialization: &s3Types.OutputSerialization{
+			CSV: &s3Types.CSVOutput{},
+		},
+	}
+
+	res, err := client.SelectObjectContent(context.TODO(), input)
+	if err != nil {
+		return nil, err
+	}
+	resStream := res.GetStream()
+	// todo: this needs work
+	results, resultWriter := io.Pipe()
+	go func() {
+		defer resultWriter.Close()
+		defer resStream.Close()
+		resStream.Events()
+		for event := range resStream.Events() {
+			switch e := event.(type) {
+			case *s3Types.SelectObjectContentEventStreamMemberRecords:
+				resultWriter.Write(e.Value.Payload)
+			case *s3Types.SelectObjectContentEventStreamMemberEnd:
+				break
+			}
+
+		}
+	}()
+
+	if err := resStream.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read from SelectObjectContent EventStream, %v", err)
+	}
+
+	return csv.NewReader(results), nil
+}
+
+func getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	dateTemplate := "%d%02d01-%d%02d01/"
+	// set to first of the month
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	nextMonth := currMonth.AddDate(0, 1, 0)
+	monthStr := fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+
+	// Create string for end condition
+	endMonth := end.AddDate(0, 0, -end.Day()+1)
+	endNextMonth := endMonth.AddDate(0, 1, 0)
+	endStr := fmt.Sprintf(dateTemplate, endMonth.Year(), int(endMonth.Month()), endNextMonth.Year(), int(endNextMonth.Month()))
+
+	var monthStrs []string
+	monthStrs = append(monthStrs, monthStr)
+
+	for monthStr != endStr {
+		currMonth = nextMonth
+		nextMonth = nextMonth.AddDate(0, 1, 0)
+		monthStr = fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+// GetCSVRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetCSVRowValue(row []string, queryColumnIndexes map[string]int, columnName string) string {
+	if row == nil {
+		return ""
+	}
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	return stringutil.Bank(row[columnIndex])
+}
+
// GetCSVRowValueFloat retrieves the value at the named column from a
// CSV row and parses it as a float64, reporting a descriptive error
// for a nil row, an unknown column, or an unparsable value.
func GetCSVRowValueFloat(row []string, queryColumnIndexes map[string]int, columnName string) (float64, error) {
	if row == nil {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: nil row")
	}
	idx, ok := queryColumnIndexes[columnName]
	if !ok {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: missing column index: %s", columnName)
	}
	value, err := strconv.ParseFloat(row[idx], 64)
	if err != nil {
		return value, fmt.Errorf("getCSVRowValueFloat: failed to parse %s: '%s': %s", columnName, row[idx], err.Error())
	}
	return value, nil
}

+ 80 - 0
pkg/cloud/azure/authorizer.go

@@ -0,0 +1,80 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// AccessKeyAuthorizerType is the serialized type tag for AccessKey.
const AccessKeyAuthorizerType = "AzureAccessKey"

// Authorizer extends the generic cloud config.Authorizer with the
// ability to produce Azure Blob Storage credentials.
type Authorizer interface {
	config.Authorizer
	GetBlobCredentials() (azblob.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("azure: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey authorizes Azure Blob Storage access with a storage account
// name and its shared access key.
type AccessKey struct {
	AccessKey string `json:"accessKey"` // storage account shared key
	Account   string `json:"account"`   // storage account name
}
+
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKey"] = ak.AccessKey
+	fmap["account"] = ak.Account
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKey == "" {
+		return fmt.Errorf("AccessKey: missing access key")
+	}
+	if ak.Account == "" {
+		return fmt.Errorf("AccessKey: missing account")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKey != thatConfig.AccessKey {
+		return false
+	}
+	if ak.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
// Sanitize returns a display-safe copy with the access key redacted;
// the account name is not considered secret.
func (ak *AccessKey) Sanitize() config.Config {
	return &AccessKey{
		AccessKey: config.Redacted,
		Account:   ak.Account,
	}
}
+
// GetBlobCredentials builds azblob shared-key credentials from the
// account name and access key.
func (ak *AccessKey) GetBlobCredentials() (azblob.Credential, error) {
	// Create a default request pipeline using your storage account name and account key.
	return azblob.NewSharedKeyCredential(ak.Account, ak.AccessKey)
}

+ 322 - 0
pkg/cloud/azure/billingexportparser.go

@@ -0,0 +1,322 @@
+package azure
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// azureDateLayout is the usage-date layout used by standard exports;
// AzureEnterpriseDateLayout is the layout used by Enterprise exports.
const azureDateLayout = "2006-01-02"
const AzureEnterpriseDateLayout = "01/02/2006"

// groupRegex matches each "/segment" of an Azure resource/provider ID.
var groupRegex = regexp.MustCompile("(/[^/]+)")
+
// BillingRowValues holder for Azure Billing Values
type BillingRowValues struct {
	Date            time.Time         // usage date of the line item
	MeterCategory   string            // Azure meter category (e.g. "Virtual Machines")
	SubscriptionID  string            // subscription GUID
	InvoiceEntityID string            // billing account; falls back to subscription ID when absent
	InstanceID      string            // resource/instance identifier
	Service         string            // consumed service (e.g. "Microsoft.Compute")
	Tags            map[string]string // resource tags (non-empty string values only)
	AdditionalInfo  map[string]any    // parsed AdditionalInfo JSON column
	Cost            float64           // PAYG cost in billing currency (falls back to NetCost)
	NetCost         float64           // pre-tax/actual cost in billing currency
}
+
+func (brv *BillingRowValues) IsCompute(category string) bool {
+	if category == kubecost.ComputeCategory {
+		return true
+	}
+
+	if category == kubecost.StorageCategory || category == kubecost.NetworkCategory {
+		if brv.Service == "Microsoft.Compute" {
+			return true
+		}
+	}
+	if category == kubecost.NetworkCategory && brv.MeterCategory == "Virtual Network" {
+		return true
+	}
+	return false
+}
+
// BillingExportParser holds indexes of relevant fields in an Azure
// Billing CSV, in addition to the correct date format for the export.
type BillingExportParser struct {
	Date            int    // column index of the usage date
	MeterCategory   int    // column index of the meter category
	InvoiceEntityID int    // column index of the billing account (or subscription fallback)
	SubscriptionID  int    // column index of the subscription GUID
	InstanceID      int    // column index of the instance/resource ID
	Service         int    // column index of the consumed service
	Tags            int    // column index of the tags JSON
	AdditionalInfo  int    // column index of the AdditionalInfo JSON
	Cost            int    // column index of the PAYG cost (or net-cost fallback)
	NetCost         int    // column index of the net/pre-tax cost
	DateFormat      string // time layout for the Date column
}

// match "SubscriptionGuid" in "Abonnement-GUID (SubscriptionGuid)"
var getParenContentRegEx = regexp.MustCompile("\\((.*?)\\)")
+
+func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
+	// clear BOM from headers
+	if len(headers) != 0 {
+		headers[0] = strings.TrimPrefix(headers[0], "\xEF\xBB\xBF")
+	}
+
+	headerIndexes := map[string]int{}
+	for i, header := range headers {
+		// Azure Headers in different regions will have english headers in parentheses
+		match := getParenContentRegEx.FindStringSubmatch(header)
+		if len(match) != 0 {
+			header = match[len(match)-1]
+		}
+		headerIndexes[strings.ToLower(header)] = i
+	}
+
+	abp := &BillingExportParser{}
+
+	// Set Date Column and Date Format
+	if i, ok := headerIndexes["usagedatetime"]; ok {
+		abp.Date = i
+		abp.DateFormat = azureDateLayout
+	} else if j, ok2 := headerIndexes["date"]; ok2 {
+		abp.Date = j
+		abp.DateFormat = AzureEnterpriseDateLayout
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Date field")
+	}
+
+	// set Subscription ID
+	if i, ok := headerIndexes["subscriptionid"]; ok {
+		abp.SubscriptionID = i
+	} else if j, ok2 := headerIndexes["subscriptionguid"]; ok2 {
+		abp.SubscriptionID = j
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Subscription ID field")
+	}
+
+	// Set Billing ID
+	if i, ok := headerIndexes["billingaccountid"]; ok {
+		abp.InvoiceEntityID = i
+	} else if j, ok2 := headerIndexes["billingaccountname"]; ok2 {
+		abp.InvoiceEntityID = j
+	} else {
+		// if no billing ID column is present use subscription ID
+		abp.InvoiceEntityID = abp.SubscriptionID
+	}
+
+	// Set Instance ID
+	if i, ok := headerIndexes["instanceid"]; ok {
+		abp.InstanceID = i
+	} else if j, ok2 := headerIndexes["instancename"]; ok2 {
+		abp.InstanceID = j
+	} else if k, ok3 := headerIndexes["resourceid"]; ok3 {
+		abp.InstanceID = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Instance ID field")
+	}
+
+	// Set Meter Category
+	if i, ok := headerIndexes["metercategory"]; ok {
+		abp.MeterCategory = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Meter Category field")
+	}
+
+	// Set Tags
+	if i, ok := headerIndexes["tags"]; ok {
+		abp.Tags = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Tags field")
+	}
+
+	// Set Additional Info
+	if i, ok := headerIndexes["additionalinfo"]; ok {
+		abp.AdditionalInfo = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Additional Info field")
+	}
+
+	// Set Service
+	if i, ok := headerIndexes["consumedservice"]; ok {
+		abp.Service = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Service field")
+	}
+
+	// Set Net Cost
+	if i, ok := headerIndexes["costinbillingcurrency"]; ok {
+		abp.NetCost = i
+	} else if j, ok2 := headerIndexes["pretaxcost"]; ok2 {
+		abp.NetCost = j
+	} else if k, ok3 := headerIndexes["cost"]; ok3 {
+		abp.NetCost = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Net Cost field")
+	}
+
+	// Set Cost
+	if i, ok := headerIndexes["paygcostinbillingcurrency"]; ok {
+		abp.Cost = i
+	} else {
+		// if no Cost column is present use Net Cost column
+		abp.Cost = abp.NetCost
+	}
+
+	return abp, nil
+}
+
// ParseRow converts one billing-export CSV record into a
// BillingRowValues. It returns nil (after logging) when the record
// falls outside the [start, end) window or a required field fails to
// parse. Note that a date-parse failure mutates bep.DateFormat: the
// parser flips to the other known layout and keeps it for later rows.
func (bep *BillingExportParser) ParseRow(start, end time.Time, record []string) *BillingRowValues {
	usageDate, err := time.Parse(bep.DateFormat, record[bep.Date])
	if err != nil {
		// try other format, and switch if successful
		if bep.DateFormat == azureDateLayout {
			bep.DateFormat = AzureEnterpriseDateLayout
		} else {
			bep.DateFormat = azureDateLayout
		}
		usageDate, err = time.Parse(bep.DateFormat, record[bep.Date])
		// If parse still fails then return line
		if err != nil {
			log.Errorf("failed to parse usage date: '%s'", record[bep.Date])
			return nil
		}
	}

	// skip if usage data isn't in subject window
	if usageDate.Before(start) || !usageDate.Before(end) {
		return nil
	}

	cost, err := strconv.ParseFloat(record[bep.Cost], 64)
	if err != nil {
		log.Errorf("failed to parse cost: '%s'", record[bep.Cost])
		return nil
	}

	netCost, err := strconv.ParseFloat(record[bep.NetCost], 64)
	if err != nil {
		log.Errorf("failed to parse net cost: '%s'", record[bep.NetCost])
		return nil
	}

	// AdditionalInfo may arrive as a bare object body; wrap it before
	// unmarshalling. A parse failure is logged but not fatal.
	additionalInfo := make(map[string]any)
	additionalInfoJson := encloseInBrackets(record[bep.AdditionalInfo])
	if additionalInfoJson != "" {
		err = json.Unmarshal([]byte(additionalInfoJson), &additionalInfo)
		if err != nil {
			log.Errorf("Could not parse additional information %s, with Error: %s", additionalInfoJson, err.Error())
		}
	}

	// Tags are filtered down to non-empty string values only.
	tags := make(map[string]string)
	tagJson := encloseInBrackets(record[bep.Tags])
	if tagJson != "" {
		tagsAny := make(map[string]any)
		err = json.Unmarshal([]byte(tagJson), &tagsAny)
		if err != nil {
			log.Errorf("Could not parse tags: %v, with Error: %s", tagJson, err.Error())
		}

		for name, value := range tagsAny {
			if valueStr, ok := value.(string); ok && valueStr != "" {
				tags[name] = valueStr
			}
		}
	}

	return &BillingRowValues{
		Date:            usageDate,
		MeterCategory:   record[bep.MeterCategory],
		SubscriptionID:  record[bep.SubscriptionID],
		InvoiceEntityID: record[bep.InvoiceEntityID],
		InstanceID:      record[bep.InstanceID],
		Service:         record[bep.Service],
		Tags:            tags,
		AdditionalInfo:  additionalInfo,
		Cost:            cost,
		NetCost:         netCost,
	}
}
+
// encloseInBrackets wraps a bare JSON object body in "{...}" unless the
// string is empty or already begins with "{" and ends with "}".
func encloseInBrackets(jsonString string) string {
	if jsonString == "" {
		return jsonString
	}
	if strings.HasPrefix(jsonString, "{") && strings.HasSuffix(jsonString, "}") {
		return jsonString
	}
	return "{" + jsonString + "}"
}
+
// AzureSetProviderID derives the provider ID for a billing row.
// Precedence: VM rows (AdditionalInfo "VMName"/"VmName") get an
// "azure://" ID with a VMSS instance suffix; Virtual Network rows with
// an "IpAddress" use the address directly; storage rows prefer the
// creation-source tags (trimming the AKS/VMSS client prefixes) and fall
// back to the last path segment of the instance ID; everything else
// gets "azure://" plus the case-normalized instance ID.
func AzureSetProviderID(abv *BillingRowValues) string {
	category := SelectAzureCategory(abv.MeterCategory)
	if value, ok := abv.AdditionalInfo["VMName"]; ok {
		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
	} else if value, ok := abv.AdditionalInfo["VmName"]; ok { // key casing differs between export flavors
		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
	} else if value2, ook := abv.AdditionalInfo["IpAddress"]; ook && abv.MeterCategory == "Virtual Network" {
		return fmt.Sprintf("%v", value2)
	}

	if category == kubecost.StorageCategory {
		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
			creationSource := fmt.Sprintf("%v", value2)
			return strings.TrimPrefix(creationSource, "aks-")
		} else if value2, ok2 := abv.Tags["aks-managed-creationSource"]; ok2 {
			creationSource := fmt.Sprintf("%v", value2)
			return strings.TrimPrefix(creationSource, "vmssclient-")
		} else {
			return getSubStringAfterFinalSlash(abv.InstanceID)
		}
	}
	return "azure://" + resourceGroupToLowerCase(abv.InstanceID)
}
+
+func SelectAzureCategory(meterCategory string) string {
+	if meterCategory == "Virtual Machines" {
+		return kubecost.ComputeCategory
+	} else if meterCategory == "Storage" {
+		return kubecost.StorageCategory
+	} else if meterCategory == "Load Balancer" || meterCategory == "Bandwidth" || meterCategory == "Virtual Network" {
+		return kubecost.NetworkCategory
+	} else {
+		return kubecost.OtherCategory
+	}
+}
+
+func resourceGroupToLowerCase(providerID string) string {
+	var sb strings.Builder
+	for matchNum, group := range groupRegex.FindAllString(providerID, -1) {
+		if matchNum == 3 {
+			sb.WriteString(strings.ToLower(group))
+		} else {
+			sb.WriteString(group)
+		}
+	}
+	return sb.String()
+}
+
+// Returns the substring after the final "/" in a string
+func getSubStringAfterFinalSlash(id string) string {
+	index := strings.LastIndex(id, "/")
+	if index == -1 {
+		log.DedupedInfof(5, "azure.getSubStringAfterFinalSlash: failed to parse %s", id)
+		return id
+	}
+	return id[index+1:]
+}
+
// getVMNumberForVMSS extracts the instance ordinal from a VMSS VM name
// of the form "<scaleset>_<number>" and returns it as a
// "/virtualMachines/<number>" suffix; names without an underscore yield
// the empty string.
func getVMNumberForVMSS(vmName string) string {
	parts := strings.SplitN(vmName, "_", 3)
	if len(parts) < 2 {
		return ""
	}
	return "/virtualMachines/" + parts[1]
}

+ 194 - 0
pkg/cloud/azure/billingexportparser_test.go

@@ -0,0 +1,194 @@
package azure

import (
	"bytes"
	"encoding/csv"
	"os"
	"testing"
	"time"
)
+
// Root directory of the billing-export CSV fixtures used by these tests.
const billingExportPath = "./resources/billingexports/"

// headerSetPath holds fixtures exercising header-set / column detection.
const headerSetPath = billingExportPath + "headersets/"

// valueCasesPath holds fixtures exercising row-value parsing.
const valueCasesPath = billingExportPath + "values/"
+
// TestCSVRetriever is a test stub that serves a single local fixture CSV,
// ignoring the requested time range.
type TestCSVRetriever struct {
	// CSVName is the path of the fixture file to serve.
	CSVName string
}
+
+func (tcr TestCSVRetriever) getCSVReaders(start, end time.Time) ([]*csv.Reader, error) {
+	csvFile, err := os.Open(tcr.CSVName)
+	if err != nil {
+		return nil, err
+	}
+	reader := csv.NewReader(csvFile)
+	return append([]*csv.Reader{}, reader), nil
+}
+
+func Test_NewBillingExportParser(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected BillingExportParser
+	}{
+		"English Headers": {
+			input: "PayAsYouGo.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"Enterprise Camel Headers": {
+			input: "EnterpriseCamel.csv",
+			expected: BillingExportParser{
+				Date:            11,
+				MeterCategory:   18,
+				InvoiceEntityID: 0,
+				SubscriptionID:  23,
+				InstanceID:      29,
+				Service:         15,
+				Tags:            45,
+				AdditionalInfo:  44,
+				Cost:            38,
+				NetCost:         38,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"Enterprise Headers": {
+			input: "Enterprise.csv",
+			expected: BillingExportParser{
+				Date:            7,
+				MeterCategory:   9,
+				InvoiceEntityID: 39,
+				SubscriptionID:  3,
+				InstanceID:      20,
+				Service:         19,
+				Tags:            21,
+				AdditionalInfo:  23,
+				Cost:            17,
+				NetCost:         17,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"German Headers": {
+			input: "German.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"YA Headers": {
+			input: "YA.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"BOM Prefixed Headers": {
+			input: "BOM.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := TestCSVRetriever{
+				CSVName: headerSetPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+			headers, _ := reader.Read()
+			abp, err := NewBillingParseSchema(headers)
+			if err != nil {
+				t.Errorf("failed to create Azure Billing Parser from headers with error: %s", err.Error())
+			}
+
+			if abp.DateFormat != tc.expected.DateFormat {
+				t.Errorf("Azure Billing Parser does not have expected DateFormat index. Expected: %s, Actual: %s", tc.expected.DateFormat, abp.DateFormat)
+			}
+
+			if abp.Date != tc.expected.Date {
+				t.Errorf("Azure Billing Parser does not have expected Date index. Expected: %d, Actual: %d", tc.expected.Date, abp.Date)
+			}
+
+			if abp.MeterCategory != tc.expected.MeterCategory {
+				t.Errorf("Azure Billing Parser does not have expected MeterCategory index. Expected: %d, Actual: %d", tc.expected.MeterCategory, abp.MeterCategory)
+			}
+
+			if abp.InvoiceEntityID != tc.expected.InvoiceEntityID {
+				t.Errorf("Azure Billing Parser does not have expected InvoiceEntityID index. Expected: %d, Actual: %d", tc.expected.InvoiceEntityID, abp.InvoiceEntityID)
+			}
+
+			if abp.SubscriptionID != tc.expected.SubscriptionID {
+				t.Errorf("Azure Billing Parser does not have expected SubscriptionID index. Expected: %d, Actual: %d", tc.expected.SubscriptionID, abp.SubscriptionID)
+			}
+
+			if abp.InstanceID != tc.expected.InstanceID {
+				t.Errorf("Azure Billing Parser does not have expected InstanceID index. Expected: %d, Actual: %d", tc.expected.InstanceID, abp.InstanceID)
+			}
+
+			if abp.Service != tc.expected.Service {
+				t.Errorf("Azure Billing Parser does not have expected Service index. Expected: %d, Actual: %d", tc.expected.Service, abp.Service)
+			}
+
+			if abp.Tags != tc.expected.Tags {
+				t.Errorf("Azure Billing Parser does not have expected Tags index. Expected: %d, Actual: %d", tc.expected.Tags, abp.Tags)
+			}
+
+			if abp.AdditionalInfo != tc.expected.AdditionalInfo {
+				t.Errorf("Azure Billing Parser does not have expected AdditionalInfo index. Expected: %d, Actual: %d", tc.expected.AdditionalInfo, abp.AdditionalInfo)
+			}
+
+			if abp.Cost != tc.expected.Cost {
+				t.Errorf("Azure Billing Parser does not have expected Cost index. Expected: %d, Actual: %d", tc.expected.Cost, abp.Cost)
+			}
+
+			if abp.NetCost != tc.expected.NetCost {
+				t.Errorf("Azure Billing Parser does not have expected NetCost index. Expected: %d, Actual: %d", tc.expected.NetCost, abp.NetCost)
+			}
+		})
+	}
+}

+ 0 - 0
pkg/cloud/azure/client.go → pkg/cloud/azure/pricesheetclient.go


+ 0 - 0
pkg/cloud/azure/downloader.go → pkg/cloud/azure/pricesheetdownloader.go


+ 0 - 0
pkg/cloud/azure/downloader_test.go → pkg/cloud/azure/pricesheetdownloader_test.go


+ 3 - 1
pkg/cloud/azure/azureprovider.go → pkg/cloud/azure/provider.go

@@ -489,6 +489,7 @@ func (k *azureKey) GetGPUCount() string {
 }
 
 // AzureStorageConfig Represents an azure storage config
+// Deprecated: v1.104 Use StorageConfiguration instead
 type AzureStorageConfig struct {
 	SubscriptionId string `json:"azureSubscriptionID"`
 	AccountName    string `json:"azureStorageAccount"`
@@ -517,7 +518,8 @@ type AzureAppKey struct {
 	Tenant      string `json:"tenant"`
 }
 
-// Azure service key for a specific subscription
+// AzureServiceKey service key for a specific subscription
+// Deprecated: v1.104 Use ServiceKey instead
 type AzureServiceKey struct {
 	SubscriptionID string       `json:"subscriptionId"`
 	ServiceKey     *AzureAppKey `json:"serviceKey"`

+ 0 - 0
pkg/cloud/azure/azureprovider_test.go → pkg/cloud/azure/provider_test.go


+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/BOM.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+,,,2022-11-03,,,,,,,,,,,,,,,,,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv

@@ -0,0 +1,2 @@
+InvoiceSectionName,AccountName,AccountOwnerId,SubscriptionId,SubscriptionName,ResourceGroup,ResourceLocation,Date,ProductName,MeterCategory,MeterSubCategory,MeterId,MeterName,MeterRegion,UnitOfMeasure,Quantity,EffectivePrice,CostInBillingCurrency,CostCenter,ConsumedService,ResourceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ResourceName,ReservationId,ReservationName,UnitPrice,ProductOrderId,ProductOrderName,Term,PublisherType,PublisherName,ChargeType,Frequency,PricingModel,AvailabilityZone,BillingAccountId,BillingAccountName,BillingCurrencyCode,BillingPeriodStartDate,BillingPeriodEndDate,BillingProfileId,BillingProfileName,InvoiceSectionId,IsAzureCreditEligible,PartNumber,PayGPrice,PlanName,ServiceFamily,CostAllocationRuleName
+Unassigned,Azure Service,email@email.com,11111111-12ab-34dc-56ef-123456abcdef,Example-Subscription,Example-Resource-Group,canadacentral,02/02/2021,Virtual Machines Ev3/ESv3 Series - E4 v3/E4s v3 - CA Central,Virtual Machines,Ev3/ESv3 Series,3dbc3a0c-32b6-4c4d-adbb-3ee577aaba4d,E4 v3/E4s v3,CA Central,10 Hours,10,1.2,0,,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-defaultpool-12345678-vmss,"""createOperationID"": ""11111111-12ab-34dc-56ef-123456abcdef"",""creationSource"": ""vmssclient-aks-defaultpool-12345678-vmss"",""orchestrator"": ""Kubernetes:1.19.9"",""poolName"": ""defaultpool"",""resourceNameSuffix"": ""12345678""",MS-AZR-0017P,"{""UsageType"":""ComputeHR"",""ImageType"":""Canonical"",""ServiceType"":""Standard_E4s_v3"",""VMName"":""aks-defaultpool-12345678-vmss_2"",""VMProperties"":null,""VCPUs"":4,""CPUs"":0,""ReservationOrderId"":""11111111-12ab-34dc-56ef-123456abcdef"",""ReservationId"":""4f18e7c9-9ae8-4251-886b-8bd942a41bdf"",""ConsumptionMeter"":""11111111-12ab-34dc-56ef-123456abcdef"",""RINormalizationRatio"":2.0}",,Canonical,aks-defaultpool-12345678-vmss,11111111-12ab-34dc-56ef-123456abcdef,ExampleReservationName,0.1,b13f2808-a13e-49a3-a899-06d83b8f5d32,"Reserved VM Instance, Standard_E2s_v3, CA Central, 3 Years",36,Azure,,Usage,UsageBased,,,12345678,Example Company,CAD,05/01/2021,05/31/2021,12345678,Example Company,,TRUE,ABC-12345,0,,Compute,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv

@@ -0,0 +1,2 @@
+billingAccountName,partnerName,resellerName,resellerMpnId,customerTenantId,customerName,costCenter,billingPeriodEndDate,billingPeriodStartDate,servicePeriodEndDate,servicePeriodStartDate,date,serviceFamily,productOrderId,productOrderName,consumedService,meterId,meterName,meterCategory,meterSubCategory,meterRegion,ProductId,ProductName,SubscriptionId,subscriptionName,publisherType,publisherId,publisherName,resourceGroupName,ResourceId,resourceLocation,location,effectivePrice,quantity,unitOfMeasure,chargeType,billingCurrency,pricingCurrency,costInBillingCurrency,costInUsd,exchangeRatePricingToBilling,exchangeRateDate,serviceInfo1,serviceInfo2,additionalInfo,tags,PayGPrice,frequency,term,reservationId,reservationName,pricingModel
+,PartnerName,,,11111111-1111-1111-1111-123456789012,Customer Name,,,,02/01/2021,02/01/2021,02/02/2021,Networking,11111111-1111-1111-1111-123456789012,Azure plan,Microsoft.Network,11111111-1111-1111-1111-123456789012,Dynamic Public IP,Virtual Network,IP Addresses,,DZH318Z0BNXN0032,IP Addresses - Basic,11111111-1111-1111-1111-123456789012,Microsoft Azure,Azure,,Microsoft,databricks,/subscriptions/11111111-1111-1111-1111-123456789012/resourceGroups/testspot/providers/Microsoft.Storage/storageAccounts/storename,WESTUS,US West,0.004,3,1 Hour,Usage,USD,USD,0.012,0.012,1,3/1/21,,,,"{  ""ClusterId"": ""0103-212455-stash756"",  ""ServiceType"": ""DataAnalysis"",  ""ClusterName"": ""SrgExtractsPartDeux"",  ""databricks-instance-name"": ""0c1ef59764casdf0c0e094e1cc"",  ""Creator"": ""email@email.com"",  ""Vendor"": ""Databricks"",  ""DatabricksEnvironment"": ""workerenv-6448504491843616""}",0.004,UsageBased,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/German.csv

@@ -0,0 +1,2 @@
+Abonnement-GUID (SubscriptionGuid),Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),UsageDateTime (UsageDateTime),Kategorie der Verbrauchseinheit (MeterCategory),MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),PreTaxCost (PreTaxCost),Genutzter Dienst (ConsumedService),ResourceType (ResourceType),InstanceId (InstanceId),Tags (Tags),OfferId (OfferId),Zusätzliche Informationen (AdditionalInfo),Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/YA.csv

@@ -0,0 +1,2 @@
+subscriptionId,Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),date,meterCategory,MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),costInBillingCurrency,consumedService,ResourceType (ResourceType),InstanceName,tags,OfferId (OfferId),additionalInfo,Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,02/02/2021,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2"

+ 88 - 0
pkg/cloud/azure/resources/billingexports/values/Template.csv

@@ -0,0 +1,88 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001301934407093,0.001301934407093,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.0828,0.0828,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.09,0.09,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0015896,0.0015896,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0000362,0.0000362,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.01236783717759,0.01236783717759,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000204,0.00000204,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0821376,0.0821376,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000060000000000000000000,0.000000060000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000140000000000000000,0.000000140000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000020000000000000000000,0.000000020000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0013522,0.0013522,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000160000000000000000,0.000000160000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000100000000000000000,0.000000100000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001686412831768,0.001686412831768,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.125,0.125,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0004494,0.0004494,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000154,0.00000154,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.15,0.15,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000040000000000000000000,0.000000040000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0002082,0.0002082,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000177,0.000177,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0107136,0.0107136,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0032604,0.0032604,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0072,0.0072,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.00000445,0.00000445,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.00992768780794,0.00992768780794,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.14,0.14,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000191,0.000191,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0014714,0.0014714,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0144,0.0144,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.015,0.015,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000280000000000000000,0.000000280000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }"

+ 170 - 0
pkg/cloud/azure/storagebillingparser.go

@@ -0,0 +1,170 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// AzureStorageBillingParser accesses billing data stored in CSV files in Azure Storage.
+// It embeds StorageConnection, which supplies the storage account/container
+// configuration and blob download helpers used while parsing billing exports.
+type AzureStorageBillingParser struct {
+	StorageConnection
+}
+
+// Equals reports whether config is an *AzureStorageBillingParser whose
+// underlying StorageConnection matches this one.
+func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+	other, ok := config.(*AzureStorageBillingParser)
+	if ok {
+		return asbp.StorageConnection.Equals(&other.StorageConnection)
+	}
+	return false
+}
+
+// AzureBillingResultFunc is invoked once for each billing row parsed from a CSV export.
+type AzureBillingResultFunc func(*BillingRowValues) error
+
+// ParseBillingData locates the most recent billing export blob for each month
+// in the [start, end] window, downloads each one, and streams every parsed row
+// to resultFn. The returned ConnectionStatus identifies the stage at which a
+// failure occurred: configuration, connection, or parsing.
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+	if err := asbp.Validate(); err != nil {
+		return cloud.InvalidConfiguration, err
+	}
+
+	containerURL, err := asbp.getContainer()
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+
+	ctx := context.Background()
+	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+
+	for _, blobName := range blobNames {
+		blobBytes, err := asbp.DownloadBlob(blobName, containerURL, ctx)
+		if err != nil {
+			return cloud.FailedConnection, err
+		}
+		if err := asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn); err != nil {
+			return cloud.ParseError, err
+		}
+	}
+	return cloud.SuccessfulConnection, nil
+}
+
+func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {
+	headers, err := reader.Read()
+	if err != nil {
+		return err
+	}
+	abp, err := NewBillingParseSchema(headers)
+	if err != nil {
+		return err
+	}
+	for {
+		var record, err = reader.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		abv := abp.ParseRow(start, end, record)
+		if abv == nil {
+			continue
+		}
+
+		err = resultFn(abv)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getMostRecentBlobs lists every blob in the container and returns, ordered by
+// month, the name of the most recently created blob for each calendar month
+// between start and end. When a Path is configured, only blobs whose names
+// contain that path are considered.
+func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, containerURL *azblob.ContainerURL, ctx context.Context) ([]string, error) {
+	log.Infof("Azure Storage: retrieving most recent reports from: %v - %v", start, end)
+
+	// Get list of month substrings for months contained in the start to end range
+	monthStrs, err := asbp.getMonthStrings(start, end)
+	if err != nil {
+		return nil, err
+	}
+	mostRecentBlobs := make(map[string]azblob.BlobItemInternal)
+	for marker := (azblob.Marker{}); marker.NotDone(); {
+		// Get a result segment starting with the blob indicated by the current Marker.
+		listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+		if err != nil {
+			return nil, err
+		}
+
+		// ListBlobs returns the start of the next segment; you MUST use this to get
+		// the next segment (after processing the current result segment).
+		marker = listBlob.NextMarker
+
+		// Using the list of month strings, find the most recent blob for each month in the range
+		for _, blobInfo := range listBlob.Segment.BlobItems {
+			for _, month := range monthStrs {
+				if !strings.Contains(blobInfo.Name, month) {
+					continue
+				}
+				// If Container Path configuration exists, check if it is in the blob's name
+				if asbp.Path != "" && !strings.Contains(blobInfo.Name, asbp.Path) {
+					continue
+				}
+
+				if prevBlob, ok := mostRecentBlobs[month]; ok {
+					// CreationTime is a *time.Time and may be nil; guard both
+					// sides before dereferencing to avoid a panic on blobs
+					// without a creation time. A candidate with no creation
+					// time never replaces an existing entry.
+					if blobInfo.Properties.CreationTime == nil {
+						continue
+					}
+					if prevBlob.Properties.CreationTime != nil && prevBlob.Properties.CreationTime.After(*blobInfo.Properties.CreationTime) {
+						continue
+					}
+				}
+				mostRecentBlobs[month] = blobInfo
+			}
+		}
+	}
+
+	// Move the selected blob names from the map into a list ordered by month.
+	var blobNames []string
+	for _, month := range monthStrs {
+		if blob, ok := mostRecentBlobs[month]; ok {
+			blobNames = append(blobNames, blob.Name)
+		}
+	}
+
+	return blobNames, nil
+}
+
+func (asbp *AzureStorageBillingParser) getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	var monthStrs []string
+	monthStr := asbp.timeToMonthString(start)
+	endStr := asbp.timeToMonthString(end)
+	monthStrs = append(monthStrs, monthStr)
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	for monthStr != endStr {
+		currMonth = currMonth.AddDate(0, 1, 0)
+		monthStr = asbp.timeToMonthString(currMonth)
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+func (asbp *AzureStorageBillingParser) timeToMonthString(input time.Time) string {
+	format := "20060102"
+	startOfMonth := input.AddDate(0, 0, -input.Day()+1)
+	endOfMonth := input.AddDate(0, 1, -input.Day())
+	return startOfMonth.Format(format) + "-" + endOfMonth.Format(format)
+}

+ 204 - 0
pkg/cloud/azure/storagebillingparser_test.go

@@ -0,0 +1,204 @@
+package azure
+
+import (
+	"testing"
+	"time"
+)
+
+// TestAzureStorageBillingParser_getMonthStrings verifies that a month-range
+// string is produced for every calendar month touched by the query window.
+func TestAzureStorageBillingParser_getMonthStrings(t *testing.T) {
+	asbp := AzureStorageBillingParser{}
+	loc, _ := time.LoadLocation("UTC")
+	testCases := map[string]struct {
+		start    time.Time
+		end      time.Time
+		expected []string
+	}{
+		"Single Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 2, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+			},
+		},
+		"Two Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 3, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+				"20210301-20210331",
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			months, err := asbp.getMonthStrings(tc.start, tc.end)
+			if err != nil {
+				t.Fatalf("Could not retrieve month strings %v", err)
+			}
+
+			// Fatal here: if months is longer than expected, the indexed
+			// comparison below would panic with an out-of-range access.
+			if len(months) != len(tc.expected) {
+				t.Fatalf("Did not create the expected number of month strings. Expected: %d, Actual: %d", len(tc.expected), len(months))
+			}
+
+			for i, monthStr := range months {
+				if monthStr != tc.expected[i] {
+					t.Errorf("Incorrect month string at index %d. Expected: %s, Actual: %s", i, tc.expected[i], monthStr)
+				}
+			}
+		})
+	}
+}
+
+// TestAzureStorageBillingParser_parseCSV checks that billing export CSVs are
+// parsed into the expected BillingRowValues, including the Tags and
+// AdditionalInfo JSON columns.
+func TestAzureStorageBillingParser_parseCSV(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected []BillingRowValues
+	}{
+		"Virtual Machine": {
+			input: "VirtualMachine.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456billing",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+		"Missing Brackets": {
+			input: "MissingBrackets.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456abcdef",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+	}
+	asbp := &AzureStorageBillingParser{}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := &TestCSVRetriever{
+				CSVName: valueCasesPath + tc.input,
+			}
+			// Fatal on fixture failures: the indexing below would panic if
+			// csvs were empty or actual/expected lengths diverge.
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Fatalf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+
+			var actual []*BillingRowValues
+			resultFn := func(abv *BillingRowValues) error {
+				actual = append(actual, abv)
+				return nil
+			}
+
+			err = asbp.parseCSV(start, end, reader, resultFn)
+			if err != nil {
+				t.Fatalf("Error generating BillingRowValues: %s", err.Error())
+			}
+
+			if len(actual) != len(tc.expected) {
+				t.Fatalf("Actual output length did not match expected. Expected: %d, Actual: %d", len(tc.expected), len(actual))
+			}
+
+			// In this loop "this" is the parsed (actual) value and "that" is
+			// the expected value; error messages print expected first.
+			for i, this := range actual {
+				that := tc.expected[i]
+
+				if !this.Date.Equal(that.Date) {
+					t.Errorf("Parsed data at index %d has incorrect Date value. Expected: %s, Actual: %s", i, that.Date.String(), this.Date.String())
+				}
+
+				if this.MeterCategory != that.MeterCategory {
+					t.Errorf("Parsed data at index %d has incorrect MeterCategory value. Expected: %s, Actual: %s", i, that.MeterCategory, this.MeterCategory)
+				}
+
+				if this.SubscriptionID != that.SubscriptionID {
+					t.Errorf("Parsed data at index %d has incorrect SubscriptionID value. Expected: %s, Actual: %s", i, that.SubscriptionID, this.SubscriptionID)
+				}
+
+				if this.InvoiceEntityID != that.InvoiceEntityID {
+					t.Errorf("Parsed data at index %d has incorrect InvoiceEntityID value. Expected: %s, Actual: %s", i, that.InvoiceEntityID, this.InvoiceEntityID)
+				}
+
+				if this.InstanceID != that.InstanceID {
+					t.Errorf("Parsed data at index %d has incorrect InstanceID value. Expected: %s, Actual: %s", i, that.InstanceID, this.InstanceID)
+				}
+
+				if this.Service != that.Service {
+					t.Errorf("Parsed data at index %d has incorrect Service value. Expected: %s, Actual: %s", i, that.Service, this.Service)
+				}
+
+				if this.Cost != that.Cost {
+					t.Errorf("Parsed data at index %d has incorrect Cost value. Expected: %f, Actual: %f", i, that.Cost, this.Cost)
+				}
+
+				if this.NetCost != that.NetCost {
+					t.Errorf("Parsed data at index %d has incorrect NetCost value. Expected: %f, Actual: %f", i, that.NetCost, this.NetCost)
+				}
+
+				if len(this.Tags) != len(that.Tags) {
+					t.Errorf("Parsed data at index %d did not have the expected number of tags. Expected: %d, Actual: %d", i, len(that.Tags), len(this.Tags))
+				}
+
+				for key, actualTag := range this.Tags {
+					expectedTag, ok := that.Tags[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d has unexpected entry in Tags with key: %s", i, key)
+						continue
+					}
+
+					if actualTag != expectedTag {
+						t.Errorf("Parsed data at index %d has unexpected value in Tags for key: %s. Expected: %s, Actual: %s", i, key, expectedTag, actualTag)
+					}
+				}
+
+				if len(this.AdditionalInfo) != len(that.AdditionalInfo) {
+					t.Errorf("Parsed data at index %d did not have the expected number of AdditionalInfo entries. Expected: %d, Actual: %d", i, len(that.AdditionalInfo), len(this.AdditionalInfo))
+				}
+
+				for key, actualAI := range this.AdditionalInfo {
+					expectedAI, ok := that.AdditionalInfo[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d has unexpected entry in AdditionalInfo with key: %s", i, key)
+						continue
+					}
+
+					if actualAI != expectedAI {
+						t.Errorf("Parsed data at index %d has unexpected value in AdditionalInfo for key: %s. Expected: %v, Actual: %v", i, key, expectedAI, actualAI)
+					}
+				}
+			}
+
+		})
+
+	}
+}

+ 179 - 0
pkg/cloud/azure/storageconfiguration.go

@@ -0,0 +1,179 @@
+package azure
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// StorageConfiguration holds the values needed to locate an Azure Cost Export
+// in a Storage Account container, plus the Authorizer used to authenticate
+// against that account.
+type StorageConfiguration struct {
+	SubscriptionID string     `json:"subscriptionID"` // Azure subscription the export belongs to
+	Account        string     `json:"account"`        // storage account name
+	Container      string     `json:"container"`      // blob container holding the export
+	Path           string     `json:"path"`           // optional path prefix within the container
+	Cloud          string     `json:"cloud"`          // Azure cloud environment; used to select the blob URL template
+	Authorizer     Authorizer `json:"authorizer"`
+}
+
+// Validate ensures that all required fields (Authorizer, SubscriptionID,
+// Account, Container) are set, and returns an error if they are not.
+// Path and Cloud are optional. The Authorizer is validated first.
+func (sc *StorageConfiguration) Validate() error {
+
+	if sc.Authorizer == nil {
+		return fmt.Errorf("StorageConfiguration: missing authorizer")
+	}
+
+	err := sc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if sc.SubscriptionID == "" {
+		// NOTE(review): "Subcription" is a typo, but tests assert this exact
+		// message; fix the string and the tests together if changing it.
+		return fmt.Errorf("StorageConfiguration: missing Subcription ID")
+	}
+
+	if sc.Account == "" {
+		return fmt.Errorf("StorageConfiguration: missing Account")
+	}
+
+	if sc.Container == "" {
+		return fmt.Errorf("StorageConfiguration: missing Container")
+	}
+
+	return nil
+}
+
+// Equals reports whether config is a *StorageConfiguration whose fields and
+// Authorizer all match this configuration.
+func (sc *StorageConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*StorageConfiguration)
+	if !ok {
+		return false
+	}
+
+	// Authorizers must either both be nil or compare equal.
+	if sc.Authorizer == nil {
+		if that.Authorizer != nil {
+			return false
+		}
+	} else if !sc.Authorizer.Equals(that.Authorizer) {
+		return false
+	}
+
+	return sc.SubscriptionID == that.SubscriptionID &&
+		sc.Account == that.Account &&
+		sc.Container == that.Container &&
+		sc.Path == that.Path &&
+		sc.Cloud == that.Cloud
+}
+
+// Sanitize returns a copy safe for display/serialization; the Authorizer is
+// sanitized so credentials are redacted. A nil Authorizer is carried through
+// instead of causing a nil-pointer panic (the original dereferenced it
+// unconditionally).
+func (sc *StorageConfiguration) Sanitize() config.Config {
+	sanitized := &StorageConfiguration{
+		SubscriptionID: sc.SubscriptionID,
+		Account:        sc.Account,
+		Container:      sc.Container,
+		Path:           sc.Path,
+		Cloud:          sc.Cloud,
+	}
+	if sc.Authorizer != nil {
+		sanitized.Authorizer = sc.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}
+
+// Key identifies this configuration as "<subscription>/<container>", with the
+// container path appended when one is configured.
+func (sc *StorageConfiguration) Key() string {
+	if sc.Path == "" {
+		return fmt.Sprintf("%s/%s", sc.SubscriptionID, sc.Container)
+	}
+	return fmt.Sprintf("%s/%s/%s", sc.SubscriptionID, sc.Container, sc.Path)
+}
+
+// UnmarshalJSON deserializes a StorageConfiguration, using
+// AuthorizerFromInterface to resolve the concrete Authorizer implementation
+// from the "authorizerType" property of the nested authorizer object.
+func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	// Checked assertion: valid JSON that is not an object (array, string, ...)
+	// should produce an error rather than a panic.
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: JSON value is not an object")
+	}
+
+	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.SubscriptionID = subscriptionID
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Account = account
+
+	container, err := config.GetInterfaceValue[string](fmap, "container")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Container = container
+
+	path, err := config.GetInterfaceValue[string](fmap, "path")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Path = path
+
+	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Cloud = cloud
+
+	// The authorizer is polymorphic; its concrete type is selected from the
+	// "authorizerType" property by SelectAuthorizerByType.
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+	if asc.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	authorizer = &AccessKey{
+		AccessKey: asc.AccessKey,
+		Account:   asc.AccountName,
+	}
+
+	return &StorageConfiguration{
+		SubscriptionID: asc.SubscriptionId,
+		Account:        asc.AccountName,
+		Container:      asc.ContainerName,
+		Path:           asc.ContainerPath,
+		Cloud:          asc.AzureCloud,
+		Authorizer:     authorizer,
+	}
+}

+ 446 - 0
pkg/cloud/azure/storageconfiguration_test.go

@@ -0,0 +1,446 @@
+package azure
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// TestStorageConfiguration_Validate checks each required field individually by
+// mutating a known-valid base configuration.
+func TestStorageConfiguration_Validate(t *testing.T) {
+	// newConfig returns a fully valid configuration, optionally modified.
+	newConfig := func(mod func(*StorageConfiguration)) StorageConfiguration {
+		c := StorageConfiguration{
+			SubscriptionID: "subscriptionID",
+			Account:        "account",
+			Container:      "container",
+			Path:           "path",
+			Cloud:          "cloud",
+			Authorizer: &AccessKey{
+				AccessKey: "accessKey",
+				Account:   "account",
+			},
+		}
+		if mod != nil {
+			mod(&c)
+		}
+		return c
+	}
+
+	testCases := map[string]struct {
+		config   StorageConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config:   newConfig(nil),
+			expected: nil,
+		},
+		"access key invalid": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Authorizer = &AccessKey{Account: "account"} }),
+			expected: fmt.Errorf("AccessKey: missing access key"),
+		},
+		"missing authorizer": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Authorizer = nil }),
+			expected: fmt.Errorf("StorageConfiguration: missing authorizer"),
+		},
+		"missing subscriptionID": {
+			config:   newConfig(func(c *StorageConfiguration) { c.SubscriptionID = "" }),
+			expected: fmt.Errorf("StorageConfiguration: missing Subcription ID"),
+		},
+		"missing account": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Account = "" }),
+			expected: fmt.Errorf("StorageConfiguration: missing Account"),
+		},
+		"missing container": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Container = "" }),
+			expected: fmt.Errorf("StorageConfiguration: missing Container"),
+		},
+		// Path and Cloud are optional, so clearing them must still validate.
+		"missing path": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Path = "" }),
+			expected: nil,
+		},
+		"missing cloud": {
+			config:   newConfig(func(c *StorageConfiguration) { c.Cloud = "" }),
+			expected: nil,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			got, want := "nil", "nil"
+			if err := tc.config.Validate(); err != nil {
+				got = err.Error()
+			}
+			if tc.expected != nil {
+				want = tc.expected.Error()
+			}
+			if got != want {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", got, want)
+			}
+		})
+	}
+}
+
+// TestStorageConfiguration_Equals pairs a base configuration against variants
+// that differ in exactly one respect; only the unmodified pair (and the pair
+// with both authorizers nil) should compare equal.
+func TestStorageConfiguration_Equals(t *testing.T) {
+	newAuth := func() Authorizer {
+		return &AccessKey{
+			AccessKey: "accessKey",
+			Account:   "account",
+		}
+	}
+	// newConfig returns the shared base configuration, optionally modified.
+	newConfig := func(mod func(*StorageConfiguration)) StorageConfiguration {
+		c := StorageConfiguration{
+			SubscriptionID: "subscriptionID",
+			Account:        "account",
+			Container:      "container",
+			Path:           "path",
+			Cloud:          "cloud",
+			Authorizer:     newAuth(),
+		}
+		if mod != nil {
+			mod(&c)
+		}
+		return c
+	}
+	newConfigPtr := func(mod func(*StorageConfiguration)) *StorageConfiguration {
+		c := newConfig(mod)
+		return &c
+	}
+	noAuth := func(c *StorageConfiguration) { c.Authorizer = nil }
+
+	testCases := map[string]struct {
+		left     StorageConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config":          {left: newConfig(nil), right: newConfigPtr(nil), expected: true},
+		"missing both authorizer":  {left: newConfig(noAuth), right: newConfigPtr(noAuth), expected: true},
+		"missing left authorizer":  {left: newConfig(noAuth), right: newConfigPtr(nil), expected: false},
+		"missing right authorizer": {left: newConfig(nil), right: newConfigPtr(noAuth), expected: false},
+		"different subscriptionID": {
+			left:     newConfig(nil),
+			right:    newConfigPtr(func(c *StorageConfiguration) { c.SubscriptionID = "subscriptionID2" }),
+			expected: false,
+		},
+		"different account": {
+			left:     newConfig(nil),
+			right:    newConfigPtr(func(c *StorageConfiguration) { c.Account = "account2" }),
+			expected: false,
+		},
+		"different container": {
+			left:     newConfig(nil),
+			right:    newConfigPtr(func(c *StorageConfiguration) { c.Container = "container2" }),
+			expected: false,
+		},
+		"different path": {
+			left:     newConfig(nil),
+			right:    newConfigPtr(func(c *StorageConfiguration) { c.Path = "path2" }),
+			expected: false,
+		},
+		"different cloud": {
+			left:     newConfig(nil),
+			right:    newConfigPtr(func(c *StorageConfiguration) { c.Cloud = "cloud2" }),
+			expected: false,
+		},
+		// A config of an entirely different concrete type is never equal.
+		"different config": {left: newConfig(nil), right: newAuth(), expected: false},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			if got := tc.left.Equals(tc.right); got != tc.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", got, tc.expected)
+			}
+		})
+	}
+}
+
+// TestStorageConfiguration_JSON round-trips configurations through
+// Marshal/Unmarshal and requires the result to Equal the original.
+func TestStorageConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config StorageConfiguration
+	}{
+		"Empty Config": {config: StorageConfiguration{}},
+		"Nil Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+			},
+		},
+		"AccessKey Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			configJSON, err := json.Marshal(tc.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			roundTripped := &StorageConfiguration{}
+			if err = json.Unmarshal(configJSON, roundTripped); err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !tc.config.Equals(roundTripped) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 77 - 0
pkg/cloud/azure/storageconnection.go

@@ -0,0 +1,77 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// StorageConnection provides access to Azure Storage
+type StorageConnection struct {
+	// Embedded so the connection exposes the configuration's fields and
+	// methods (Account, Container, Authorizer, ...) directly.
+	StorageConfiguration
+}
+
+// Equals compares two StorageConnections by their embedded configurations.
+// A nil or non-*StorageConnection argument fails the type assertion and
+// yields false.
+func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*StorageConnection)
+	if !ok {
+		return false
+	}
+
+	return sc.StorageConfiguration.Equals(&thatConfig.StorageConfiguration)
+}
+
+// getContainer builds an azblob.ContainerURL for the configured Account and
+// Container, using credentials supplied by the Authorizer.
+func (sc *StorageConnection) getContainer() (*azblob.ContainerURL, error) {
+
+	credential, err := sc.Authorizer.GetBlobCredentials()
+	if err != nil {
+		return nil, err
+	}
+
+	// Request pipeline with default options.
+	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+
+	// From the Azure portal, get your storage account blob service URL endpoint.
+	// NOTE(review): the url.Parse error is discarded; the template-built URL is
+	// assumed to always be well-formed.
+	URL, _ := url.Parse(
+		fmt.Sprintf(sc.getBlobURLTemplate(), sc.Account, sc.Container))
+
+	// Create a ContainerURL object that wraps the container URL and a request
+	// pipeline to make requests.
+	containerURL := azblob.NewContainerURL(*URL, p)
+	return &containerURL, nil
+}
+
+// getBlobURLTemplate returns the correct BlobUrl for whichever Cloud storage account is specified by the AzureCloud configuration
+// defaults to the Public Cloud template
+func (sc *StorageConnection) getBlobURLTemplate() string {
+	// Use gov cloud blob url if gov is detected in AzureCloud
+	if strings.Contains(strings.ToLower(sc.Cloud), "gov") {
+		return "https://%s.blob.core.usgovcloudapi.net/%s"
+	}
+	// default to Public Cloud template
+	return "https://%s.blob.core.windows.net/%s"
+}
+
+// DownloadBlob fetches the named blob from the given container and returns its
+// entire contents in memory.
+// NOTE(review): by Go convention ctx would be the first parameter; the order
+// is part of the public interface, so it is left as-is.
+func (sc *StorageConnection) DownloadBlob(blobName string, containerURL *azblob.ContainerURL, ctx context.Context) ([]byte, error) {
+	log.Infof("Azure Storage: retrieving blob: %v", blobName)
+
+	blobURL := containerURL.NewBlobURL(blobName)
+	downloadResponse, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
+	if err != nil {
+		return nil, err
+	}
+	// NOTE: automatically retries are performed if the connection fails
+	bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
+
+	// read the body into a buffer
+	downloadedData := bytes.Buffer{}
+	_, err = downloadedData.ReadFrom(bodyStream)
+	if err != nil {
+		return nil, err
+	}
+	return downloadedData.Bytes(), nil
+}

+ 53 - 0
pkg/cloud/config/authorizer.go

@@ -0,0 +1,53 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// AuthorizerTypeProperty is the property where the id of an Authorizer should be placed in its custom MarshalJSON function
+const AuthorizerTypeProperty = "authorizerType"
+
+// Authorizer is a Config that can also marshal itself to JSON; implementations
+// embed their type id under AuthorizerTypeProperty so the concrete type can be
+// recovered when unmarshalling.
+type Authorizer interface {
+	Config
+	json.Marshaler
+}
+
+// AuthorizerSelectorFn implementations of this function should be a simple switch
+// and acts as a register for the Authorizer types, returned Authorizer should be empty
+// except for its default type property and will have other values marshalled into it
+type AuthorizerSelectorFn[T Authorizer] func(string) (T, error)
+
+// AuthorizerFromInterface this generic function provides Authorizer unmarshalling for all providers.
+// The type id stored under AuthorizerTypeProperty selects an empty concrete
+// Authorizer via authSelectFn; the remaining properties are then marshalled
+// back to JSON and unmarshalled into that value. A nil input returns the zero
+// Authorizer with a nil error.
+func AuthorizerFromInterface[T Authorizer](f any, authSelectFn AuthorizerSelectorFn[T]) (T, error) {
+	var emptyAuth T
+	if f == nil {
+		return emptyAuth, nil
+	}
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not cast interface as map")
+	}
+
+	authType, err := GetInterfaceValue[string](fmap, AuthorizerTypeProperty)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not retrieve type property: %w", err)
+	}
+	authorizer, err := authSelectFn(authType)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: %w", err)
+	}
+
+	// convert the interface back to a []Byte so that it can be unmarshalled into the correct type
+	fBin, err := json.Marshal(f)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not marshal value %v: %w", f, err)
+	}
+
+	err = json.Unmarshal(fBin, authorizer)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: failed to unmarshal into Authorizer type %T from value %v: %w", authorizer, f, err)
+	}
+	return authorizer, nil
+}

+ 37 - 0
pkg/cloud/config/config.go

@@ -0,0 +1,37 @@
+package config
+
+import (
+	"fmt"
+)
+
+// Redacted is the placeholder written over secret values by Sanitize implementations.
+const Redacted = "REDACTED"
+
+// Config allows for nested configurations which encapsulate their functionality to be validated and compared easily
+type Config interface {
+	Validate() error
+	Sanitize() Config
+	Equals(Config) bool
+}
+
+// KeyedConfig is a top level Config which uses its public values as a unique identifier allowing duplicates to be identified
+type KeyedConfig interface {
+	Config
+	Key() string
+}
+
+// KeyedConfigWatcher supplies the current set of keyed configurations.
+type KeyedConfigWatcher interface {
+	GetConfigs() []KeyedConfig
+}
+
+// GetInterfaceValue looks up key in fmap and asserts the value to type T,
+// returning an error when the key is absent or the value has a different
+// dynamic type.
+func GetInterfaceValue[T any](fmap map[string]interface{}, key string) (T, error) {
+	var value T
+	interfaceValue, ok := fmap[key]
+	if !ok {
+		// NOTE(review): the "FromInterface" prefix does not match this
+		// function's name; callers/tests may depend on the exact message, so
+		// it is left unchanged here.
+		return value, fmt.Errorf("FromInterface: missing '%s' property", key)
+	}
+	typedValue, ok := interfaceValue.(T)
+	if !ok {
+		return value, fmt.Errorf("GetInterfaceValue: property '%s' had expected type '%T' but did not match", key, value)
+	}
+	return typedValue, nil
+}

+ 42 - 0
pkg/cloud/connectionstatus.go

@@ -0,0 +1,42 @@
+package cloud
+
+// ConnectionStatus communicates the status of a cloud connection in a way that is general enough to apply to each
+// Cloud Provider, but still give actionable information on how to troubleshoot one of the four failing statuses.
+type ConnectionStatus string
+
+// NOTE(review): only InitialStatus is explicitly typed as ConnectionStatus;
+// the constants below it are untyped strings. Confirm whether they were meant
+// to carry the ConnectionStatus type as well.
+const (
+	// InitialStatus is the zero value of CloudConnectionStatus and means that cloud connection is untested. Once
+	// CloudConnectionStatus has been changed it should not return to this value. This status is assigned on creation
+	// to the cloud provider
+	InitialStatus ConnectionStatus = "No Connection"
+
+	// InvalidConfiguration means that Cloud Configuration is missing required values to connect to cloud provider.
+	// This status is assigned during failures in the provider implementation of getCloudConfig()
+	InvalidConfiguration = "Invalid Configuration"
+
+	// FailedConnection means that all required Cloud Configuration values are filled in, but a connection with the
+	// Cloud Provider cannot be established. This is indicative of a typo in one of the Cloud Configuration values or an
+	// issue in how the connection was set up in the Cloud Provider's Console. The assignment of this status varies
+	// between implementations, but should happen if an error is thrown when an interaction with an object from
+	// the Cloud Service Provider's sdk occurs.
+	FailedConnection = "Failed Connection"
+
+	// ParseError indicates an issue with our functions which parse responses
+	ParseError = "Parse Error"
+
+	// MissingData means that the Cloud Integration is properly configured, but the cloud provider is not returning
+	// billing/cost and usage data. This status is indicative of the billing/cost and usage data export of the Cloud Provider
+	// being incorrectly set up or the export being set up in the last 48 hours and not having started populating data yet.
+	// This status is set when a query has been successfully made but the results come back empty. If the cloud provider
+	// already has a SUCCESSFUL_CONNECTION status then this status should not be set, because this indicates that the specific
+	// query made may have been empty.
+	MissingData = "Data Missing"
+
+	// SuccessfulConnection means that the Cloud Integration is properly configured and returning data. This status is
+	// set on any successful query where data is returned
+	SuccessfulConnection = "Connection Successful"
+)
+
+// String returns the status as its plain string value.
+func (cs ConnectionStatus) String() string {
+	return string(cs)
+}

+ 132 - 0
pkg/cloud/gcp/authorizer.go

@@ -0,0 +1,132 @@
+package gcp
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"google.golang.org/api/option"
+)
+
+// Authorizer type ids registered with SelectAuthorizerByType.
+const ServiceAccountKeyAuthorizerType = "GCPServiceAccountKey"
+const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"
+
+// Authorizer provides the []option.ClientOption used when creating clients with the GCP SDK.
+type Authorizer interface {
+	config.Authorizer
+	CreateGCPClientOptions() ([]option.ClientOption, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts
+// as the register of GCP Authorizer types; it returns a fresh, empty instance
+// of the implementation matching typeStr.
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	constructors := map[string]func() Authorizer{
+		ServiceAccountKeyAuthorizerType: func() Authorizer { return &ServiceAccountKey{} },
+		WorkloadIdentityAuthorizerType:  func() Authorizer { return &WorkloadIdentity{} },
+	}
+
+	newAuthorizer, ok := constructors[typeStr]
+	if !ok {
+		return nil, fmt.Errorf("GCP: provider authorizer type '%s' is not valid", typeStr)
+	}
+	return newAuthorizer(), nil
+}
+
+// ServiceAccountKey authorizes with a GCP service account key, stored as the
+// parsed key/value pairs of the key's JSON document.
+type ServiceAccountKey struct {
+	Key map[string]string `json:"key"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+// so the concrete type can be recovered by AuthorizerFromInterface.
+func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 2)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap["key"] = gkc.Key
+	return json.Marshal(fmap)
+}
+
+// Validate returns an error when no service account key material is present.
+func (gkc *ServiceAccountKey) Validate() error {
+	// len() of a nil map is 0, so a single length check covers both the nil
+	// and the empty case (the explicit nil check was redundant).
+	if len(gkc.Key) == 0 {
+		return fmt.Errorf("ServiceAccountKey: missing Key")
+	}
+
+	return nil
+}
+
+// Equals reports whether config is a *ServiceAccountKey holding an identical
+// key map (same keys, same values).
+func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*ServiceAccountKey)
+	if !ok || len(gkc.Key) != len(that.Key) {
+		return false
+	}
+
+	// Equal lengths plus every entry matching implies identical maps.
+	for k, v := range gkc.Key {
+		if that.Key[k] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Sanitize returns a copy of the key with every value replaced by the
+// Redacted placeholder, preserving the set of keys.
+func (gkc *ServiceAccountKey) Sanitize() config.Config {
+	redactedMap := make(map[string]string, len(gkc.Key))
+	// gofmt/vet: "for key := range" replaces the redundant "for key, _ := range".
+	for key := range gkc.Key {
+		redactedMap[key] = config.Redacted
+	}
+	return &ServiceAccountKey{
+		Key: redactedMap,
+	}
+}
+
+// CreateGCPClientOptions validates the key and converts it into an
+// option.WithCredentialsJSON client option for GCP SDK clients.
+func (gkc *ServiceAccountKey) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	err := gkc.Validate()
+	if err != nil {
+		return nil, err
+	}
+
+	// Re-marshal the key map back into the JSON document the SDK expects.
+	b, err := json.Marshal(gkc.Key)
+	if err != nil {
+		return nil, fmt.Errorf("Key: failed to marshal Key: %s", err.Error())
+	}
+	clientOption := option.WithCredentialsJSON(b)
+
+	// The creation of the BigQuery Client is where FAILED_CONNECTION CloudConnectionStatus is recorded for GCP
+	return []option.ClientOption{clientOption}, nil
+}
+
+// WorkloadIdentity passes an empty slice of client options which causes the GCP SDK to check for the workload identity in the environment
+type WorkloadIdentity struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
+	return json.Marshal(fmap)
+}
+
+// Validate always succeeds; workload identity carries no configuration.
+func (wi *WorkloadIdentity) Validate() error {
+	return nil
+}
+
+// Equals reports whether config is also a *WorkloadIdentity; there is no
+// state to compare beyond the type itself.
+func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*WorkloadIdentity)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+// Sanitize has nothing to redact; it returns a fresh empty value.
+func (wi *WorkloadIdentity) Sanitize() config.Config {
+	return &WorkloadIdentity{}
+}
+
+// CreateGCPClientOptions returns no options so the SDK falls back to ambient
+// credentials from the environment.
+func (wi *WorkloadIdentity) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	return []option.ClientOption{}, nil
+}

+ 172 - 0
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -0,0 +1,172 @@
+package gcp
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// BigQueryConfiguration locates the BigQuery billing export table
+// (project/dataset/table) and carries the Authorizer used to build clients.
+type BigQueryConfiguration struct {
+	ProjectID  string     `json:"projectID"`
+	Dataset    string     `json:"dataset"`
+	Table      string     `json:"table"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+// Validate ensures the Authorizer, ProjectID, Dataset and Table are all set,
+// validating the Authorizer first.
+func (bqc *BigQueryConfiguration) Validate() error {
+
+	if bqc.Authorizer == nil {
+		// NOTE(review): "configurer" likely should read "authorizer", but
+		// tests assert this exact message; change both together if fixing.
+		return fmt.Errorf("BigQueryConfig: missing configurer")
+	}
+
+	err := bqc.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: %s", err.Error())
+	}
+
+	if bqc.ProjectID == "" {
+		return fmt.Errorf("BigQueryConfig: missing ProjectID")
+	}
+
+	if bqc.Dataset == "" {
+		return fmt.Errorf("BigQueryConfig: missing Dataset")
+	}
+
+	if bqc.Table == "" {
+		return fmt.Errorf("BigQueryConfig: missing Table")
+	}
+
+	return nil
+}
+
+// Equals reports whether config is a *BigQueryConfiguration with the same
+// project, dataset, table, and an equal Authorizer.
+func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*BigQueryConfiguration)
+	if !ok {
+		return false
+	}
+
+	// Authorizers must either both be nil or compare equal.
+	if bqc.Authorizer == nil {
+		if that.Authorizer != nil {
+			return false
+		}
+	} else if !bqc.Authorizer.Equals(that.Authorizer) {
+		return false
+	}
+
+	return bqc.ProjectID == that.ProjectID &&
+		bqc.Dataset == that.Dataset &&
+		bqc.Table == that.Table
+}
+
+// Sanitize returns a copy safe for display/serialization; the Authorizer is
+// sanitized so credentials are redacted. A nil Authorizer is carried through
+// instead of causing a nil-pointer panic (the original dereferenced it
+// unconditionally).
+func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+	sanitized := &BigQueryConfiguration{
+		ProjectID: bqc.ProjectID,
+		Dataset:   bqc.Dataset,
+		Table:     bqc.Table,
+	}
+	if bqc.Authorizer != nil {
+		sanitized.Authorizer = bqc.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}
+
+// Key uses the Usage Project Id as the Provider Key for GCP,
+// formatted as "<projectID>/<dataset>.<table>".
+func (bqc *BigQueryConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
+}
+
+// GetBillingDataDataset returns the fully qualified "<dataset>.<table>"
+// identifier of the billing export table.
+func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
+	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
+}
+
+// GetBigQueryClient builds a bigquery.Client for the configured project using
+// client options supplied by the Authorizer.
+// NOTE(review): assumes Authorizer is non-nil; call Validate first.
+func (bqc *BigQueryConfiguration) GetBigQueryClient(ctx context.Context) (*bigquery.Client, error) {
+	clientOpts, err := bqc.Authorizer.CreateGCPClientOptions()
+	if err != nil {
+		return nil, err
+	}
+	return bigquery.NewClient(ctx, bqc.ProjectID, clientOpts...)
+}
+
+// UnmarshalJSON deserializes a BigQueryConfiguration, resolving the concrete
+// Authorizer implementation from the "authorizerType" property of the nested
+// authorizer object.
+func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	// Checked assertion: valid JSON that is not an object (array, string, ...)
+	// should produce an error rather than a panic.
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: JSON value is not an object")
+	}
+
+	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.ProjectID = projectID
+
+	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Dataset = dataset
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Table = table
+
+	// The original error prefixes here said "StorageConfiguration" (copy-paste
+	// from the Azure implementation); corrected to name this type.
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Authorizer = authorizer
+	return nil
+}
+
+// ConvertBigQueryConfigToConfig converts the legacy BigQueryConfig into a
+// KeyedConfig, defaulting to workload identity when no service account key is
+// present. Returns nil for an empty legacy config.
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+	if bqc.IsEmpty() {
+		return nil
+	}
+
+	// BillingDataDataset is stored as "<dataset>.<table>"; a missing table
+	// portion leaves Table empty.
+	parts := strings.Split(bqc.BillingDataDataset, ".")
+	dataset := parts[0]
+	table := ""
+	if len(parts) > 1 {
+		table = parts[1]
+	}
+
+	// A provided service account key takes precedence over workload identity.
+	var authorizer Authorizer = &WorkloadIdentity{}
+	if len(bqc.Key) != 0 {
+		authorizer = &ServiceAccountKey{
+			Key: bqc.Key,
+		}
+	}
+
+	return &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    dataset,
+		Table:      table,
+		Authorizer: authorizer,
+	}
+}

+ 388 - 0
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -0,0 +1,388 @@
+package gcp
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBigQueryConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BigQueryConfiguration
+		expected error
+	}{
+		"valid config GCP Key": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: nil,
+		},
+		"valid config WorkloadIdentity": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: nil,
+		},
+		"access Key invalid": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: nil,
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: ServiceAccountKey: missing Key"),
+		},
+		"missing configurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing configurer"),
+		},
+		"missing projectID": {
+			config: BigQueryConfiguration{
+				ProjectID: "",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing ProjectID"),
+		},
+		"missing dataset": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Dataset"),
+		},
+		"missing table": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Table"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BigQueryConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: true,
+		},
+		"different configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing both configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing right configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different projectID": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID2",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different dataset": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset2",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table2",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &ServiceAccountKey{
+
+				Key: map[string]string{
+					"Key":  "Key",
+					"key1": "key2",
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BigQueryConfiguration
+	}{
+		"Empty Config": {
+			config: BigQueryConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+		},
+		"ServiceAccountKeyConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+		},
+		"WorkLoadIdentityConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BigQueryConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 110 - 0
pkg/cloud/gcp/bigqueryquerier.go

@@ -0,0 +1,110 @@
+package gcp
+
+import (
+	"context"
+	"regexp"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
// BigQueryQuerier runs queries against BigQuery using the connection details
// and credentials of its embedded BigQueryConfiguration.
type BigQueryQuerier struct {
	BigQueryConfiguration
}
+
+func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BigQueryQuerier)
+	if !ok {
+		return false
+	}
+
+	return bqq.BigQueryConfiguration.Equals(&thatConfig.BigQueryConfiguration)
+}
+
// QueryBigQuery executes queryStr against BigQuery and returns the resulting
// row iterator.
// NOTE(review): a new client is created on every call and is never closed;
// closing it here is not an option because the returned RowIterator appears to
// page lazily through the client. Consider caching one client on the querier
// and closing it when the querier is retired — TODO confirm.
func (bqq *BigQueryQuerier) QueryBigQuery(ctx context.Context, queryStr string) (*bigquery.RowIterator, error) {
	client, err := bqq.GetBigQueryClient(ctx)
	if err != nil {
		return nil, err
	}

	query := client.Query(queryStr)
	return query.Read(ctx)
}
+
+func GCPSelectCategory(service, description string) string {
+	s := strings.ToLower(service)
+	d := strings.ToLower(description)
+
+	// Network descriptions
+	if strings.Contains(d, "download") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "network") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "ingress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "egress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "static ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "external ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "load balanced") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "licensing fee") {
+		return kubecost.OtherCategory
+	}
+
+	// Storage Descriptions
+	if strings.Contains(d, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd capacity") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd iops") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd snapshot") {
+		return kubecost.StorageCategory
+	}
+
+	// Service Defaults
+	if strings.Contains(s, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "compute") {
+		return kubecost.ComputeCategory
+	}
+	if strings.Contains(s, "sql") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "bigquery") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "kubernetes") {
+		return kubecost.ManagementCategory
+	} else if strings.Contains(s, "pub/sub") {
+		return kubecost.NetworkCategory
+	}
+
+	return kubecost.OtherCategory
+}
+
// parseProviderIDRx captures the final path segment of a GCP provider ID,
// e.g. "gke-cluster-3-default-pool-xxxx-yy" from
// "projects/###/instances/gke-cluster-3-default-pool-xxxx-yy".
// A raw string literal replaces the interpreted string's redundant `\/`
// escape (flagged by staticcheck; "/" needs no escaping in Go regexps).
var parseProviderIDRx = regexp.MustCompile(`^.+/(.+)?`)

// GCPParseProviderID returns the resource-name portion of a GCP provider ID,
// or the input unchanged when it contains no "/"-separated path.
func GCPParseProviderID(id string) string {
	match := parseProviderIDRx.FindStringSubmatch(id)
	if len(match) == 0 {
		return id
	}
	return match[len(match)-1]
}

+ 1 - 0
pkg/cloud/gcp/gcpprovider.go → pkg/cloud/gcp/provider.go

@@ -176,6 +176,7 @@ func (gcp *GCP) GetConfig() (*models.CustomPricing, error) {
 }
 
 // BigQueryConfig contain the required config and credentials to access OOC resources for GCP
+// Deprecated: v1.104 Use BigQueryConfiguration instead
 type BigQueryConfig struct {
 	ProjectID          string            `json:"projectID"`
 	BillingDataDataset string            `json:"billingDataDataset"`

+ 0 - 0
pkg/cloud/gcp/gcpprovider_test.go → pkg/cloud/gcp/provider_test.go


+ 1 - 1
pkg/cloud/csvprovider.go → pkg/cloud/provider/csvprovider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"encoding/csv"

+ 6 - 6
pkg/cloud/customprovider.go → pkg/cloud/provider/customprovider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -33,10 +33,10 @@ type CustomProvider struct {
 	SpotLabelValue          string
 	GPULabel                string
 	GPULabelValue           string
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 }
 
 var volTypes = map[string]string{
@@ -147,8 +147,8 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 		m["name"] = conf.ClusterName
 	}
 	m["provider"] = kubecost.CustomProvider
-	m["region"] = cp.clusterRegion
-	m["account"] = cp.clusterAccountID
+	m["region"] = cp.ClusterRegion
+	m["account"] = cp.ClusterAccountID
 	m["id"] = env.GetClusterID()
 	return m, nil
 }

+ 14 - 12
pkg/cloud/provider.go → pkg/cloud/provider/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -8,10 +8,12 @@ import (
 	"strings"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/scaleway"
 	"github.com/opencost/opencost/pkg/kubecost"
 
 	"github.com/opencost/opencost/pkg/util"
@@ -167,8 +169,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			CSVLocation: env.GetCSVPath(),
 			CustomProvider: &CustomProvider{
 				Clientset:        cache,
-				clusterRegion:    cp.region,
-				clusterAccountID: cp.accountID,
+				ClusterRegion:    cp.region,
+				ClusterAccountID: cp.accountID,
 				Config:           NewProviderConfig(config, cp.configFileName),
 			},
 		}, nil
@@ -215,19 +217,19 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		}, nil
 	case kubecost.AlibabaProvider:
 		log.Info("Found ProviderID starting with \"alibaba\", using Alibaba Cloud Provider")
-		return &Alibaba{
+		return &alibaba.Alibaba{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
-			clusterRegion:        cp.region,
-			clusterAccountId:     cp.accountID,
-			serviceAccountChecks: models.NewServiceAccountChecks(),
+			ClusterRegion:        cp.region,
+			ClusterAccountId:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
 		}, nil
 	case kubecost.ScalewayProvider:
 		log.Info("Found ProviderID starting with \"scaleway\", using Scaleway Provider")
-		return &Scaleway{
+		return &scaleway.Scaleway{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 
@@ -235,8 +237,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		log.Info("Unsupported provider, falling back to default")
 		return &CustomProvider{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 	}

+ 2 - 3
pkg/cloud/providerconfig.go → pkg/cloud/provider/providerconfig.go

@@ -1,8 +1,7 @@
-package cloud
+package provider
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	gopath "path"
 	"strconv"
@@ -277,7 +276,7 @@ func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
 	if _, err := os.Stat(providerConfigFile); err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to find file %s with err: %v", providerConfigFile, err)
 	}
-	configFile, err := ioutil.ReadFile(providerConfigFile)
+	configFile, err := os.ReadFile(providerConfigFile)
 	if err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to open file %s with err: %v", providerConfigFile, err)
 	}

+ 6 - 6
pkg/cloud/scalewayprovider.go → pkg/cloud/scaleway/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package scaleway
 
 import (
 	"errors"
@@ -36,10 +36,10 @@ type ScalewayPricing struct {
 
 type Scaleway struct {
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 	Pricing                 map[string]*ScalewayPricing
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
 }
 
@@ -288,8 +288,8 @@ func (scw *Scaleway) ClusterInfo() (map[string]string, error) {
 		m["name"] = c.ClusterName
 	}
 	m["provider"] = kubecost.ScalewayProvider
-	m["region"] = scw.clusterRegion
-	m["account"] = scw.clusterAccountID
+	m["region"] = scw.ClusterRegion
+	m["account"] = scw.ClusterAccountID
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
 	m["id"] = env.GetClusterID()
 	return m, nil

+ 3 - 3
pkg/cmd/agent/agent.go

@@ -7,7 +7,7 @@ import (
 	"path"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -157,13 +157,13 @@ func Execute(opts *AgentOpts) error {
 	})
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(clusterCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(clusterCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	watchConfigFunc := configWatchers.ToWatchFunc()
 	watchedConfigs := configWatchers.GetWatchedConfigs()
 

+ 6 - 6
pkg/costmodel/aggregation.go

@@ -11,10 +11,10 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/patrickmn/go-cache"
 	prometheusClient "github.com/prometheus/client_golang/api"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/errors"
@@ -761,7 +761,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 	if err != nil {
 		log.Errorf("failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cp) && err == nil {
+	if provider.CustomPricesEnabled(cp) && err == nil {
 		var cpuCostStr string
 		var ramCostStr string
 		var gpuCostStr string
@@ -839,7 +839,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 			cost, _ := strconv.ParseFloat(pvcData.Volume.Cost, 64)
 
 			// override with custom pricing if enabled
-			if cloud.CustomPricesEnabled(cp) {
+			if provider.CustomPricesEnabled(cp) {
 				cost = pvCost
 			}
 
@@ -1768,10 +1768,10 @@ func (a *Accesses) warmAggregateCostModelCache() {
 		aggOpts.NoExpireCache = false
 		aggOpts.ShareSplit = SplitTypeWeighted
 		aggOpts.RemoteEnabled = env.IsRemoteEnabled()
-		aggOpts.AllocateIdle = cloud.AllocateIdleByDefault(a.CloudProvider)
+		aggOpts.AllocateIdle = provider.AllocateIdleByDefault(a.CloudProvider)
 
-		sharedNamespaces := cloud.SharedNamespaces(a.CloudProvider)
-		sharedLabelNames, sharedLabelValues := cloud.SharedLabels(a.CloudProvider)
+		sharedNamespaces := provider.SharedNamespaces(a.CloudProvider)
+		sharedLabelNames, sharedLabelValues := provider.SharedLabels(a.CloudProvider)
 
 		if len(sharedNamespaces) > 0 || len(sharedLabelNames) > 0 {
 			aggOpts.SharedResources = NewSharedResourceInfo(true, sharedNamespaces, sharedLabelNames, sharedLabelValues)

+ 5 - 5
pkg/costmodel/allocation_helpers.go

@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
@@ -1432,7 +1432,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1470,7 +1470,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1508,7 +1508,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1654,7 +1654,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*nodePricing, nodeKey no
 	if err != nil {
 		log.Warnf("CostModel: failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
+	if provider.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
 		return cm.getCustomNodePricing(node.Preemptible, node.ProviderID)
 	}
 

+ 4 - 4
pkg/costmodel/cluster.go

@@ -5,10 +5,10 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	prometheus "github.com/prometheus/client_golang/api"
 	"golang.org/x/exp/slices"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
@@ -779,7 +779,7 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 				Cluster:    cluster,
 				Namespace:  namespace,
 				Name:       fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good
-				ProviderID: cloud.ParseLBID(providerID),
+				ProviderID: provider.ParseLBID(providerID),
 			}
 		}
 
@@ -1360,7 +1360,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Bytes = bytes
 	}
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterDisks: failed to load custom pricing: %s", err)
@@ -1405,7 +1405,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
 		providerID, _ := result.GetString("provider_id") // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {
-			diskMap[key].ProviderID = cloud.ParsePVID(providerID)
+			diskMap[key].ProviderID = provider.ParsePVID(providerID)
 		}
 	}
 

+ 10 - 10
pkg/costmodel/cluster_helpers.go

@@ -4,8 +4,8 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/log"
@@ -41,7 +41,7 @@ func buildCPUCostMap(
 	cpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -65,7 +65,7 @@ func buildCPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -115,7 +115,7 @@ func buildRAMCostMap(
 	ramCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -139,7 +139,7 @@ func buildRAMCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -190,7 +190,7 @@ func buildGPUCostMap(
 	gpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -214,7 +214,7 @@ func buildGPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -282,7 +282,7 @@ func buildGPUCountMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		gpuCountMap[key] = gpuCount
 	}
@@ -511,7 +511,7 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		if len(result.Values) == 0 {
@@ -560,7 +560,7 @@ func buildPreemptibleMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       nodeName,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		// TODO(michaelmdresser): check this condition at merge time?

+ 5 - 5
pkg/costmodel/cluster_helpers_test.go

@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/util"
@@ -853,8 +853,8 @@ func TestBuildGPUCostMap(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
 			}
 			testPreemptible := make(map[NodeIdentifier]bool)
 			result, _ := buildGPUCostMap(testCase.promResult, testCase.countMap, testProvider, testPreemptible)
@@ -1042,8 +1042,8 @@ func TestAssetCustompricing(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), ""),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), ""),
 			}
 			testProvider.UpdateConfigFromConfigMap(testCase.customPricingMap)
 

+ 3 - 3
pkg/costmodel/router.go

@@ -18,6 +18,7 @@ import (
 	"github.com/microcosm-cc/bluemonday"
 	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
@@ -34,7 +35,6 @@ import (
 
 	"github.com/getsentry/sentry-go"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -1592,13 +1592,13 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	k8sCache.Run()
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(k8sCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 
 	watchConfigFunc := configWatchers.ToWatchFunc()

+ 4 - 4
pkg/kubecost/status.go

@@ -37,10 +37,10 @@ type FileStatus struct {
 
 // CloudStatus describes CloudStore metadata
 type CloudStatus struct {
-	CloudConnectionStatus string                `json:"cloudConnectionStatus"`
-	ProviderType          string                `json:"providerType"`
-	CloudUsage            *CloudAssetStatus     `json:"cloudUsage,omitempty"`
-	Reconciliation        *ReconciliationStatus `json:"reconciliation,omitempty"`
+	ConnectionStatus string                `json:"cloudConnectionStatus"`
+	ProviderType     string                `json:"providerType"`
+	CloudUsage       *CloudAssetStatus     `json:"cloudUsage,omitempty"`
+	Reconciliation   *ReconciliationStatus `json:"reconciliation,omitempty"`
 }
 
 // CloudAssetStatus describes CloudAsset metadata of a CloudStore

+ 38 - 38
test/cloud_test.go

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -34,7 +34,7 @@ func TestRegionValueFromMapField(t *testing.T) {
 	n.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
 	n.Labels = make(map[string]string)
 	n.Labels[v1.LabelZoneRegion] = wantRegion
-	got := cloud.NodeValueFromMapField(providerIDMap, n, true)
+	got := provider.NodeValueFromMapField(providerIDMap, n, true)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -44,7 +44,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant := "i-05445591e0d182d42"
 	n := &v1.Node{}
 	n.Spec.ProviderID = "aws:///us-east-1a/i-05445591e0d182d42"
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -52,7 +52,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant2 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0")
 	n2 := &v1.Node{}
 	n2.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
-	got2 := cloud.NodeValueFromMapField(providerIDMap, n2, false)
+	got2 := provider.NodeValueFromMapField(providerIDMap, n2, false)
 	if got2 != providerIDWant2 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant2, got2)
 	}
@@ -60,7 +60,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant3 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0")
 	n3 := &v1.Node{}
 	n3.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0"
-	got3 := cloud.NodeValueFromMapField(providerIDMap, n3, false)
+	got3 := provider.NodeValueFromMapField(providerIDMap, n3, false)
 	if got3 != providerIDWant3 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant3, got3)
 	}
@@ -77,17 +77,17 @@ func TestNodeValueFromMapField(t *testing.T) {
 	n.Labels = make(map[string]string)
 	n.Labels["foo"] = labelFooWant
 
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(nameMap, n, false)
+	got = provider.NodeValueFromMapField(nameMap, n, false)
 	if got != nameWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", nameMap, nameWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(labelMapFoo, n, false)
+	got = provider.NodeValueFromMapField(labelMapFoo, n, false)
 	if got != labelFooWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", labelMapFoo, labelFooWant, got)
 	}
@@ -104,10 +104,10 @@ func TestPVPriceFromCSV(t *testing.T) {
 	})
 
 	wantPrice := "0.1337"
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_pv.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -152,10 +152,10 @@ func TestNodePriceFromCSVWithGPU(t *testing.T) {
 	n2.Status.Capacity = v1.ResourceList{"nvidia.com/gpu": *resource.NewScaledQuantity(2, 0)}
 	wantPrice2 := "1.733700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -211,10 +211,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 
 	wantPrice := "0.133700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -241,10 +241,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node")
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k3 := c.GetKey(n.Labels, n)
@@ -287,10 +287,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 	n3.Labels[v1.LabelZoneRegion] = "fakeregion"
 	wantPrice3 := "0.1339"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_region.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -337,10 +337,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node, instead returned %+v", resN4)
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k5 := c.GetKey(n.Labels, n)
@@ -379,10 +379,10 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "invalid.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "invalid.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -413,10 +413,10 @@ func TestSourceMatchesFromCSV(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -492,10 +492,10 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -526,10 +526,10 @@ func TestNodePriceFromCSVByClass(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}