Ajay Tripathy 3 года назад
Родитель
Commit
dad87d9821
54 измененных файлов с 16604 добавлено и 0 удалено
  1. 1 0
      config/invalid.json
  2. 87 0
      pkg/cloud/alibaba/authorizer.go
  3. 130 0
      pkg/cloud/alibaba/boaconfiguration.go
  4. 289 0
      pkg/cloud/alibaba/boaconfiguration_test.go
  5. 127 0
      pkg/cloud/alibaba/boaquerier.go
  6. 1398 0
      pkg/cloud/alibaba/provider.go
  7. 839 0
      pkg/cloud/alibaba/provider_test.go
  8. 233 0
      pkg/cloud/aws/athenaconfiguration.go
  9. 594 0
      pkg/cloud/aws/athenaconfiguration_test.go
  10. 208 0
      pkg/cloud/aws/athenaquerier.go
  11. 251 0
      pkg/cloud/aws/authorizer.go
  12. 67 0
      pkg/cloud/aws/authorizer_test.go
  13. 2307 0
      pkg/cloud/aws/provider.go
  14. 496 0
      pkg/cloud/aws/provider_test.go
  15. 134 0
      pkg/cloud/aws/s3configuration.go
  16. 40 0
      pkg/cloud/aws/s3connection.go
  17. 387 0
      pkg/cloud/aws/s3connection_test.go
  18. 181 0
      pkg/cloud/aws/s3selectquerier.go
  19. 80 0
      pkg/cloud/azure/authorizer.go
  20. 322 0
      pkg/cloud/azure/billingexportparser.go
  21. 194 0
      pkg/cloud/azure/billingexportparser_test.go
  22. 124 0
      pkg/cloud/azure/pricesheetclient.go
  23. 300 0
      pkg/cloud/azure/pricesheetdownloader.go
  24. 99 0
      pkg/cloud/azure/pricesheetdownloader_test.go
  25. 1649 0
      pkg/cloud/azure/provider.go
  26. 97 0
      pkg/cloud/azure/provider_test.go
  27. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/BOM.csv
  28. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv
  29. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv
  30. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/German.csv
  31. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv
  32. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/YA.csv
  33. 2 0
      pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv
  34. 88 0
      pkg/cloud/azure/resources/billingexports/values/Template.csv
  35. 2 0
      pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv
  36. 170 0
      pkg/cloud/azure/storagebillingparser.go
  37. 204 0
      pkg/cloud/azure/storagebillingparser_test.go
  38. 179 0
      pkg/cloud/azure/storageconfiguration.go
  39. 446 0
      pkg/cloud/azure/storageconfiguration_test.go
  40. 77 0
      pkg/cloud/azure/storageconnection.go
  41. 53 0
      pkg/cloud/config/authorizer.go
  42. 37 0
      pkg/cloud/config/config.go
  43. 42 0
      pkg/cloud/connectionstatus.go
  44. 132 0
      pkg/cloud/gcp/authorizer.go
  45. 172 0
      pkg/cloud/gcp/bigqueryconfiguration.go
  46. 388 0
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  47. 110 0
      pkg/cloud/gcp/bigqueryquerier.go
  48. 1632 0
      pkg/cloud/gcp/provider.go
  49. 369 0
      pkg/cloud/gcp/provider_test.go
  50. 438 0
      pkg/cloud/provider/csvprovider.go
  51. 406 0
      pkg/cloud/provider/customprovider.go
  52. 342 0
      pkg/cloud/provider/provider.go
  53. 290 0
      pkg/cloud/provider/providerconfig.go
  54. 379 0
      pkg/cloud/scaleway/provider.go

+ 1 - 0
config/invalid.json

@@ -0,0 +1 @@
+{"provider":"base","description":"Default prices based on GCP us-central1","CPU":"0.031611","spotCPU":"0.006655","RAM":"0.004237","spotRAM":"0.000892","GPU":"0.95","spotGPU":"0.308","storage":"0.00005479452","zoneNetworkEgress":"0.01","regionNetworkEgress":"0.01","internetNetworkEgress":"0.12","firstFiveForwardingRulesCost":"","additionalForwardingRuleCost":"","LBIngressDataCost":"","athenaBucketName":"","athenaRegion":"","athenaDatabase":"","athenaTable":"","athenaWorkgroup":"","masterPayerARN":"","customPricesEnabled":"false","defaultIdle":"","azureSubscriptionID":"","azureClientID":"","azureClientSecret":"","azureTenantID":"","azureBillingRegion":"","azureOfferDurableID":"","azureStorageSubscriptionID":"","azureStorageAccount":"","azureStorageAccessKey":"","azureStorageContainer":"","azureContainerPath":"","azureCloud":"","currencyCode":"","discount":"","negotiatedDiscount":"","sharedOverhead":"","clusterName":"","sharedNamespaces":"","sharedLabelNames":"","sharedLabelValues":"","shareTenancyCosts":"true","readOnly":"","editorAccess":"","kubecostToken":"","googleAnalyticsTag":"","excludeProviderID":""}

+ 87 - 0
pkg/cloud/alibaba/authorizer.go

@@ -0,0 +1,87 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AccessKeyAuthorizerType is the discriminator value written by
// AccessKey.MarshalJSON and recognized by SelectAuthorizerByType.
const AccessKeyAuthorizerType = "AlibabaAccessKey"

// Authorizer provides credentials for Alibaba Cloud billing-related SDK calls
// (the BSS Open API client — NOTE(review): the original comment said "BOS",
// which looks like a typo for "BSS"; confirm against the querier usage).
type Authorizer interface {
	config.Authorizer
	GetCredentials() (auth.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("alibaba: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey holds Alibaba credentials parsed from the service-key.json file.
type AccessKey struct {
	AccessKeyID     string `json:"accessKeyID"`     // public key identifier; kept by Sanitize
	AccessKeySecret string `json:"accessKeySecret"` // secret; replaced with config.Redacted by Sanitize
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKeyID"] = ak.AccessKeyID
+	fmap["accessKeySecret"] = ak.AccessKeySecret
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKeyID == "" {
+		return fmt.Errorf("AccessKey: missing Access key ID")
+	}
+	if ak.AccessKeySecret == "" {
+		return fmt.Errorf("AccessKey: missing Access Key secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKeyID != thatConfig.AccessKeyID {
+		return false
+	}
+	if ak.AccessKeySecret != thatConfig.AccessKeySecret {
+		return false
+	}
+	return true
+}
+
// Sanitize returns a display-safe copy: the key ID is preserved while the
// secret is replaced with the shared config.Redacted placeholder.
func (ak *AccessKey) Sanitize() config.Config {
	return &AccessKey{
		AccessKeyID:     ak.AccessKeyID,
		AccessKeySecret: config.Redacted,
	}
}
+
+// GetCredentials creates a credentials object to authorize the use of service sdk calls
+func (ak *AccessKey) GetCredentials() (auth.Credential, error) {
+	err := ak.Validate()
+	if err != nil {
+		return nil, err
+	}
+	return &credentials.AccessKeyCredential{AccessKeyId: ak.AccessKeyID, AccessKeySecret: ak.AccessKeySecret}, nil
+}

+ 130 - 0
pkg/cloud/alibaba/boaconfiguration.go

@@ -0,0 +1,130 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// BOAConfiguration is the BSS Open API configuration for Alibaba's billing
// information; Account and Region together form the configuration Key.
type BOAConfiguration struct {
	Account    string     `json:"account"`    // Alibaba account identifier
	Region     string     `json:"region"`     // region the billing data is queried in
	Authorizer Authorizer `json:"authorizer"` // credentials used for BSS Open API calls
}
+
+func (bc *BOAConfiguration) Validate() error {
+	// Validate Authorizer
+	if bc.Authorizer == nil {
+		return fmt.Errorf("BOAConfiguration: missing authorizer")
+	}
+
+	err := bc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	// Validate base properties
+	if bc.Region == "" {
+		return fmt.Errorf("BOAConfiguration: missing region")
+	}
+
+	if bc.Account == "" {
+		return fmt.Errorf("BOAConfiguration: missing account")
+	}
+	return nil
+}
+
+func (bc *BOAConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BOAConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bc.Authorizer != nil {
+		if !bc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bc.Account != thatConfig.Account {
+		return false
+	}
+
+	if bc.Region != thatConfig.Region {
+		return false
+	}
+	return true
+}
+
+func (bc *BOAConfiguration) Sanitize() config.Config {
+	return &BOAConfiguration{
+		Account:    bc.Account,
+		Region:     bc.Region,
+		Authorizer: bc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (bc *BOAConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
+}
+
+func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Account = account
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Region = region
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+	if acc.IsEmpty() {
+		return nil
+	}
+	var configurer Authorizer
+
+	configurer = &AccessKey{
+		AccessKeyID:     acc.AlibabaServiceKeyName,
+		AccessKeySecret: acc.AlibabaServiceKeySecret,
+	}
+
+	return &BOAConfiguration{
+		Account:    acc.AlibabaAccountID,
+		Region:     acc.AlibabaClusterRegion,
+		Authorizer: configurer,
+	}
+}

+ 289 - 0
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -0,0 +1,289 @@
+package alibaba
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBoaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BOAConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access key ID"),
+		},
+		"access secret invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID: "accessKeyId",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access Key secret"),
+		},
+		"missing authorizer": {
+			config: BOAConfiguration{
+				Account:    "Account Number",
+				Region:     "Region",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing authorizer"),
+		},
+		"missing Account": {
+			config: BOAConfiguration{
+				Account: "",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing account"),
+		},
+		"missing Region": {
+			config: BOAConfiguration{
+				Account: "Account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing region"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BOAConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id2",
+					AccessKeySecret: "secret2",
+				},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different region": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &AccessKey{
+				AccessKeyID:     "id",
+				AccessKeySecret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BOAConfiguration
+	}{
+		"Empty Config": {
+			config: BOAConfiguration{},
+		},
+		"AccessKey": {
+			config: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BOAConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 127 - 0
pkg/cloud/alibaba/boaquerier.go

@@ -0,0 +1,127 @@
+package alibaba
+
+import (
+	"fmt"
+	"strings"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
// Markers used to classify billing items returned by the BSS Open API.
const (
	boaIsNode    = "i-"    // an item is a node when its instance ID is prefixed "i-"
	boaIsDisk    = "d-"    // an item is a disk when its instance ID is prefixed "d-"
	boaIsNetwork = "piece" // usage unit of a network resource in Alibaba is "piece"
)
+
+type BoaQuerier struct {
+	BOAConfiguration
+}
+
+func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BoaQuerier)
+	if !ok {
+		return false
+	}
+
+	return bq.BOAConfiguration.Equals(&thatConfig.BOAConfiguration)
+}
+
+// QueryInstanceBill performs the request to the BSS client and get the response for the current page number
+func (bq *BoaQuerier) QueryInstanceBill(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, pageNum int) (*bssopenapi.QueryInstanceBillResponse, error) {
+	log.Debugf("QueryInstanceBill: query for BSS Open API for billing date: %s with pageNum: %d ", billingDate, pageNum)
+	request := bssopenapi.CreateQueryInstanceBillRequest()
+	request.Scheme = invocationScheme
+	request.BillingCycle = billingCycle
+	request.IsBillingItem = requests.NewBoolean(true)
+	request.Granularity = granularity
+	request.BillingDate = billingDate
+	request.PageNum = requests.NewInteger(pageNum)
+	response, err := client.QueryInstanceBill(request)
+	if err != nil {
+		return nil, fmt.Errorf("QueryInstanceBill: Failed to hit the BSS Open API with error for page num %d: %v", pageNum, err)
+	}
+	log.Debugf("QueryInstanceBill: Total Number of total items for billing Date: %s pageNum: %d is %d", billingDate, pageNum, response.Data.TotalCount)
+	return response, nil
+}
+
+// QueryBoaPaginated Calls the API in a paginated fashion. There's no paramter in API that can distinguish if it hasMorePages
+// hence the logic of processedItem <= TotalItem.
+func (bq *BoaQuerier) QueryBoaPaginated(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, fn func(*bssopenapi.QueryInstanceBillResponse) bool) error {
+	pageNum := 1
+	processedItem := 0 // setting default here to hit the API for the first time
+	totalItem := 1
+	for processedItem < totalItem {
+		log.Debugf("QueryBoaPaginated: query for BSS Open API for billing date: %s with pageNum: %d", billingDate, pageNum)
+		response, err := bq.QueryInstanceBill(client, isBillingItem, invocationScheme, granularity, billingCycle, billingDate, pageNum)
+		if err != nil {
+			return fmt.Errorf("QueryBoaPaginated for billing cycle : %s, billing date: %s, page num %d: %v", billingCycle, billingDate, pageNum, err)
+		}
+		fn(response)
+		totalItem = response.Data.TotalCount
+		processedItem += response.Data.PageSize
+		pageNum += 1
+	}
+	return nil
+}
+
+// GetBoaQueryInstanceBillFunc gives the item to the handler function in boaIntegration.go to process
+// computeItem, topNItem and aggregatedItem
+func GetBoaQueryInstanceBillFunc(fn func(bssopenapi.Item) error, billingDate string) func(output *bssopenapi.QueryInstanceBillResponse) bool {
+	processBOAItems := func(output *bssopenapi.QueryInstanceBillResponse) bool {
+		// This could be connection error were unable to fetch response output from Client
+		if output == nil {
+			log.Errorf("BoaQuerier: No Response from the ALibaba BSS Open API client for billing Date: %s", billingDate)
+			return false
+		}
+
+		// These infer that the rest call was successful but the Cloud Usage resource for those days were 0
+		if output.Data.TotalCount == 0 {
+			log.Warnf("BoaQuerier: Total Item Count is 0 for billing Date: %s ", billingDate)
+			return false
+		}
+
+		for _, item := range output.Data.Items.Item {
+			fn(item)
+		}
+		return true
+	}
+	return processBOAItems
+}
+
+// SelectAlibabaCategory processes the Alibaba service to associated Kubecost category
+func SelectAlibabaCategory(item bssopenapi.Item) string {
+	if (item != bssopenapi.Item{}) {
+		// Provider ID has prefix "i-" for node in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsNode) {
+			return kubecost.ComputeCategory
+		}
+		// Provider ID for disk start with "d-" for storage type in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsDisk) {
+			return kubecost.StorageCategory
+		}
+		// Network has the highest priority and is based on the usage type of "piece" in Alibaba
+		if item.UsageUnit == boaIsNetwork {
+			return kubecost.NetworkCategory
+		}
+	}
+
+	// Alibaba CUR integration report has service lower case mostly unlike AWS
+	// TO-DO: Can investigate further product codes but bare minimal differentiation for start
+	switch strings.ToLower(item.ProductCode) {
+	case "slb", "eip", "nis", "gtm":
+		return kubecost.NetworkCategory
+	case "ecs", "eds", "sas":
+		return kubecost.ComputeCategory
+	case "ack":
+		return kubecost.ManagementCategory
+	case "ebs", "oss", "scu":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}

+ 1398 - 0
pkg/cloud/alibaba/provider.go

@@ -0,0 +1,1398 @@
+package alibaba
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+	"golang.org/x/exp/slices"
+	v1 "k8s.io/api/core/v1"
+)
+
// Constants for the Alibaba ECS pricing integration: endpoint identity
// (product/version/domain), API action names, resource and billing types,
// price units, and disk/PV category keywords used when building
// DescribePrice/DescribeDisks requests.
const (
	ALIBABA_ECS_PRODUCT_CODE                   = "ecs"
	ALIBABA_ECS_VERSION                        = "2014-05-26"
	ALIBABA_ECS_DOMAIN                         = "ecs.aliyuncs.com"
	ALIBABA_DESCRIBE_PRICE_API_ACTION          = "DescribePrice"
	ALIBABA_DESCRIBE_DISK_API_ACTION           = "DescribeDisks"
	ALIBABA_INSTANCE_RESOURCE_TYPE             = "instance"
	ALIBABA_DISK_RESOURCE_TYPE                 = "disk"
	ALIBABA_PAY_AS_YOU_GO_BILLING              = "Pay-As-You-Go"
	ALIBABA_SUBSCRIPTION_BILLING               = "Subscription"
	ALIBABA_PREEMPTIBLE_BILLING                = "Preemptible"
	ALIBABA_OPTIMIZE_KEYWORD                   = "optimize"
	ALIBABA_NON_OPTIMIZE_KEYWORD               = "nonoptimize"
	ALIBABA_HOUR_PRICE_UNIT                    = "Hour"
	ALIBABA_MONTH_PRICE_UNIT                   = "Month"
	ALIBABA_YEAR_PRICE_UNIT                    = "Year"
	ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE       = "unknown"
	ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE = "unsupported"
	ALIBABA_DISK_CLOUD_ESSD_CATEGORY           = "cloud_essd"
	ALIBABA_DISK_CLOUD_CATEGORY                = "cloud"
	ALIBABA_DATA_DISK_CATEGORY                 = "data"
	ALIBABA_SYSTEM_DISK_CATEGORY               = "system"
	ALIBABA_DATA_DISK_PREFIX                   = "DataDisk"
	ALIBABA_PV_CLOUD_DISK_TYPE                 = "CloudDisk"
	ALIBABA_PV_NAS_TYPE                        = "NAS"
	ALIBABA_PV_OSS_TYPE                        = "OSS"
	ALIBABA_DEFAULT_DATADISK_SIZE              = "2000"
	ALIBABA_DISK_TOPOLOGY_REGION_LABEL         = "topology.diskplugin.csi.alibabacloud.com/region"
	ALIBABA_DISK_TOPOLOGY_ZONE_LABEL           = "topology.diskplugin.csi.alibabacloud.com/zone"
)
+
var (
	// Regular expression that extracts the numeric portion of a "Gi"-suffixed
	// PV size (e.g. "100Gi") read from a *v1.PersistentVolume.
	sizeRegEx = regexp.MustCompile("(.*?)Gi")
)

// Instance families whose DescribePrice calls fail due to improper defaulting
// of systemDisk when that information is unavailable; these are defaulted to
// cloud_essd instead.
var alibabaDefaultToCloudEssd = []string{"g6e", "r6e", "r7", "g7", "g7a", "r7a"}
+
// Known Alibaba regions. Why predefined with a code dependency? Could be
// converted to an API call - https://www.alibabacloud.com/help/en/elastic-compute-service/latest/regions-describeregions
var alibabaRegions = []string{
	"cn-qingdao",
	"cn-beijing",
	"cn-zhangjiakou",
	"cn-huhehaote",
	"cn-wulanchabu",
	"cn-hangzhou",
	"cn-shanghai",
	"cn-nanjing",
	"cn-fuzhou",
	"cn-shenzhen",
	"cn-guangzhou",
	"cn-chengdu",
	"cn-hongkong",
	"ap-southeast-1",
	"ap-southeast-2",
	"ap-southeast-3",
	"ap-southeast-5",
	"ap-southeast-6",
	"ap-southeast-7",
	"ap-south-1",
	"ap-northeast-1",
	"ap-northeast-2",
	"us-west-1",
	"us-east-1",
	"eu-central-1",
	"me-east-1",
}
+
// To-Do: Convert to API call - https://www.alibabacloud.com/help/en/elastic-compute-service/latest/describeinstancetypefamilies
// Also first pass only completely tested pricing API for General purpose instance
// families & memory optimized instance families.
// Fix: "se1" was listed twice; the duplicate entry was removed (the slice is a
// membership list, so duplicates add nothing).
var alibabaInstanceFamilies = []string{
	"g7",
	"g7a",
	"g6e",
	"g6",
	"g5",
	"sn2",
	"sn2ne",
	"r7",
	"r7a",
	"r6e",
	"r6a",
	"r6",
	"r5",
	"se1",
	"se1ne",
	"re6",
	"re6p",
	"re4",
}
+
// AlibabaInfo contains configuration for Alibaba's CUR integration.
// Deprecated: v1.104 Use BOAConfiguration instead
type AlibabaInfo struct {
	AlibabaClusterRegion    string `json:"ClusterRegion"`
	AlibabaServiceKeyName   string `json:"serviceKeyName"`
	AlibabaServiceKeySecret string `json:"serviceKeySecret"`
	AlibabaAccountID        string `json:"accountID"`
}

// IsEmpty returns true if all fields in config are empty, false if not.
func (ai *AlibabaInfo) IsEmpty() bool {
	for _, field := range []string{
		ai.AlibabaClusterRegion,
		ai.AlibabaServiceKeyName,
		ai.AlibabaServiceKeySecret,
		ai.AlibabaAccountID,
	} {
		if field != "" {
			return false
		}
	}
	return true
}
+
// AlibabaAccessKey holds Alibaba credentials parsed from the service-key.json file.
// Deprecated: v1.104 Use AccessKey instead
type AlibabaAccessKey struct {
	AccessKeyID     string `json:"alibaba_access_key_id"`
	SecretAccessKey string `json:"alibaba_secret_access_key"`
}
+
// SlimK8sDisk is a slim version of a k8s disk assigned to a node or PV.
type SlimK8sDisk struct {
	DiskType         string
	RegionID         string
	PriceUnit        string
	SizeInGiB        string
	DiskCategory     string
	PerformanceLevel string
	ProviderID       string
	StorageClass     string
}

// NewSlimK8sDisk builds a SlimK8sDisk from the individual disk properties.
func NewSlimK8sDisk(diskType, regionID, priceUnit, diskCategory, performanceLevel, providerID, storageClass, sizeInGiB string) *SlimK8sDisk {
	disk := SlimK8sDisk{
		DiskType:         diskType,
		RegionID:         regionID,
		PriceUnit:        priceUnit,
		SizeInGiB:        sizeInGiB,
		DiskCategory:     diskCategory,
		PerformanceLevel: performanceLevel,
		ProviderID:       providerID,
		StorageClass:     storageClass,
	}
	return &disk
}
+
+// Slim version of a k8s v1.node just to pass along the object of this struct instead of constant getting the labels from within v1.Node & unit testing.
+type SlimK8sNode struct {
+	InstanceType       string
+	RegionID           string
+	PriceUnit          string
+	MemorySizeInKiB    string // TO-DO : Possible to convert to float?
+	IsIoOptimized      bool
+	OSType             string
+	ProviderID         string
+	SystemDisk         *SlimK8sDisk
+	InstanceTypeFamily string // Bug in DescribePrice, doesn't default to enhanced type correctly and you get an error in DescribePrice to get around need the family of the InstanceType.
+}
+
+func NewSlimK8sNode(instanceType, regionID, priceUnit, memorySizeInKiB, osType, providerID, instanceTypeFamily string, isIOOptimized bool, systemDiskInfo *SlimK8sDisk) *SlimK8sNode {
+	return &SlimK8sNode{
+		InstanceType:       instanceType,
+		RegionID:           regionID,
+		PriceUnit:          priceUnit,
+		MemorySizeInKiB:    memorySizeInKiB,
+		IsIoOptimized:      isIOOptimized,
+		OSType:             osType,
+		SystemDisk:         systemDiskInfo,
+		ProviderID:         providerID,
+		InstanceTypeFamily: instanceTypeFamily,
+	}
+}
+
+// AlibabaNodeAttributes represents metadata about the Node in its pricing information.
+// Basic Attributes needed atleast to get the key, Some attributes from k8s Node response
+// be populated directly into *Node object.
+type AlibabaNodeAttributes struct {
+	// InstanceType represents the type of instance.
+	InstanceType string `json:"instanceType"`
+	// MemorySizeInKiB represents the size of memory of instance.
+	MemorySizeInKiB string `json:"memorySizeInKiB"`
+	// IsIoOptimized represents the if instance is I/O optimized.
+	IsIoOptimized bool `json:"isIoOptimized"`
+	// OSType represents the OS installed in the Instance.
+	OSType string `json:"osType"`
+	// SystemDiskCategory represents the exact category of the system disk attached to the node.
+	SystemDiskCategory string `json:"systemDiskCategory"`
+	// SystemDiskSizeInGiB represents the size of the system disk attached to the node.
+	SystemDiskSizeInGiB string `json:"systemDiskSizeInGiB"`
+	// SystemDiskPerformanceLevel represents the performance level of the system disk attached to the node.
+	SystemDiskPerformanceLevel string `json:"systemPerformanceLevel"`
+}
+
+func NewAlibabaNodeAttributes(node *SlimK8sNode) *AlibabaNodeAttributes {
+	if node == nil {
+		return nil
+	}
+	var diskCategory, sizeInGiB, performanceLevel string
+	if node.SystemDisk != nil {
+		diskCategory = node.SystemDisk.DiskCategory
+		sizeInGiB = node.SystemDisk.SizeInGiB
+		performanceLevel = node.SystemDisk.PerformanceLevel
+	}
+	return &AlibabaNodeAttributes{
+		InstanceType:               node.InstanceType,
+		MemorySizeInKiB:            node.MemorySizeInKiB,
+		IsIoOptimized:              node.IsIoOptimized,
+		OSType:                     node.OSType,
+		SystemDiskCategory:         diskCategory,
+		SystemDiskSizeInGiB:        sizeInGiB,
+		SystemDiskPerformanceLevel: performanceLevel,
+	}
+}
+
// AlibabaPVAttributes represents the metadata of a PV used in its pricing information.
// These are the basic attributes needed at least to build lookup keys; some attributes
// from the k8s PV response are populated directly into the *PV object.
type AlibabaPVAttributes struct {
	// PVType can be Cloud Disk, Network Attached Storage (NAS) or Object Storage Service (OSS).
	// Represents the way the PV was attached.
	PVType string `json:"pvType"`
	// PVSubType represents the sub category of PVType. This is Data in case of Cloud Disk.
	PVSubType string `json:"pvSubType"`
	// PVCategory examples with cloudDisk PVType are cloud, cloud_efficiency, cloud_ssd,
	// ephemeral_ssd and cloud_essd. Empty when not present.
	PVCategory string `json:"pvCategory"`
	// PVPerformanceLevel examples with cloudDisk PVType are PL0, PL1, PL2 & PL3. Empty when not present.
	PVPerformanceLevel string `json:"performanceLevel"`
	// SizeInGiB is the size of the PV in GiB.
	SizeInGiB string `json:"sizeInGiB"`
}
+
+// TO-Do: next iteration of Alibaba provider support NetWork Attached Storage(NAS) and Object Storage Service (OSS type PVs).
+// Currently defaulting to cloudDisk with provision to add work in future.
+func NewAlibabaPVAttributes(disk *SlimK8sDisk) *AlibabaPVAttributes {
+	if disk == nil {
+		return nil
+	}
+	return &AlibabaPVAttributes{
+		PVType:             ALIBABA_PV_CLOUD_DISK_TYPE,
+		PVSubType:          disk.DiskType,
+		PVCategory:         disk.DiskCategory,
+		PVPerformanceLevel: disk.PerformanceLevel,
+		SizeInGiB:          disk.SizeInGiB,
+	}
+}
+
// AlibabaPricingDetails holds the price and billing unit for a single Alibaba Cloud product.
// Stage 1 support will be Pay-As-You-Go with HourlyPrice equal to TradePrice with PriceUnit as Hour.
// TO-DO: Subscription and Preemptible support; information can be gathered from describing the instance
// for subscription type, and spot price can be gathered from the DescribeSpotPriceHistory API.
// TO-DO: how would you calculate hourly price for subscription type, is it PRICE_YEARLY/HOURS_IN_THE_YEAR|MONTH?
type AlibabaPricingDetails struct {
	// HourlyPrice represents the hourly price for the given Alibaba Cloud product.
	HourlyPrice float32 `json:"hourlyPrice"`
	// PriceUnit represents the unit in which the Alibaba product is billed; can be Hour, Month or Year based on the billingMethod.
	PriceUnit string `json:"priceUnit"`
	// TradePrice is the original price paid to acquire the Alibaba product.
	TradePrice float32 `json:"tradePrice"`
	// CurrencyCode represents the currency unit of the price for billing the Alibaba product.
	CurrencyCode string `json:"currencyCode"`
}
+
+func NewAlibabaPricingDetails(hourlyPrice float32, priceUnit string, tradePrice float32, currencyCode string) *AlibabaPricingDetails {
+	return &AlibabaPricingDetails{
+		HourlyPrice:  hourlyPrice,
+		PriceUnit:    priceUnit,
+		TradePrice:   tradePrice,
+		CurrencyCode: currencyCode,
+	}
+}
+
// AlibabaPricingTerms pairs a billing method with its pricing details. Supported
// billing methods are Pay-As-You-Go, Subscription and Preemptible.
type AlibabaPricingTerms struct {
	BillingMethod  string                 `json:"billingMethod"`
	PricingDetails *AlibabaPricingDetails `json:"pricingDetails"`
}
+
+func NewAlibabaPricingTerms(billingMethod string, pricingDetails *AlibabaPricingDetails) *AlibabaPricingTerms {
+	return &AlibabaPricingTerms{
+		BillingMethod:  billingMethod,
+		PricingDetails: pricingDetails,
+	}
+}
+
// AlibabaPricing carries the attributes and pricing information for a node or a PV.
// For a node entry, NodeAttributes and Node are populated; for a PV entry,
// PVAttributes and PV are populated. PricingTerms is set in both cases.
type AlibabaPricing struct {
	NodeAttributes *AlibabaNodeAttributes
	PVAttributes   *AlibabaPVAttributes
	PricingTerms   *AlibabaPricingTerms
	Node           *models.Node
	PV             *models.PV
}
+
// Alibaba is Alibaba Cloud's provider struct.
type Alibaba struct {
	// Pricing stores Alibaba Cloud's pricing structs; each key exactly matches
	// node.Features() or the PV features string for easy lookup.
	Pricing map[string]*AlibabaPricing
	// DownloadPricingDataLock guards Pricing for thread safety.
	DownloadPricingDataLock sync.RWMutex
	Clientset               clustercache.ClusterCache
	Config                  models.ProviderConfig
	ServiceAccountChecks    *models.ServiceAccountChecks
	ClusterAccountId        string
	ClusterRegion           string

	// The following fields are unexported to avoid any leak of these secrets.
	// accessKey is the Alibaba access key used by the signer interface to sign API calls.
	accessKey *credentials.AccessKeyCredential
	// clients maps a regionID to the sdk.Client used to call APIs in that region.
	clients map[string]*sdk.Client
}
+
+// GetAlibabaAccessKey return the Access Key used to interact with the Alibaba cloud, if not set it
+// set it first by looking at env variables else load it from secret files.
+func (alibaba *Alibaba) GetAlibabaAccessKey() (*credentials.AccessKeyCredential, error) {
+	if alibaba.accessKeyisLoaded() {
+		return alibaba.accessKey, nil
+	}
+
+	config, err := alibaba.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error getting the default config for Alibaba Cloud provider: %w", err)
+	}
+
+	if config.AlibabaServiceKeyName == "" {
+		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
+	}
+	if config.AlibabaServiceKeySecret == "" {
+		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
+	}
+
+	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
+		log.Debugf("missing service key values for Alibaba cloud integration attempting to use service account integration")
+		err := alibaba.loadAlibabaAuthSecretAndSetEnv(true)
+		if err != nil {
+			return nil, fmt.Errorf("unable to set the Alibaba Cloud key/secret from config file %w", err)
+		}
+		config.AlibabaServiceKeyName = env.GetAlibabaAccessKeyID()
+		config.AlibabaServiceKeySecret = env.GetAlibabaAccessKeySecret()
+	}
+
+	if config.AlibabaServiceKeyName == "" && config.AlibabaServiceKeySecret == "" {
+		return nil, fmt.Errorf("failed to get the access key for the current alibaba account")
+	}
+
+	// At this point either user is using the alibaba key and secret from secret passed in helm config if not he will use the secret that is passed in custom pricing
+	// There's no check at this time for if the custom pricing key and secret is valid and that's on the user else there will be errors recorded.
+	// Key and secret passed in config will supersede key and secret passed while installing Closed source helm chart.
+	alibaba.accessKey = &credentials.AccessKeyCredential{AccessKeyId: config.AlibabaServiceKeyName, AccessKeySecret: config.AlibabaServiceKeySecret}
+
+	return alibaba.accessKey, nil
+}
+
+func (alibaba *Alibaba) GetAlibabaCloudInfo() (*AlibabaInfo, error) {
+	config, err := alibaba.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve AlibabaCloudInfo %s", err)
+	}
+
+	aak, err := alibaba.GetAlibabaAccessKey()
+	if err != nil {
+		return nil, err
+	}
+
+	return &AlibabaInfo{
+		AlibabaClusterRegion:    config.AlibabaClusterRegion,
+		AlibabaServiceKeyName:   aak.AccessKeyId,
+		AlibabaServiceKeySecret: aak.AccessKeySecret,
+		AlibabaAccountID:        config.ProjectID,
+	}, nil
+}
+
+// DownloadPricingData satisfies the provider interface and downloads the prices for Node instances and PVs.
+func (alibaba *Alibaba) DownloadPricingData() error {
+	alibaba.DownloadPricingDataLock.Lock()
+	defer alibaba.DownloadPricingDataLock.Unlock()
+
+	var aak *credentials.AccessKeyCredential
+	var err error
+
+	if !alibaba.accessKeyisLoaded() {
+		aak, err = alibaba.GetAlibabaAccessKey()
+		if err != nil {
+			return fmt.Errorf("unable to get the access key information: %w", err)
+		}
+	} else {
+		aak = alibaba.accessKey
+	}
+
+	c, err := alibaba.Config.GetCustomPricingData()
+	if err != nil {
+		return fmt.Errorf("error downloading default pricing data: %w", err)
+	}
+
+	// Get all the nodes from Alibaba cluster.
+	nodeList := alibaba.Clientset.GetAllNodes()
+
+	var client *sdk.Client
+	var signer *signers.AccessKeySigner
+	var ok bool
+	var lookupKey string
+	alibaba.clients = make(map[string]*sdk.Client)
+	alibaba.Pricing = make(map[string]*AlibabaPricing)
+
+	for _, node := range nodeList {
+		pricingObj := &AlibabaPricing{}
+		slimK8sNode := generateSlimK8sNodeFromV1Node(node)
+
+		if client, ok = alibaba.clients[slimK8sNode.RegionID]; !ok {
+			client, err = sdk.NewClientWithAccessKey(slimK8sNode.RegionID, aak.AccessKeyId, aak.AccessKeySecret)
+			if err != nil {
+				return fmt.Errorf("unable to initiate alibaba cloud sdk client for region %s : %w", slimK8sNode.RegionID, err)
+			}
+			alibaba.clients[slimK8sNode.RegionID] = client
+		}
+		signer = signers.NewAccessKeySigner(aak)
+
+		// Adjust the system Disk information of a Node by retrieving the details of associated disk. If unable to retrieve set it to empty
+		// system disk to pass through and use defaults with Alibaba pricing API.
+		instanceID := getInstanceIDFromProviderID(slimK8sNode.ProviderID)
+		slimK8sNode.SystemDisk = getSystemDiskInfoOfANode(instanceID, slimK8sNode.RegionID, client, signer)
+
+		lookupKey, err = determineKeyForPricing(slimK8sNode)
+		if _, ok := alibaba.Pricing[lookupKey]; ok {
+			log.Debugf("Pricing information for node with same features %s already exists hence skipping", lookupKey)
+			continue
+		}
+
+		pricingObj, err = processDescribePriceAndCreateAlibabaPricing(client, slimK8sNode, signer, c)
+
+		if err != nil {
+			return fmt.Errorf("failed to create pricing information for node with type %s with error: %w", slimK8sNode.InstanceType, err)
+		}
+		alibaba.Pricing[lookupKey] = pricingObj
+	}
+
+	// set the first occurrence of region from the node
+	if alibaba.ClusterRegion == "" {
+		for _, node := range nodeList {
+			if regionID, ok := node.Labels["topology.kubernetes.io/region"]; ok {
+				alibaba.ClusterRegion = regionID
+				break
+			}
+		}
+	}
+
+	// PV pricing for only Cloud Disk for now.
+	// TO-DO: Support both NAS(Network Attached storage) and OSS(Object Storage Service) type PVs
+
+	pvList := alibaba.Clientset.GetAllPersistentVolumes()
+
+	for _, pv := range pvList {
+		pvRegion := determinePVRegion(pv)
+		if pvRegion == "" {
+			pvRegion = alibaba.ClusterRegion
+		}
+		pricingObj := &AlibabaPricing{}
+		slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, pvRegion)
+		lookupKey, err = determineKeyForPricing(slimK8sDisk)
+		if _, ok := alibaba.Pricing[lookupKey]; ok {
+			log.Debugf("Pricing information for pv with same features %s already exists hence skipping", lookupKey)
+			continue
+		}
+		if client, ok = alibaba.clients[slimK8sDisk.RegionID]; !ok {
+			client, err = sdk.NewClientWithAccessKey(slimK8sDisk.RegionID, aak.AccessKeyId, aak.AccessKeySecret)
+			if err != nil {
+				return fmt.Errorf("unable to initiate alibaba cloud sdk client for region %s : %w", slimK8sDisk.RegionID, err)
+			}
+			alibaba.clients[slimK8sDisk.RegionID] = client
+		}
+		signer = signers.NewAccessKeySigner(aak)
+		pricingObj, err = processDescribePriceAndCreateAlibabaPricing(client, slimK8sDisk, signer, c)
+		if err != nil {
+			return fmt.Errorf("failed to create pricing information for pv with category %s with error: %w", slimK8sDisk.DiskCategory, err)
+		}
+		alibaba.Pricing[lookupKey] = pricingObj
+	}
+
+	return nil
+}
+
+// AllNodePricing returns all the pricing data for all nodes and pvs
+func (alibaba *Alibaba) AllNodePricing() (interface{}, error) {
+	alibaba.DownloadPricingDataLock.RLock()
+	defer alibaba.DownloadPricingDataLock.RUnlock()
+	return alibaba.Pricing, nil
+}
+
+// NodePricing gives pricing information of a specific node given by the key
+func (alibaba *Alibaba) NodePricing(key models.Key) (*models.Node, error) {
+	alibaba.DownloadPricingDataLock.RLock()
+	defer alibaba.DownloadPricingDataLock.RUnlock()
+
+	// Get node features for the key
+	keyFeature := key.Features()
+
+	pricing, ok := alibaba.Pricing[keyFeature]
+	if !ok {
+		log.Errorf("Node pricing information not found for node with feature: %s", keyFeature)
+		return nil, fmt.Errorf("Node pricing information not found for node with feature: %s letting it use default values", keyFeature)
+	}
+
+	log.Debugf("returning the node price for the node with feature: %s", keyFeature)
+	returnNode := pricing.Node
+
+	return returnNode, nil
+}
+
+// PVPricing gives a pricing information of a specific PV given by PVkey
+func (alibaba *Alibaba) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	alibaba.DownloadPricingDataLock.RLock()
+	defer alibaba.DownloadPricingDataLock.RUnlock()
+
+	keyFeature := pvk.Features()
+
+	pricing, ok := alibaba.Pricing[keyFeature]
+
+	if !ok {
+		log.Errorf("Persistent Volume pricing not found for PV with feature: %s", keyFeature)
+		return nil, fmt.Errorf("Persistent Volume pricing not found for PV with feature: %s letting it use default values", keyFeature)
+	}
+
+	log.Debugf("returning the PV price for the node with feature: %s", keyFeature)
+	return pricing.PV, nil
+}
+
+// Inter zone and Inter region network cost are defaulted based on https://www.alibabacloud.com/help/en/cloud-data-transmission/latest/cross-region-data-transfers
+// Internet cost is default based on https://www.alibabacloud.com/help/en/elastic-compute-service/latest/public-bandwidth to $0.123
+func (alibaba *Alibaba) NetworkPricing() (*models.Network, error) {
+	cpricing, err := alibaba.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	znec, err := strconv.ParseFloat(cpricing.ZoneNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	rnec, err := strconv.ParseFloat(cpricing.RegionNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	inec, err := strconv.ParseFloat(cpricing.InternetNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &models.Network{
+		ZoneNetworkEgressCost:     znec,
+		RegionNetworkEgressCost:   rnec,
+		InternetNetworkEgressCost: inec,
+	}, nil
+}
+
+// Alibaba loadbalancer has three different types https://www.alibabacloud.com/product/server-load-balancer,
+// defaulted price to classic load balancer https://www.alibabacloud.com/help/en/server-load-balancer/latest/pay-as-you-go.
+func (alibaba *Alibaba) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	cpricing, err := alibaba.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	lbPricing, err := strconv.ParseFloat(cpricing.DefaultLBPrice, 64)
+	if err != nil {
+		return nil, err
+	}
+	return &models.LoadBalancer{
+		Cost: lbPricing,
+	}, nil
+}
+
+func (alibaba *Alibaba) GetConfig() (*models.CustomPricing, error) {
+	c, err := alibaba.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	if c.Discount == "" {
+		c.Discount = "0%"
+	}
+	if c.NegotiatedDiscount == "" {
+		c.NegotiatedDiscount = "0%"
+	}
+	if c.ShareTenancyCosts == "" {
+		c.ShareTenancyCosts = models.DefaultShareTenancyCost
+	}
+
+	return c, nil
+}
+
+// Load once and cache the result (even on failure). This is an install time secret, so
+// we don't expect the secret to change. If it does, however, we can force reload using
+// the input parameter.
+func (alibaba *Alibaba) loadAlibabaAuthSecretAndSetEnv(force bool) error {
+	if !force && alibaba.accessKeyisLoaded() {
+		return nil
+	}
+
+	exists, err := fileutil.FileExists(models.AuthSecretPath)
+	if !exists || err != nil {
+		return fmt.Errorf("failed to locate service account file: %s with err: %w", models.AuthSecretPath, err)
+	}
+
+	result, err := os.ReadFile(models.AuthSecretPath)
+	if err != nil {
+		return fmt.Errorf("failed to read service account file: %s with err: %w", models.AuthSecretPath, err)
+	}
+
+	var ak *AlibabaAccessKey
+	err = json.Unmarshal(result, &ak)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshall access key id and access key secret with err: %w", err)
+	}
+
+	err = env.Set(env.AlibabaAccessKeyIDEnvVar, ak.AccessKeyID)
+	if err != nil {
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeyIDEnvVar, err)
+	}
+	err = env.Set(env.AlibabaAccessKeySecretEnvVar, ak.SecretAccessKey)
+	if err != nil {
+		return fmt.Errorf("failed to set environment variable: %s with err: %w", env.AlibabaAccessKeySecretEnvVar, err)
+	}
+
+	alibaba.accessKey = &credentials.AccessKeyCredential{
+		AccessKeyId:     ak.AccessKeyID,
+		AccessKeySecret: ak.SecretAccessKey,
+	}
+	return nil
+}
+
+// Regions returns a current supported list of Alibaba regions
+func (alibaba *Alibaba) Regions() []string {
+
+	regionOverrides := env.GetRegionOverrideList()
+
+	if len(regionOverrides) > 0 {
+		log.Debugf("Overriding Alibaba regions with configured region list: %+v", regionOverrides)
+		return regionOverrides
+	}
+
+	return alibabaRegions
+}
+
+// ClusterInfo returns information about Alibaba Cloud cluster, as provided by metadata.
+func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
+
+	c, err := alibaba.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to getConfig with err: %w", err)
+	}
+
+	var clusterName string
+	if c.ClusterName != "" {
+		clusterName = c.ClusterName
+	}
+
+	// Set it to environment clusterID if not set at this point
+	if clusterName == "" {
+		clusterName = env.GetClusterID()
+	}
+
+	m := make(map[string]string)
+	m["name"] = clusterName
+	m["provider"] = kubecost.AlibabaProvider
+	m["project"] = alibaba.ClusterAccountId
+	m["region"] = alibaba.ClusterRegion
+	m["id"] = env.GetClusterID()
+	return m, nil
+}
+
// GetAddresses is not implemented for Alibaba yet and returns (nil, nil).
// Will look at this in next PR if needed.
func (alibaba *Alibaba) GetAddresses() ([]byte, error) {
	return nil, nil
}
+
// GetDisks is not implemented for Alibaba yet and returns (nil, nil).
// Will look at this in next PR if needed.
func (alibaba *Alibaba) GetDisks() ([]byte, error) {
	return nil, nil
}
+
// GetOrphanedResources is not implemented for Alibaba and always returns an error.
func (alibaba *Alibaba) GetOrphanedResources() ([]models.OrphanedResource, error) {
	return nil, errors.New("not implemented")
}
+
+func (alibaba *Alibaba) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
+	return alibaba.Config.Update(func(c *models.CustomPricing) error {
+		if updateType != "" {
+			return fmt.Errorf("UpdateConfig for Alibaba Provider doesn't support updateType %s at this time", updateType)
+
+		} else {
+			a := make(map[string]interface{})
+			err := json.NewDecoder(r).Decode(&a)
+			if err != nil {
+				return err
+			}
+			for k, v := range a {
+				kUpper := utils.ToTitle.String(k) // Just so we consistently supply / receive the same values, uppercase the first letter.
+				vstr, ok := v.(string)
+				if ok {
+					err := models.SetCustomPricingField(c, kUpper, vstr)
+					if err != nil {
+						return err
+					}
+				} else {
+					return fmt.Errorf("type error while updating config for %s", kUpper)
+				}
+			}
+		}
+
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
// UpdateConfigFromConfigMap applies the given config-map values to the custom pricing config.
func (alibaba *Alibaba) UpdateConfigFromConfigMap(cm map[string]string) (*models.CustomPricing, error) {
	return alibaba.Config.UpdateFromMap(cm)
}
+
// GetManagementPlatform is not implemented for Alibaba yet and returns an empty string.
// Will look at this in next PR if needed.
func (alibaba *Alibaba) GetManagementPlatform() (string, error) {
	return "", nil
}
+
// GetLocalStorageQuery is not implemented for Alibaba yet and returns an empty query.
// Will look at this in next PR if needed.
func (alibaba *Alibaba) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
	return ""
}
+
// ApplyReservedInstancePricing is a no-op: reserved-instance pricing is not
// supported for Alibaba yet. Will look at this in next PR if needed.
func (alibaba *Alibaba) ApplyReservedInstancePricing(nodes map[string]*models.Node) {

}
+
// ServiceAccountStatus returns an empty status; service-account checks are not
// implemented for Alibaba yet. Will look at this in next PR if needed.
func (alibaba *Alibaba) ServiceAccountStatus() *models.ServiceAccountStatus {
	return &models.ServiceAccountStatus{}
}
+
// PricingSourceStatus returns an empty map; pricing-source status reporting is
// not implemented for Alibaba yet. Will look at this in next PR if needed.
func (alibaba *Alibaba) PricingSourceStatus() map[string]*models.PricingSource {
	return map[string]*models.PricingSource{}
}
+
// ClusterManagementPricing is not implemented for Alibaba yet and returns zero cost.
// Will look at this in next PR if needed.
func (alibaba *Alibaba) ClusterManagementPricing() (string, float64, error) {
	return "", 0.0, nil
}
+
// CombinedDiscountForNode is not implemented for Alibaba yet and returns 0.
// Will look at this in next PR if needed.
func (alibaba *Alibaba) CombinedDiscountForNode(string, bool, float64, float64) float64 {
	return 0.0
}
+
+func (alibaba *Alibaba) accessKeyisLoaded() bool {
+	if alibaba.accessKey == nil {
+		return false
+	}
+	if alibaba.accessKey.AccessKeyId == "" {
+		return false
+	}
+	if alibaba.accessKey.AccessKeySecret == "" {
+		return false
+	}
+	return true
+}
+
// AlibabaNodeKey is the models.Key implementation for Alibaba nodes; its fields
// are joined (see Features) to form the pricing lookup key.
type AlibabaNodeKey struct {
	ProviderID                 string
	RegionID                   string
	InstanceType               string
	OSType                     string
	OptimizedKeyword           string // If IsIoOptimized is true, the word "optimize" is used in the node key; otherwise "nonoptimize".
	SystemDiskCategory         string
	SystemDiskSizeInGiB        string
	SystemDiskPerformanceLevel string
}
+
+func NewAlibabaNodeKey(node *SlimK8sNode, optimizedKeyword, systemDiskCategory, systemDiskSizeInGiB, systemDiskPerfromanceLevel string) *AlibabaNodeKey {
+	var providerID, regionID, instanceType, osType string
+	if node != nil {
+		providerID = node.ProviderID
+		regionID = node.RegionID
+		instanceType = node.InstanceType
+		osType = node.OSType
+	}
+	return &AlibabaNodeKey{
+		ProviderID:                 providerID,
+		RegionID:                   regionID,
+		InstanceType:               instanceType,
+		OSType:                     osType,
+		OptimizedKeyword:           optimizedKeyword,
+		SystemDiskCategory:         systemDiskCategory,
+		SystemDiskSizeInGiB:        systemDiskSizeInGiB,
+		SystemDiskPerformanceLevel: systemDiskPerfromanceLevel,
+	}
+}
+
// ID returns the node's provider ID.
func (alibabaNodeKey *AlibabaNodeKey) ID() string {
	return alibabaNodeKey.ProviderID
}
+
+func (alibabaNodeKey *AlibabaNodeKey) Features() string {
+	keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{alibabaNodeKey.RegionID, alibabaNodeKey.InstanceType, alibabaNodeKey.OSType,
+		alibabaNodeKey.OptimizedKeyword, alibabaNodeKey.SystemDiskCategory, alibabaNodeKey.SystemDiskSizeInGiB, alibabaNodeKey.SystemDiskPerformanceLevel})
+	return strings.Join(keyLookup, "::")
+}
+
// GPUType always returns empty; GPU information is not tracked in the node key.
func (alibabaNodeKey *AlibabaNodeKey) GPUType() string {
	return ""
}
+
// GPUCount always returns 0; GPU information is not tracked in the node key.
func (alibabaNodeKey *AlibabaNodeKey) GPUCount() int {
	return 0
}
+
+// Get's the key for the k8s node input
+func (alibaba *Alibaba) GetKey(mapValue map[string]string, node *v1.Node) models.Key {
+	slimK8sNode := generateSlimK8sNodeFromV1Node(node)
+
+	var aak *credentials.AccessKeyCredential
+	var err error
+	var ok bool
+	var client *sdk.Client
+	var signer *signers.AccessKeySigner
+
+	optimizedKeyword := ""
+	if slimK8sNode.IsIoOptimized {
+		optimizedKeyword = ALIBABA_OPTIMIZE_KEYWORD
+	} else {
+		optimizedKeyword = ALIBABA_NON_OPTIMIZE_KEYWORD
+	}
+
+	var diskCategory, diskSizeInGiB, diskPerformanceLevel string
+
+	if !alibaba.accessKeyisLoaded() {
+		aak, err = alibaba.GetAlibabaAccessKey()
+		if err != nil {
+			log.Warnf("unable to set the signer for node with providerID %s to retrieve the key skipping SystemDisk Retrieval with err: %v", slimK8sNode.ProviderID, err)
+			return NewAlibabaNodeKey(slimK8sNode, optimizedKeyword, diskCategory, diskSizeInGiB, diskPerformanceLevel)
+		}
+	} else {
+		aak = alibaba.accessKey
+	}
+
+	signer = signers.NewAccessKeySigner(aak)
+
+	if aak == nil {
+		log.Warnf("unable to retrieve the Alibaba API keys for node with providerID %s hence skipping SystemDisk Retrieval", slimK8sNode.ProviderID)
+		return NewAlibabaNodeKey(slimK8sNode, optimizedKeyword, diskCategory, diskSizeInGiB, diskPerformanceLevel)
+	}
+
+	if client, ok = alibaba.clients[slimK8sNode.RegionID]; !ok {
+		client, err = sdk.NewClientWithAccessKey(slimK8sNode.RegionID, aak.AccessKeyId, aak.AccessKeySecret)
+		if err != nil {
+			log.Warnf("unable to set the client  for node with providerID %s to retrieve the key skipping SystemDisk Retrieval with err: %v", slimK8sNode.ProviderID, err)
+			return NewAlibabaNodeKey(slimK8sNode, optimizedKeyword, diskCategory, diskSizeInGiB, diskPerformanceLevel)
+		}
+		alibaba.clients[slimK8sNode.RegionID] = client
+	}
+
+	instanceID := getInstanceIDFromProviderID(slimK8sNode.ProviderID)
+	slimK8sNode.SystemDisk = getSystemDiskInfoOfANode(instanceID, slimK8sNode.RegionID, client, signer)
+
+	if slimK8sNode.SystemDisk != nil {
+		diskCategory = slimK8sNode.SystemDisk.DiskCategory
+		diskSizeInGiB = slimK8sNode.SystemDisk.SizeInGiB
+		diskPerformanceLevel = slimK8sNode.SystemDisk.PerformanceLevel
+	}
+	return NewAlibabaNodeKey(slimK8sNode, optimizedKeyword, diskCategory, diskSizeInGiB, diskPerformanceLevel)
+}
+
// AlibabaPVKey is the models.PVKey implementation for Alibaba persistent volumes.
type AlibabaPVKey struct {
	ProviderID string
	RegionID   string
	PVType     string
	PVSubType  string
	PVCategory string
	// NOTE(review): the field name is misspelled ("Performace") but it is
	// exported; renaming would be a breaking change for any external user.
	PVPerformaceLevel string
	StorageClassName  string
	SizeInGiB         string
}
+
+func (alibaba *Alibaba) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	regionID := defaultRegion
+	// If default Region is not passed default it to cluster region ID.
+	if defaultRegion == "" {
+		regionID = alibaba.ClusterRegion
+	}
+	slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, defaultRegion)
+	return &AlibabaPVKey{
+		ProviderID:        slimK8sDisk.ProviderID,
+		RegionID:          regionID,
+		PVType:            ALIBABA_PV_CLOUD_DISK_TYPE,
+		PVSubType:         slimK8sDisk.DiskType,
+		PVCategory:        slimK8sDisk.DiskCategory,
+		PVPerformaceLevel: slimK8sDisk.PerformanceLevel,
+		StorageClassName:  pv.Spec.StorageClassName,
+		SizeInGiB:         slimK8sDisk.SizeInGiB,
+	}
+}
+
+func (alibabaPVKey *AlibabaPVKey) Features() string {
+	keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{alibabaPVKey.RegionID, alibabaPVKey.PVSubType, alibabaPVKey.PVCategory, alibabaPVKey.PVPerformaceLevel, alibabaPVKey.SizeInGiB})
+	return strings.Join(keyLookup, "::")
+}
+
// ID returns the PV's provider ID.
func (alibabaPVKey *AlibabaPVKey) ID() string {
	return alibabaPVKey.ProviderID
}
+
// GetStorageClass returns the storage class name recorded for the PV.
func (alibabaPVKey *AlibabaPVKey) GetStorageClass() string {
	return alibabaPVKey.StorageClassName
}
+
+// Helper functions for alibabaprovider.go
+
+// createDescribePriceACSRequest creates the HTTP GET request for the required resources' Price information,
+// When supporting subscription and Premptible resources this HTTP call needs to be modified with PriceUnit information
+// When supporting different new type of instances like Compute Optimized, Memory Optimized etc make sure you add the instance type
+// in unit test and check if it works or not to create the ack request and processDescribePriceAndCreateAlibabaPricing function
+// else more parameters need to be pulled from kubernetes node response or gather information from elsewhere and function modified.
+func createDescribePriceACSRequest(i interface{}) (*requests.CommonRequest, error) {
+	request := requests.NewCommonRequest()
+	request.Method = requests.GET
+	request.Product = ALIBABA_ECS_PRODUCT_CODE
+	request.Domain = ALIBABA_ECS_DOMAIN
+	request.Version = ALIBABA_ECS_VERSION
+	request.Scheme = requests.HTTPS
+	request.ApiName = ALIBABA_DESCRIBE_PRICE_API_ACTION
+	switch i.(type) {
+	case *SlimK8sNode:
+		node := i.(*SlimK8sNode)
+		request.QueryParams["RegionId"] = node.RegionID
+		request.QueryParams["ResourceType"] = ALIBABA_INSTANCE_RESOURCE_TYPE
+		request.QueryParams["InstanceType"] = node.InstanceType
+		request.QueryParams["PriceUnit"] = node.PriceUnit
+		if node.SystemDisk != nil {
+			// Only if the required information is present it should be overridden else default it via the API
+			if node.SystemDisk.DiskCategory != "" {
+				request.QueryParams["SystemDisk.Category"] = node.SystemDisk.DiskCategory
+			}
+			if node.SystemDisk.SizeInGiB != "" {
+				request.QueryParams["SystemDisk.Size"] = node.SystemDisk.SizeInGiB
+			}
+			if node.SystemDisk.PerformanceLevel != "" {
+				request.QueryParams["SystemDisk.PerformanceLevel"] = node.SystemDisk.PerformanceLevel
+			}
+		} else {
+			// When System Disk information is not available for instance family g6e, r7 and r6e the defaults in
+			// DescribePrice dont default rightly to cloud_essd for these instances.
+			if slices.Contains(alibabaDefaultToCloudEssd, node.InstanceTypeFamily) {
+				request.QueryParams["SystemDisk.Category"] = ALIBABA_DISK_CLOUD_ESSD_CATEGORY
+			}
+		}
+		request.TransToAcsRequest()
+		return request, nil
+	case *SlimK8sDisk:
+		disk := i.(*SlimK8sDisk)
+		request.QueryParams["RegionId"] = disk.RegionID
+		request.QueryParams["PriceUnit"] = disk.PriceUnit
+		request.QueryParams["ResourceType"] = ALIBABA_DISK_RESOURCE_TYPE
+		request.QueryParams[fmt.Sprintf("%s.%d.Size", ALIBABA_DATA_DISK_PREFIX, 1)] = disk.SizeInGiB
+		request.QueryParams[fmt.Sprintf("%s.%d.Category", ALIBABA_DATA_DISK_PREFIX, 1)] = disk.DiskCategory
+		// Performance level defaults to PL1 if not present in volume attribute.
+		if disk.PerformanceLevel != "" {
+			request.QueryParams[fmt.Sprintf("%s.%d.PerformanceLevel", ALIBABA_DATA_DISK_PREFIX, 1)] = disk.PerformanceLevel
+		}
+		request.TransToAcsRequest()
+		return request, nil
+	default:
+		return nil, fmt.Errorf("unsupported ECS type (%T) for DescribePrice at this time", i)
+	}
+}
+
+// createDescribeDisksCSRequest creates the HTTP GET Request to map the system disk to the InstanceID
+func createDescribeDisksACSRequest(instanceID, regionID, diskType string) (*requests.CommonRequest, error) {
+	request := requests.NewCommonRequest()
+	request.Method = requests.GET
+	request.Product = ALIBABA_ECS_PRODUCT_CODE
+	request.Domain = ALIBABA_ECS_DOMAIN
+	request.Version = ALIBABA_ECS_VERSION
+	request.Scheme = requests.HTTPS
+	request.ApiName = ALIBABA_DESCRIBE_DISK_API_ACTION
+	request.QueryParams["RegionId"] = regionID
+	request.QueryParams["InstanceId"] = instanceID
+	request.QueryParams["DiskType"] = diskType
+	request.TransToAcsRequest()
+	return request, nil
+}
+
+// determineKeyForPricing generate a unique key from SlimK8sNode object that is constructed from v1.Node object and
+// SlimK8sDisk that is constructed from v1.PersistentVolume.
+func determineKeyForPricing(i interface{}) (string, error) {
+	if i == nil {
+		return "", fmt.Errorf("nil component passed to determine key")
+	}
+	switch i.(type) {
+	case *SlimK8sNode:
+		node := i.(*SlimK8sNode)
+		var diskCategory, diskSizeInGiB, diskPerformanceLevel string
+		if node.SystemDisk != nil {
+			diskCategory = node.SystemDisk.DiskCategory
+			diskSizeInGiB = node.SystemDisk.SizeInGiB
+			diskPerformanceLevel = node.SystemDisk.PerformanceLevel
+		}
+		if node.IsIoOptimized {
+			keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{node.RegionID, node.InstanceType, node.OSType, ALIBABA_OPTIMIZE_KEYWORD, diskCategory, diskSizeInGiB, diskPerformanceLevel})
+			return strings.Join(keyLookup, "::"), nil
+		} else {
+			keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{node.RegionID, node.InstanceType, node.OSType, ALIBABA_NON_OPTIMIZE_KEYWORD, diskCategory, diskSizeInGiB, diskPerformanceLevel})
+			return strings.Join(keyLookup, "::"), nil
+		}
+	case *SlimK8sDisk:
+		disk := i.(*SlimK8sDisk)
+		keyLookup := stringutil.DeleteEmptyStringsFromArray([]string{disk.RegionID, disk.DiskType, disk.DiskCategory, disk.PerformanceLevel, disk.SizeInGiB})
+		return strings.Join(keyLookup, "::"), nil
+	default:
+		return "", fmt.Errorf("unsupported ECS type (%T) at this time", i)
+	}
+}
+
// Below structs are used to unmarshal the JSON response of Alibaba Cloud's DescribePrice API.

// Price is the price element of a DescribePrice response.
type Price struct {
	OriginalPrice             float32 `json:"OriginalPrice"`
	ReservedInstanceHourPrice float32 `json:"ReservedInstanceHourPrice"`
	DiscountPrice             float32 `json:"DiscountPrice"`
	Currency                  string  `json:"Currency"`
	TradePrice                float32 `json:"TradePrice"`
}
+
// PriceInfo wraps the Price element of a DescribePrice response.
type PriceInfo struct {
	Price Price `json:"Price"`
}
+
// DescribePriceResponse is the top-level JSON body returned by the DescribePrice API.
type DescribePriceResponse struct {
	RequestId string    `json:"RequestId"`
	PriceInfo PriceInfo `json:"PriceInfo"`
}
+
+// processDescribePriceAndCreateAlibabaPricing processes the DescribePrice API and generates the pricing information for alibaba node resource and alibaba pv resource that's backed by cloud disk.
+func processDescribePriceAndCreateAlibabaPricing(client *sdk.Client, i interface{}, signer *signers.AccessKeySigner, custom *models.CustomPricing) (pricing *AlibabaPricing, err error) {
+	pricing = &AlibabaPricing{}
+	var response DescribePriceResponse
+
+	if i == nil {
+		return nil, fmt.Errorf("nil component passed to process the pricing information")
+	}
+	switch i.(type) {
+	case *SlimK8sNode:
+		node := i.(*SlimK8sNode)
+		req, err := createDescribePriceACSRequest(node)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.ProcessCommonRequestWithSigner(req, signer)
+		pricing.NodeAttributes = NewAlibabaNodeAttributes(node)
+		if err != nil || resp.GetHttpStatus() != 200 {
+			// Can be defaulted to some value here?
+			return nil, fmt.Errorf("unable to fetch information for node with InstanceType: %v", node.InstanceType)
+		} else {
+			// This is where population of Pricing happens
+			err = json.Unmarshal(resp.GetHttpContentBytes(), &response)
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshall json response to custom struct with err: %w", err)
+			}
+			// TO-DO : Ask in PR How to get the defaults is it equal to AWS/GCP defaults? And what needs to be returned
+			pricing.Node = &models.Node{
+				Cost:         fmt.Sprintf("%f", response.PriceInfo.Price.TradePrice),
+				BaseCPUPrice: custom.CPU,
+				BaseRAMPrice: custom.RAM,
+				BaseGPUPrice: custom.GPU,
+			}
+			// TO-DO : Currently with Pay-As-You-go Offering TradePrice = HourlyPrice , When support happens to other type HourlyPrice Need to be determined.
+			pricing.PricingTerms = NewAlibabaPricingTerms(ALIBABA_PAY_AS_YOU_GO_BILLING, NewAlibabaPricingDetails(response.PriceInfo.Price.TradePrice, ALIBABA_HOUR_PRICE_UNIT, response.PriceInfo.Price.TradePrice, response.PriceInfo.Price.Currency))
+		}
+	case *SlimK8sDisk:
+		disk := i.(*SlimK8sDisk)
+		req, err := createDescribePriceACSRequest(disk)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.ProcessCommonRequestWithSigner(req, signer)
+		if err != nil || resp.GetHttpStatus() != 200 {
+			return nil, fmt.Errorf("unable to fetch information for disk with DiskType: %v with err: %w", disk.DiskCategory, err)
+		} else {
+			// This is where population of Pricing happens
+			err = json.Unmarshal(resp.GetHttpContentBytes(), &response)
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshall json response to custom struct with err: %w", err)
+			}
+			pricing.PVAttributes = NewAlibabaPVAttributes(disk)
+			pricing.PV = &models.PV{
+				Cost: fmt.Sprintf("%f", response.PriceInfo.Price.TradePrice),
+			}
+			// TO-DO : Disk has support for Hour and Month but pricing API is failing for month for disk(Research why?) and same challenge as node pricing no prepaid/postpaid distinction in v1.PersistentVolume object have to look at APIs for th information.
+			pricing.PricingTerms = NewAlibabaPricingTerms(ALIBABA_PAY_AS_YOU_GO_BILLING, NewAlibabaPricingDetails(response.PriceInfo.Price.TradePrice, ALIBABA_HOUR_PRICE_UNIT, response.PriceInfo.Price.TradePrice, response.PriceInfo.Price.Currency))
+		}
+	default:
+		return nil, fmt.Errorf("unsupported ECS Pricing component of type (%T) at this time", i)
+	}
+
+	return pricing, nil
+}
+
+// This function is to get the InstanceFamily from the InstanceType , convention followed in
+// instance type is ecs.[FamilyName].[DifferentSize], it gets the familyName , if it is unable to get it
+// it lists the instance family name as Unknown.
+func getInstanceFamilyFromType(instanceType string) string {
+	splitinstanceType := strings.Split(instanceType, ".")
+	if len(splitinstanceType) != 3 {
+		log.Warnf("unable to find the family of the instance type %s, returning its family type unknown", instanceType)
+		return ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE
+	}
+	if !slices.Contains(alibabaInstanceFamilies, splitinstanceType[1]) {
+		log.Warnf("currently the instance family type %s is not valid or not tested completely for pricing API", instanceType)
+		return ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE
+	}
+	return splitinstanceType[1]
+}
+
// getInstanceIDFromProviderID returns the instance ID associated with a Node. A *v1.Node
// providerID in Alibaba Cloud has the form <REGION-ID>.<INSTANCE-ID>; this returns the
// second dot-separated segment, or the empty string when the providerID cannot be parsed.
func getInstanceIDFromProviderID(providerID string) string {
	// SplitN(…, 3) keeps only the first two separators relevant; segment [1] is the instance ID.
	parts := strings.SplitN(providerID, ".", 3)
	if len(parts) >= 2 {
		return parts[1]
	}
	return ""
}
+
// Disk models a single Disk element of the DescribeDisks API response.
type Disk struct {
	Category         string `json:"Category"`         // disk category, e.g. cloud, cloud_efficiency, cloud_ssd, cloud_essd
	Size             int    `json:"Size"`             // disk size in GiB
	PerformanceLevel string `json:"PerformanceLevel"` // performance level for ESSD disks (e.g. PL1, PL2)
	Type             string `json:"Type"`             // disk type, e.g. system or data
	RegionId         string `json:"RegionId"`
	DiskId           string `json:"DiskId"`
	DiskChargeType   string `json:"DiskChargeType"` // billing method; see TO-DO in getSystemDiskInfoOfANode
}

// Disks wraps the list of Disk elements in a DescribeDisks response.
type Disks struct {
	Disk []*Disk `json:"Disk"`
}

// DescribeDiskResponse is the top-level DescribeDisks API response envelope.
type DescribeDiskResponse struct {
	TotalCount int    `json:"TotalCount"`
	Disks      *Disks `json:"Disks"`
}
+
+// getSystemDiskInfoOfANode gets the relevant System disk information associated with the Node given by the instanceID
+// in form of a SlimK8sDisk with only relevant information that can adjust the node pricing. If any error occurs return
+// an empty disk to not impact any default set at the price retrieval of the node.
+func getSystemDiskInfoOfANode(instanceID, regionID string, client *sdk.Client, signer *signers.AccessKeySigner) (systemDisk *SlimK8sDisk) {
+	systemDisk = &SlimK8sDisk{}
+	var response DescribeDiskResponse
+	// if instanceID is empty string return an empty k8s
+	if instanceID == "" {
+		return
+	}
+	req, err := createDescribeDisksACSRequest(instanceID, regionID, ALIBABA_SYSTEM_DISK_CATEGORY)
+	// if any error occurs return an empty disk to not impact default pricing.
+	if err != nil {
+		log.Warnf("Unable to create Describe Disk Request with err: %v for node with InstanceID: %s, hence defaulting it to an empty system disk to pass through to defaults", err, instanceID)
+		return
+	}
+
+	resp, err := client.ProcessCommonRequestWithSigner(req, signer)
+	if err != nil || resp.GetHttpStatus() != 200 {
+		log.Warnf("Unable to process Describe Disk request with err: %v and errcode: %d for the node with InstanceID: %s, hence defaulting it to an empty system disk to pass through to defaults", err, resp.GetHttpStatus(), instanceID)
+		return
+	} else {
+		// This is where population of Pricing happens
+		err = json.Unmarshal(resp.GetHttpContentBytes(), &response)
+		if err != nil {
+			log.Warnf("Unable to unmarshall Describe Disk response with err: %v for the node with InstanceID: %s, hence defaulting it to an empty system disk to pass through to defaults", err, instanceID)
+			return
+		}
+		// Every instance should only have one system disk per Alibaba Cloud documentation https://www.alibabacloud.com/help/en/elastic-compute-service/latest/block-storage-overview-disks,
+		// if TotalCount is not 1 just return empty and let it not impact default pricing.
+		if response.TotalCount != 1 {
+			log.Warnf("Total count of system disk for node with InstanceID: %s is not 1, hence defaulting it to an empty system disk to pass through to defaults", instanceID)
+			return
+		}
+
+		if response.Disks == nil {
+			log.Warnf("Disks information missing for node with InstanceID: %s, hence defaulting it to an empty system disk to pass through to defaults", instanceID)
+			return
+		}
+
+		if len(response.Disks.Disk) < 1 {
+			log.Warnf("Total number of system disk for node with InstanceID: %s is less than 1, hence defaulting it to an empty system disk to pass through to defaults", instanceID)
+			return
+		}
+
+		// TO-DO: When supporting Subscription type disk, you can leverge the disk.DiskChargeType here to map it to subscription type.
+		systemDisk := response.Disks.Disk[0]
+		return NewSlimK8sDisk(systemDisk.Type, systemDisk.RegionId, ALIBABA_HOUR_PRICE_UNIT, systemDisk.Category, systemDisk.PerformanceLevel, systemDisk.DiskId, "", fmt.Sprintf("%d", systemDisk.Size))
+	}
+}
+
+// generateSlimK8sNodeFromV1Node generates SlimK8sNode struct from v1.Node to fetch pricing information and call alibaba API.
+func generateSlimK8sNodeFromV1Node(node *v1.Node) *SlimK8sNode {
+	var regionID, osType, instanceType, providerID, priceUnit, instanceFamily string
+	var memorySizeInKiB string // TO-DO: try to convert it into float
+	var ok, IsIoOptimized bool
+	if regionID, ok = node.Labels["topology.kubernetes.io/region"]; !ok {
+		// HIGHLY UNLIKELY THAT THIS LABEL WONT BE THERE.
+		log.Debugf("No RegionID label for the node: %s", node.Name)
+	}
+	if osType, ok = node.Labels["beta.kubernetes.io/os"]; !ok {
+		// HIGHLY UNLIKELY THAT THIS LABEL WONT BE THERE.
+		log.Debugf("OS type undetected for the node: %s", node.Name)
+	}
+	if instanceType, ok = node.Labels["node.kubernetes.io/instance-type"]; !ok {
+		// HIGHLY UNLIKELY THAT THIS LABEL WONT BE THERE.
+		log.Debugf("Instance Type undetected for the node: %s", node.Name)
+	}
+
+	instanceFamily = getInstanceFamilyFromType(instanceType)
+	memorySizeInKiB = fmt.Sprintf("%s", node.Status.Capacity.Memory())
+	providerID = node.Spec.ProviderID // Alibaba Cloud provider doesnt follow convention of prefix with cloud provider name
+
+	// Looking at current Instance offering , all of the Instances seem to be I/O optimized - https://www.alibabacloud.com/help/en/elastic-compute-service/latest/instance-family
+	// Basic price Json has it as part of the key so defaulting to true.
+	IsIoOptimized = true
+	priceUnit = ALIBABA_HOUR_PRICE_UNIT
+
+	systemDisk := &SlimK8sDisk{}
+	return NewSlimK8sNode(instanceType, regionID, priceUnit, memorySizeInKiB, osType, providerID, instanceFamily, IsIoOptimized, systemDisk)
+}
+
+// getNumericalValueFromResourceQuantity returns the numericalValue of the resourceQuantity
+// An example is: 20Gi returns to 20. If any error occurs it returns the default value used in describePrice API which is 2000.
+func getNumericalValueFromResourceQuantity(quantity string) (value string) {
+	// defaulting when any panic or empty string occurs.
+	defer func() {
+		log.Debugf("unable to determine the size of the PV so defaulting the size to %s", ALIBABA_DEFAULT_DATADISK_SIZE)
+		if err := recover(); err != nil {
+			value = ALIBABA_DEFAULT_DATADISK_SIZE
+		}
+		if value == "" {
+			value = ALIBABA_DEFAULT_DATADISK_SIZE
+		}
+	}()
+	res := sizeRegEx.FindAllStringSubmatch(quantity, 1)
+	value = res[0][1]
+	return
+}
+
+// generateSlimK8sDiskFromV1PV function generates SlimK8sDisk from v1.PersistentVolume
+// to generate slim disk type that can be used to fetch pricing information for Data disk type.
+func generateSlimK8sDiskFromV1PV(pv *v1.PersistentVolume, regionID string) *SlimK8sDisk {
+
+	// All PVs are data disks while local disk are categorized as system disk
+	diskType := ALIBABA_DATA_DISK_CATEGORY
+
+	//TO-DO: Disk supports month and hour prices , defaulting to hour
+	priceUnit := ALIBABA_HOUR_PRICE_UNIT
+
+	sizeQuantity := fmt.Sprintf("%s", pv.Spec.Capacity.Storage())
+
+	// res := sizeRegEx.FindAllStringSubmatch(sizeQuantity, 1)
+
+	sizeInGiB := getNumericalValueFromResourceQuantity(sizeQuantity)
+
+	providerID := ""
+	if pv.Spec.CSI != nil {
+		providerID = pv.Spec.CSI.VolumeHandle
+	} else {
+		providerID = pv.Name // Looks like pv name is same as providerID in Alibaba k8s cluster
+	}
+
+	// Performance level being empty string gets defaulted in describePrice to PL1.
+	performanceLevel := ""
+	diskCategory := ""
+	if pv.Spec.CSI != nil {
+		if val, ok := pv.Spec.CSI.VolumeAttributes["performanceLevel"]; ok {
+			performanceLevel = val
+		}
+		if val, ok := pv.Spec.CSI.VolumeAttributes["type"]; ok {
+			diskCategory = val
+		}
+	}
+
+	// Highly unlikely that label pv.Spec.CSI.VolumeAttributes["type"] doesn't exist but if occurred default to cloud (most basic disk type)
+	if diskCategory == "" {
+		diskCategory = ALIBABA_DISK_CLOUD_CATEGORY
+	}
+
+	return NewSlimK8sDisk(diskType, regionID, priceUnit, diskCategory, performanceLevel, providerID, pv.Spec.StorageClassName, sizeInGiB)
+}
+
// determinePVRegion determines the region associated with a PV, checking sources in the
// following priority order (other paths to determine the region can be added later):
//  1. the topology.diskplugin.csi.alibabacloud.com/region label or annotation on the PV;
//  2. the topology.diskplugin.csi.alibabacloud.com/zone label or annotation on the PV,
//     mapped to a region by prefix match;
//  3. the PV's required node-affinity zone term, mapped to a region the same way.
// If none of the above yields a region, the empty string is returned so the caller defaults
// to the cluster region.
func determinePVRegion(pv *v1.PersistentVolume) string {
	// if "topology.diskplugin.csi.alibabacloud.com/region" is present as a label or annotation return that as the PV region
	if val, ok := pv.Labels[ALIBABA_DISK_TOPOLOGY_REGION_LABEL]; ok {
		log.Debugf("determinePVRegion returned a region value of: %s through label: %s for PV name: %s", val, ALIBABA_DISK_TOPOLOGY_REGION_LABEL, pv.Name)
		return val
	}
	if val, ok := pv.Annotations[ALIBABA_DISK_TOPOLOGY_REGION_LABEL]; ok {
		log.Debugf("determinePVRegion returned a region value of: %s through annotation: %s for PV name: %s", val, ALIBABA_DISK_TOPOLOGY_REGION_LABEL, pv.Name)
		return val
	}

	// if "topology.diskplugin.csi.alibabacloud.com/zone" is present as a label or annotation set it as the PV zone before looking at node affinity to determine the region PV belongs too
	var pvZone string

	// Label takes precedence over annotation for the zone, mirroring the region lookup above.
	if val, ok := pv.Labels[ALIBABA_DISK_TOPOLOGY_ZONE_LABEL]; ok {
		log.Debugf("determinePVRegion will set zone value to: %s through label: %s for PV name: %s", val, ALIBABA_DISK_TOPOLOGY_ZONE_LABEL, pv.Name)
		pvZone = val
	}

	if pvZone == "" {
		if val, ok := pv.Annotations[ALIBABA_DISK_TOPOLOGY_ZONE_LABEL]; ok {
			log.Debugf("determinePVRegion will set zone value to: %s through annotation: %s for PV name: %s", val, ALIBABA_DISK_TOPOLOGY_ZONE_LABEL, pv.Name)
			pvZone = val
		}
	}

	if pvZone == "" {
		// zone and regionID labels are optional in Alibaba PV creation, while PV through UI creation put's a zone PV is associated with and the region
		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to ClusterRegion.
		if pv.Spec.NodeAffinity != nil {
			nodeAffinity := pv.Spec.NodeAffinity
			if nodeAffinity.Required != nil && nodeAffinity.Required.NodeSelectorTerms != nil {
				for _, nodeSelectorTerm := range nodeAffinity.Required.NodeSelectorTerms {
					matchExpression := nodeSelectorTerm.MatchExpressions
					for _, nodeSelectorRequirement := range matchExpression {
						// NOTE(review): the last matching term wins here, and Values[0] is read
						// without a length check — assumes a non-empty Values list; confirm upstream.
						if nodeSelectorRequirement.Key == ALIBABA_DISK_TOPOLOGY_ZONE_LABEL {
							log.Debugf("determinePVRegion will set zone value to: %s through node affinity label: %s for PV name: %s", nodeSelectorRequirement.Values[0], ALIBABA_DISK_TOPOLOGY_ZONE_LABEL, pv.Name)
							pvZone = nodeSelectorRequirement.Values[0]
						}
					}
				}
			}
		}
	}

	// Region override list (if configured) replaces the built-in region list for matching.
	regionOverrides := env.GetRegionOverrideList()
	regions := alibabaRegions

	if len(regionOverrides) > 0 {
		regions = regionOverrides
	}

	// A zone like "cn-hangzhou-b" contains its region "cn-hangzhou", so substring match suffices.
	for _, region := range regions {
		if strings.Contains(pvZone, region) {
			log.Debugf("determinePVRegion determined region of %s through zone affiliation of the PV %s\n", region, pvZone)
			return region
		}
	}
	return ""
}
+
// PricingSourceSummary returns the pricing source summary for the provider.
// The summary represents what was _parsed_ from the pricing source, not
// everything that was _available_ in the pricing source.
func (a *Alibaba) PricingSourceSummary() interface{} {
	// The in-memory pricing map is itself the parsed summary; no copy is made.
	return a.Pricing
}

+ 839 - 0
pkg/cloud/alibaba/provider_test.go

@@ -0,0 +1,839 @@
+package alibaba
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+func TestCreateDescribePriceACSRequest(t *testing.T) {
+	node := &SlimK8sNode{
+		InstanceType:       "ecs.g6.large",
+		RegionID:           "cn-hangzhou",
+		PriceUnit:          "Hour",
+		MemorySizeInKiB:    "16KiB",
+		IsIoOptimized:      true,
+		OSType:             "Linux",
+		ProviderID:         "Ali-XXX-node-01",
+		InstanceTypeFamily: "g6",
+	}
+
+	disk := &SlimK8sDisk{
+		DiskType:         "data",
+		RegionID:         "cn-hangzhou",
+		PriceUnit:        "Hour",
+		SizeInGiB:        "20",
+		DiskCategory:     "diskCategory",
+		PerformanceLevel: "cloud_essd",
+		ProviderID:       "d-Ali-XXX-01",
+		StorageClass:     "testStorageClass",
+	}
+
+	cases := []struct {
+		name          string
+		testStruct    interface{}
+		expectedError error
+	}{
+		{
+			name:          "test CreateDescribePriceACSRequest with SlimK8sNode struct Object",
+			testStruct:    node,
+			expectedError: nil,
+		},
+		{
+			name:          "test CreateDescribePriceACSRequest with SlimK8sDisk struct Object",
+			testStruct:    disk,
+			expectedError: nil,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			_, err := createDescribePriceACSRequest(c.testStruct)
+			if err != nil && c.expectedError == nil {
+				t.Fatalf("Case name %s: Error converting to Alibaba cloud request", c.name)
+			}
+		})
+	}
+}
+
+func TestProcessDescribePriceAndCreateAlibabaPricing(t *testing.T) {
+	// Skipping this test case since it exposes secret but a good test case to verify when
+	// supporting a new family of instances, steps to perform are
+	// STEP 1: Comment the t.Skip() line and then replace XXX_KEY_ID with the alibaba key id of your account and XXX_SECRET_ID with alibaba cloud secret of your account.
+	// STEP 2: Once you verify describePrice is working and no change needed in processDescribePriceAndCreateAlibabaPricing, you can go ahead and revert the step 1 changes.
+
+	// This test case was use to test all general puprose instances
+
+	t.Skip()
+
+	client, err := sdk.NewClientWithAccessKey("cn-hangzhou", "XXX_KEY_ID", "XXX_SECRET_ID")
+	if err != nil {
+		t.Errorf("Error connecting to the Alibaba cloud")
+	}
+	aak := credentials.NewAccessKeyCredential("XXX_KEY_ID", "XXX_SECRET_ID")
+	signer := signers.NewAccessKeySigner(aak)
+
+	cases := []struct {
+		name          string
+		teststruct    interface{}
+		expectedError error
+	}{
+		{
+			name: "test General Purpose Type g7 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g7.4xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-01a",
+				InstanceTypeFamily: "g7",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test General Purpose Type g7a instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g7a.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-01b",
+				InstanceTypeFamily: "g7a",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Enhanced General Purpose Type g6e instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g6e.xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-01",
+				InstanceTypeFamily: "g6e",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test General Purpose Type g6 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g6.3xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "50331648KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-02",
+				InstanceTypeFamily: "g6",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test General Purpose Type g5 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.g5.2xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-03",
+				InstanceTypeFamily: "g5",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test General Purpose Type sn2 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.sn2.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-04",
+				InstanceTypeFamily: "sn2",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test General Purpose Type with Enhanced Network Performance sn2ne instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.sn2ne.2xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-05",
+				InstanceTypeFamily: "sn2ne",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type r7 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r7.6xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "2013265592KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-06",
+				InstanceTypeFamily: "r7",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type r7a instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r7a.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-06a",
+				InstanceTypeFamily: "r7a",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Enhanced Memory Optmized instance type r6e instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r6e.4xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "2013265592KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-07",
+				InstanceTypeFamily: "r6e",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type r6a instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r6a.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-07a",
+				InstanceTypeFamily: "r6a",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type r6 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r6.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-08",
+				InstanceTypeFamily: "r6",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory type instance and r5 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.r5.xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-09",
+				InstanceTypeFamily: "r5",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type with se1 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.se1.4xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-10",
+				InstanceTypeFamily: "se1",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory Optmized instance type with Enhanced Network Performance se1ne instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.se1ne.3xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "100663296KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-11",
+				InstanceTypeFamily: "se1ne",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test High Memory type with re6 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.re6.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-12",
+				InstanceTypeFamily: "re6",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Persistent Memory Optimized type with re6p instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.re6p.4xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-13",
+				InstanceTypeFamily: "re6p",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory type with re4 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.re4.10xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "41943040KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-14",
+				InstanceTypeFamily: "re4",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Memory optimized type with se1 instance family",
+			teststruct: &SlimK8sNode{
+				InstanceType:       "ecs.se1.8xlarge",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "33554432KiB",
+				IsIoOptimized:      true,
+				OSType:             "Linux",
+				ProviderID:         "cn-hangzhou.i-test-15",
+				InstanceTypeFamily: "se1",
+			},
+			expectedError: nil,
+		},
+		{
+			name:          "test for a nil information",
+			teststruct:    nil,
+			expectedError: fmt.Errorf("unsupported ECS pricing component at this time"),
+		},
+		{
+			name: "test Cloud Disk with Category cloud representing basic disk",
+			teststruct: &SlimK8sDisk{
+				DiskType:     "data",
+				RegionID:     "cn-hangzhou",
+				PriceUnit:    "Hour",
+				SizeInGiB:    "20",
+				DiskCategory: "cloud",
+				ProviderID:   "d-Ali-cloud-XXX-01",
+				StorageClass: "temp",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Cloud Disk with Category cloud_efficiency representing ultra disk",
+			teststruct: &SlimK8sDisk{
+				DiskType:     "data",
+				RegionID:     "cn-hangzhou",
+				PriceUnit:    "Hour",
+				SizeInGiB:    "40",
+				DiskCategory: "cloud_efficiency",
+				ProviderID:   "d-Ali-cloud-XXX-02",
+				StorageClass: "temp",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Cloud Disk with Category cloud_ssd representing standard SSD",
+			teststruct: &SlimK8sDisk{
+				DiskType:     "data",
+				RegionID:     "cn-hangzhou",
+				PriceUnit:    "Hour",
+				SizeInGiB:    "40",
+				DiskCategory: "cloud_efficiency",
+				ProviderID:   "d-Ali-cloud-XXX-02",
+				StorageClass: "temp",
+			},
+			expectedError: nil,
+		},
+		{
+			name: "test Cloud Disk with Category cloud_essd representing Enhanced SSD with PL2 performance level",
+			teststruct: &SlimK8sDisk{
+				DiskType:         "data",
+				RegionID:         "cn-hangzhou",
+				PriceUnit:        "Hour",
+				SizeInGiB:        "80",
+				DiskCategory:     "cloud_ssd",
+				PerformanceLevel: "PL2",
+				ProviderID:       "d-Ali-cloud-XXX-04",
+				StorageClass:     "temp",
+			},
+			expectedError: nil,
+		},
+	}
+	custom := &models.CustomPricing{}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			pricingObj, err := processDescribePriceAndCreateAlibabaPricing(client, c.teststruct, signer, custom)
+			if err != nil && c.expectedError == nil {
+				t.Fatalf("Case name %s: got an error %s", c.name, err)
+			}
+			if c.teststruct != nil {
+				if pricingObj == nil {
+					t.Fatalf("Case name %s: got a nil pricing object", c.name)
+				}
+				t.Logf("Case name %s: Pricing Information gathered for instanceType is %v", c.name, pricingObj.PricingTerms.PricingDetails.TradePrice)
+			}
+		})
+	}
+}
+
+func TestGetInstanceFamilyFromType(t *testing.T) {
+	cases := []struct {
+		name                   string
+		instanceType           string
+		expectedInstanceFamily string
+	}{
+		{
+			name:                   "test if ecs.[instance-family].[different-type] work",
+			instanceType:           "ecs.sn2ne.2xlarge",
+			expectedInstanceFamily: "sn2ne",
+		},
+		{
+			name:                   "test if random word gives you ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE value ",
+			instanceType:           "random.value",
+			expectedInstanceFamily: ALIBABA_UNKNOWN_INSTANCE_FAMILY_TYPE,
+		},
+		{
+			name:                   "test if random instance family gives you ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE value ",
+			instanceType:           "ecs.g7e.2xlarge",
+			expectedInstanceFamily: ALIBABA_NOT_SUPPORTED_INSTANCE_FAMILY_TYPE,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnValue := getInstanceFamilyFromType(c.instanceType)
+			if returnValue != c.expectedInstanceFamily {
+				t.Fatalf("Case name %s: expected instance family of type %s but got %s", c.name, c.expectedInstanceFamily, returnValue)
+			}
+		})
+	}
+}
+
+func TestDetermineKeyForPricing(t *testing.T) {
+	type randomK8sStruct struct {
+		name string
+	}
+	cases := []struct {
+		name          string
+		testVar       interface{}
+		expectedKey   string
+		expectedError error
+	}{
+		{
+			name: "test when all RegionID, InstanceType, OSType & ALIBABA_OPTIMIZE_KEYWORD words are used in Node key",
+			testVar: &SlimK8sNode{
+				InstanceType:       "ecs.sn2.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "linux",
+				ProviderID:         "cn-hangzhou.i-test-04",
+				InstanceTypeFamily: "sn2",
+			},
+			expectedKey:   "cn-hangzhou::ecs.sn2.large::linux::optimize",
+			expectedError: nil,
+		},
+		{
+			name: "test missing InstanceType to create Node key",
+			testVar: &SlimK8sNode{
+				RegionID:        "cn-hangzhou",
+				PriceUnit:       "Hour",
+				MemorySizeInKiB: "16777216KiB",
+				IsIoOptimized:   true,
+				OSType:          "linux",
+				ProviderID:      "cn-hangzhou.i-test-04",
+			},
+			expectedKey:   "cn-hangzhou::linux::optimize",
+			expectedError: nil,
+		},
+		{
+			name: "test when node has a systemDisk Information with missing Performance level",
+			testVar: &SlimK8sNode{
+				InstanceType:       "ecs.sn2.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "linux",
+				ProviderID:         "cn-hangzhou.i-test-04",
+				InstanceTypeFamily: "sn2",
+				SystemDisk: &SlimK8sDisk{
+					DiskType:     "system",
+					RegionID:     "cn-hangzhou",
+					PriceUnit:    "Hour",
+					SizeInGiB:    "40",
+					DiskCategory: "cloud_efficiency",
+					ProviderID:   "d-Ali-cloud-XXX-i1",
+					StorageClass: "",
+				},
+			},
+			expectedKey:   "cn-hangzhou::ecs.sn2.large::linux::optimize::cloud_efficiency::40",
+			expectedError: nil,
+		},
+		{
+			name: "test when node has a systemDisk Information with all information",
+			testVar: &SlimK8sNode{
+				InstanceType:       "ecs.sn2.large",
+				RegionID:           "cn-hangzhou",
+				PriceUnit:          "Hour",
+				MemorySizeInKiB:    "16777216KiB",
+				IsIoOptimized:      true,
+				OSType:             "linux",
+				ProviderID:         "cn-hangzhou.i-test-04",
+				InstanceTypeFamily: "sn2",
+				SystemDisk: &SlimK8sDisk{
+					DiskType:         "data",
+					RegionID:         "cn-hangzhou",
+					PriceUnit:        "Hour",
+					SizeInGiB:        "80",
+					DiskCategory:     "cloud_ssd",
+					PerformanceLevel: "PL2",
+					ProviderID:       "d-Ali-cloud-XXX-04",
+					StorageClass:     "",
+				},
+			},
+			expectedKey:   "cn-hangzhou::ecs.sn2.large::linux::optimize::cloud_ssd::80::PL2",
+			expectedError: nil,
+		},
+		{
+			name: "test random k8s struct should return unsupported error",
+			testVar: &randomK8sStruct{
+				name: "test struct",
+			},
+			expectedKey:   "",
+			expectedError: fmt.Errorf("unsupported ECS type randomK8sStruct for DescribePrice at this time"),
+		},
+		{
+			name:          "test for nil check",
+			testVar:       nil,
+			expectedKey:   "",
+			expectedError: fmt.Errorf("unsupported ECS type randomK8sStruct for DescribePrice at this time"),
+		},
+		{
+			name: "test when all RegionID, InstanceType, OSType & ALIBABA_OPTIMIZE_KEYWORD words are used to key",
+			testVar: &SlimK8sDisk{
+				DiskType:     "data",
+				RegionID:     "cn-hangzhou",
+				PriceUnit:    "Hour",
+				SizeInGiB:    "40",
+				DiskCategory: "cloud_efficiency",
+				ProviderID:   "d-Ali-cloud-XXX-02",
+				StorageClass: "temp",
+			},
+			expectedKey:   "cn-hangzhou::data::cloud_efficiency::40",
+			expectedError: nil,
+		},
+		{
+			name: "test missing InstanceType to create key",
+			testVar: &SlimK8sDisk{
+				DiskType:         "data",
+				RegionID:         "cn-hangzhou",
+				PriceUnit:        "Hour",
+				SizeInGiB:        "80",
+				DiskCategory:     "cloud_ssd",
+				PerformanceLevel: "PL2",
+				ProviderID:       "d-Ali-cloud-XXX-04",
+				StorageClass:     "temp",
+			},
+			expectedKey:   "cn-hangzhou::data::cloud_ssd::PL2::80",
+			expectedError: nil,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnString, returnErr := determineKeyForPricing(c.testVar)
+			if c.expectedError == nil && returnErr != nil {
+				t.Fatalf("Case name %s: expected error was nil but received error %v", c.name, returnErr)
+			}
+			if returnString != c.expectedKey {
+				t.Fatalf("Case name %s: determineKeyForPricing received %s but expected %s", c.name, returnString, c.expectedKey)
+			}
+		})
+	}
+}
+
+func TestGenerateSlimK8sNodeFromV1Node(t *testing.T) {
+	testv1Node := &v1.Node{}
+	testv1Node.Labels = make(map[string]string)
+	testv1Node.Labels["topology.kubernetes.io/region"] = "us-east-1"
+	testv1Node.Labels["beta.kubernetes.io/os"] = "linux"
+	testv1Node.Labels["node.kubernetes.io/instance-type"] = "ecs.sn2ne.2xlarge"
+	testv1Node.Status.Capacity = v1.ResourceList{
+		v1.ResourceMemory: *resource.NewQuantity(16, resource.BinarySI),
+	}
+	cases := []struct {
+		name             string
+		testNode         *v1.Node
+		expectedSlimNode *SlimK8sNode
+	}{
+		{
+			name:     "test a generic *v1.Node to *SlimK8sNode Conversion",
+			testNode: testv1Node,
+			expectedSlimNode: &SlimK8sNode{
+				InstanceType:       "ecs.sn2ne.2xlarge",
+				RegionID:           "us-east-1",
+				PriceUnit:          ALIBABA_HOUR_PRICE_UNIT,
+				MemorySizeInKiB:    "16",
+				IsIoOptimized:      true,
+				OSType:             "linux",
+				InstanceTypeFamily: "sn2ne",
+			},
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnSlimK8sNode := generateSlimK8sNodeFromV1Node(c.testNode)
+			if returnSlimK8sNode.InstanceType != c.expectedSlimNode.InstanceType {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceType: %s , received InstanceType: %s", c.expectedSlimNode.InstanceType, returnSlimK8sNode.InstanceType)
+			}
+			if returnSlimK8sNode.RegionID != c.expectedSlimNode.RegionID {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected RegionID: %s , received RegionID: %s", c.expectedSlimNode.RegionID, returnSlimK8sNode.RegionID)
+			}
+			if returnSlimK8sNode.PriceUnit != c.expectedSlimNode.PriceUnit {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected PriceUnit: %s , received PriceUnit: %s", c.expectedSlimNode.PriceUnit, returnSlimK8sNode.PriceUnit)
+			}
+			if returnSlimK8sNode.MemorySizeInKiB != c.expectedSlimNode.MemorySizeInKiB {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected MemorySizeInKiB: %s , received MemorySizeInKiB: %s", c.expectedSlimNode.MemorySizeInKiB, returnSlimK8sNode.MemorySizeInKiB)
+			}
+			if returnSlimK8sNode.OSType != c.expectedSlimNode.OSType {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected OSType: %s , received OSType: %s", c.expectedSlimNode.OSType, returnSlimK8sNode.OSType)
+			}
+			if returnSlimK8sNode.InstanceTypeFamily != c.expectedSlimNode.InstanceTypeFamily {
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceTypeFamily: %s , received InstanceTypeFamily: %s", c.expectedSlimNode.InstanceTypeFamily, returnSlimK8sNode.InstanceTypeFamily)
+			}
+		})
+	}
+}
+
+func TestGenerateSlimK8sDiskFromV1PV(t *testing.T) {
+	testv1PV := &v1.PersistentVolume{}
+	testv1PV.Spec.Capacity = v1.ResourceList{
+		v1.ResourceStorage: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+	}
+	testv1PV.Spec.CSI = &v1.CSIPersistentVolumeSource{}
+	testv1PV.Spec.CSI.VolumeHandle = "testPV"
+	testv1PV.Spec.CSI.VolumeAttributes = map[string]string{
+		"performanceLevel": "PL2",
+		"type":             "cloud_essd",
+	}
+	testv1PV.Spec.CSI.VolumeHandle = "testPV"
+	testv1PV.Spec.StorageClassName = "testStorageClass"
+	cases := []struct {
+		name             string
+		testPV           *v1.PersistentVolume
+		expectedSlimDisk *SlimK8sDisk
+		inpRegionID      string
+	}{
+		{
+			name:   "test a generic *v1.Node to *SlimK8sNode Conversion",
+			testPV: testv1PV,
+			expectedSlimDisk: &SlimK8sDisk{
+				DiskType:         ALIBABA_DATA_DISK_CATEGORY,
+				RegionID:         "us-east-1",
+				PriceUnit:        ALIBABA_HOUR_PRICE_UNIT,
+				SizeInGiB:        "16",
+				DiskCategory:     "cloud_essd",
+				PerformanceLevel: "PL2",
+				ProviderID:       "testPV",
+				StorageClass:     "testStorageClass",
+			},
+			inpRegionID: "us-east-1",
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnSlimK8sDisk := generateSlimK8sDiskFromV1PV(c.testPV, c.inpRegionID)
+			if returnSlimK8sDisk.DiskType != c.expectedSlimDisk.DiskType {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskType: %s , received DiskType: %s", c.expectedSlimDisk.DiskType, returnSlimK8sDisk.DiskType)
+			}
+			if returnSlimK8sDisk.RegionID != c.expectedSlimDisk.RegionID {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected RegionID: %s , received RegionID Type: %s", c.expectedSlimDisk.RegionID, returnSlimK8sDisk.RegionID)
+			}
+			if returnSlimK8sDisk.PriceUnit != c.expectedSlimDisk.PriceUnit {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PriceUnit: %s , received PriceUnit Type: %s", c.expectedSlimDisk.PriceUnit, returnSlimK8sDisk.PriceUnit)
+			}
+			if returnSlimK8sDisk.SizeInGiB != c.expectedSlimDisk.SizeInGiB {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected SizeInGiB: %s , received SizeInGiB Type: %s", c.expectedSlimDisk.SizeInGiB, returnSlimK8sDisk.SizeInGiB)
+			}
+			if returnSlimK8sDisk.DiskCategory != c.expectedSlimDisk.DiskCategory {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskCategory: %s , received DiskCategory Type: %s", c.expectedSlimDisk.DiskCategory, returnSlimK8sDisk.DiskCategory)
+			}
+			if returnSlimK8sDisk.PerformanceLevel != c.expectedSlimDisk.PerformanceLevel {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PerformanceLevel: %s , received PerformanceLevel Type: %s", c.expectedSlimDisk.PerformanceLevel, returnSlimK8sDisk.PerformanceLevel)
+			}
+			if returnSlimK8sDisk.ProviderID != c.expectedSlimDisk.ProviderID {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected ProviderID: %s , received ProviderID Type: %s", c.expectedSlimDisk.ProviderID, returnSlimK8sDisk.ProviderID)
+			}
+			if returnSlimK8sDisk.StorageClass != c.expectedSlimDisk.StorageClass {
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected StorageClass: %s , received StorageClass Type: %s", c.expectedSlimDisk.StorageClass, returnSlimK8sDisk.StorageClass)
+			}
+		})
+	}
+}
+
+func TestGetNumericalValueFromResourceQuantity(t *testing.T) {
+	cases := []struct {
+		name                 string
+		inputResourceQuanity string
+		expectedValue        string
+	}{
+		{
+			name:                 "positive scenario: when inputResourceQuantity is 10Gi",
+			inputResourceQuanity: "10Gi",
+			expectedValue:        "10",
+		},
+		{
+			name:                 "negative scenario: when inputResourceQuantity is Gi",
+			inputResourceQuanity: "Gi",
+			expectedValue:        ALIBABA_DEFAULT_DATADISK_SIZE,
+		},
+		{
+			name:                 "negative scenario: when inputResourceQuantity is 10",
+			inputResourceQuanity: "10",
+			expectedValue:        ALIBABA_DEFAULT_DATADISK_SIZE,
+		},
+		{
+			name:                 "negative scenario: when inputResourceQuantity is empty string",
+			inputResourceQuanity: "",
+			expectedValue:        ALIBABA_DEFAULT_DATADISK_SIZE,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnValue := getNumericalValueFromResourceQuantity(c.inputResourceQuanity)
+			if c.expectedValue != returnValue {
+				t.Fatalf("Case name %s: getNumericalValueFromResourceQuantity received %s but expected %s", c.name, returnValue, c.expectedValue)
+			}
+		})
+	}
+}
+
+func TestDeterminePVRegion(t *testing.T) {
+	genericNodeAffinityTestStruct := v1.NodeSelectorTerm{
+		MatchExpressions: []v1.NodeSelectorRequirement{
+			{
+				Key:      "topology.diskplugin.csi.alibabacloud.com/zone",
+				Operator: v1.NodeSelectorOpIn,
+				Values:   []string{"us-east-1a"},
+			},
+		},
+		MatchFields: []v1.NodeSelectorRequirement{},
+	}
+
+	// testPV1 contains the Label with region information as well as node affinity in spec
+	testPV1 := &v1.PersistentVolume{}
+	testPV1.Name = "testPV1"
+	testPV1.Labels = make(map[string]string)
+	testPV1.Labels[ALIBABA_DISK_TOPOLOGY_REGION_LABEL] = "us-east-1"
+	testPV1.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
+		Required: &v1.NodeSelector{
+			NodeSelectorTerms: []v1.NodeSelectorTerm{genericNodeAffinityTestStruct},
+		},
+	}
+
+	// testPV2 contains the only zone label
+	testPV2 := &v1.PersistentVolume{}
+	testPV2.Name = "testPV2"
+	testPV2.Labels = make(map[string]string)
+	testPV2.Labels[ALIBABA_DISK_TOPOLOGY_ZONE_LABEL] = "us-east-1a"
+
+	// testPV3 contains only node affinity in spec
+	testPV3 := &v1.PersistentVolume{}
+	testPV3.Name = "testPV3"
+	testPV3.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
+		Required: &v1.NodeSelector{
+			NodeSelectorTerms: []v1.NodeSelectorTerm{genericNodeAffinityTestStruct},
+		},
+	}
+
+	// testPV4 contains no label/annotation or any node affinity
+	testPV4 := &v1.PersistentVolume{}
+	testPV4.Name = "testPV4"
+
+	cases := []struct {
+		name           string
+		inputPV        *v1.PersistentVolume
+		expectedRegion string
+	}{
+		{
+			name:           "When Region label topology.diskplugin.csi.alibabacloud.com/region is present along with node affinity details",
+			inputPV:        testPV1,
+			expectedRegion: "us-east-1",
+		},
+		{
+			name:           "When zone label topology.diskplugin.csi.alibabacloud.com/zone is present function has to determine region",
+			inputPV:        testPV2,
+			expectedRegion: "us-east-1",
+		},
+		{
+			name:           "When only node affinity detail is present function has to determine the region",
+			inputPV:        testPV3,
+			expectedRegion: "us-east-1",
+		},
+		{
+			name:           "When no region/zone information is present function returns empty to default to cluster region",
+			inputPV:        testPV4,
+			expectedRegion: "",
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			returnRegion := determinePVRegion(c.inputPV)
+			if c.expectedRegion != returnRegion {
+				t.Fatalf("Case name %s: determinePVRegion received region :%s but expected region: %s", c.name, returnRegion, c.expectedRegion)
+			}
+		})
+	}
+
+}

+ 233 - 0
pkg/cloud/aws/athenaconfiguration.go

@@ -0,0 +1,233 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AthenaConfiguration holds the identity and location details needed to run
// Athena queries for AWS cost data. Account plus Bucket form the config key
// (see Key), and Authorizer supplies the AWS credentials.
type AthenaConfiguration struct {
	Bucket     string     `json:"bucket"`     // S3 output location for Athena query results
	Region     string     `json:"region"`     // AWS region used when building the Athena client
	Database   string     `json:"database"`   // Athena database to query
	Table      string     `json:"table"`      // Athena table to query
	Workgroup  string     `json:"workgroup"`  // Athena workgroup; optional (Validate does not require it)
	Account    string     `json:"account"`    // AWS account ID; combined with Bucket in Key()
	Authorizer Authorizer `json:"authorizer"` // credential provider used to create AWS clients
}
+
+func (ac *AthenaConfiguration) Validate() error {
+
+	// Validate Authorizer
+	if ac.Authorizer == nil {
+		return fmt.Errorf("AthenaConfiguration: missing Authorizer")
+	}
+
+	err := ac.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: %s", err)
+	}
+
+	// Validate base properties
+	if ac.Bucket == "" {
+		return fmt.Errorf("AthenaConfiguration: missing bucket")
+	}
+
+	if ac.Region == "" {
+		return fmt.Errorf("AthenaConfiguration: missing region")
+	}
+
+	if ac.Database == "" {
+		return fmt.Errorf("AthenaConfiguration: missing database")
+	}
+
+	if ac.Table == "" {
+		return fmt.Errorf("AthenaConfiguration: missing table")
+	}
+
+	if ac.Account == "" {
+		return fmt.Errorf("AthenaConfiguration: missing account")
+	}
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AthenaConfiguration)
+	if !ok {
+		return false
+	}
+
+	if ac.Authorizer != nil {
+		if !ac.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ac.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if ac.Region != thatConfig.Region {
+		return false
+	}
+
+	if ac.Database != thatConfig.Database {
+		return false
+	}
+
+	if ac.Table != thatConfig.Table {
+		return false
+	}
+
+	if ac.Workgroup != thatConfig.Workgroup {
+		return false
+	}
+
+	if ac.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ac *AthenaConfiguration) Sanitize() config.Config {
+	return &AthenaConfiguration{
+		Bucket:     ac.Bucket,
+		Region:     ac.Region,
+		Database:   ac.Database,
+		Table:      ac.Table,
+		Workgroup:  ac.Workgroup,
+		Account:    ac.Account,
+		Authorizer: ac.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (ac *AthenaConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
+}
+
+func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Bucket = bucket
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Region = region
+
+	database, err := config.GetInterfaceValue[string](fmap, "database")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Database = database
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Table = table
+
+	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Workgroup = workgroup
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Account = account
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Authorizer = authorizer
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) GetAthenaClient() (*athena.Client, error) {
+	cfg, err := ac.Authorizer.CreateAWSConfig(ac.Region)
+	if err != nil {
+		return nil, err
+	}
+	cli := athena.NewFromConfig(cfg)
+	return cli, nil
+}
+
+// ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
+// legacy behavior
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+	if aai.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	if aai.ServiceKeyName == "" && aai.ServiceKeySecret == "" {
+		authorizer = &ServiceAccount{}
+	} else {
+		authorizer = &AccessKey{
+			ID:     aai.ServiceKeyName,
+			Secret: aai.ServiceKeySecret,
+		}
+	}
+
+	// Wrap Authorizer with AssumeRole if MasterPayerArn is set
+	if aai.MasterPayerARN != "" {
+		authorizer = &AssumeRole{
+			Authorizer: authorizer,
+			RoleARN:    aai.MasterPayerARN,
+		}
+	}
+
+	var config config.KeyedConfig
+	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
+		config = &AthenaConfiguration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Database:   aai.AthenaDatabase,
+			Table:      aai.AthenaTable,
+			Workgroup:  aai.AthenaWorkgroup,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	} else {
+		config = &S3Configuration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	}
+
+	return config
+}

+ 594 - 0
pkg/cloud/aws/athenaconfiguration_test.go

@@ -0,0 +1,594 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestAthenaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   AthenaConfiguration
+		expected error
+	}{
+		"valid config access key": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: AthenaConfiguration{
+				Bucket:     "",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing bucket"),
+		},
+		"missing region": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing region"),
+		},
+		"missing database": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing database"),
+		},
+		"missing table": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing table"),
+		},
+		"missing workgroup": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"missing account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     AthenaConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket2",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region2",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different database": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database2",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table2",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different workgroup": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup2",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config AthenaConfiguration
+	}{
+		"Empty Config": {
+			config: AthenaConfiguration{},
+		},
+		"AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &AthenaConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 208 - 0
pkg/cloud/aws/athenaquerier.go

@@ -0,0 +1,208 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
// AthenaQuerier executes queries against AWS Athena using the connection and
// credential details carried by its embedded AthenaConfiguration.
type AthenaQuerier struct {
	AthenaConfiguration
}
+
+func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AthenaQuerier)
+	if !ok {
+		return false
+	}
+
+	return aq.AthenaConfiguration.Equals(&thatConfig.AthenaConfiguration)
+}
+
+// QueryAthenaPaginated executes athena query and processes results. An error from this method indicates a
+// FAILED_CONNECTION CloudConnectionStatus and should immediately stop the caller to maintain the correct CloudConnectionStatus
+func (aq *AthenaQuerier) QueryAthenaPaginated(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+
+	queryExecutionCtx := &types.QueryExecutionContext{
+		Database: aws.String(aq.Database),
+	}
+
+	resultConfiguration := &types.ResultConfiguration{
+		OutputLocation: aws.String(aq.Bucket),
+	}
+	startQueryExecutionInput := &athena.StartQueryExecutionInput{
+		QueryString:           aws.String(query),
+		QueryExecutionContext: queryExecutionCtx,
+		ResultConfiguration:   resultConfiguration,
+	}
+
+	// Only set if there is a value, the default input is nil
+	if aq.Workgroup != "" {
+		startQueryExecutionInput.WorkGroup = aws.String(aq.Workgroup)
+	}
+
+	// Create Athena Client
+	cli, err := aq.AthenaConfiguration.GetAthenaClient()
+
+	// Query Athena
+	startQueryExecutionOutput, err := cli.StartQueryExecution(ctx, startQueryExecutionInput)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: start query error: %s", err.Error())
+	}
+	err = waitForQueryToComplete(ctx, cli, startQueryExecutionOutput.QueryExecutionId)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: query execution error: %s", err.Error())
+	}
+	queryResultsInput := &athena.GetQueryResultsInput{
+		QueryExecutionId: startQueryExecutionOutput.QueryExecutionId,
+	}
+	getQueryResultsPaginator := athena.NewGetQueryResultsPaginator(cli, queryResultsInput)
+	for getQueryResultsPaginator.HasMorePages() {
+		pg, err := getQueryResultsPaginator.NextPage(ctx)
+		if err != nil {
+			log.Errorf("queryAthenaPaginated: NextPage error: %s", err.Error())
+			continue
+		}
+		fn(pg)
+	}
+	return nil
+}
+
+func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
+	inp := &athena.GetQueryExecutionInput{
+		QueryExecutionId: queryExecutionID,
+	}
+	isQueryStillRunning := true
+	for isQueryStillRunning {
+		qe, err := client.GetQueryExecution(ctx, inp)
+		if err != nil {
+			return err
+		}
+		if qe.QueryExecution.Status.State == "SUCCEEDED" {
+			isQueryStillRunning = false
+			continue
+		}
+		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
+			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
+		}
+		time.Sleep(2 * time.Second)
+	}
+	return nil
+}
+
+// GetAthenaRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetAthenaRowValue(row types.Row, queryColumnIndexes map[string]int, columnName string) string {
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return ""
+	}
+	return stringutil.Bank(*valuePointer)
+}
+
+// getAthenaRowValueFloat retrieve value from athena row based on column names and convert to float if possible
+func GetAthenaRowValueFloat(row types.Row, queryColumnIndexes map[string]int, columnName string) (float64, error) {
+
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: missing column index: %s", columnName)
+	}
+
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: nil field")
+	}
+
+	cost, err := strconv.ParseFloat(*valuePointer, 64)
+	if err != nil {
+		return cost, fmt.Errorf("getAthenaRowValueFloat: failed to parse %s: '%s': %s", columnName, *valuePointer, err.Error())
+	}
+	return cost, nil
+}
+
+func SelectAWSCategory(isNode, isVol, isNetwork bool, providerID, service string) string {
+	// Network has the highest priority and is based on the usage type ending in "Bytes"
+	if isNetwork {
+		return kubecost.NetworkCategory
+	}
+	// The node and volume conditions are mutually exclusive.
+	// Provider ID has prefix "i-"
+	if isNode {
+		return kubecost.ComputeCategory
+	}
+	// Provider ID has prefix "vol-"
+	if isVol {
+		return kubecost.StorageCategory
+	}
+
+	// Default categories based on service
+	switch strings.ToUpper(service) {
+	case "AWSELB", "AWSGLUE", "AMAZONROUTE53":
+		return kubecost.NetworkCategory
+	case "AMAZONEC2", "AWSLAMBDA", "AMAZONELASTICACHE":
+		return kubecost.ComputeCategory
+	case "AMAZONEKS":
+		// Check if line item is a fargate pod
+		if strings.Contains(providerID, ":pod/") {
+			return kubecost.ComputeCategory
+		}
+		return kubecost.ManagementCategory
+	case "AMAZONS3", "AMAZONATHENA", "AMAZONRDS", "AMAZONDYNAMODB", "AWSSECRETSMANAGER", "AMAZONFSX":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}
+
+var parseARNRx = regexp.MustCompile("^.+\\/(.+)?") // Capture "a406f7761142e4ef58a8f2ba478d2db2" from "arn:aws:elasticloadbalancing:us-east-1:297945954695:loadbalancer/a406f7761142e4ef58a8f2ba478d2db2"
+
+func ParseARN(id string) string {
+	match := parseARNRx.FindStringSubmatch(id)
+	if len(match) == 0 {
+		if id != "" {
+			log.DedupedInfof(10, "aws.parseARN: failed to parse %s", id)
+		}
+		return id
+	}
+	return match[len(match)-1]
+}
+
+func GetAthenaQueryFunc(fn func(types.Row)) func(*athena.GetQueryResultsOutput) bool {
+	pageNum := 0
+	processItemQueryResults := func(page *athena.GetQueryResultsOutput) bool {
+		if page == nil {
+			log.Errorf("AthenaQuerier: Athena page is nil")
+			return false
+		} else if page.ResultSet == nil {
+			log.Errorf("AthenaQuerier: Athena page.ResultSet is nil")
+			return false
+		}
+		rows := page.ResultSet.Rows
+		if pageNum == 0 {
+			rows = page.ResultSet.Rows[1:len(page.ResultSet.Rows)]
+		}
+
+		for _, row := range rows {
+			fn(row)
+		}
+		pageNum++
+		return true
+	}
+	return processItemQueryResults
+}

+ 251 - 0
pkg/cloud/aws/authorizer.go

@@ -0,0 +1,251 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// Authorizer type discriminators; stored under config.AuthorizerTypeProperty when an
// Authorizer is marshalled to JSON and used by SelectAuthorizerByType to pick an
// implementation when unmarshalling.
const AccessKeyAuthorizerType = "AWSAccessKey"
const ServiceAccountAuthorizerType = "AWSServiceAccount"
const AssumeRoleAuthorizerType = "AWSAssumeRole"

// Authorizer implementations provide aws.Config for AWS SDK calls
type Authorizer interface {
	config.Authorizer
	// CreateAWSConfig builds an AWS SDK v2 Config for the given region using this
	// Authorizer's credentials.
	CreateAWSConfig(string) (aws.Config, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	case ServiceAccountAuthorizerType:
+		return &ServiceAccount{}, nil
+	case AssumeRoleAuthorizerType:
+		return &AssumeRole{}, nil
+	default:
+		return nil, fmt.Errorf("AWS: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
type AccessKey struct {
	// ID is the AWS access key ID.
	ID string `json:"id"`
	// Secret is the AWS secret access key; Sanitize replaces it with config.Redacted.
	Secret string `json:"secret"`
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["id"] = ak.ID
+	fmap["secret"] = ak.Secret
+	return json.Marshal(fmap)
+}
+
+// Retrieve returns a set of awsV2 credentials using the AccessKey's key and secret.
+// This fulfils the awsV2.CredentialsProvider interface contract.
+func (ak *AccessKey) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	return aws.Credentials{
+		AccessKeyID:     ak.ID,
+		SecretAccessKey: ak.Secret,
+	}, nil
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.ID == "" {
+		return fmt.Errorf("AccessKey: missing ID")
+	}
+	if ak.Secret == "" {
+		return fmt.Errorf("AccessKey: missing Secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.ID != thatConfig.ID {
+		return false
+	}
+	if ak.Secret != thatConfig.Secret {
+		return false
+	}
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		ID:     ak.ID,
+		Secret: config.Redacted,
+	}
+}
+
+// CreateAWSConfig creates an AWS SDK V2 Config for the credentials that it contains for the provided region
+func (ak *AccessKey) CreateAWSConfig(region string) (cfg aws.Config, err error) {
+	err = ak.Validate()
+	if err != nil {
+		return cfg, err
+	}
+	// The AWS SDK v2 requires an object fulfilling the CredentialsProvider interface, which cloud.AccessKey does
+	cfg, err = awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithCredentialsProvider(ak), awsconfig.WithRegion(region))
+	if err != nil {
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region %s: %s", region, err)
+	}
+	return cfg, nil
+}
+
// ServiceAccount uses pod annotations along with a service account to authenticate
// integrations; it carries no fields of its own and relies on the SDK default
// credential chain at connection time.
type ServiceAccount struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	return json.Marshal(fmap)
+}
+
// Validate has nothing to check at this level; the connection will fail later if the
// Pod Annotation and Service Account are not configured correctly.
func (sa *ServiceAccount) Validate() error {
	return nil
}
+
+func (sa *ServiceAccount) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*ServiceAccount)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
// Sanitize returns a fresh ServiceAccount; there are no secrets to redact.
func (sa *ServiceAccount) Sanitize() config.Config {
	return &ServiceAccount{}
}
+
+func (sa *ServiceAccount) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(region))
+	if err != nil {
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region from annotation %s: %s", region, err)
+	}
+	return cfg, nil
+}
+
// AssumeRole is a wrapper for another Authorizer which adds an assumed role to the configuration
type AssumeRole struct {
	// Authorizer supplies the base credentials used to call STS.
	Authorizer Authorizer `json:"authorizer"`
	// RoleARN is the ARN of the role to assume on top of the base credentials.
	RoleARN string `json:"roleARN"`
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap["roleARN"] = ara.RoleARN
+	fmap["authorizer"] = ara.Authorizer
+	return json.Marshal(fmap)
+}
+
+// UnmarshalJSON is required for AssumeRole because it needs to unmarshal an Authorizer interface
+func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ara.RoleARN = roleARN
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.Authorizer = authorizer
+
+	return nil
+}
+
+func (ara *AssumeRole) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, _ := ara.Authorizer.CreateAWSConfig(region)
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the RoleARN.
+	stsSvc := sts.NewFromConfig(cfg)
+	creds := stscreds.NewAssumeRoleProvider(stsSvc, ara.RoleARN)
+	cfg.Credentials = aws.NewCredentialsCache(creds)
+	return cfg, nil
+}
+
+func (ara *AssumeRole) Validate() error {
+	if ara.Authorizer == nil {
+		return fmt.Errorf("AssumeRole: misisng base Authorizer")
+	}
+	err := ara.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if ara.RoleARN == "" {
+		return fmt.Errorf("AssumeRole: misisng RoleARN configuration")
+	}
+
+	return nil
+}
+
+func (ara *AssumeRole) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AssumeRole)
+	if !ok {
+		return false
+	}
+	if ara.Authorizer != nil {
+		if !ara.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ara.RoleARN != thatConfig.RoleARN {
+		return false
+	}
+
+	return true
+}
+
+func (ara *AssumeRole) Sanitize() config.Config {
+	return &AssumeRole{
+		Authorizer: ara.Authorizer.Sanitize().(Authorizer),
+		RoleARN:    ara.RoleARN,
+	}
+}

+ 67 - 0
pkg/cloud/aws/authorizer_test.go

@@ -0,0 +1,67 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+func TestAuthorizerJSON_Sanitize(t *testing.T) {
+
+	testCases := map[string]struct {
+		input    Authorizer
+		expected Authorizer
+	}{
+		"Access Key": {
+			input: &AccessKey{
+				ID:     "ID",
+				Secret: "Secret",
+			},
+			expected: &AccessKey{
+				ID:     "ID",
+				Secret: config.Redacted,
+			},
+		},
+		"Service Account": {
+			input:    &ServiceAccount{},
+			expected: &ServiceAccount{},
+		},
+		"Master Payer Access Key": {
+			input: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: "Secret",
+				},
+				RoleARN: "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: config.Redacted,
+				},
+				RoleARN: "role arn",
+			},
+		},
+		"Master Payer Service Account": {
+			input: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Convert to AuthorizerJSON for sanitization
+			sanitizedAuthorizer := tc.input.Sanitize()
+
+			if !tc.expected.Equals(sanitizedAuthorizer) {
+				t.Error("Authorizer was not as expected after Sanitization")
+			}
+
+		})
+	}
+}

+ 2307 - 0
pkg/cloud/aws/provider.go

@@ -0,0 +1,2307 @@
+package aws
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	errs "github.com/opencost/opencost/pkg/errors"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+
+	awsSDK "github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	athenaTypes "github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+
+	"github.com/jszwec/csvutil"
+
+	v1 "k8s.io/api/core/v1"
+)
+
const (
	// supportedSpotFeedVersion is the only spot data feed header version this parser accepts.
	supportedSpotFeedVersion = "1"
	// Update-type discriminators used by UpdateConfig to route incoming JSON payloads.
	SpotInfoUpdateType   = "spotinfo"
	AthenaInfoUpdateType = "athenainfo"
	PreemptibleType      = "preemptible"

	// Human-readable names of each pricing data source, reported by PricingSourceStatus.
	APIPricingSource              = "Public API"
	SpotPricingSource             = "Spot Data Feed"
	ReservedInstancePricingSource = "Savings Plan, Reserved Instance, and Out-Of-Cluster"

	// EC2 resource states for instances and volumes.
	InUseState    = "in-use"
	AttachedState = "attached"

	// AWSHourlyPublicIPCost is the hourly USD rate applied per public IP.
	// NOTE(review): hard-coded; confirm against current AWS public IPv4 pricing.
	AWSHourlyPublicIPCost    = 0.005
	EKSCapacityTypeLabel     = "eks.amazonaws.com/capacityType"
	EKSCapacitySpotTypeValue = "SPOT"
)
+
var (
	// provIdRx extracts the availability zone and instance/volume ID from a provider ID of
	// the form "aws:///us-east-2a/i-0fea4fd46592d050b" (capture group 2 is the ID).
	provIdRx = regexp.MustCompile("aws:///([^/]+)/([^/]+)")
	// usageTypeRegx captures the EBS portion of a usage type such as "USE2-EBS:VolumeUsage.gp2".
	usageTypeRegx = regexp.MustCompile(".*(-|^)(EBS.+)")
	// versionRx matches spot data feed header lines like "#Version: 1.0", capturing the major
	// version. BUGFIX: inside a backtick raw string `\\d` matches a literal backslash followed
	// by 'd', so the previous pattern could never match; single backslashes are required here.
	versionRx = regexp.MustCompile(`^#Version: (\d+)\.\d+$`)
	// regionRx matches region identifiers such as "us-east-2".
	regionRx = regexp.MustCompile("([a-z]+-[a-z]+-[0-9])")
)
+
+func (aws *AWS) PricingSourceStatus() map[string]*models.PricingSource {
+
+	sources := make(map[string]*models.PricingSource)
+
+	sps := &models.PricingSource{
+		Name:    SpotPricingSource,
+		Enabled: true,
+	}
+
+	if !aws.SpotRefreshEnabled() {
+		sps.Available = false
+		sps.Error = "Spot instances not set up"
+		sps.Enabled = false
+	} else {
+		sps.Error = ""
+		if aws.SpotPricingError != nil {
+			sps.Error = aws.SpotPricingError.Error()
+		}
+		if sps.Error != "" {
+			sps.Available = false
+		} else if len(aws.SpotPricingByInstanceID) > 0 {
+			sps.Available = true
+		} else {
+			sps.Error = "No spot instances detected"
+		}
+	}
+	sources[SpotPricingSource] = sps
+
+	rps := &models.PricingSource{
+		Name:    ReservedInstancePricingSource,
+		Enabled: true,
+	}
+	rps.Error = ""
+	if aws.RIPricingError != nil {
+		rps.Error = aws.RIPricingError.Error()
+	}
+	if rps.Error != "" {
+		rps.Available = false
+	} else {
+		rps.Available = true
+	}
+	sources[ReservedInstancePricingSource] = rps
+	return sources
+
+}
+
// SpotRefreshDuration represents how much time must pass before we refresh
const SpotRefreshDuration = 15 * time.Minute

// awsRegions lists every AWS region considered by this provider, including the
// China (cn-*) and GovCloud (us-gov-*) partitions.
var awsRegions = []string{
	"us-east-2",
	"us-east-1",
	"us-west-1",
	"us-west-2",
	"ap-east-1",
	"ap-south-1",
	"ap-northeast-3",
	"ap-northeast-2",
	"ap-southeast-1",
	"ap-southeast-2",
	"ap-northeast-1",
	"ap-southeast-3",
	"ca-central-1",
	"cn-north-1",
	"cn-northwest-1",
	"eu-central-1",
	"eu-west-1",
	"eu-west-2",
	"eu-west-3",
	"eu-north-1",
	"eu-south-1",
	"me-south-1",
	"sa-east-1",
	"af-south-1",
	"us-gov-east-1",
	"us-gov-west-1",
}
+
// AWS represents an Amazon Provider
type AWS struct {
	// Pricing caches product terms keyed by pricing key.
	Pricing map[string]*AWSProductTerms
	// Spot data feed state, guarded by SpotPricingLock.
	SpotPricingByInstanceID map[string]*spotInfo
	SpotPricingUpdatedAt    *time.Time
	SpotRefreshRunning      bool
	SpotPricingLock         sync.RWMutex
	SpotPricingError        error
	// Reserved Instance state, guarded by RIDataLock.
	RIPricingByInstanceID map[string]*RIData
	RIPricingError        error
	RIDataRunning         bool
	RIDataLock            sync.RWMutex
	// Savings Plan state, guarded by SavingsPlanDataLock.
	SavingsPlanDataByInstanceID map[string]*SavingsPlanData
	SavingsPlanDataRunning      bool
	SavingsPlanDataLock         sync.RWMutex
	ValidPricingKeys            map[string]bool
	Clientset                   clustercache.ClusterCache
	// Base prices (strings as configured) for on-demand and spot resources.
	BaseCPUPrice     string
	BaseRAMPrice     string
	BaseGPUPrice     string
	BaseSpotCPUPrice string
	BaseSpotRAMPrice string
	BaseSpotGPUPrice string
	// Spot data feed configuration.
	SpotLabelName           string
	SpotLabelValue          string
	SpotDataRegion          string
	SpotDataBucket          string
	SpotDataPrefix          string
	ProjectID               string
	DownloadPricingDataLock sync.RWMutex
	Config                  models.ProviderConfig
	ServiceAccountChecks    *models.ServiceAccountChecks
	clusterManagementPrice  float64
	ClusterRegion           string
	ClusterAccountID        string
	clusterProvisioner      string
}
+
// AWSAccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface.
//
// Deprecated: v1.104 Use AccessKey instead
type AWSAccessKey struct {
	AccessKeyID     string `json:"aws_access_key_id"`
	SecretAccessKey string `json:"aws_secret_access_key"`
}
+
+// Retrieve returns a set of awsV2 credentials using the AWSAccessKey's key and secret.
+// This fulfils the awsV2.CredentialsProvider interface contract.
+func (accessKey AWSAccessKey) Retrieve(ctx context.Context) (awsSDK.Credentials, error) {
+	return awsSDK.Credentials{
+		AccessKeyID:     accessKey.AccessKeyID,
+		SecretAccessKey: accessKey.SecretAccessKey,
+	}, nil
+}
+
+// CreateConfig creates an AWS SDK V2 Config for the credentials that it contains for the provided region
+func (accessKey AWSAccessKey) CreateConfig(region string) (awsSDK.Config, error) {
+	var cfg awsSDK.Config
+	var err error
+	// If accessKey values have not been provided, attempt to load cfg from service key annotations
+	if accessKey.AccessKeyID == "" && accessKey.SecretAccessKey == "" {
+		cfg, err = config.LoadDefaultConfig(context.TODO(), config.WithRegion(region))
+		if err != nil {
+			return cfg, fmt.Errorf("failed to initialize AWS SDK config for region from annotation %s: %s", region, err)
+		}
+	} else {
+		// The AWS SDK v2 requires an object fulfilling the CredentialsProvider interface, which cloud.AWSAccessKey does
+		cfg, err = config.LoadDefaultConfig(context.TODO(), config.WithCredentialsProvider(accessKey), config.WithRegion(region))
+		if err != nil {
+			return cfg, fmt.Errorf("failed to initialize AWS SDK config for region %s: %s", region, err)
+		}
+	}
+
+	return cfg, nil
+}
+
// AWSPricing maps a k8s node to an AWS Pricing "product"
type AWSPricing struct {
	Products map[string]*AWSProduct `json:"products"`
	Terms    AWSPricingTerms        `json:"terms"`
}

// AWSProduct represents a purchased SKU
type AWSProduct struct {
	Sku        string               `json:"sku"`
	Attributes AWSProductAttributes `json:"attributes"`
}

// AWSProductAttributes represents metadata about the product used to map to a node.
type AWSProductAttributes struct {
	Location        string `json:"location"` // human-readable region name, see locationToRegion
	InstanceType    string `json:"instanceType"`
	Memory          string `json:"memory"`
	Storage         string `json:"storage"`
	VCpu            string `json:"vcpu"`
	UsageType       string `json:"usagetype"`
	OperatingSystem string `json:"operatingSystem"`
	PreInstalledSw  string `json:"preInstalledSw"`
	InstanceFamily  string `json:"instanceFamily"`
	CapacityStatus  string `json:"capacitystatus"`
	GPU             string `json:"gpu"` // GPU represents the number of GPU on the instance
}

// AWSPricingTerms are how you pay for the node: OnDemand, Reserved, or (TODO) Spot
type AWSPricingTerms struct {
	OnDemand map[string]map[string]*AWSOfferTerm `json:"OnDemand"`
	Reserved map[string]map[string]*AWSOfferTerm `json:"Reserved"`
}

// AWSOfferTerm is a sku extension used to pay for the node.
type AWSOfferTerm struct {
	Sku             string                  `json:"sku"`
	OfferTermCode   string                  `json:"offerTermCode"`
	PriceDimensions map[string]*AWSRateCode `json:"priceDimensions"`
}
+
+func (ot *AWSOfferTerm) String() string {
+	var strs []string
+	for k, rc := range ot.PriceDimensions {
+		strs = append(strs, fmt.Sprintf("%s:%s", k, rc.String()))
+	}
+	return fmt.Sprintf("%s:%s", ot.Sku, strings.Join(strs, ","))
+}
+
// AWSRateCode encodes data about the price of a product
type AWSRateCode struct {
	// Unit is the billing unit (e.g. hours).
	Unit         string          `json:"unit"`
	PricePerUnit AWSCurrencyCode `json:"pricePerUnit"`
}
+
+func (rc *AWSRateCode) String() string {
+	return fmt.Sprintf("{unit: %s, pricePerUnit: %v", rc.Unit, rc.PricePerUnit)
+}
+
// AWSCurrencyCode is the localized currency. (TODO: support non-USD)
type AWSCurrencyCode struct {
	USD string `json:"USD,omitempty"`
	CNY string `json:"CNY,omitempty"`
}

// AWSProductTerms represents the full terms of the product
type AWSProductTerms struct {
	Sku      string        `json:"sku"`
	OnDemand *AWSOfferTerm `json:"OnDemand"`
	Reserved *AWSOfferTerm `json:"Reserved"`
	Memory   string        `json:"memory"`
	Storage  string        `json:"storage"`
	VCpu     string        `json:"vcpu"`
	GPU      string        `json:"gpu"` // GPU represents the number of GPU on the instance
	PV       *models.PV    `json:"pv"`
}
+
// ClusterIdEnvVar is the environment variable in which one can manually set the ClusterId
const ClusterIdEnvVar = "AWS_CLUSTER_ID"

// OnDemandRateCodes is the set of offerTermCode identifiers matching 'On Demand' rates.
var OnDemandRateCodes = map[string]struct{}{
	"JRTCKXETXF": {},
}

// OnDemandRateCodesCn is the equivalent set for the AWS China partition.
var OnDemandRateCodesCn = map[string]struct{}{
	"99YE2YK9UR": {},
	"5Y9WH78GDR": {},
	"KW44MY7SZN": {},
}

// HourlyRateCode is appended to a node sku
const HourlyRateCode = "6YS6EN2CT7"

// HourlyRateCodeCn is the hourly rate code for the AWS China partition.
const HourlyRateCodeCn = "Q7UJUT2CE6"

// volTypes are used to map between AWS UsageTypes and
// EBS volume types, as they would appear in K8s storage class
// name and the EC2 API. The map is bidirectional: usage type -> volume
// type and volume type -> usage type.
var volTypes = map[string]string{
	"EBS:VolumeUsage.gp2":    "gp2",
	"EBS:VolumeUsage.gp3":    "gp3",
	"EBS:VolumeUsage":        "standard",
	"EBS:VolumeUsage.sc1":    "sc1",
	"EBS:VolumeP-IOPS.piops": "io1",
	"EBS:VolumeUsage.st1":    "st1",
	"EBS:VolumeUsage.piops":  "io1",
	"gp2":                    "EBS:VolumeUsage.gp2",
	"gp3":                    "EBS:VolumeUsage.gp3",
	"standard":               "EBS:VolumeUsage",
	"sc1":                    "EBS:VolumeUsage.sc1",
	"io1":                    "EBS:VolumeUsage.piops",
	"st1":                    "EBS:VolumeUsage.st1",
}

// locationToRegion maps AWS region names (As they come from Billing)
// to actual region identifiers
var locationToRegion = map[string]string{
	"US East (Ohio)":            "us-east-2",
	"US East (N. Virginia)":     "us-east-1",
	"US West (N. California)":   "us-west-1",
	"US West (Oregon)":          "us-west-2",
	"Asia Pacific (Hong Kong)":  "ap-east-1",
	"Asia Pacific (Mumbai)":     "ap-south-1",
	"Asia Pacific (Osaka)":      "ap-northeast-3",
	"Asia Pacific (Seoul)":      "ap-northeast-2",
	"Asia Pacific (Singapore)":  "ap-southeast-1",
	"Asia Pacific (Sydney)":     "ap-southeast-2",
	"Asia Pacific (Tokyo)":      "ap-northeast-1",
	"Asia Pacific (Jakarta)":    "ap-southeast-3",
	"Canada (Central)":          "ca-central-1",
	"China (Beijing)":           "cn-north-1",
	"China (Ningxia)":           "cn-northwest-1",
	"EU (Frankfurt)":            "eu-central-1",
	"EU (Ireland)":              "eu-west-1",
	"EU (London)":               "eu-west-2",
	"EU (Paris)":                "eu-west-3",
	"EU (Stockholm)":            "eu-north-1",
	"EU (Milan)":                "eu-south-1",
	"South America (Sao Paulo)": "sa-east-1",
	"Africa (Cape Town)":        "af-south-1",
	"AWS GovCloud (US-East)":    "us-gov-east-1",
	"AWS GovCloud (US-West)":    "us-gov-west-1",
}

// loadedAWSSecret / awsSecret memoize the service key loaded from disk so it is
// only read once per process.
var loadedAWSSecret bool = false
var awsSecret *AWSAccessKey = nil
+
// GetLocalStorageQuery returns the Prometheus query for local storage cost; AWS has no
// such query, so this always returns the empty string.
func (aws *AWS) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
	return ""
}
+
+// KubeAttrConversion maps the k8s labels for region to an aws region
+func (aws *AWS) KubeAttrConversion(location, instanceType, operatingSystem string) string {
+	operatingSystem = strings.ToLower(operatingSystem)
+
+	region := locationToRegion[location]
+	return region + "," + instanceType + "," + operatingSystem
+}
+
// AwsSpotFeedInfo contains configuration for spot feed integration
type AwsSpotFeedInfo struct {
	BucketName       string `json:"bucketName"`
	Prefix           string `json:"prefix"`
	Region           string `json:"region"`
	AccountID        string `json:"projectID"` // note: serialized as "projectID"
	ServiceKeyName   string `json:"serviceKeyName"`
	ServiceKeySecret string `json:"serviceKeySecret"`
	SpotLabel        string `json:"spotLabel"`
	SpotLabelValue   string `json:"spotLabelValue"`
}
+
// AwsAthenaInfo contains configuration for CUR integration
//
// Deprecated: v1.104 Use AthenaConfiguration instead
type AwsAthenaInfo struct {
	AthenaBucketName string `json:"athenaBucketName"`
	AthenaRegion     string `json:"athenaRegion"`
	AthenaDatabase   string `json:"athenaDatabase"`
	AthenaTable      string `json:"athenaTable"`
	AthenaWorkgroup  string `json:"athenaWorkgroup"`
	ServiceKeyName   string `json:"serviceKeyName"`
	ServiceKeySecret string `json:"serviceKeySecret"`
	AccountID        string `json:"projectID"` // note: serialized as "projectID"
	MasterPayerARN   string `json:"masterPayerARN"`
}
+
+// IsEmpty returns true if all fields in config are empty, false if not.
+func (aai *AwsAthenaInfo) IsEmpty() bool {
+	return aai.AthenaBucketName == "" &&
+		aai.AthenaRegion == "" &&
+		aai.AthenaDatabase == "" &&
+		aai.AthenaTable == "" &&
+		aai.AthenaWorkgroup == "" &&
+		aai.ServiceKeyName == "" &&
+		aai.ServiceKeySecret == "" &&
+		aai.AccountID == "" &&
+		aai.MasterPayerARN == ""
+}
+
+// CreateConfig creates an AWS SDK V2 Config for the credentials that it contains
+func (aai *AwsAthenaInfo) CreateConfig() (awsSDK.Config, error) {
+	keyProvider := AWSAccessKey{AccessKeyID: aai.ServiceKeyName, SecretAccessKey: aai.ServiceKeySecret}
+	cfg, err := keyProvider.CreateConfig(aai.AthenaRegion)
+	if err != nil {
+		return cfg, err
+	}
+	if aai.MasterPayerARN != "" {
+		// Create the credentials from AssumeRoleProvider to assume the role
+		// referenced by the roleARN.
+		stsSvc := sts.NewFromConfig(cfg)
+		creds := stscreds.NewAssumeRoleProvider(stsSvc, aai.MasterPayerARN)
+		cfg.Credentials = awsSDK.NewCredentialsCache(creds)
+	}
+	return cfg, nil
+}
+
+func (aws *AWS) GetManagementPlatform() (string, error) {
+	nodes := aws.Clientset.GetAllNodes()
+
+	if len(nodes) > 0 {
+		n := nodes[0]
+		version := n.Status.NodeInfo.KubeletVersion
+		if strings.Contains(version, "eks") {
+			return "eks", nil
+		}
+		if _, ok := n.Labels["kops.k8s.io/instancegroup"]; ok {
+			return "kops", nil
+		}
+	}
+	return "", nil
+}
+
+func (aws *AWS) GetConfig() (*models.CustomPricing, error) {
+	c, err := aws.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	if c.Discount == "" {
+		c.Discount = "0%"
+	}
+	if c.NegotiatedDiscount == "" {
+		c.NegotiatedDiscount = "0%"
+	}
+	if c.ShareTenancyCosts == "" {
+		c.ShareTenancyCosts = models.DefaultShareTenancyCost
+	}
+
+	return c, nil
+}
+
+// GetAWSAccessKey generate an AWSAccessKey object from the config
+// Config values take precedence; empty fields fall back to the AWS access
+// key env vars. As a side effect, ConfigureAuthWith pushes the resolved
+// credentials into the process environment.
+func (aws *AWS) GetAWSAccessKey() (*AWSAccessKey, error) {
+	config, err := aws.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve AwsAthenaInfo %s", err)
+	}
+	// Resolve credentials (config/secret/env) and export them to env vars.
+	err = aws.ConfigureAuthWith(config)
+	if err != nil {
+		return nil, fmt.Errorf("error configuring Cloud Provider %s", err)
+	}
+	//Look for service key values in env if not present in config
+	if config.ServiceKeyName == "" {
+		config.ServiceKeyName = env.GetAWSAccessKeyID()
+	}
+	if config.ServiceKeySecret == "" {
+		config.ServiceKeySecret = env.GetAWSAccessKeySecret()
+	}
+
+	if config.ServiceKeyName == "" && config.ServiceKeySecret == "" {
+		// An empty key is not an error: a service-account (IAM role)
+		// integration may supply credentials instead.
+		log.DedupedInfof(1, "missing service key values for AWS cloud integration attempting to use service account integration")
+	}
+
+	return &AWSAccessKey{AccessKeyID: config.ServiceKeyName, SecretAccessKey: config.ServiceKeySecret}, nil
+}
+
+// GetAWSAthenaInfo generate an AWSAthenaInfo object from the config
+func (aws *AWS) GetAWSAthenaInfo() (*AwsAthenaInfo, error) {
+	config, err := aws.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve AwsAthenaInfo %s", err)
+	}
+
+	aak, err := aws.GetAWSAccessKey()
+	if err != nil {
+		return nil, err
+	}
+
+	return &AwsAthenaInfo{
+		AthenaBucketName: config.AthenaBucketName,
+		AthenaRegion:     config.AthenaRegion,
+		AthenaDatabase:   config.AthenaDatabase,
+		AthenaTable:      config.AthenaTable,
+		AthenaWorkgroup:  config.AthenaWorkgroup,
+		ServiceKeyName:   aak.AccessKeyID,
+		ServiceKeySecret: aak.SecretAccessKey,
+		AccountID:        config.AthenaProjectID,
+		MasterPayerARN:   config.MasterPayerARN,
+	}, nil
+}
+
+// UpdateConfigFromConfigMap applies the key/value pairs of a Kubernetes
+// ConfigMap to the custom pricing configuration.
+func (aws *AWS) UpdateConfigFromConfigMap(cm map[string]string) (*models.CustomPricing, error) {
+	return aws.Config.UpdateFromMap(cm)
+}
+
+// UpdateConfig updates the custom pricing config from a JSON stream.
+// updateType selects the payload shape: SpotInfoUpdateType decodes an
+// AwsSpotFeedInfo, AthenaInfoUpdateType decodes an AwsAthenaInfo, and any
+// other value is treated as a flat map of CustomPricing field names to
+// string values.
+func (aws *AWS) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
+	return aws.Config.Update(func(c *models.CustomPricing) error {
+		if updateType == SpotInfoUpdateType {
+			asfi := AwsSpotFeedInfo{}
+			err := json.NewDecoder(r).Decode(&asfi)
+			if err != nil {
+				return err
+			}
+
+			c.ServiceKeyName = asfi.ServiceKeyName
+			// Only overwrite the stored secret when a new one was supplied.
+			if asfi.ServiceKeySecret != "" {
+				c.ServiceKeySecret = asfi.ServiceKeySecret
+			}
+			c.SpotDataPrefix = asfi.Prefix
+			c.SpotDataBucket = asfi.BucketName
+			c.ProjectID = asfi.AccountID
+			c.SpotDataRegion = asfi.Region
+			c.SpotLabel = asfi.SpotLabel
+			c.SpotLabelValue = asfi.SpotLabelValue
+
+		} else if updateType == AthenaInfoUpdateType {
+			aai := AwsAthenaInfo{}
+			err := json.NewDecoder(r).Decode(&aai)
+			if err != nil {
+				return err
+			}
+			c.AthenaBucketName = aai.AthenaBucketName
+			c.AthenaRegion = aai.AthenaRegion
+			c.AthenaDatabase = aai.AthenaDatabase
+			c.AthenaTable = aai.AthenaTable
+			c.AthenaWorkgroup = aai.AthenaWorkgroup
+			c.ServiceKeyName = aai.ServiceKeyName
+			// Secret and MasterPayerARN are write-only from the caller's
+			// perspective: empty values keep the existing setting.
+			if aai.ServiceKeySecret != "" {
+				c.ServiceKeySecret = aai.ServiceKeySecret
+			}
+			if aai.MasterPayerARN != "" {
+				c.MasterPayerARN = aai.MasterPayerARN
+			}
+			c.AthenaProjectID = aai.AccountID
+		} else {
+			// Generic update: a JSON object of field name -> string value.
+			a := make(map[string]interface{})
+			err := json.NewDecoder(r).Decode(&a)
+			if err != nil {
+				return err
+			}
+			for k, v := range a {
+				kUpper := utils.ToTitle.String(k) // Just so we consistently supply / receive the same values, uppercase the first letter.
+				vstr, ok := v.(string)
+				if ok {
+					err := models.SetCustomPricingField(c, kUpper, vstr)
+					if err != nil {
+						return err
+					}
+				} else {
+					return fmt.Errorf("type error while updating config for %s", kUpper)
+				}
+			}
+		}
+
+		// Propagate the (possibly new) cluster name to remote storage.
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// awsKey identifies a node for pricing lookups; it implements models.Key.
+type awsKey struct {
+	SpotLabelName  string            // configured node label name that marks a spot instance
+	SpotLabelValue string            // label value that marks a spot instance
+	Labels         map[string]string // the node's Kubernetes labels
+	ProviderID     string            // cloud provider ID; parsed by provIdRx in ID()
+}
+
+// GPUCount is part of the models.Key interface; awsKey always reports 0.
+func (k *awsKey) GPUCount() int {
+	return 0
+}
+
+// GPUType is part of the models.Key interface; awsKey always reports "".
+func (k *awsKey) GPUType() string {
+	return ""
+}
+
+// ID extracts the instance ID from the provider ID using provIdRx; it
+// returns "" (with a warning) when the provider ID does not match.
+func (k *awsKey) ID() string {
+	// FindStringSubmatch returns the full match at index 0 and capture
+	// groups after it; the instance ID is the second capture group. The
+	// original iterated the submatch slice looking for index 2 — a direct
+	// length check is the idiomatic equivalent.
+	if match := provIdRx.FindStringSubmatch(k.ProviderID); len(match) > 2 {
+		return match[2]
+	}
+	log.Warnf("Could not find instance ID in \"%s\"", k.ProviderID)
+	return ""
+}
+
+// Features will return a comma separated list of features for the given node
+// If the node has a spot label, it will be included in the list
+// Otherwise, the list include instance type, operating system, and the region
+func (k *awsKey) Features() string {
+	instanceType, _ := util.GetInstanceType(k.Labels)
+	operatingSystem, _ := util.GetOperatingSystem(k.Labels)
+	region, _ := util.GetRegion(k.Labels)
+
+	onDemandKey := region + "," + instanceType + "," + operatingSystem
+	usageType := k.getUsageType(k.Labels)
+	spotKey := onDemandKey + "," + usageType
+
+	// Any one of these signals marks the node as spot capacity.
+	lifecycle, hasLifecycle := k.Labels["lifecycle"]
+	custom, hasCustom := k.Labels[k.SpotLabelName]
+	switch {
+	case hasLifecycle && lifecycle == "EC2Spot":
+		return spotKey
+	case hasCustom && custom == k.SpotLabelValue:
+		return spotKey
+	case usageType == PreemptibleType:
+		return spotKey
+	}
+	return onDemandKey
+}
+
+// getUsageType returns the usage type of the instance
+// If the instance is a spot instance, it will return PreemptibleType
+// Otherwise returns an empty string
+func (k *awsKey) getUsageType(labels map[string]string) string {
+	// Spot capacity is recorded as "preemptible" in the pricing data, so
+	// both the EKS and Karpenter spot markers map to PreemptibleType.
+	if capacity, found := labels[EKSCapacityTypeLabel]; found && capacity == EKSCapacitySpotTypeValue {
+		return PreemptibleType
+	}
+	if capacity, found := labels[models.KarpenterCapacityTypeLabel]; found && capacity == models.KarpenterCapacitySpotTypeValue {
+		return PreemptibleType
+	}
+	return ""
+}
+
+// PVPricing looks up persistent-volume pricing by the PV key's feature
+// string. A missing entry yields an empty PV rather than an error.
+func (aws *AWS) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	if pricing, found := aws.Pricing[pvk.Features()]; found {
+		return pricing.PV, nil
+	}
+	log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+	return &models.PV{}, nil
+}
+
+// awsPVKey identifies a persistent volume for pricing lookups; it
+// implements models.PVKey.
+type awsPVKey struct {
+	Labels                 map[string]string // the PV's Kubernetes labels (used for region lookup)
+	StorageClassParameters map[string]string // parameters of the PV's StorageClass
+	StorageClassName       string
+	Name                   string
+	DefaultRegion          string // region used when none is found in Labels
+	ProviderID             string // EBS volume ID or CSI volume handle
+}
+
+func (aws *AWS) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	providerID := ""
+	if pv.Spec.AWSElasticBlockStore != nil {
+		providerID = pv.Spec.AWSElasticBlockStore.VolumeID
+	} else if pv.Spec.CSI != nil {
+		providerID = pv.Spec.CSI.VolumeHandle
+	}
+	return &awsPVKey{
+		Labels:                 pv.Labels,
+		StorageClassName:       pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		Name:                   pv.Name,
+		DefaultRegion:          defaultRegion,
+		ProviderID:             providerID,
+	}
+}
+
+// ID returns the volume's provider ID (set from the EBS volume ID or CSI
+// volume handle in GetPVKey).
+func (key *awsPVKey) ID() string {
+	return key.ProviderID
+}
+
+// GetStorageClass returns the PV's storage class name.
+func (key *awsPVKey) GetStorageClass() string {
+	return key.StorageClassName
+}
+
+// Features maps the PV's storage class and region to a pricing lookup key
+// of the form "<region>,<EBS usage class>".
+func (key *awsPVKey) Features() string {
+	scType := key.StorageClassParameters["type"]
+	// "standard" volumes are priced as gp2.
+	if scType == "standard" {
+		scType = "gp2"
+	}
+	// Storage class names are generally EBS volume types (gp2), while
+	// pricing keys are based on UsageTypes (EBS:VolumeType.gp2); the
+	// volTypes map converts between the two.
+	region, found := util.GetRegion(key.Labels)
+	if !found {
+		region = key.DefaultRegion
+	}
+	class, found := volTypes[scType]
+	if !found {
+		log.Debugf("No voltype mapping for %s's storageClass: %s", key.Name, scType)
+	}
+	return region + "," + class
+}
+
+// GetKey maps node labels to information needed to retrieve pricing data
+// NOTE(review): ProviderID is read from labels["providerID"] rather than
+// n.Spec.ProviderID — presumably callers inject it into the label map;
+// confirm against the call sites.
+func (aws *AWS) GetKey(labels map[string]string, n *v1.Node) models.Key {
+	return &awsKey{
+		SpotLabelName:  aws.SpotLabelName,
+		SpotLabelValue: aws.SpotLabelValue,
+		Labels:         labels,
+		ProviderID:     labels["providerID"],
+	}
+}
+
+// isPreemptible reports whether a Features() key refers to spot capacity,
+// i.e. it has a fourth comma-separated component equal to PreemptibleType.
+func (aws *AWS) isPreemptible(key string) bool {
+	// Return the condition directly instead of if-true/return-false.
+	s := strings.Split(key, ",")
+	return len(s) == 4 && s[3] == PreemptibleType
+}
+
+// ClusterManagementPricing returns the detected cluster provisioner and the
+// flat hourly management fee, both set during DownloadPricingData.
+func (aws *AWS) ClusterManagementPricing() (string, float64, error) {
+	return aws.clusterProvisioner, aws.clusterManagementPrice, nil
+}
+
+// Use the pricing data from the current region. Fall back to using all region data if needed.
+// Returns the HTTP response (caller is responsible for closing resp.Body),
+// the URL that was fetched, and any request error. Chinese (cn-) regions
+// are served from the separate .com.cn endpoint; clusters spanning regions
+// or with undetectable regions fall back to the full multi-region index.
+// NOTE(review): http.Get uses the default client with no timeout — confirm
+// that is acceptable for this (very large) download.
+func (aws *AWS) getRegionPricing(nodeList []*v1.Node) (*http.Response, string, error) {
+
+	pricingURL := "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/"
+	region := ""
+	multiregion := false
+	for _, n := range nodeList {
+		labels := n.GetLabels()
+		currentNodeRegion := ""
+		if r, ok := util.GetRegion(labels); ok {
+			currentNodeRegion = r
+			// Switch to Chinese endpoint for regions with the Chinese prefix
+			if strings.HasPrefix(currentNodeRegion, "cn-") {
+				pricingURL = "https://pricing.cn-north-1.amazonaws.com.cn/offers/v1.0/cn/AmazonEC2/current/"
+			}
+		} else {
+			multiregion = true // We weren't able to detect the node's region, so pull all data.
+			break
+		}
+		if region == "" { // We haven't set a region yet
+			region = currentNodeRegion
+		} else if region != "" && currentNodeRegion != region { // If two nodes have different regions here, we'll need to fetch all pricing data.
+			multiregion = true
+			break
+		}
+	}
+
+	// Chinese multiregion endpoint only contains data for Chinese regions and Chinese regions are excluded from other endpoint
+	if region != "" && !multiregion {
+		pricingURL += region + "/"
+	}
+
+	pricingURL += "index.json"
+
+	if env.GetAWSPricingURL() != "" { // Allow override of pricing URL
+		pricingURL = env.GetAWSPricingURL()
+	}
+
+	log.Infof("starting download of \"%s\", which is quite large ...", pricingURL)
+	resp, err := http.Get(pricingURL)
+	if err != nil {
+		log.Errorf("Bogus fetch of \"%s\": %v", pricingURL, err)
+		return nil, pricingURL, err
+	}
+	return resp, pricingURL, err
+}
+
+// SpotRefreshEnabled determines whether the required configs to run the spot feed query have been set up
+func (aws *AWS) SpotRefreshEnabled() bool {
+	// Any one of bucket, region, or project ID being configured is enough
+	// to attempt the spot data feed download.
+	return aws.SpotDataBucket != "" || aws.SpotDataRegion != "" || aws.ProjectID != ""
+}
+
+// DownloadPricingData fetches data from the AWS Pricing API
+// It (re)loads the custom pricing config and auth, kicks off hourly
+// reserved-instance / savings-plan / spot refresh goroutines on their first
+// successful run, and streams the public pricing JSON into aws.Pricing.
+func (aws *AWS) DownloadPricingData() error {
+	aws.DownloadPricingDataLock.Lock()
+	defer aws.DownloadPricingDataLock.Unlock()
+	c, err := aws.Config.GetCustomPricingData()
+	if err != nil {
+		log.Errorf("Error downloading default pricing data: %s", err.Error())
+	}
+	// Cache configured base prices and spot feed settings on the provider.
+	aws.BaseCPUPrice = c.CPU
+	aws.BaseRAMPrice = c.RAM
+	aws.BaseGPUPrice = c.GPU
+	aws.BaseSpotCPUPrice = c.SpotCPU
+	aws.BaseSpotRAMPrice = c.SpotRAM
+	aws.BaseSpotGPUPrice = c.SpotGPU
+	aws.SpotLabelName = c.SpotLabel
+	aws.SpotLabelValue = c.SpotLabelValue
+	aws.SpotDataBucket = c.SpotDataBucket
+	aws.SpotDataPrefix = c.SpotDataPrefix
+	aws.ProjectID = c.ProjectID
+	aws.SpotDataRegion = c.SpotDataRegion
+
+	aws.ConfigureAuthWith(c) // load aws authentication from configuration or secret
+
+	if len(aws.SpotDataBucket) != 0 && len(aws.ProjectID) == 0 {
+		log.Warnf("using SpotDataBucket \"%s\" without ProjectID will not end well", aws.SpotDataBucket)
+	}
+	nodeList := aws.Clientset.GetAllNodes()
+
+	// Build the set of pricing keys we actually need, detecting the
+	// cluster provisioner (EKS/KOPS) from node labels along the way.
+	inputkeys := make(map[string]bool)
+	for _, n := range nodeList {
+
+		if _, ok := n.Labels["eks.amazonaws.com/nodegroup"]; ok {
+			aws.clusterManagementPrice = 0.10
+			aws.clusterProvisioner = "EKS"
+		} else if _, ok := n.Labels["kops.k8s.io/instancegroup"]; ok {
+			aws.clusterProvisioner = "KOPS"
+		}
+
+		labels := n.GetObjectMeta().GetLabels()
+		key := aws.GetKey(labels, n)
+		inputkeys[key.Features()] = true
+	}
+
+	pvList := aws.Clientset.GetAllPersistentVolumes()
+
+	// Map storage class name -> parameters; the default class is also
+	// registered under "default" and "".
+	storageClasses := aws.Clientset.GetAllStorageClasses()
+	storageClassMap := make(map[string]map[string]string)
+	for _, storageClass := range storageClasses {
+		params := storageClass.Parameters
+		storageClassMap[storageClass.ObjectMeta.Name] = params
+		if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
+			storageClassMap["default"] = params
+			storageClassMap[""] = params
+		}
+	}
+
+	pvkeys := make(map[string]models.PVKey)
+	for _, pv := range pvList {
+		params, ok := storageClassMap[pv.Spec.StorageClassName]
+		if !ok {
+			log.Infof("Unable to find params for storageClassName %s, falling back to default pricing", pv.Spec.StorageClassName)
+			continue
+		}
+		key := aws.GetPVKey(pv, params, "")
+		pvkeys[key.Features()] = key
+	}
+
+	// RIDataRunning establishes the existence of the goroutine. Since it's possible we
+	// run multiple downloads, we don't want to create multiple go routines if one already exists
+	if !aws.RIDataRunning {
+		err = aws.GetReservationDataFromAthena() // Block until one run has completed.
+		if err != nil {
+			log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
+		} else { // If we make one successful run, check on new reservation data every hour
+			go func() {
+				defer errs.HandlePanic()
+				aws.RIDataRunning = true
+
+				for {
+					log.Infof("Reserved Instance watcher running... next update in 1h")
+					time.Sleep(time.Hour)
+					err := aws.GetReservationDataFromAthena()
+					if err != nil {
+						log.Infof("Error updating RI data: %s", err.Error())
+					}
+				}
+			}()
+		}
+	}
+	if !aws.SavingsPlanDataRunning {
+		err = aws.GetSavingsPlanDataFromAthena()
+		if err != nil {
+			log.Errorf("Failed to lookup savings plan data: %s", err.Error())
+		} else {
+			go func() {
+				defer errs.HandlePanic()
+				aws.SavingsPlanDataRunning = true
+				for {
+					log.Infof("Savings Plan watcher running... next update in 1h")
+					time.Sleep(time.Hour)
+					err := aws.GetSavingsPlanDataFromAthena()
+					if err != nil {
+						log.Infof("Error updating Savings Plan data: %s", err.Error())
+					}
+				}
+			}()
+		}
+	}
+
+	aws.ValidPricingKeys = make(map[string]bool)
+
+	resp, pricingURL, err := aws.getRegionPricing(nodeList)
+	if err != nil {
+		return err
+	}
+	// Fix: close the response body once parsing completes so the HTTP
+	// connection can be reused (it was previously never closed).
+	defer resp.Body.Close()
+	err = aws.populatePricing(resp, inputkeys)
+	if err != nil {
+		return err
+	}
+	log.Infof("Finished downloading \"%s\"", pricingURL)
+
+	if !aws.SpotRefreshEnabled() {
+		return nil
+	}
+
+	// Always run spot pricing refresh when performing download
+	aws.refreshSpotPricing(true)
+
+	// Only start a single refresh goroutine
+	if !aws.SpotRefreshRunning {
+		aws.SpotRefreshRunning = true
+
+		go func() {
+			defer errs.HandlePanic()
+
+			for {
+				log.Infof("Spot Pricing Refresh scheduled in %.2f minutes.", SpotRefreshDuration.Minutes())
+				time.Sleep(SpotRefreshDuration)
+
+				// Reoccurring refresh checks update times
+				aws.refreshSpotPricing(false)
+			}
+		}()
+	}
+
+	return nil
+}
+
+// populatePricing streams the AWS pricing API response into aws.Pricing.
+// The document is decoded token-by-token (it is too large to unmarshal at
+// once): the "products" section records product terms for the node and EBS
+// keys we care about, and the "terms"/"OnDemand" section attaches prices to
+// those SKUs via the skusToKeys mapping built in the first pass.
+func (aws *AWS) populatePricing(resp *http.Response, inputkeys map[string]bool) error {
+	aws.Pricing = make(map[string]*AWSProductTerms)
+	skusToKeys := make(map[string]string)
+	dec := json.NewDecoder(resp.Body)
+	for {
+		t, err := dec.Token()
+		if err == io.EOF {
+			log.Infof("done loading \"%s\"\n", resp.Request.URL.String())
+			break
+		} else if err != nil {
+			log.Errorf("error parsing response json %v", resp.Body)
+			break
+		}
+		if t == "products" {
+			_, err := dec.Token() // this should parse the opening "{""
+			if err != nil {
+				return err
+			}
+			for dec.More() {
+				_, err := dec.Token() // the sku token
+				if err != nil {
+					return err
+				}
+				product := &AWSProduct{}
+
+				err = dec.Decode(&product)
+				if err != nil {
+					log.Errorf("Error parsing response from \"%s\": %v", resp.Request.URL.String(), err.Error())
+					break
+				}
+
+				// Plain on-demand instance usage (no pre-installed
+				// software, "BoxUsage" usage type, in-use capacity).
+				if product.Attributes.PreInstalledSw == "NA" &&
+					(strings.HasPrefix(product.Attributes.UsageType, "BoxUsage") || strings.Contains(product.Attributes.UsageType, "-BoxUsage")) &&
+					product.Attributes.CapacityStatus == "Used" {
+					key := aws.KubeAttrConversion(product.Attributes.Location, product.Attributes.InstanceType, product.Attributes.OperatingSystem)
+					spotKey := key + ",preemptible"
+					if inputkeys[key] || inputkeys[spotKey] { // Just grab the sku even if spot, and change the price later.
+						productTerms := &AWSProductTerms{
+							Sku:     product.Sku,
+							Memory:  product.Attributes.Memory,
+							Storage: product.Attributes.Storage,
+							VCpu:    product.Attributes.VCpu,
+							GPU:     product.Attributes.GPU,
+						}
+						aws.Pricing[key] = productTerms
+						aws.Pricing[spotKey] = productTerms
+						skusToKeys[product.Sku] = key
+					}
+					aws.ValidPricingKeys[key] = true
+					aws.ValidPricingKeys[spotKey] = true
+				} else if strings.Contains(product.Attributes.UsageType, "EBS:Volume") {
+					// UsageTypes may be prefixed with a region code - we're removing this when using
+					// volTypes to keep lookups generic
+					usageTypeMatch := usageTypeRegx.FindStringSubmatch(product.Attributes.UsageType)
+					usageTypeNoRegion := usageTypeMatch[len(usageTypeMatch)-1]
+					key := locationToRegion[product.Attributes.Location] + "," + usageTypeNoRegion
+					spotKey := key + ",preemptible"
+					pv := &models.PV{
+						Class:  volTypes[usageTypeNoRegion],
+						Region: locationToRegion[product.Attributes.Location],
+					}
+					productTerms := &AWSProductTerms{
+						Sku: product.Sku,
+						PV:  pv,
+					}
+					aws.Pricing[key] = productTerms
+					aws.Pricing[spotKey] = productTerms
+					skusToKeys[product.Sku] = key
+					aws.ValidPricingKeys[key] = true
+					aws.ValidPricingKeys[spotKey] = true
+				}
+			}
+		}
+		if t == "terms" {
+			_, err := dec.Token() // this should parse the opening "{""
+			if err != nil {
+				return err
+			}
+			termType, err := dec.Token()
+			if err != nil {
+				return err
+			}
+			if termType == "OnDemand" {
+				_, err := dec.Token()
+				if err != nil { // again, should parse an opening "{"
+					return err
+				}
+				for dec.More() {
+					sku, err := dec.Token()
+					if err != nil {
+						return err
+					}
+					_, err = dec.Token() // another opening "{"
+					if err != nil {
+						return err
+					}
+					// SKUOndemand
+					_, err = dec.Token()
+					if err != nil {
+						return err
+					}
+					offerTerm := &AWSOfferTerm{}
+					err = dec.Decode(&offerTerm)
+					if err != nil {
+						log.Errorf("Error decoding AWS Offer Term: " + err.Error())
+					}
+
+					// Attach the offer term to the product recorded in the
+					// "products" pass; SKUs we didn't record are skipped.
+					key, ok := skusToKeys[sku.(string)]
+					spotKey := key + ",preemptible"
+					if ok {
+						aws.Pricing[key].OnDemand = offerTerm
+						aws.Pricing[spotKey].OnDemand = offerTerm
+						var cost string
+						// Rate codes differ between the global (USD) and
+						// Chinese (CNY) price lists.
+						if _, isMatch := OnDemandRateCodes[offerTerm.OfferTermCode]; isMatch {
+							cost = offerTerm.PriceDimensions[strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCode}, ".")].PricePerUnit.USD
+						} else if _, isMatch := OnDemandRateCodesCn[offerTerm.OfferTermCode]; isMatch {
+							cost = offerTerm.PriceDimensions[strings.Join([]string{sku.(string), offerTerm.OfferTermCode, HourlyRateCodeCn}, ".")].PricePerUnit.CNY
+						}
+						if strings.Contains(key, "EBS:VolumeP-IOPS.piops") {
+							// If the specific UsageType is the per IO cost used on io1 volumes
+							// we need to add the per IO cost to the io1 PV cost
+
+							// Add the per IO cost to the PV object for the io1 volume type
+							aws.Pricing[key].PV.CostPerIO = cost
+						} else if strings.Contains(key, "EBS:Volume") {
+							// If volume, we need to get hourly cost and add it to the PV object
+							costFloat, _ := strconv.ParseFloat(cost, 64)
+							hourlyPrice := costFloat / 730
+
+							aws.Pricing[key].PV.Cost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+						}
+					}
+
+					_, err = dec.Token()
+					if err != nil {
+						return err
+					}
+				}
+				_, err = dec.Token()
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// refreshSpotPricing re-downloads the spot data feed and swaps in the new
+// per-instance spot prices. When force is false the refresh is skipped if
+// the last successful update is newer than SpotRefreshDuration.
+func (aws *AWS) refreshSpotPricing(force bool) {
+	aws.SpotPricingLock.Lock()
+	defer aws.SpotPricingLock.Unlock()
+
+	now := time.Now().UTC()
+	updateTime := now.Add(-SpotRefreshDuration)
+
+	// Return if there was an update time set and an hour hasn't elapsed
+	if !force && aws.SpotPricingUpdatedAt != nil && aws.SpotPricingUpdatedAt.After(updateTime) {
+		return
+	}
+
+	sp, err := aws.parseSpotData(aws.SpotDataBucket, aws.SpotDataPrefix, aws.ProjectID, aws.SpotDataRegion)
+	if err != nil {
+		// Keep the previous data; record the error for diagnostics.
+		log.Warnf("Skipping AWS spot data download: %s", err.Error())
+		aws.SpotPricingError = err
+		return
+	}
+	aws.SpotPricingError = nil
+
+	// update time last updated
+	aws.SpotPricingUpdatedAt = &now
+	aws.SpotPricingByInstanceID = sp
+}
+
+// Stubbed NetworkPricing for AWS. Pull directly from aws.json for now
+// The three egress rates are parsed from the custom pricing config; any
+// parse failure aborts the call.
+func (aws *AWS) NetworkPricing() (*models.Network, error) {
+	cfg, err := aws.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+
+	zoneEgress, err := strconv.ParseFloat(cfg.ZoneNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	regionEgress, err := strconv.ParseFloat(cfg.RegionNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	internetEgress, err := strconv.ParseFloat(cfg.InternetNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &models.Network{
+		ZoneNetworkEgressCost:     zoneEgress,
+		RegionNetworkEgressCost:   regionEgress,
+		InternetNetworkEgressCost: internetEgress,
+	}, nil
+}
+
+// LoadBalancerPricing returns a flat load balancer cost estimate using a
+// tiered forwarding-rule rate card with hard-coded usage (one forwarding
+// rule, zero GB of data ingress).
+func (aws *AWS) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	const (
+		firstFiveRuleRate  = 0.025 // hourly rate applied to the first five rules
+		additionalRuleRate = 0.010 // hourly rate for each rule beyond five
+		ingressPerGBRate   = 0.008 // rate applied per GB of ingress data
+	)
+	forwardingRules := 1.0
+	dataIngressGB := 0.0
+
+	var totalCost float64
+	if forwardingRules < 5 {
+		totalCost = firstFiveRuleRate*forwardingRules + ingressPerGBRate*dataIngressGB
+	} else {
+		totalCost = firstFiveRuleRate*5 + additionalRuleRate*(forwardingRules-5) + ingressPerGBRate*dataIngressGB
+	}
+	return &models.LoadBalancer{
+		Cost: totalCost,
+	}, nil
+}
+
+// AllNodePricing returns all the billing data fetched.
+// The pricing map is returned under the read lock; callers should treat it
+// as read-only.
+func (aws *AWS) AllNodePricing() (interface{}, error) {
+	aws.DownloadPricingDataLock.RLock()
+	defer aws.DownloadPricingDataLock.RUnlock()
+	return aws.Pricing, nil
+}
+
+// spotPricing returns the spot-feed price entry for an instance ID, if one
+// was loaded by refreshSpotPricing.
+func (aws *AWS) spotPricing(instanceID string) (*spotInfo, bool) {
+	aws.SpotPricingLock.RLock()
+	defer aws.SpotPricingLock.RUnlock()
+
+	info, ok := aws.SpotPricingByInstanceID[instanceID]
+	return info, ok
+}
+
+// reservedInstancePricing returns the reserved-instance price entry for an
+// instance ID, if one was loaded from Athena.
+func (aws *AWS) reservedInstancePricing(instanceID string) (*RIData, bool) {
+	aws.RIDataLock.RLock()
+	defer aws.RIDataLock.RUnlock()
+
+	data, ok := aws.RIPricingByInstanceID[instanceID]
+	return data, ok
+}
+
+// savingsPlanPricing returns the savings-plan price entry for an instance
+// ID, if one was loaded from Athena.
+func (aws *AWS) savingsPlanPricing(instanceID string) (*SavingsPlanData, bool) {
+	aws.SavingsPlanDataLock.RLock()
+	defer aws.SavingsPlanDataLock.RUnlock()
+
+	data, ok := aws.SavingsPlanDataByInstanceID[instanceID]
+	return data, ok
+}
+
+// createNode builds a models.Node price for key k using, in priority order:
+// spot-feed pricing, configured base spot prices (for preemptible nodes
+// absent from the feed), savings-plan pricing, reserved-instance pricing,
+// and finally the on-demand rate (USD, or CNY for the Chinese price list)
+// from the product terms.
+func (aws *AWS) createNode(terms *AWSProductTerms, usageType string, k models.Key) (*models.Node, error) {
+	key := k.Features()
+
+	if spotInfo, ok := aws.spotPricing(k.ID()); ok {
+		var spotcost string
+		log.DedupedInfof(5, "Looking up spot data from feed for node %s", k.ID())
+		// The feed's Charge field is "<amount> <unit>"; take the amount.
+		arr := strings.Split(spotInfo.Charge, " ")
+		if len(arr) == 2 {
+			spotcost = arr[0]
+		} else {
+			log.Infof("Spot data for node %s is missing", k.ID())
+		}
+		return &models.Node{
+			Cost:         spotcost,
+			VCPU:         terms.VCpu,
+			RAM:          terms.Memory,
+			GPU:          terms.GPU,
+			Storage:      terms.Storage,
+			BaseCPUPrice: aws.BaseCPUPrice,
+			BaseRAMPrice: aws.BaseRAMPrice,
+			BaseGPUPrice: aws.BaseGPUPrice,
+			UsageType:    PreemptibleType,
+		}, nil
+	} else if aws.isPreemptible(key) { // Preemptible but we don't have any data in the pricing report.
+		log.DedupedWarningf(5, "Node %s marked preemptible but we have no data in spot feed", k.ID())
+		return &models.Node{
+			VCPU:         terms.VCpu,
+			VCPUCost:     aws.BaseSpotCPUPrice,
+			RAM:          terms.Memory,
+			GPU:          terms.GPU,
+			Storage:      terms.Storage,
+			BaseCPUPrice: aws.BaseCPUPrice,
+			BaseRAMPrice: aws.BaseRAMPrice,
+			BaseGPUPrice: aws.BaseGPUPrice,
+			UsageType:    PreemptibleType,
+		}, nil
+	} else if sp, ok := aws.savingsPlanPricing(k.ID()); ok {
+		strCost := fmt.Sprintf("%f", sp.EffectiveCost)
+		return &models.Node{
+			Cost:         strCost,
+			VCPU:         terms.VCpu,
+			RAM:          terms.Memory,
+			GPU:          terms.GPU,
+			Storage:      terms.Storage,
+			BaseCPUPrice: aws.BaseCPUPrice,
+			BaseRAMPrice: aws.BaseRAMPrice,
+			BaseGPUPrice: aws.BaseGPUPrice,
+			UsageType:    usageType,
+		}, nil
+
+	} else if ri, ok := aws.reservedInstancePricing(k.ID()); ok {
+		strCost := fmt.Sprintf("%f", ri.EffectiveCost)
+		return &models.Node{
+			Cost:         strCost,
+			VCPU:         terms.VCpu,
+			RAM:          terms.Memory,
+			GPU:          terms.GPU,
+			Storage:      terms.Storage,
+			BaseCPUPrice: aws.BaseCPUPrice,
+			BaseRAMPrice: aws.BaseRAMPrice,
+			BaseGPUPrice: aws.BaseGPUPrice,
+			UsageType:    usageType,
+		}, nil
+
+	}
+	// On-demand: look up the hourly rate dimension for this SKU, trying the
+	// USD rate code first and the Chinese (CNY) rate code second.
+	var cost string
+	c, ok := terms.OnDemand.PriceDimensions[strings.Join([]string{terms.Sku, terms.OnDemand.OfferTermCode, HourlyRateCode}, ".")]
+	if ok {
+		cost = c.PricePerUnit.USD
+	} else {
+		// Check for Chinese pricing before throwing error
+		c, ok = terms.OnDemand.PriceDimensions[strings.Join([]string{terms.Sku, terms.OnDemand.OfferTermCode, HourlyRateCodeCn}, ".")]
+		if ok {
+			cost = c.PricePerUnit.CNY
+		} else {
+			return nil, fmt.Errorf("Could not fetch data for \"%s\"", k.ID())
+		}
+	}
+
+	return &models.Node{
+		Cost:         cost,
+		VCPU:         terms.VCpu,
+		RAM:          terms.Memory,
+		GPU:          terms.GPU,
+		Storage:      terms.Storage,
+		BaseCPUPrice: aws.BaseCPUPrice,
+		BaseRAMPrice: aws.BaseRAMPrice,
+		BaseGPUPrice: aws.BaseGPUPrice,
+		UsageType:    usageType,
+	}, nil
+}
+
+// NodePricing takes in a key from GetKey and returns a Node object for use in building the cost model.
+// Lookup order: an exact pricing entry; otherwise, if the key is known to
+// be valid, re-download pricing data and retry; otherwise return an error
+// so the caller falls back to base pricing.
+func (aws *AWS) NodePricing(k models.Key) (*models.Node, error) {
+	aws.DownloadPricingDataLock.RLock()
+	defer aws.DownloadPricingDataLock.RUnlock()
+
+	key := k.Features()
+	usageType := "ondemand"
+	if aws.isPreemptible(key) {
+		usageType = PreemptibleType
+	}
+
+	terms, ok := aws.Pricing[key]
+	if ok {
+		return aws.createNode(terms, usageType, k)
+	} else if _, ok := aws.ValidPricingKeys[key]; ok {
+		// DownloadPricingData takes the write lock, so drop the read lock
+		// around it. NOTE(review): other readers may observe refreshed
+		// state in this window — confirm that is acceptable.
+		aws.DownloadPricingDataLock.RUnlock()
+		err := aws.DownloadPricingData()
+		aws.DownloadPricingDataLock.RLock()
+		if err != nil {
+			// On download failure, fall back to the configured base CPU
+			// price and surface the error alongside it.
+			return &models.Node{
+				Cost:             aws.BaseCPUPrice,
+				BaseCPUPrice:     aws.BaseCPUPrice,
+				BaseRAMPrice:     aws.BaseRAMPrice,
+				BaseGPUPrice:     aws.BaseGPUPrice,
+				UsageType:        usageType,
+				UsesBaseCPUPrice: true,
+			}, err
+		}
+		terms, termsOk := aws.Pricing[key]
+		if !termsOk {
+			return &models.Node{
+				Cost:             aws.BaseCPUPrice,
+				BaseCPUPrice:     aws.BaseCPUPrice,
+				BaseRAMPrice:     aws.BaseRAMPrice,
+				BaseGPUPrice:     aws.BaseGPUPrice,
+				UsageType:        usageType,
+				UsesBaseCPUPrice: true,
+			}, fmt.Errorf("Unable to find any Pricing data for \"%s\"", key)
+		}
+		return aws.createNode(terms, usageType, k)
+	} else { // Fall back to base pricing if we can't find the key. Base pricing is handled at the costmodel level.
+		return nil, fmt.Errorf("Invalid Pricing Key \"%s\"", key)
+
+	}
+}
+
+// ClusterInfo returns an object that represents the cluster. TODO: actually return the name of the cluster. Blocked on cluster federation.
+// Cluster name resolution order: custom pricing config, the deprecated AWS
+// cluster ID env var, the generic cluster ID env var, then a default.
+func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
+
+	c, err := awsProvider.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	const defaultClusterName = "AWS Cluster #1"
+	// Determine cluster name
+	clusterName := c.ClusterName
+	if clusterName == "" {
+		awsClusterID := env.GetAWSClusterID()
+		if awsClusterID != "" {
+			log.Infof("Returning \"%s\" as ClusterName", awsClusterID)
+			clusterName = awsClusterID
+			log.Warnf("Warning - %s will be deprecated in a future release. Use %s instead", env.AWSClusterIDEnvVar, env.ClusterIDEnvVar)
+		} else if clusterName = env.GetClusterID(); clusterName != "" {
+			log.Infof("Setting cluster name to %s from %s ", clusterName, env.ClusterIDEnvVar)
+		} else {
+			clusterName = defaultClusterName
+			log.Warnf("Unable to detect cluster name - using default of %s", defaultClusterName)
+			log.Warnf("Please set cluster name through configmap or via %s env var", env.ClusterIDEnvVar)
+		}
+
+	}
+
+	// this value requires configuration but is unavailable else where
+	clusterAccountID := c.ClusterAccountID
+	// Use AthenaProjectID if Cluster Account is not set to support older configs
+	if clusterAccountID == "" {
+		clusterAccountID = c.AthenaProjectID
+	}
+
+	m := make(map[string]string)
+	m["name"] = clusterName
+	m["provider"] = kubecost.AWSProvider
+	m["account"] = clusterAccountID
+	m["region"] = awsProvider.ClusterRegion
+	m["id"] = env.GetClusterID()
+	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
+	m["provisioner"] = awsProvider.clusterProvisioner
+	return m, nil
+}
+
+// updates the authentication to the latest values (via config or secret)
+// NOTE(review): if GetCustomPricingData can return a nil config on error,
+// ConfigureAuthWith would dereference nil below — confirm it returns a
+// non-nil default config instead.
+func (aws *AWS) ConfigureAuth() error {
+	c, err := aws.Config.GetCustomPricingData()
+	if err != nil {
+		log.Errorf("Error downloading default pricing data: %s", err.Error())
+	}
+	return aws.ConfigureAuthWith(c)
+}
+
+// updates the authentication to the latest values (via config or secret)
+// When both halves of a key are resolved, they are written into the
+// process environment; otherwise the environment is left untouched.
+func (aws *AWS) ConfigureAuthWith(config *models.CustomPricing) error {
+	keyID, keySecret := aws.getAWSAuth(false, config)
+	if keyID == "" || keySecret == "" {
+		// credentials may exist on the actual AWS node-- if so, use those. If not, override with the service key
+		return nil
+	}
+	if err := env.Set(env.AWSAccessKeyIDEnvVar, keyID); err != nil {
+		return err
+	}
+	return env.Set(env.AWSAccessKeySecretEnvVar, keySecret)
+}
+
+// Gets the aws key id and secret
+// Resolution order: (1) values set in the custom pricing config (frontend
+// UI), (2) the mounted auth secret, (3) the AWS env vars. The "hasKey"
+// service account check is updated to reflect whether a usable key pair
+// was found.
+func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string, string) {
+	// recordHasKey centralizes the previously-triplicated status report.
+	recordHasKey := func(found bool) {
+		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
+			Message: "AWS ServiceKey exists",
+			Status:  found,
+		})
+	}
+
+	// 1. Check config values first (set from frontend UI)
+	if cp.ServiceKeyName != "" && cp.ServiceKeySecret != "" {
+		recordHasKey(true)
+		return cp.ServiceKeyName, cp.ServiceKeySecret
+	}
+
+	// 2. Check for secret
+	s, _ := aws.loadAWSAuthSecret(forceReload)
+	if s != nil && s.AccessKeyID != "" && s.SecretAccessKey != "" {
+		recordHasKey(true)
+		return s.AccessKeyID, s.SecretAccessKey
+	}
+
+	// 3. Fall back to env vars (they may legitimately be empty when a
+	// service account / IAM role supplies credentials instead).
+	recordHasKey(env.GetAWSAccessKeyID() != "" && env.GetAWSAccessKeySecret() != "")
+	return env.GetAWSAccessKeyID(), env.GetAWSAccessKeySecret()
+}
+
+// Load once and cache the result (even on failure). This is an install time secret, so
+// we don't expect the secret to change. If it does, however, we can force reload using
+// the input parameter.
+// A failed load is also cached: subsequent non-forced calls return
+// (nil, nil). NOTE(review): loadedAWSSecret/awsSecret are package globals
+// mutated without a lock — confirm callers serialize access.
+func (aws *AWS) loadAWSAuthSecret(force bool) (*AWSAccessKey, error) {
+	if !force && loadedAWSSecret {
+		return awsSecret, nil
+	}
+	// Mark loaded before attempting, so failures are not retried per call.
+	loadedAWSSecret = true
+
+	exists, err := fileutil.FileExists(models.AuthSecretPath)
+	if !exists || err != nil {
+		return nil, fmt.Errorf("Failed to locate service account file: %s", models.AuthSecretPath)
+	}
+
+	result, err := os.ReadFile(models.AuthSecretPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var ak AWSAccessKey
+	err = json.Unmarshal(result, &ak)
+	if err != nil {
+		return nil, err
+	}
+
+	awsSecret = &ak
+	return awsSecret, nil
+}
+
+// getAddressesForRegion lists the elastic IP addresses in a single region
+// using credentials resolved from the provider configuration.
+func (aws *AWS) getAddressesForRegion(ctx context.Context, region string) (*ec2.DescribeAddressesOutput, error) {
+	accessKey, err := aws.GetAWSAccessKey()
+	if err != nil {
+		return nil, err
+	}
+
+	cfg, err := accessKey.CreateConfig(region)
+	if err != nil {
+		return nil, err
+	}
+
+	client := ec2.NewFromConfig(cfg)
+	return client.DescribeAddresses(ctx, &ec2.DescribeAddressesInput{})
+}
+
+// getAllAddresses fans DescribeAddresses out across every known region and
+// aggregates the results. Per-region failures are tolerated as long as at
+// least one region returns addresses; the call fails only when every
+// region errored and nothing was retrieved.
+func (aws *AWS) getAllAddresses() ([]*ec2Types.Address, error) {
+	aws.ConfigureAuth() // load authentication data into env vars
+
+	regions := aws.Regions()
+
+	addressCh := make(chan *ec2.DescribeAddressesOutput, len(regions))
+	errorCh := make(chan error, len(regions))
+
+	var wg sync.WaitGroup
+	wg.Add(len(regions))
+
+	// Query each region concurrently; results and errors go to their
+	// respective buffered channels.
+	for _, r := range regions {
+		go func(region string) {
+			defer wg.Done()
+			defer errs.HandlePanic()
+
+			resp, err := aws.getAddressesForRegion(context.TODO(), region)
+			if err != nil {
+				errorCh <- err
+				return
+			}
+			addressCh <- resp
+		}(r)
+	}
+
+	// Close the result channels once every region worker has finished
+	go func() {
+		defer errs.HandlePanic()
+
+		wg.Wait()
+		close(errorCh)
+		close(addressCh)
+	}()
+
+	var addresses []*ec2Types.Address
+	for adds := range addressCh {
+		for _, add := range adds.Addresses {
+			a := add // duplicate to avoid pointer to iterator
+			addresses = append(addresses, &a)
+		}
+
+	}
+
+	// Fix: named errList (the original `var errs []error` shadowed the
+	// errs package used by HandlePanic above).
+	var errList []error
+	for err := range errorCh {
+		log.DedupedWarningf(5, "unable to get addresses: %s", err)
+		errList = append(errList, err)
+	}
+
+	// Return error if no addresses are returned
+	if len(errList) > 0 && len(addresses) == 0 {
+		return nil, fmt.Errorf("%d error(s) retrieving addresses: %v", len(errList), errList)
+	}
+
+	return addresses, nil
+}
+
+// GetAddresses retrieves EC2 elastic IP addresses from all regions and
+// returns them JSON-encoded under a top-level "Addresses" key.
+func (aws *AWS) GetAddresses() ([]byte, error) {
+	addresses, err := aws.getAllAddresses()
+	if err != nil {
+		return nil, err
+	}
+
+	// Format the response this way to match the JSON-encoded formatting of a single response
+	// from DescribeAddresses, so that consumers can always expect AWS address responses to have
+	// an "Addresses" key at the top level.
+	return json.Marshal(map[string][]*ec2Types.Address{
+		"Addresses": addresses,
+	})
+}
+
+// isAddressOrphaned reports whether an elastic IP address is not currently
+// associated with any resource.
+func (aws *AWS) isAddressOrphaned(address *ec2Types.Address) bool {
+	return address.AssociationId == nil
+}
+
+// getDisksForRegion fetches a single page of EBS volume descriptions for the
+// given region; pass the previous response's NextToken to continue a listing.
+func (aws *AWS) getDisksForRegion(ctx context.Context, region string, maxResults int32, nextToken *string) (*ec2.DescribeVolumesOutput, error) {
+	accessKey, err := aws.GetAWSAccessKey()
+	if err != nil {
+		return nil, err
+	}
+
+	config, err := accessKey.CreateConfig(region)
+	if err != nil {
+		return nil, err
+	}
+
+	input := &ec2.DescribeVolumesInput{
+		MaxResults: &maxResults,
+		NextToken:  nextToken,
+	}
+	return ec2.NewFromConfig(config).DescribeVolumes(ctx, input)
+}
+
+// getAllDisks concurrently lists EBS volumes across every configured AWS
+// region, following NextToken pagination within each region. It returns an
+// error only when at least one region failed AND no volumes were collected;
+// partial failures are logged and tolerated.
+func (aws *AWS) getAllDisks() ([]*ec2Types.Volume, error) {
+	aws.ConfigureAuth() // load authentication data into env vars
+
+	regions := aws.Regions()
+
+	// Page size for DescribeVolumes; used for the first page and all
+	// follow-up pages (previously follow-up pages used 100, which looks
+	// like a typo and caused 10x the API calls on large accounts).
+	const volumePageSize = int32(1000)
+
+	// Buffered to the region count; the consumer below drains concurrently,
+	// so multi-page regions never deadlock on send.
+	volumeCh := make(chan *ec2.DescribeVolumesOutput, len(regions))
+	errorCh := make(chan error, len(regions))
+
+	var wg sync.WaitGroup
+	wg.Add(len(regions))
+
+	// Get volumes from each AWS region
+	for _, r := range regions {
+		// Fetch volume response and send results and errors to their
+		// respective channels
+		go func(region string) {
+			defer wg.Done()
+			defer errs.HandlePanic()
+
+			// Query for first page of volume results
+			resp, err := aws.getDisksForRegion(context.TODO(), region, volumePageSize, nil)
+			if err != nil {
+				errorCh <- err
+				return
+			}
+			volumeCh <- resp
+
+			// A NextToken indicates more pages of results. Keep querying
+			// until all pages are retrieved.
+			for resp.NextToken != nil {
+				resp, err = aws.getDisksForRegion(context.TODO(), region, volumePageSize, resp.NextToken)
+				if err != nil {
+					errorCh <- err
+					return
+				}
+				volumeCh <- resp
+			}
+		}(r)
+	}
+
+	// Close the result channels after everything has been sent
+	go func() {
+		defer errs.HandlePanic()
+
+		wg.Wait()
+		close(errorCh)
+		close(volumeCh)
+	}()
+
+	var volumes []*ec2Types.Volume
+	for vols := range volumeCh {
+		for _, vol := range vols.Volumes {
+			v := vol // duplicate to avoid pointer to iterator
+			volumes = append(volumes, &v)
+		}
+	}
+
+	var errs []error
+	for err := range errorCh {
+		log.DedupedWarningf(5, "unable to get disks: %s", err)
+		errs = append(errs, err)
+	}
+
+	// Return error if no volumes are returned
+	if len(errs) > 0 && len(volumes) == 0 {
+		return nil, fmt.Errorf("%d error(s) retrieving volumes: %v", len(errs), errs)
+	}
+
+	return volumes, nil
+}
+
+// GetDisks returns the AWS disks backing PVs. Useful because sometimes k8s will not clean up PVs correctly. Requires a json config in /var/configs with key region.
+// The result is JSON-encoded under a top-level "Volumes" key.
+func (aws *AWS) GetDisks() ([]byte, error) {
+	volumes, err := aws.getAllDisks()
+	if err != nil {
+		return nil, err
+	}
+
+	// Format the response this way to match the JSON-encoded formatting of a single response
+	// from DescribeVolumes, so that consumers can always expect AWS disk responses to have
+	// a "Volumes" key at the top level.
+	return json.Marshal(map[string][]*ec2Types.Volume{
+		"Volumes": volumes,
+	})
+}
+
+// isDiskOrphaned reports whether an EBS volume is neither in use nor held by
+// any attached attachment, i.e. it appears to be abandoned.
+func (aws *AWS) isDiskOrphaned(vol *ec2Types.Volume) bool {
+	// A volume that is actively in use is never orphaned.
+	if vol.State == InUseState {
+		return false
+	}
+
+	// Nor is one with at least one attachment in the attached state.
+	for _, attachment := range vol.Attachments {
+		if attachment.State == AttachedState {
+			return false
+		}
+	}
+
+	return true
+}
+
+// GetOrphanedResources scans all regions for EBS volumes and elastic IP
+// addresses that appear unused, returning each with an estimated monthly cost
+// and a console URL for cleanup.
+func (aws *AWS) GetOrphanedResources() ([]models.OrphanedResource, error) {
+	volumes, err := aws.getAllDisks()
+	if err != nil {
+		return nil, err
+	}
+
+	addresses, err := aws.getAllAddresses()
+	if err != nil {
+		return nil, err
+	}
+
+	var orphanedResources []models.OrphanedResource
+
+	for _, volume := range volumes {
+		if aws.isDiskOrphaned(volume) {
+			cost, err := aws.findCostForDisk(volume)
+			if err != nil {
+				return nil, err
+			}
+
+			var volumeSize int64
+			if volume.Size != nil {
+				volumeSize = int64(*volume.Size)
+			}
+
+			// This is turning us-east-1a into us-east-1
+			var zone string
+			if volume.AvailabilityZone != nil {
+				zone = *volume.AvailabilityZone
+			}
+			var region, url string
+			region = regionRx.FindString(zone)
+			if region != "" {
+				url = "https://console.aws.amazon.com/ec2/home?region=" + region + "#Volumes:sort=desc:createTime"
+			} else {
+				// No parseable region: link to the console without a region hint.
+				url = "https://console.aws.amazon.com/ec2/home?#Volumes:sort=desc:createTime"
+			}
+
+			// NOTE(review): VolumeId is dereferenced without a nil check;
+			// DescribeVolumes appears to always populate it — confirm.
+			or := models.OrphanedResource{
+				Kind:        "disk",
+				Region:      zone,
+				Size:        &volumeSize,
+				DiskName:    *volume.VolumeId,
+				Url:         url,
+				MonthlyCost: cost,
+			}
+
+			orphanedResources = append(orphanedResources, or)
+		}
+	}
+
+	for _, address := range addresses {
+		if aws.isAddressOrphaned(address) {
+			// Unassociated elastic IPs are billed hourly; extrapolate to a month.
+			cost := AWSHourlyPublicIPCost * timeutil.HoursPerMonth
+
+			// Surface the address's tags as a key/value description.
+			desc := map[string]string{}
+			for _, tag := range address.Tags {
+				if tag.Key == nil {
+					continue
+				}
+				if tag.Value == nil {
+					desc[*tag.Key] = ""
+				} else {
+					desc[*tag.Key] = *tag.Value
+				}
+			}
+
+			// NOTE(review): PublicIp is dereferenced without a nil check;
+			// elastic IPs appear to always carry one — confirm.
+			or := models.OrphanedResource{
+				Kind:        "address",
+				Address:     *address.PublicIp,
+				Description: desc,
+				Url:         "http://console.aws.amazon.com/ec2/home?#Addresses",
+				MonthlyCost: &cost,
+			}
+
+			orphanedResources = append(orphanedResources, or)
+		}
+	}
+	return orphanedResources, nil
+}
+
+// findCostForDisk estimates the monthly cost of an EBS volume from cached
+// pricing data as price-per-GB-hour * hours-per-month * size-in-GB.
+func (aws *AWS) findCostForDisk(disk *ec2Types.Volume) (*float64, error) {
+	//todo: use AWS pricing from all regions
+	if disk.AvailabilityZone == nil {
+		return nil, fmt.Errorf("nil region")
+	}
+	if disk.Size == nil {
+		return nil, fmt.Errorf("nil disk size")
+	}
+
+	// Map the EBS volume type (e.g. "gp2") to its pricing storage class.
+	class := volTypes[string(disk.VolumeType)]
+
+	// NOTE(review): the region is hard-coded to us-east-2 even though the
+	// volume's AvailabilityZone is validated above — all disks are priced
+	// against us-east-2 rates (see todo). Confirm this approximation is OK.
+	key := "us-east-2" + "," + class
+
+	pricing, ok := aws.Pricing[key]
+	if !ok {
+		return nil, fmt.Errorf("no pricing data for key '%s'", key)
+	}
+	if pricing == nil {
+		return nil, fmt.Errorf("nil pricing data for key '%s'", key)
+	}
+	if pricing.PV == nil {
+		return nil, fmt.Errorf("pricing for key '%s' has nil PV", key)
+	}
+	priceStr := pricing.PV.Cost
+
+	price, err := strconv.ParseFloat(priceStr, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	cost := price * timeutil.HoursPerMonth * float64(*disk.Size)
+	return &cost, nil
+}
+
+// QueryAthenaPaginated executes an Athena query and feeds each page of
+// results to fn. It validates the Athena configuration, starts the query,
+// waits for it to complete, then pages through the results. An error is
+// returned if configuration is incomplete, the query fails to start or
+// complete, or a results page cannot be fetched.
+func (aws *AWS) QueryAthenaPaginated(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+	awsAthenaInfo, err := aws.GetAWSAthenaInfo()
+	if err != nil {
+		return err
+	}
+	if awsAthenaInfo.AthenaDatabase == "" || awsAthenaInfo.AthenaTable == "" || awsAthenaInfo.AthenaRegion == "" ||
+		awsAthenaInfo.AthenaBucketName == "" || awsAthenaInfo.AccountID == "" {
+		return fmt.Errorf("QueryAthenaPaginated: athena configuration incomplete")
+	}
+
+	queryExecutionCtx := &athenaTypes.QueryExecutionContext{
+		Database: awsSDK.String(awsAthenaInfo.AthenaDatabase),
+	}
+
+	resultConfiguration := &athenaTypes.ResultConfiguration{
+		OutputLocation: awsSDK.String(awsAthenaInfo.AthenaBucketName),
+	}
+	startQueryExecutionInput := &athena.StartQueryExecutionInput{
+		QueryString:           awsSDK.String(query),
+		QueryExecutionContext: queryExecutionCtx,
+		ResultConfiguration:   resultConfiguration,
+	}
+
+	// Only set if there is a value, the default input is nil which defaults to the 'primary' workgroup
+	if awsAthenaInfo.AthenaWorkgroup != "" {
+		startQueryExecutionInput.WorkGroup = awsSDK.String(awsAthenaInfo.AthenaWorkgroup)
+	}
+
+	// Create Athena Client. Fail fast here: previously this error was only
+	// logged and execution continued with an unusable config.
+	cfg, err := awsAthenaInfo.CreateConfig()
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: could not retrieve Athena Configuration: %s", err.Error())
+	}
+	cli := athena.NewFromConfig(cfg)
+
+	// Query Athena
+	startQueryExecutionOutput, err := cli.StartQueryExecution(ctx, startQueryExecutionInput)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: start query error: %s", err.Error())
+	}
+	err = waitForQueryToComplete(ctx, cli, startQueryExecutionOutput.QueryExecutionId)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: query execution error: %s", err.Error())
+	}
+	queryResultsInput := &athena.GetQueryResultsInput{
+		QueryExecutionId: startQueryExecutionOutput.QueryExecutionId,
+	}
+	getQueryResultsPaginator := athena.NewGetQueryResultsPaginator(cli, queryResultsInput)
+	for getQueryResultsPaginator.HasMorePages() {
+		pg, err := getQueryResultsPaginator.NextPage(ctx)
+		if err != nil {
+			// Return instead of continuing: a persistent NextPage failure
+			// previously looped forever since HasMorePages stays true.
+			return fmt.Errorf("QueryAthenaPaginated: NextPage error: %s", err.Error())
+		}
+		fn(pg)
+	}
+	return nil
+}
+
+// SavingsPlanData captures the most recent savings-plan-covered usage row
+// found in Athena for a single resource.
+type SavingsPlanData struct {
+	ResourceID     string  // line_item_resource_id from the CUR row
+	EffectiveCost  float64 // savings_plan_savings_plan_rate parsed as a float
+	SavingsPlanARN string  // savings_plan_savings_plan_a_r_n
+	MostRecentDate string  // line_item_usage_start_date of the row
+}
+
+// GetSavingsPlanDataFromAthena queries Athena for the most recent
+// savings-plan-covered usage rows and caches the effective hourly rate per
+// resource ID in aws.SavingsPlanDataByInstanceID. Errors are also recorded
+// on aws.RIPricingError.
+func (aws *AWS) GetSavingsPlanDataFromAthena() error {
+	cfg, err := aws.GetConfig()
+	if err != nil {
+		aws.RIPricingError = err
+		return err
+	}
+	if cfg.AthenaBucketName == "" {
+		err = fmt.Errorf("No Athena Bucket configured")
+		aws.RIPricingError = err
+		return err
+	}
+	if aws.SavingsPlanDataByInstanceID == nil {
+		aws.SavingsPlanDataByInstanceID = make(map[string]*SavingsPlanData)
+	}
+	tNow := time.Now()
+	tOneDayAgo := tNow.Add(time.Duration(-25) * time.Hour) // Also get files from one day ago to avoid boundary conditions
+	start := tOneDayAgo.Format("2006-01-02")
+	end := tNow.Format("2006-01-02")
+	// Use Savings Plan Effective Rate as an estimation for cost, assuming the 1h most recent period got a fully loaded savings plan.
+	//
+	q := `SELECT
+		line_item_usage_start_date,
+		savings_plan_savings_plan_a_r_n,
+		line_item_resource_id,
+		savings_plan_savings_plan_rate
+	FROM %s as cost_data
+	WHERE line_item_usage_start_date BETWEEN date '%s' AND date '%s'
+	AND line_item_line_item_type = 'SavingsPlanCoveredUsage' ORDER BY
+	line_item_usage_start_date DESC`
+
+	page := 0
+	// mostRecentDate persists across pages: previously each page reset both
+	// this marker and the result map, so multi-page results kept only the
+	// final page's data.
+	mostRecentDate := ""
+	processResults := func(op *athena.GetQueryResultsOutput) bool {
+		if op == nil {
+			log.Errorf("GetSavingsPlanDataFromAthena: Athena page is nil")
+			return false
+		} else if op.ResultSet == nil {
+			log.Errorf("GetSavingsPlanDataFromAthena: Athena page.ResultSet is nil")
+			return false
+		}
+		aws.SavingsPlanDataLock.Lock()
+		defer aws.SavingsPlanDataLock.Unlock()
+		if page == 0 {
+			// Clean out the old data and only report a savingsplan price if its in the most recent run.
+			aws.SavingsPlanDataByInstanceID = make(map[string]*SavingsPlanData)
+		}
+		iter := op.ResultSet.Rows
+		if page == 0 && len(iter) > 0 {
+			// The first page leads with a header row; skip it.
+			iter = iter[1:]
+		}
+		page++
+		for _, row := range iter {
+			d := *row.Data[0].VarCharValue
+			if mostRecentDate == "" {
+				mostRecentDate = d
+			} else if mostRecentDate != d { // Get all most recent assignments
+				break
+			}
+			cost, err := strconv.ParseFloat(*row.Data[3].VarCharValue, 64)
+			if err != nil {
+				log.Infof("Error converting `%s` from float ", *row.Data[3].VarCharValue)
+			}
+			entry := &SavingsPlanData{
+				ResourceID:     *row.Data[2].VarCharValue,
+				EffectiveCost:  cost,
+				SavingsPlanARN: *row.Data[1].VarCharValue,
+				MostRecentDate: d,
+			}
+			aws.SavingsPlanDataByInstanceID[entry.ResourceID] = entry
+		}
+		log.Debugf("Found %d savings plan applied instances", len(aws.SavingsPlanDataByInstanceID))
+		for k, r := range aws.SavingsPlanDataByInstanceID {
+			log.DedupedInfof(5, "Savings Plan Instance Data found for node %s : %f at time %s", k, r.EffectiveCost, r.MostRecentDate)
+		}
+		return true
+	}
+
+	query := fmt.Sprintf(q, cfg.AthenaTable, start, end)
+
+	log.Debugf("Running Query: %s", query)
+
+	err = aws.QueryAthenaPaginated(context.TODO(), query, processResults)
+	if err != nil {
+		aws.RIPricingError = err
+		return fmt.Errorf("Error fetching Savings Plan Data: %s", err)
+	}
+
+	return nil
+}
+
+// RIData captures the most recent reservation-covered usage row found in
+// Athena for a single resource.
+type RIData struct {
+	ResourceID     string  // line_item_resource_id from the CUR row
+	EffectiveCost  float64 // reservation_effective_cost parsed as a float
+	ReservationARN string  // reservation_reservation_a_r_n
+	MostRecentDate string  // line_item_usage_start_date of the row
+}
+
+// GetReservationDataFromAthena queries Athena for the most recent
+// reservation-covered usage rows and caches the effective hourly cost per
+// resource ID in aws.RIPricingByInstanceID. Errors are recorded on
+// aws.RIPricingError; RIPricingError is cleared on success.
+func (aws *AWS) GetReservationDataFromAthena() error {
+	cfg, err := aws.GetConfig()
+	if err != nil {
+		aws.RIPricingError = err
+		return err
+	}
+	if cfg.AthenaBucketName == "" {
+		err = fmt.Errorf("No Athena Bucket configured")
+		aws.RIPricingError = err
+		return err
+	}
+
+	// Query for all column names in advance in order to validate configured
+	// label columns. A fetch error is tolerated here: the presence check
+	// below fails closed when the column set is empty.
+	columns, _ := aws.fetchColumns()
+
+	if !columns["reservation_reservation_a_r_n"] || !columns["reservation_effective_cost"] {
+		err = fmt.Errorf("no reservation data available in Athena")
+		aws.RIPricingError = err
+		return err
+	}
+	if aws.RIPricingByInstanceID == nil {
+		aws.RIPricingByInstanceID = make(map[string]*RIData)
+	}
+	tNow := time.Now()
+	tOneDayAgo := tNow.Add(time.Duration(-25) * time.Hour) // Also get files from one day ago to avoid boundary conditions
+	start := tOneDayAgo.Format("2006-01-02")
+	end := tNow.Format("2006-01-02")
+	q := `SELECT
+		line_item_usage_start_date,
+		reservation_reservation_a_r_n,
+		line_item_resource_id,
+		reservation_effective_cost
+	FROM %s as cost_data
+	WHERE line_item_usage_start_date BETWEEN date '%s' AND date '%s'
+	AND reservation_reservation_a_r_n <> '' ORDER BY
+	line_item_usage_start_date DESC`
+
+	page := 0
+	// mostRecentDate persists across pages: previously each page reset both
+	// this marker and the result map, so multi-page results kept only the
+	// final page's data.
+	mostRecentDate := ""
+	processResults := func(op *athena.GetQueryResultsOutput) bool {
+		if op == nil {
+			log.Errorf("GetReservationDataFromAthena: Athena page is nil")
+			return false
+		} else if op.ResultSet == nil {
+			log.Errorf("GetReservationDataFromAthena: Athena page.ResultSet is nil")
+			return false
+		}
+		aws.RIDataLock.Lock()
+		defer aws.RIDataLock.Unlock()
+		if page == 0 {
+			// Clean out the old data and only report a RI price if its in the most recent run.
+			aws.RIPricingByInstanceID = make(map[string]*RIData)
+		}
+		iter := op.ResultSet.Rows
+		if page == 0 && len(iter) > 0 {
+			// The first page leads with a header row; skip it.
+			iter = iter[1:]
+		}
+		page++
+		for _, row := range iter {
+			d := *row.Data[0].VarCharValue
+			if mostRecentDate == "" {
+				mostRecentDate = d
+			} else if mostRecentDate != d { // Get all most recent assignments
+				break
+			}
+			cost, err := strconv.ParseFloat(*row.Data[3].VarCharValue, 64)
+			if err != nil {
+				log.Infof("Error converting `%s` from float ", *row.Data[3].VarCharValue)
+			}
+			entry := &RIData{
+				ResourceID:     *row.Data[2].VarCharValue,
+				EffectiveCost:  cost,
+				ReservationARN: *row.Data[1].VarCharValue,
+				MostRecentDate: d,
+			}
+			aws.RIPricingByInstanceID[entry.ResourceID] = entry
+		}
+		log.Debugf("Found %d reserved instances", len(aws.RIPricingByInstanceID))
+		for k, r := range aws.RIPricingByInstanceID {
+			log.DedupedInfof(5, "Reserved Instance Data found for node %s : %f at time %s", k, r.EffectiveCost, r.MostRecentDate)
+		}
+		return true
+	}
+
+	query := fmt.Sprintf(q, cfg.AthenaTable, start, end)
+
+	log.Debugf("Running Query: %s", query)
+
+	err = aws.QueryAthenaPaginated(context.TODO(), query, processResults)
+	if err != nil {
+		aws.RIPricingError = err
+		return fmt.Errorf("Error fetching Reserved Instance Data: %s", err)
+	}
+	aws.RIPricingError = nil
+	return nil
+}
+
+// fetchColumns returns the set of column names present in the configured
+// Athena table (or view), keyed by column name.
+func (aws *AWS) fetchColumns() (map[string]bool, error) {
+	columnSet := map[string]bool{}
+
+	awsAthenaInfo, err := aws.GetAWSAthenaInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	// This Query is supported by Athena tables and views
+	q := `SELECT column_name FROM information_schema.columns WHERE table_schema = '%s' AND table_name = '%s'`
+	query := fmt.Sprintf(q, awsAthenaInfo.AthenaDatabase, awsAthenaInfo.AthenaTable)
+	pageNum := 0
+	athenaErr := aws.QueryAthenaPaginated(context.TODO(), query, func(page *athena.GetQueryResultsOutput) bool {
+		if page == nil {
+			log.Errorf("fetchColumns: Athena page is nil")
+			return false
+		} else if page.ResultSet == nil {
+			log.Errorf("fetchColumns: Athena page.ResultSet is nil")
+			return false
+		}
+		rows := page.ResultSet.Rows
+		// The header row 'column_name' is only present on the first page;
+		// previously it was stripped from every page (dropping the first
+		// data row of later pages) and Rows[1:] panicked on an empty page.
+		if pageNum == 0 && len(rows) > 0 {
+			rows = rows[1:]
+		}
+		for _, row := range rows {
+			columnSet[*row.Data[0].VarCharValue] = true
+		}
+		pageNum++
+		return true
+	})
+
+	if athenaErr != nil {
+		return columnSet, athenaErr
+	}
+
+	if len(columnSet) == 0 {
+		log.Infof("No columns retrieved from Athena")
+	}
+
+	return columnSet, nil
+}
+
+// spotInfo is one row of the AWS spot instance data feed, decoded from the
+// tab-separated files downloaded in parseSpotData. Field names match the
+// feed's column headers via the csv tags.
+type spotInfo struct {
+	Timestamp   string `csv:"Timestamp"`
+	UsageType   string `csv:"UsageType"`
+	Operation   string `csv:"Operation"`
+	InstanceID  string `csv:"InstanceID"`
+	MyBidID     string `csv:"MyBidID"`
+	MyMaxPrice  string `csv:"MyMaxPrice"`
+	MarketPrice string `csv:"MarketPrice"`
+	Charge      string `csv:"Charge"`
+	Version     string `csv:"Version"`
+}
+
+// parseSpotData downloads today's and yesterday's spot data feed files from
+// the given S3 bucket (keys prefixed "<prefix>/<projectID>.<date>"), unzips
+// and TSV-decodes them, and returns the latest spotInfo per instance ID.
+// Permission failures are recorded in aws.ServiceAccountChecks.
+func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, region string) (map[string]*spotInfo, error) {
+
+	aws.ConfigureAuth() // configure aws api authentication by setting env vars
+
+	s3Prefix := projectID
+	if len(prefix) != 0 {
+		s3Prefix = prefix + "/" + s3Prefix
+	}
+
+	aak, err := aws.GetAWSAccessKey()
+	if err != nil {
+		return nil, err
+	}
+
+	cfg, err := aak.CreateConfig(region)
+	if err != nil {
+		return nil, err
+	}
+	cli := s3.NewFromConfig(cfg)
+	downloader := manager.NewDownloader(cli)
+
+	tNow := time.Now()
+	tOneDayAgo := tNow.Add(time.Duration(-24) * time.Hour) // Also get files from one day ago to avoid boundary conditions
+	// Two listings: one for yesterday's feed files, one for today's.
+	ls := &s3.ListObjectsInput{
+		Bucket: awsSDK.String(bucket),
+		Prefix: awsSDK.String(s3Prefix + "." + tOneDayAgo.Format("2006-01-02")),
+	}
+	ls2 := &s3.ListObjectsInput{
+		Bucket: awsSDK.String(bucket),
+		Prefix: awsSDK.String(s3Prefix + "." + tNow.Format("2006-01-02")),
+	}
+	lso, err := cli.ListObjects(context.TODO(), ls)
+	if err != nil {
+		aws.ServiceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
+			Message:        "Bucket List Permissions Available",
+			Status:         false,
+			AdditionalInfo: err.Error(),
+		})
+		return nil, err
+	} else {
+		aws.ServiceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
+			Message: "Bucket List Permissions Available",
+			Status:  true,
+		})
+	}
+	lsoLen := len(lso.Contents)
+	log.Debugf("Found %d spot data files from yesterday", lsoLen)
+	if lsoLen == 0 {
+		log.Debugf("ListObjects \"s3://%s/%s\" produced no keys", *ls.Bucket, *ls.Prefix)
+	}
+	lso2, err := cli.ListObjects(context.TODO(), ls2)
+	if err != nil {
+		return nil, err
+	}
+	lso2Len := len(lso2.Contents)
+	log.Debugf("Found %d spot data files from today", lso2Len)
+	if lso2Len == 0 {
+		log.Debugf("ListObjects \"s3://%s/%s\" produced no keys", *ls2.Bucket, *ls2.Prefix)
+	}
+
+	// TODO: Worth it to use LastModifiedDate to determine if we should reparse the spot data?
+	var keys []*string
+	for _, obj := range lso.Contents {
+		keys = append(keys, obj.Key)
+	}
+	for _, obj := range lso2.Contents {
+		keys = append(keys, obj.Key)
+	}
+
+	// Derive the expected column set from the spotInfo csv tags.
+	header, err := csvutil.Header(spotInfo{}, "csv")
+	if err != nil {
+		return nil, err
+	}
+	fieldsPerRecord := len(header)
+
+	spots := make(map[string]*spotInfo)
+	for _, key := range keys {
+		getObj := &s3.GetObjectInput{
+			Bucket: awsSDK.String(bucket),
+			Key:    key,
+		}
+
+		// Download the whole (gzipped) feed file into memory.
+		buf := manager.NewWriteAtBuffer([]byte{})
+		_, err := downloader.Download(context.TODO(), buf, getObj)
+		if err != nil {
+			aws.ServiceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
+				Message:        "Object Get Permissions Available",
+				Status:         false,
+				AdditionalInfo: err.Error(),
+			})
+			return nil, err
+		} else {
+			aws.ServiceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
+				Message: "Object Get Permissions Available",
+				Status:  true,
+			})
+		}
+
+		r := bytes.NewReader(buf.Bytes())
+
+		gr, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+
+		// The feed is tab-separated despite the csv machinery.
+		csvReader := csv.NewReader(gr)
+		csvReader.Comma = '\t'
+		csvReader.FieldsPerRecord = fieldsPerRecord
+
+		dec, err := csvutil.NewDecoder(csvReader, header...)
+		if err != nil {
+			return nil, err
+		}
+
+		var foundVersion string
+		for {
+			spot := spotInfo{}
+			err := dec.Decode(&spot)
+			csvParseErr, isCsvParseErr := err.(*csv.ParseError)
+			if err == io.EOF {
+				break
+			} else if err == csvutil.ErrFieldCount || (isCsvParseErr && csvParseErr.Err == csv.ErrFieldCount) {
+				rec := dec.Record()
+				// the first two "Record()" will be the comment lines
+				// and they show up as len() == 1
+				// the first of which is "#Version"
+				// the second of which is "#Fields: "
+				if len(rec) != 1 {
+					log.Infof("Expected %d spot info fields but received %d: %s", fieldsPerRecord, len(rec), rec)
+					continue
+				}
+				if len(foundVersion) == 0 {
+					// First short line must be the "#Version" comment; bail
+					// out of this file entirely on an unsupported version.
+					spotFeedVersion := rec[0]
+					log.Debugf("Spot feed version is \"%s\"", spotFeedVersion)
+					matches := versionRx.FindStringSubmatch(spotFeedVersion)
+					if matches != nil {
+						foundVersion = matches[1]
+						if foundVersion != supportedSpotFeedVersion {
+							log.Infof("Unsupported spot info feed version: wanted \"%s\" got \"%s\"", supportedSpotFeedVersion, foundVersion)
+							break
+						}
+					}
+					continue
+				} else if strings.Index(rec[0], "#") == 0 {
+					continue
+				} else {
+					log.Infof("skipping non-TSV line: %s", rec)
+					continue
+				}
+			} else if err != nil {
+				log.Warnf("Error during spot info decode: %+v", err)
+				continue
+			}
+
+			// Later files/rows overwrite earlier ones for the same instance.
+			log.DedupedInfof(5, "Found spot info for: %s", spot.InstanceID)
+			spots[spot.InstanceID] = &spot
+		}
+		gr.Close()
+	}
+	return spots, nil
+}
+
+// ApplyReservedInstancePricing is intentionally a no-op for AWS (original
+// author left it marked TODO).
+// NOTE(review): RI data is gathered by GetReservationDataFromAthena — confirm
+// where (or whether) it is meant to be applied to the given nodes.
+func (aws *AWS) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
+
+}
+
+// ServiceAccountStatus reports the accumulated results of the service
+// account permission checks recorded on aws.ServiceAccountChecks.
+func (aws *AWS) ServiceAccountStatus() *models.ServiceAccountStatus {
+	return aws.ServiceAccountChecks.GetStatus()
+}
+
+// CombinedDiscountForNode combines the default and negotiated discounts
+// multiplicatively into a single effective discount rate. The instance type
+// and preemptibility are accepted for interface compatibility but do not
+// affect the result.
+func (aws *AWS) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
+	remaining := (1.0 - defaultDiscount) * (1.0 - negotiatedDiscount)
+	return 1.0 - remaining
+}
+
+// Regions returns the list of AWS regions to operate over, preferring a
+// configured override list when one is present and falling back to the
+// predefined awsRegions set otherwise.
+func (aws *AWS) Regions() []string {
+	if overrides := env.GetRegionOverrideList(); len(overrides) > 0 {
+		log.Debugf("Overriding AWS regions with configured region list: %+v", overrides)
+		return overrides
+	}
+	return awsRegions
+}
+
+// PricingSourceSummary returns the pricing source summary for the provider.
+// The summary represents what was _parsed_ from the pricing source, not
+// everything that was _available_ in the pricing source.
+func (aws *AWS) PricingSourceSummary() interface{} {
+	// Returns the parsed pricing map directly; callers are responsible for
+	// any serialization.
+	return aws.Pricing
+}

+ 496 - 0
pkg/cloud/aws/provider_test.go

@@ -0,0 +1,496 @@
+package aws
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+)
+
+// Test_awsKey_getUsageType verifies that awsKey.getUsageType recognizes the
+// EKS and Karpenter capacity-type labels and maps their "spot" values to
+// PreemptibleType, returning "" for missing or unrecognized values.
+func Test_awsKey_getUsageType(t *testing.T) {
+	type fields struct {
+		Labels     map[string]string
+		ProviderID string
+	}
+	type args struct {
+		labels map[string]string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   string
+	}{
+		{
+			// no labels: usage type cannot be determined, expect ""
+			name: "Label does not have the capacityType label associated with it",
+			args: args{
+				labels: map[string]string{},
+			},
+			want: "",
+		},
+		{
+			name: "EKS label with a capacityType set to empty string should return empty string",
+			args: args{
+				labels: map[string]string{
+					EKSCapacityTypeLabel: "",
+				},
+			},
+			want: "",
+		},
+		{
+			name: "EKS label with capacityType set to a random value should return empty string",
+			args: args{
+				labels: map[string]string{
+					EKSCapacityTypeLabel: "TEST_ME",
+				},
+			},
+			want: "",
+		},
+		{
+			name: "EKS label with capacityType set to spot should return spot",
+			args: args{
+				labels: map[string]string{
+					EKSCapacityTypeLabel: EKSCapacitySpotTypeValue,
+				},
+			},
+			want: PreemptibleType,
+		},
+		{
+			name: "Karpenter label with a capacityType set to empty string should return empty string",
+			args: args{
+				labels: map[string]string{
+					models.KarpenterCapacityTypeLabel: "",
+				},
+			},
+			want: "",
+		},
+		{
+			name: "Karpenter label with capacityType set to a random value should return empty string",
+			args: args{
+				labels: map[string]string{
+					models.KarpenterCapacityTypeLabel: "TEST_ME",
+				},
+			},
+			want: "",
+		},
+		{
+			name: "Karpenter label with capacityType set to spot should return spot",
+			args: args{
+				labels: map[string]string{
+					models.KarpenterCapacityTypeLabel: models.KarpenterCapacitySpotTypeValue,
+				},
+			},
+			want: PreemptibleType,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			k := &awsKey{
+				Labels:     tt.fields.Labels,
+				ProviderID: tt.fields.ProviderID,
+			}
+			if got := k.getUsageType(tt.args.labels); got != tt.want {
+				t.Errorf("getUsageType() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_populate_pricing
+//
+// Objective: To test core pricing population logic for AWS
+//
+//	Case 0: US endpoints
+//	 Take a portion of json returned from ondemand terms in us endpoints
+//	 load the request into the http response and give it to the function
+//	 inspect the resulting aws object after the function returns and validate fields
+//	Case 1: Chinese endpoints
+//	 Same as above US test case, except using CN PV offer codes
+//	 Validate populated fields in AWS object
+func Test_populate_pricing(t *testing.T) {
+	awsTest := AWS{
+		ValidPricingKeys: map[string]bool{},
+	}
+	inputkeys := map[string]bool{
+		"us-east-2,m5.large,linux": true,
+	}
+	// Case 0
+	awsUSEastString := `
+	{
+		"formatVersion" : "v1.0",
+		"disclaimer" : "This pricing list is for informational purposes only. All prices are subject to the additional terms included in the pricing pages on http://aws.amazon.com. All Free Tier prices are also subject to the terms included at https://aws.amazon.com/free/",
+		"offerCode" : "AmazonEC2",
+		"version" : "20230322145651",
+		"publicationDate" : "2023-03-22T14:56:51Z",
+		"products" : {
+			"8D49XP354UEYTHGM" : {
+				"sku" : "8D49XP354UEYTHGM",
+				"productFamily" : "Compute Instance",
+				"attributes" : {
+				  "servicecode" : "AmazonEC2",
+				  "location" : "US East (Ohio)",
+				  "locationType" : "AWS Region",
+				  "instanceType" : "m5.large",
+				  "currentGeneration" : "Yes",
+				  "instanceFamily" : "General purpose",
+				  "vcpu" : "2",
+				  "physicalProcessor" : "Intel Xeon Platinum 8175",
+				  "clockSpeed" : "3.1 GHz",
+				  "memory" : "8 GiB",
+				  "storage" : "EBS only",
+				  "networkPerformance" : "Up to 10 Gigabit",
+				  "processorArchitecture" : "64-bit",
+				  "tenancy" : "Shared",
+				  "operatingSystem" : "Linux",
+				  "licenseModel" : "No License required",
+				  "usagetype" : "USE2-BoxUsage:m5.large",
+				  "operation" : "RunInstances",
+				  "availabilityzone" : "NA",
+				  "capacitystatus" : "Used",
+				  "classicnetworkingsupport" : "false",
+				  "dedicatedEbsThroughput" : "Up to 2120 Mbps",
+				  "ecu" : "10",
+				  "enhancedNetworkingSupported" : "Yes",
+				  "gpuMemory" : "NA",
+				  "intelAvxAvailable" : "Yes",
+				  "intelAvx2Available" : "Yes",
+				  "intelTurboAvailable" : "Yes",
+				  "marketoption" : "OnDemand",
+				  "normalizationSizeFactor" : "4",
+				  "preInstalledSw" : "NA",
+				  "processorFeatures" : "Intel AVX; Intel AVX2; Intel AVX512; Intel Turbo",
+				  "regionCode" : "us-east-2",
+				  "servicename" : "Amazon Elastic Compute Cloud",
+				  "vpcnetworkingsupport" : "true"
+				}
+			},
+			"9ZEEN7WWWQKAG292" : {
+				"sku" : "9ZEEN7WWWQKAG292",
+				"productFamily" : "Compute Instance",
+				"attributes" : {
+				  "servicecode" : "AmazonEC2",
+				  "location" : "US East (Ohio)",
+				  "locationType" : "AWS Region",
+				  "instanceType" : "p3.8xlarge",
+				  "currentGeneration" : "Yes",
+				  "instanceFamily" : "GPU instance",
+				  "vcpu" : "32",
+				  "physicalProcessor" : "Intel Xeon E5-2686 v4 (Broadwell)",
+				  "clockSpeed" : "2.3 GHz",
+				  "memory" : "244 GiB",
+				  "storage" : "EBS only",
+				  "networkPerformance" : "10 Gigabit",
+				  "processorArchitecture" : "64-bit",
+				  "tenancy" : "Shared",
+				  "operatingSystem" : "Windows",
+				  "licenseModel" : "Bring your own license",
+				  "usagetype" : "USE2-BoxUsage:p3.8xlarge",
+				  "operation" : "RunInstances:0800",
+				  "availabilityzone" : "NA",
+				  "capacitystatus" : "Used",
+				  "classicnetworkingsupport" : "false",
+				  "dedicatedEbsThroughput" : "7000 Mbps",
+				  "ecu" : "97",
+				  "enhancedNetworkingSupported" : "Yes",
+				  "gpu" : "4",
+				  "gpuMemory" : "NA",
+				  "intelAvxAvailable" : "Yes",
+				  "intelAvx2Available" : "Yes",
+				  "intelTurboAvailable" : "Yes",
+				  "marketoption" : "OnDemand",
+				  "normalizationSizeFactor" : "64",
+				  "preInstalledSw" : "NA",
+				  "processorFeatures" : "Intel AVX; Intel AVX2; Intel Turbo",
+				  "regionCode" : "us-east-2",
+				  "servicename" : "Amazon Elastic Compute Cloud",
+				  "vpcnetworkingsupport" : "true"
+				}
+			},
+			"M6UGCCQ3CDJQAA37" : {
+				"sku" : "M6UGCCQ3CDJQAA37",
+				"productFamily" : "Storage",
+				"attributes" : {
+				  "servicecode" : "AmazonEC2",
+				  "location" : "US East (Ohio)",
+				  "locationType" : "AWS Region",
+				  "storageMedia" : "SSD-backed",
+				  "volumeType" : "General Purpose",
+				  "maxVolumeSize" : "16 TiB",
+				  "maxIopsvolume" : "16000",
+				  "maxThroughputvolume" : "1000 MiB/s",
+				  "usagetype" : "USE2-EBS:VolumeUsage.gp3",
+				  "operation" : "",
+				  "regionCode" : "us-east-2",
+				  "servicename" : "Amazon Elastic Compute Cloud",
+				  "volumeApiName" : "gp3"
+				}
+			  }
+		},
+		"terms" : {
+			"OnDemand" : {
+				"M6UGCCQ3CDJQAA37" : {
+					"M6UGCCQ3CDJQAA37.JRTCKXETXF" : {
+					  "offerTermCode" : "JRTCKXETXF",
+					  "sku" : "M6UGCCQ3CDJQAA37",
+					  "effectiveDate" : "2023-03-01T00:00:00Z",
+					  "priceDimensions" : {
+						"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7" : {
+						  "rateCode" : "M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7",
+						  "description" : "$0.08 per GB-month of General Purpose (gp3) provisioned storage - US East (Ohio)",
+						  "beginRange" : "0",
+						  "endRange" : "Inf",
+						  "unit" : "GB-Mo",
+						  "pricePerUnit" : {
+							"USD" : "0.0800000000"
+						  },
+						  "appliesTo" : [ ]
+						}
+					  },
+					  "termAttributes" : { }
+					}
+				},
+				"9ZEEN7WWWQKAG292" : {
+					"9ZEEN7WWWQKAG292.JRTCKXETXF" : {
+					  "offerTermCode" : "JRTCKXETXF",
+					  "sku" : "9ZEEN7WWWQKAG292",
+					  "effectiveDate" : "2023-03-01T00:00:00Z",
+					  "priceDimensions" : {
+						"9ZEEN7WWWQKAG292.JRTCKXETXF.6YS6EN2CT7" : {
+						  "rateCode" : "9ZEEN7WWWQKAG292.JRTCKXETXF.6YS6EN2CT7",
+						  "description" : "$12.24 per On Demand Windows BYOL p3.8xlarge Instance Hour",
+						  "beginRange" : "0",
+						  "endRange" : "Inf",
+						  "unit" : "Hrs",
+						  "pricePerUnit" : {
+							"USD" : "12.2400000000"
+						  },
+						  "appliesTo" : [ ]
+						}
+					  },
+					  "termAttributes" : { }
+					}
+				},
+				"8D49XP354UEYTHGM" : {
+					"8D49XP354UEYTHGM.MZU6U2429S" : {
+					  "offerTermCode" : "MZU6U2429S",
+					  "sku" : "8D49XP354UEYTHGM",
+					  "effectiveDate" : "2019-01-01T00:00:00Z",
+					  "priceDimensions" : {
+						"8D49XP354UEYTHGM.MZU6U2429S.2TG2D8R56U" : {
+						  "rateCode" : "8D49XP354UEYTHGM.MZU6U2429S.2TG2D8R56U",
+						  "description" : "Upfront Fee",
+						  "unit" : "Quantity",
+						  "pricePerUnit" : {
+							"USD" : "1161"
+						  },
+						  "appliesTo" : [ ]
+						},
+					  },
+					  "termAttributes" : {
+						"LeaseContractLength" : "3yr",
+						"OfferingClass" : "convertible",
+						"PurchaseOption" : "All Upfront"
+					  }
+					}
+				}
+			}
+		},
+		"attributesList" : { }
+	}
+	`
+
+	testResponse := http.Response{
+		Body: ioutil.NopCloser(bytes.NewBufferString(awsUSEastString)),
+		Request: &http.Request{
+			URL: &url.URL{
+				Scheme: "https",
+				Host:   "test-aws-http-endpoint:443",
+			},
+		},
+	}
+
+	awsTest.populatePricing(&testResponse, inputkeys)
+
+	expectedProdTermsDisk := &AWSProductTerms{
+		Sku:     "M6UGCCQ3CDJQAA37",
+		Memory:  "",
+		Storage: "",
+		VCpu:    "",
+		GPU:     "",
+		OnDemand: &AWSOfferTerm{
+			Sku:           "M6UGCCQ3CDJQAA37",
+			OfferTermCode: "JRTCKXETXF",
+			PriceDimensions: map[string]*AWSRateCode{
+				"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7": {
+					Unit: "GB-Mo",
+					PricePerUnit: AWSCurrencyCode{
+						USD: "0.0800000000",
+						CNY: "",
+					},
+				},
+			},
+		},
+		PV: &models.PV{
+			Cost:       "0.00010958904109589041",
+			CostPerIO:  "",
+			Class:      "gp3",
+			Size:       "",
+			Region:     "us-east-2",
+			ProviderID: "",
+		},
+	}
+
+	expectedProdTermsInstanceOndemand := &AWSProductTerms{
+		Sku:     "8D49XP354UEYTHGM",
+		Memory:  "8 GiB",
+		Storage: "EBS only",
+		VCpu:    "2",
+		GPU:     "",
+		OnDemand: &AWSOfferTerm{
+			Sku:             "",
+			OfferTermCode:   "",
+			PriceDimensions: nil,
+		},
+	}
+
+	expectedProdTermsInstanceSpot := &AWSProductTerms{
+		Sku:     "8D49XP354UEYTHGM",
+		Memory:  "8 GiB",
+		Storage: "EBS only",
+		VCpu:    "2",
+		GPU:     "",
+		OnDemand: &AWSOfferTerm{
+			Sku:             "",
+			OfferTermCode:   "",
+			PriceDimensions: nil,
+		},
+	}
+
+	expectedPricing := map[string]*AWSProductTerms{
+		"us-east-2,EBS:VolumeUsage.gp3":             expectedProdTermsDisk,
+		"us-east-2,EBS:VolumeUsage.gp3,preemptible": expectedProdTermsDisk,
+		"us-east-2,m5.large,linux":                  expectedProdTermsInstanceOndemand,
+		"us-east-2,m5.large,linux,preemptible":      expectedProdTermsInstanceSpot,
+	}
+
+	if !reflect.DeepEqual(expectedPricing, awsTest.Pricing) {
+		t.Fatalf("expected parsed pricing did not match actual parsed result (us-east-1)")
+	}
+
+	// Case 1
+	awsCnString := `
+	{
+		"formatVersion" : "v1.0",
+		"disclaimer" : "This pricing list is for informational purposes only. All prices are subject to the additional terms included in the pricing pages on http://www.amazonaws.cn.",
+		"offerCode" : "AmazonEC2",
+		"version" : "20230314154740",
+		"publicationDate" : "2023-03-14T15:47:40Z",
+		"products" : {
+			"R83VXG9NAPDASEGN" : {
+				"sku" : "R83VXG9NAPDASEGN",
+				"productFamily" : "Storage",
+				"attributes" : {
+				  "servicecode" : "AmazonEC2",
+				  "location" : "China (Ningxia)",
+				  "locationType" : "AWS Region",
+				  "storageMedia" : "SSD-backed",
+				  "volumeType" : "General Purpose",
+				  "maxVolumeSize" : "16 TiB",
+				  "maxIopsvolume" : "16000",
+				  "maxThroughputvolume" : "1000 MiB/s",
+				  "usagetype" : "CNW1-EBS:VolumeUsage.gp3",
+				  "operation" : "",
+				  "regionCode" : "cn-northwest-1",
+				  "servicename" : "Amazon Elastic Compute Cloud",
+				  "volumeApiName" : "gp3"
+				}
+			}
+		},
+		"terms" : {
+			"OnDemand" : {
+			  "R83VXG9NAPDASEGN" : {
+				"R83VXG9NAPDASEGN.5Y9WH78GDR" : {
+				  "offerTermCode" : "5Y9WH78GDR",
+				  "sku" : "R83VXG9NAPDASEGN",
+				  "effectiveDate" : "2023-03-01T00:00:00Z",
+				  "priceDimensions" : {
+					"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6" : {
+					  "rateCode" : "R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6",
+					  "description" : "0.5312 CNY per GB-month of General Purpose (gp3) provisioned storage - China (Ningxia)",
+					  "beginRange" : "0",
+					  "endRange" : "Inf",
+					  "unit" : "GB-Mo",
+					  "pricePerUnit" : {
+						"CNY" : "0.5312000000"
+					  },
+					  "appliesTo" : [ ]
+					}
+				  },
+				  "termAttributes" : { }
+				}
+			  }
+			}
+	    },
+	  "attributesList" : { }
+	}
+	`
+	awsTest = AWS{
+		ValidPricingKeys: map[string]bool{},
+	}
+
+	testResponse = http.Response{
+		Body: ioutil.NopCloser(bytes.NewBufferString(awsCnString)),
+		Request: &http.Request{
+			URL: &url.URL{
+				Scheme: "https",
+				Host:   "test-aws-http-endpoint:443",
+			},
+		},
+	}
+
+	awsTest.populatePricing(&testResponse, inputkeys)
+
+	expectedProdTermsDisk = &AWSProductTerms{
+		Sku:     "R83VXG9NAPDASEGN",
+		Memory:  "",
+		Storage: "",
+		VCpu:    "",
+		GPU:     "",
+		OnDemand: &AWSOfferTerm{
+			Sku:           "R83VXG9NAPDASEGN",
+			OfferTermCode: "5Y9WH78GDR",
+			PriceDimensions: map[string]*AWSRateCode{
+				"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6": {
+					Unit: "GB-Mo",
+					PricePerUnit: AWSCurrencyCode{
+						USD: "",
+						CNY: "0.5312000000",
+					},
+				},
+			},
+		},
+		PV: &models.PV{
+			Cost:       "0.0007276712328767123",
+			CostPerIO:  "",
+			Class:      "gp3",
+			Size:       "",
+			Region:     "cn-northwest-1",
+			ProviderID: "",
+		},
+	}
+
+	expectedPricing = map[string]*AWSProductTerms{
+		"cn-northwest-1,EBS:VolumeUsage.gp3":             expectedProdTermsDisk,
+		"cn-northwest-1,EBS:VolumeUsage.gp3,preemptible": expectedProdTermsDisk,
+	}
+
+	if !reflect.DeepEqual(expectedPricing, awsTest.Pricing) {
+		t.Fatalf("expected parsed pricing did not match actual parsed result (cn)")
+	}
+
+}

+ 134 - 0
pkg/cloud/aws/s3configuration.go

@@ -0,0 +1,134 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// S3Configuration identifies an S3 bucket holding Cost and Usage Report data
// and carries the Authorizer used to create AWS credentials for reaching it.
type S3Configuration struct {
	Bucket     string     `json:"bucket"`  // S3 bucket name
	Region     string     `json:"region"`  // AWS region of the bucket
	Account    string     `json:"account"` // AWS account that owns the bucket
	Authorizer Authorizer `json:"authorizer"`
}
+
+func (s3c *S3Configuration) Validate() error {
+	// Validate Authorizer
+	if s3c.Authorizer == nil {
+		return fmt.Errorf("S3Configuration: missing Authorizer")
+	}
+
+	err := s3c.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("S3Configuration: %s", err)
+	}
+
+	// Validate base properties
+	if s3c.Bucket == "" {
+		return fmt.Errorf("S3Configuration: missing bucket")
+	}
+
+	if s3c.Region == "" {
+		return fmt.Errorf("S3Configuration: missing region")
+	}
+
+	if s3c.Account == "" {
+		return fmt.Errorf("S3Configuration: missing account")
+	}
+
+	return nil
+}
+
+func (s3c *S3Configuration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*S3Configuration)
+	if !ok {
+		return false
+	}
+
+	if s3c.Authorizer != nil {
+		if !s3c.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if s3c.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if s3c.Region != thatConfig.Region {
+		return false
+	}
+
+	if s3c.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (s3c *S3Configuration) Sanitize() config.Config {
+	return &S3Configuration{
+		Bucket:     s3c.Bucket,
+		Region:     s3c.Region,
+		Account:    s3c.Account,
+		Authorizer: s3c.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (s3c *S3Configuration) Key() string {
+	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
+}
+
// UnmarshalJSON decodes an S3Configuration from JSON. A custom unmarshaller
// is required because Authorizer is an interface: the concrete Authorizer
// type is selected from the serialized payload via SelectAuthorizerByType.
func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
	var f interface{}
	err := json.Unmarshal(b, &f)
	if err != nil {
		return err
	}

	// NOTE(review): assumes the top-level JSON value is an object; a
	// non-object input would panic on this assertion — confirm callers
	// only pass objects.
	fmap := f.(map[string]interface{})

	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Bucket = bucket

	region, err := config.GetInterfaceValue[string](fmap, "region")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Region = region

	account, err := config.GetInterfaceValue[string](fmap, "account")
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Account = account

	// the authorizer key must exist; its concrete type is resolved by the
	// registered selector
	authAny, ok := fmap["authorizer"]
	if !ok {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
	}
	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
	if err != nil {
		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
	}
	s3c.Authorizer = authorizer

	return nil
}
+
// CreateAWSConfig builds an aws.Config for this configuration's region using
// its Authorizer. NOTE(review): panics if Authorizer is nil — callers appear
// to rely on Validate having been called first; confirm.
func (s3c *S3Configuration) CreateAWSConfig() (aws.Config, error) {
	return s3c.Authorizer.CreateAWSConfig(s3c.Region)
}

+ 40 - 0
pkg/cloud/aws/s3connection.go

@@ -0,0 +1,40 @@
+package aws
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// S3Connection wraps an S3Configuration with client-creation and
// object-listing behavior.
type S3Connection struct {
	S3Configuration
}
+
+func (s3c *S3Connection) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3Connection)
+	if !ok {
+		return false
+	}
+
+	return s3c.S3Configuration.Equals(&thatConfig.S3Configuration)
+}
+
+func (s3c *S3Connection) GetS3Client() (*s3.Client, error) {
+	cfg, err := s3c.CreateAWSConfig()
+	if err != nil {
+		return nil, err
+	}
+	return s3.NewFromConfig(cfg), nil
+}
+
+func (s3c *S3Connection) ListObjects(cli *s3.Client) (*s3.ListObjectsOutput, error) {
+	objs, err := cli.ListObjects(context.TODO(), &s3.ListObjectsInput{
+		Bucket: aws.String(s3c.Bucket),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return objs, err
+}

+ 387 - 0
pkg/cloud/aws/s3connection_test.go

@@ -0,0 +1,387 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestS3Configuration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   S3Configuration
+		expected error
+	}{
+		"valid config access key": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("S3Configuration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("S3Configuration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: S3Configuration{
+				Bucket:     "",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing bucket"),
+		},
+		"missing region": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing region"),
+		},
+		"missing account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     S3Configuration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket2",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config S3Configuration
+	}{
+		"Empty Config": {
+			config: S3Configuration{},
+		},
+		"AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &S3Configuration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 181 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -0,0 +1,181 @@
+package aws
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
// S3SelectQuerier runs S3 Select SQL queries against gzip-compressed CSV
// objects (Cost and Usage Report files) reachable through an S3Connection.
type S3SelectQuerier struct {
	S3Connection
}
+
+func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3SelectQuerier)
+	if !ok {
+		return false
+	}
+
+	return s3sq.S3Connection.Equals(&thatConfig.S3Connection)
+}
+
+func (s3sq *S3SelectQuerier) Query(query string, queryKeys []string, cli *s3.Client, fn func(*csv.Reader) error) error {
+	for _, queryKey := range queryKeys {
+		reader, err2 := s3sq.fetchCSVReader(query, queryKey, cli, s3Types.FileHeaderInfoUse)
+		if err2 != nil {
+			return err2
+		}
+		err2 = fn(reader)
+		if err2 != nil {
+			return err2
+		}
+	}
+
+	return nil
+}
+
+// GetQueryKeys returns a list of s3 object names, where the there are 1 object for each month within the range between
+// start and end
+func (s3sq *S3SelectQuerier) GetQueryKeys(start, end time.Time, client *s3.Client) ([]string, error) {
+	objs, err := s3sq.ListObjects(client)
+	if err != nil {
+		return nil, err
+	}
+
+	monthStrings, err := getMonthStrings(start, end)
+	if err != err {
+		return nil, err
+	}
+
+	var queryKeys []string
+	// Find all matching "csv.gz" files per monthString
+	for _, monthStr := range monthStrings {
+		for _, obj := range objs.Contents {
+			if strings.Contains(*obj.Key, monthStr) && strings.HasSuffix(*obj.Key, ".csv.gz") {
+				queryKeys = append(queryKeys, *obj.Key)
+			}
+		}
+	}
+
+	if len(queryKeys) == 0 {
+		return nil, fmt.Errorf("no CUR files for given time range")
+	}
+
+	return queryKeys, nil
+}
+
+func (s3sq *S3SelectQuerier) fetchCSVReader(query string, queryKey string, client *s3.Client, fileHeaderInfo s3Types.FileHeaderInfo) (*csv.Reader, error) {
+	input := &s3.SelectObjectContentInput{
+		Bucket:         aws.String(s3sq.Bucket),
+		Key:            aws.String(queryKey),
+		Expression:     aws.String(query),
+		ExpressionType: s3Types.ExpressionTypeSql,
+		InputSerialization: &s3Types.InputSerialization{
+			CompressionType: s3Types.CompressionTypeGzip,
+			CSV: &s3Types.CSVInput{
+				FileHeaderInfo: fileHeaderInfo,
+			},
+		},
+		OutputSerialization: &s3Types.OutputSerialization{
+			CSV: &s3Types.CSVOutput{},
+		},
+	}
+
+	res, err := client.SelectObjectContent(context.TODO(), input)
+	if err != nil {
+		return nil, err
+	}
+	resStream := res.GetStream()
+	// todo: this needs work
+	results, resultWriter := io.Pipe()
+	go func() {
+		defer resultWriter.Close()
+		defer resStream.Close()
+		resStream.Events()
+		for event := range resStream.Events() {
+			switch e := event.(type) {
+			case *s3Types.SelectObjectContentEventStreamMemberRecords:
+				resultWriter.Write(e.Value.Payload)
+			case *s3Types.SelectObjectContentEventStreamMemberEnd:
+				break
+			}
+
+		}
+	}()
+
+	if err := resStream.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read from SelectObjectContent EventStream, %v", err)
+	}
+
+	return csv.NewReader(results), nil
+}
+
+func getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	dateTemplate := "%d%02d01-%d%02d01/"
+	// set to first of the month
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	nextMonth := currMonth.AddDate(0, 1, 0)
+	monthStr := fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+
+	// Create string for end condition
+	endMonth := end.AddDate(0, 0, -end.Day()+1)
+	endNextMonth := endMonth.AddDate(0, 1, 0)
+	endStr := fmt.Sprintf(dateTemplate, endMonth.Year(), int(endMonth.Month()), endNextMonth.Year(), int(endNextMonth.Month()))
+
+	var monthStrs []string
+	monthStrs = append(monthStrs, monthStr)
+
+	for monthStr != endStr {
+		currMonth = nextMonth
+		nextMonth = nextMonth.AddDate(0, 1, 0)
+		monthStr = fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+// GetCSVRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetCSVRowValue(row []string, queryColumnIndexes map[string]int, columnName string) string {
+	if row == nil {
+		return ""
+	}
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	return stringutil.Bank(row[columnIndex])
+}
+
// GetCSVRowValueFloat retrieves the value of the named column from a CSV row
// and converts it to a float64. It returns an error when the row is nil, the
// column is unknown, the resolved index is out of range (previously a
// panic), or the value does not parse as a float.
func GetCSVRowValueFloat(row []string, queryColumnIndexes map[string]int, columnName string) (float64, error) {
	if row == nil {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: nil row")
	}
	columnIndex, ok := queryColumnIndexes[columnName]
	if !ok {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: missing column index: %s", columnName)
	}
	if columnIndex < 0 || columnIndex >= len(row) {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: column index %d out of range for row of length %d", columnIndex, len(row))
	}
	cost, err := strconv.ParseFloat(row[columnIndex], 64)
	if err != nil {
		return cost, fmt.Errorf("getCSVRowValueFloat: failed to parse %s: '%s': %s", columnName, row[columnIndex], err.Error())
	}
	return cost, nil
}

+ 80 - 0
pkg/cloud/azure/authorizer.go

@@ -0,0 +1,80 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// AccessKeyAuthorizerType identifies the AccessKey Authorizer in serialized
// configurations.
const AccessKeyAuthorizerType = "AzureAccessKey"

// Authorizer extends the generic config.Authorizer with the ability to
// produce Azure blob-storage credentials.
type Authorizer interface {
	config.Authorizer
	GetBlobCredentials() (azblob.Credential, error)
}

// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
	switch typeStr {
	case AccessKeyAuthorizerType:
		return &AccessKey{}, nil
	default:
		return nil, fmt.Errorf("azure: provider authorizer type '%s' is not valid", typeStr)
	}
}
+
// AccessKey authorizes Azure storage access with a storage account name and
// a shared access key.
type AccessKey struct {
	AccessKey string `json:"accessKey"`
	Account   string `json:"account"`
}
+
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKey"] = ak.AccessKey
+	fmap["account"] = ak.Account
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKey == "" {
+		return fmt.Errorf("AccessKey: missing access key")
+	}
+	if ak.Account == "" {
+		return fmt.Errorf("AccessKey: missing account")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKey != thatConfig.AccessKey {
+		return false
+	}
+	if ak.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
// Sanitize returns a copy safe for display: the access key is redacted, the
// account name is preserved.
func (ak *AccessKey) Sanitize() config.Config {
	return &AccessKey{
		AccessKey: config.Redacted,
		Account:   ak.Account,
	}
}
+
// GetBlobCredentials builds azblob shared-key credentials from the account
// name and access key.
func (ak *AccessKey) GetBlobCredentials() (azblob.Credential, error) {
	// Create a default request pipeline using your storage account name and account key.
	return azblob.NewSharedKeyCredential(ak.Account, ak.AccessKey)
}

+ 322 - 0
pkg/cloud/azure/billingexportparser.go

@@ -0,0 +1,322 @@
+package azure
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// azureDateLayout is the date format used by pay-as-you-go billing exports.
const azureDateLayout = "2006-01-02"

// AzureEnterpriseDateLayout is the date format used by enterprise billing exports.
const AzureEnterpriseDateLayout = "01/02/2006"

// groupRegex matches one "/segment" of a slash-separated path — presumably
// used by resource-ID parsing elsewhere in this file; not referenced in this view.
var groupRegex = regexp.MustCompile("(/[^/]+)")
+
// BillingRowValues holder for Azure Billing Values
type BillingRowValues struct {
	Date            time.Time         // usage date of the line item
	MeterCategory   string            // Azure meter category, e.g. "Virtual Network"
	SubscriptionID  string            // subscription the usage was billed to
	InvoiceEntityID string            // billing account ID (falls back to subscription ID)
	InstanceID      string            // resource/instance identifier
	Service         string            // consumed service, e.g. "Microsoft.Compute"
	Tags            map[string]string // resource tags (non-empty string values only)
	AdditionalInfo  map[string]any    // parsed "AdditionalInfo" JSON payload
	Cost            float64           // list (PAYG) cost
	NetCost         float64           // billed cost
}
+
+func (brv *BillingRowValues) IsCompute(category string) bool {
+	if category == kubecost.ComputeCategory {
+		return true
+	}
+
+	if category == kubecost.StorageCategory || category == kubecost.NetworkCategory {
+		if brv.Service == "Microsoft.Compute" {
+			return true
+		}
+	}
+	if category == kubecost.NetworkCategory && brv.MeterCategory == "Virtual Network" {
+		return true
+	}
+	return false
+}
+
// BillingExportParser holds indexes of relevant fields in an Azure Billing
// CSV row, in addition to the date format the export uses.
type BillingExportParser struct {
	Date            int
	MeterCategory   int
	InvoiceEntityID int
	SubscriptionID  int
	InstanceID      int
	Service         int
	Tags            int
	AdditionalInfo  int
	Cost            int
	NetCost         int
	DateFormat      string // azureDateLayout or AzureEnterpriseDateLayout
}
+
// getParenContentRegEx extracts the English header name from a localized
// header, e.g. "SubscriptionGuid" in "Abonnement-GUID (SubscriptionGuid)".
var getParenContentRegEx = regexp.MustCompile(`\((.*?)\)`)
+
+func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
+	// clear BOM from headers
+	if len(headers) != 0 {
+		headers[0] = strings.TrimPrefix(headers[0], "\xEF\xBB\xBF")
+	}
+
+	headerIndexes := map[string]int{}
+	for i, header := range headers {
+		// Azure Headers in different regions will have english headers in parentheses
+		match := getParenContentRegEx.FindStringSubmatch(header)
+		if len(match) != 0 {
+			header = match[len(match)-1]
+		}
+		headerIndexes[strings.ToLower(header)] = i
+	}
+
+	abp := &BillingExportParser{}
+
+	// Set Date Column and Date Format
+	if i, ok := headerIndexes["usagedatetime"]; ok {
+		abp.Date = i
+		abp.DateFormat = azureDateLayout
+	} else if j, ok2 := headerIndexes["date"]; ok2 {
+		abp.Date = j
+		abp.DateFormat = AzureEnterpriseDateLayout
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Date field")
+	}
+
+	// set Subscription ID
+	if i, ok := headerIndexes["subscriptionid"]; ok {
+		abp.SubscriptionID = i
+	} else if j, ok2 := headerIndexes["subscriptionguid"]; ok2 {
+		abp.SubscriptionID = j
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Subscription ID field")
+	}
+
+	// Set Billing ID
+	if i, ok := headerIndexes["billingaccountid"]; ok {
+		abp.InvoiceEntityID = i
+	} else if j, ok2 := headerIndexes["billingaccountname"]; ok2 {
+		abp.InvoiceEntityID = j
+	} else {
+		// if no billing ID column is present use subscription ID
+		abp.InvoiceEntityID = abp.SubscriptionID
+	}
+
+	// Set Instance ID
+	if i, ok := headerIndexes["instanceid"]; ok {
+		abp.InstanceID = i
+	} else if j, ok2 := headerIndexes["instancename"]; ok2 {
+		abp.InstanceID = j
+	} else if k, ok3 := headerIndexes["resourceid"]; ok3 {
+		abp.InstanceID = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Instance ID field")
+	}
+
+	// Set Meter Category
+	if i, ok := headerIndexes["metercategory"]; ok {
+		abp.MeterCategory = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Meter Category field")
+	}
+
+	// Set Tags
+	if i, ok := headerIndexes["tags"]; ok {
+		abp.Tags = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Tags field")
+	}
+
+	// Set Additional Info
+	if i, ok := headerIndexes["additionalinfo"]; ok {
+		abp.AdditionalInfo = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Additional Info field")
+	}
+
+	// Set Service
+	if i, ok := headerIndexes["consumedservice"]; ok {
+		abp.Service = i
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Service field")
+	}
+
+	// Set Net Cost
+	if i, ok := headerIndexes["costinbillingcurrency"]; ok {
+		abp.NetCost = i
+	} else if j, ok2 := headerIndexes["pretaxcost"]; ok2 {
+		abp.NetCost = j
+	} else if k, ok3 := headerIndexes["cost"]; ok3 {
+		abp.NetCost = k
+	} else {
+		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Net Cost field")
+	}
+
+	// Set Cost
+	if i, ok := headerIndexes["paygcostinbillingcurrency"]; ok {
+		abp.Cost = i
+	} else {
+		// if no Cost column is present use Net Cost column
+		abp.Cost = abp.NetCost
+	}
+
+	return abp, nil
+}
+
// ParseRow parses a single CSV record of an Azure billing export into a
// BillingRowValues. It returns nil when the row should be skipped: the
// usage date cannot be parsed with either known layout, the date falls
// outside the half-open window [start, end), or a cost column fails to
// parse as a float.
//
// NOTE(review): on a date-parse failure this method mutates bep.DateFormat
// and retries with the alternate layout, so a parser instance is not safe
// for concurrent use — confirm callers parse rows sequentially.
func (bep *BillingExportParser) ParseRow(start, end time.Time, record []string) *BillingRowValues {
	usageDate, err := time.Parse(bep.DateFormat, record[bep.Date])
	if err != nil {
		// try other format, and switch if successful
		if bep.DateFormat == azureDateLayout {
			bep.DateFormat = AzureEnterpriseDateLayout
		} else {
			bep.DateFormat = azureDateLayout
		}
		usageDate, err = time.Parse(bep.DateFormat, record[bep.Date])
		// If parse still fails then return line
		if err != nil {
			log.Errorf("failed to parse usage date: '%s'", record[bep.Date])
			return nil
		}
	}

	// skip if usage data isn't in subject window
	if usageDate.Before(start) || !usageDate.Before(end) {
		return nil
	}

	cost, err := strconv.ParseFloat(record[bep.Cost], 64)
	if err != nil {
		log.Errorf("failed to parse cost: '%s'", record[bep.Cost])
		return nil
	}

	netCost, err := strconv.ParseFloat(record[bep.NetCost], 64)
	if err != nil {
		log.Errorf("failed to parse net cost: '%s'", record[bep.NetCost])
		return nil
	}

	// The AdditionalInfo column holds a JSON object, sometimes exported
	// without the surrounding braces; encloseInBrackets repairs that case.
	// A parse failure is logged but not fatal: the row is kept with an
	// empty map.
	additionalInfo := make(map[string]any)
	additionalInfoJson := encloseInBrackets(record[bep.AdditionalInfo])
	if additionalInfoJson != "" {
		err = json.Unmarshal([]byte(additionalInfoJson), &additionalInfo)
		if err != nil {
			log.Errorf("Could not parse additional information %s, with Error: %s", additionalInfoJson, err.Error())
		}
	}

	// Tags are decoded into map[string]any first; only non-empty string
	// values are kept in the final tag map.
	tags := make(map[string]string)
	tagJson := encloseInBrackets(record[bep.Tags])
	if tagJson != "" {
		tagsAny := make(map[string]any)
		err = json.Unmarshal([]byte(tagJson), &tagsAny)
		if err != nil {
			log.Errorf("Could not parse tags: %v, with Error: %s", tagJson, err.Error())
		}

		for name, value := range tagsAny {
			if valueStr, ok := value.(string); ok && valueStr != "" {
				tags[name] = valueStr
			}
		}
	}

	return &BillingRowValues{
		Date:            usageDate,
		MeterCategory:   record[bep.MeterCategory],
		SubscriptionID:  record[bep.SubscriptionID],
		InvoiceEntityID: record[bep.InvoiceEntityID],
		InstanceID:      record[bep.InstanceID],
		Service:         record[bep.Service],
		Tags:            tags,
		AdditionalInfo:  additionalInfo,
		Cost:            cost,
		NetCost:         netCost,
	}
}
+
// encloseInBrackets wraps a JSON object fragment in curly braces unless the
// string is empty or already begins with '{' and ends with '}'. Azure
// exports sometimes emit the Tags/AdditionalInfo object without its
// surrounding braces.
func encloseInBrackets(jsonString string) string {
	if jsonString == "" {
		return jsonString
	}
	if strings.HasPrefix(jsonString, "{") && strings.HasSuffix(jsonString, "}") {
		return jsonString
	}
	return "{" + jsonString + "}"
}
+
+func AzureSetProviderID(abv *BillingRowValues) string {
+	category := SelectAzureCategory(abv.MeterCategory)
+	if value, ok := abv.AdditionalInfo["VMName"]; ok {
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+	} else if value, ok := abv.AdditionalInfo["VmName"]; ok {
+		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
+	} else if value2, ook := abv.AdditionalInfo["IpAddress"]; ook && abv.MeterCategory == "Virtual Network" {
+		return fmt.Sprintf("%v", value2)
+	}
+
+	if category == kubecost.StorageCategory {
+		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
+			creationSource := fmt.Sprintf("%v", value2)
+			return strings.TrimPrefix(creationSource, "aks-")
+		} else if value2, ok2 := abv.Tags["aks-managed-creationSource"]; ok2 {
+			creationSource := fmt.Sprintf("%v", value2)
+			return strings.TrimPrefix(creationSource, "vmssclient-")
+		} else {
+			return getSubStringAfterFinalSlash(abv.InstanceID)
+		}
+	}
+	return "azure://" + resourceGroupToLowerCase(abv.InstanceID)
+}
+
+func SelectAzureCategory(meterCategory string) string {
+	if meterCategory == "Virtual Machines" {
+		return kubecost.ComputeCategory
+	} else if meterCategory == "Storage" {
+		return kubecost.StorageCategory
+	} else if meterCategory == "Load Balancer" || meterCategory == "Bandwidth" || meterCategory == "Virtual Network" {
+		return kubecost.NetworkCategory
+	} else {
+		return kubecost.OtherCategory
+	}
+}
+
// resourceGroupToLowerCase normalizes one segment of an Azure provider ID to
// lower case. It splits providerID into the substrings matched by the
// package-level groupRegex and lower-cases only the fourth match
// (matchNum == 3), copying every other segment through unchanged.
// NOTE(review): groupRegex is defined elsewhere in this file; the fourth
// match is presumably the resource-group segment of the ID — confirm
// against the regex definition.
func resourceGroupToLowerCase(providerID string) string {
	var sb strings.Builder
	for matchNum, group := range groupRegex.FindAllString(providerID, -1) {
		if matchNum == 3 {
			sb.WriteString(strings.ToLower(group))
		} else {
			sb.WriteString(group)
		}
	}
	return sb.String()
}
+
+// Returns the substring after the final "/" in a string
+func getSubStringAfterFinalSlash(id string) string {
+	index := strings.LastIndex(id, "/")
+	if index == -1 {
+		log.DedupedInfof(5, "azure.getSubStringAfterFinalSlash: failed to parse %s", id)
+		return id
+	}
+	return id[index+1:]
+}
+
// getVMNumberForVMSS extracts the instance number from a VMSS VM name of
// the form "<scaleset>_<number>[_...]" and returns it as a
// "/virtualMachines/<number>" suffix. Names without an underscore yield "".
func getVMNumberForVMSS(vmName string) string {
	_, rest, found := strings.Cut(vmName, "_")
	if !found {
		return ""
	}
	// Keep only the segment between the first and second underscore, which
	// matches taking element [1] of a full split.
	num, _, _ := strings.Cut(rest, "_")
	return "/virtualMachines/" + num
}

+ 194 - 0
pkg/cloud/azure/billingexportparser_test.go

@@ -0,0 +1,194 @@
+package azure
+
+import (
+	"encoding/csv"
+	"os"
+	"testing"
+	"time"
+)
+
// Paths to the CSV fixtures used by the billing-export parser tests:
// header-set variants and value edge cases.
const billingExportPath = "./resources/billingexports/"
const headerSetPath = billingExportPath + "headersets/"
const valueCasesPath = billingExportPath + "values/"
+
+type TestCSVRetriever struct {
+	CSVName string
+}
+
+func (tcr TestCSVRetriever) getCSVReaders(start, end time.Time) ([]*csv.Reader, error) {
+	csvFile, err := os.Open(tcr.CSVName)
+	if err != nil {
+		return nil, err
+	}
+	reader := csv.NewReader(csvFile)
+	return append([]*csv.Reader{}, reader), nil
+}
+
+func Test_NewBillingExportParser(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected BillingExportParser
+	}{
+		"English Headers": {
+			input: "PayAsYouGo.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"Enterprise Camel Headers": {
+			input: "EnterpriseCamel.csv",
+			expected: BillingExportParser{
+				Date:            11,
+				MeterCategory:   18,
+				InvoiceEntityID: 0,
+				SubscriptionID:  23,
+				InstanceID:      29,
+				Service:         15,
+				Tags:            45,
+				AdditionalInfo:  44,
+				Cost:            38,
+				NetCost:         38,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"Enterprise Headers": {
+			input: "Enterprise.csv",
+			expected: BillingExportParser{
+				Date:            7,
+				MeterCategory:   9,
+				InvoiceEntityID: 39,
+				SubscriptionID:  3,
+				InstanceID:      20,
+				Service:         19,
+				Tags:            21,
+				AdditionalInfo:  23,
+				Cost:            17,
+				NetCost:         17,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"German Headers": {
+			input: "German.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"YA Headers": {
+			input: "YA.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"BOM Prefixed Headers": {
+			input: "BOM.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := TestCSVRetriever{
+				CSVName: headerSetPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+			headers, _ := reader.Read()
+			abp, err := NewBillingParseSchema(headers)
+			if err != nil {
+				t.Errorf("failed to create Azure Billing Parser from headers with error: %s", err.Error())
+			}
+
+			if abp.DateFormat != tc.expected.DateFormat {
+				t.Errorf("Azure Billing Parser does not have expected DateFormat index. Expected: %s, Actual: %s", tc.expected.DateFormat, abp.DateFormat)
+			}
+
+			if abp.Date != tc.expected.Date {
+				t.Errorf("Azure Billing Parser does not have expected Date index. Expected: %d, Actual: %d", tc.expected.Date, abp.Date)
+			}
+
+			if abp.MeterCategory != tc.expected.MeterCategory {
+				t.Errorf("Azure Billing Parser does not have expected MeterCategory index. Expected: %d, Actual: %d", tc.expected.MeterCategory, abp.MeterCategory)
+			}
+
+			if abp.InvoiceEntityID != tc.expected.InvoiceEntityID {
+				t.Errorf("Azure Billing Parser does not have expected InvoiceEntityID index. Expected: %d, Actual: %d", tc.expected.InvoiceEntityID, abp.InvoiceEntityID)
+			}
+
+			if abp.SubscriptionID != tc.expected.SubscriptionID {
+				t.Errorf("Azure Billing Parser does not have expected SubscriptionID index. Expected: %d, Actual: %d", tc.expected.SubscriptionID, abp.SubscriptionID)
+			}
+
+			if abp.InstanceID != tc.expected.InstanceID {
+				t.Errorf("Azure Billing Parser does not have expected InstanceID index. Expected: %d, Actual: %d", tc.expected.InstanceID, abp.InstanceID)
+			}
+
+			if abp.Service != tc.expected.Service {
+				t.Errorf("Azure Billing Parser does not have expected Service index. Expected: %d, Actual: %d", tc.expected.Service, abp.Service)
+			}
+
+			if abp.Tags != tc.expected.Tags {
+				t.Errorf("Azure Billing Parser does not have expected Tags index. Expected: %d, Actual: %d", tc.expected.Tags, abp.Tags)
+			}
+
+			if abp.AdditionalInfo != tc.expected.AdditionalInfo {
+				t.Errorf("Azure Billing Parser does not have expected AdditionalInfo index. Expected: %d, Actual: %d", tc.expected.AdditionalInfo, abp.AdditionalInfo)
+			}
+
+			if abp.Cost != tc.expected.Cost {
+				t.Errorf("Azure Billing Parser does not have expected Cost index. Expected: %d, Actual: %d", tc.expected.Cost, abp.Cost)
+			}
+
+			if abp.NetCost != tc.expected.NetCost {
+				t.Errorf("Azure Billing Parser does not have expected NetCost index. Expected: %d, Actual: %d", tc.expected.NetCost, abp.NetCost)
+			}
+		})
+	}
+}

+ 124 - 0
pkg/cloud/azure/pricesheetclient.go

@@ -0,0 +1,124 @@
+package azure
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
// Module identity reported through the ARM pipeline (used for SDK
// telemetry / user-agent strings).
const (
	moduleName    = "armconsumption"
	moduleVersion = "v1.0.0"
)
+
// At the moment the consumption pricesheet download API is not a)
// documented or b) supported by the SDK. This is an implementation of
// a client in the style of the Azure go SDK - once the API is
// supported this will be removed.

// PriceSheetClient contains the methods for the PriceSheet group.
// Don't use this type directly, use NewPriceSheetClient() instead.
type PriceSheetClient struct {
	// host is the ARM endpoint the requests are sent to.
	host string
	// billingAccountID is the Azure billing account the pricesheet belongs to.
	billingAccountID string
	// pl is the ARM request pipeline (auth, retries, telemetry).
	pl runtime.Pipeline
}
+
+// NewPriceSheetClient creates a new instance of PriceSheetClient with the specified values.
+// billingAccountId - Azure Billing Account ID.
+// credential - used to authorize requests. Usually a credential from azidentity.
+// options - pass nil to accept the default values.
+func NewPriceSheetClient(billingAccountID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PriceSheetClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &PriceSheetClient{
+		billingAccountID: billingAccountID,
+		host:             ep,
+		pl:               pl,
+	}
+	return client, nil
+}
+
// BeginDownloadByBillingPeriod - requests a pricesheet for a specific billing period `yyyymm`.
// Returns a Poller that will provide the download URL when the pricesheet is ready.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2022-06-01
// billingPeriodName - Billing Period Name `yyyymm`.
func (client *PriceSheetClient) BeginDownloadByBillingPeriod(ctx context.Context, billingPeriodName string) (*runtime.Poller[PriceSheetClientDownloadResponse], error) {
	// Kick off the long-running download operation, then hand the raw
	// response to the SDK poller which tracks it to completion.
	resp, err := client.downloadByBillingPeriodOperation(ctx, billingPeriodName)
	if err != nil {
		return nil, err
	}
	return runtime.NewPoller[PriceSheetClientDownloadResponse](resp, client.pl, nil)
}
+
// PriceSheetClientDownloadResponse is the terminal body of the pricesheet
// download long-running operation, as returned by the poller.
type PriceSheetClientDownloadResponse struct {
	ID         string                             `json:"id"`
	Name       string                             `json:"name"`
	StartTime  time.Time                          `json:"startTime"`
	EndTime    time.Time                          `json:"endTime"`
	Status     string                             `json:"status"`
	Properties PriceSheetClientDownloadProperties `json:"properties"`
}

// PriceSheetClientDownloadProperties carries the signed download URL for the
// generated pricesheet and its expiry timestamp.
type PriceSheetClientDownloadProperties struct {
	DownloadURL string `json:"downloadUrl"`
	ValidTill   string `json:"validTill"`
}
+
+func (client *PriceSheetClient) downloadByBillingPeriodOperation(ctx context.Context, billingPeriodName string) (*http.Response, error) {
+	req, err := client.downloadByBillingPeriodCreateRequest(ctx, billingPeriodName)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
// downloadByBillingPeriodTemplate is the ARM URL path for the (undocumented)
// consumption pricesheet download operation; the two %s slots are the
// billing account ID and the billing period name.
const downloadByBillingPeriodTemplate = "/providers/Microsoft.Billing/billingAccounts/%s/billingPeriods/%s/providers/Microsoft.Consumption/pricesheets/download"

// downloadByBillingPeriodCreateRequest creates the DownloadByBillingPeriod request.
func (client *PriceSheetClient) downloadByBillingPeriodCreateRequest(ctx context.Context, billingPeriodName string) (*policy.Request, error) {
	if client.billingAccountID == "" {
		return nil, errors.New("parameter client.billingAccountID cannot be empty")
	}
	if billingPeriodName == "" {
		return nil, errors.New("parameter billingPeriodName cannot be empty")
	}
	// Both path parameters are escaped before being spliced into the URL.
	urlPath := fmt.Sprintf(downloadByBillingPeriodTemplate, url.PathEscape(client.billingAccountID), url.PathEscape(billingPeriodName))
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
	if err != nil {
		return nil, err
	}
	// "ln" selects the language of the generated sheet.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("api-version", "2022-06-01")
	reqQP.Set("ln", "en")
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Accept"] = []string{"*/*"}
	return req, nil
}

+ 300 - 0
pkg/cloud/azure/pricesheetdownloader.go

@@ -0,0 +1,300 @@
+package azure
+
+import (
+	"bufio"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/profiles/2020-09-01/commerce/mgmt/commerce"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+	"github.com/opencost/opencost/pkg/log"
+)
+
// PriceSheetDownloader fetches an Azure consumption pricesheet for the
// current billing period and converts its rows into AzurePricing entries.
type PriceSheetDownloader struct {
	// TenantID/ClientID/ClientSecret form the service-principal credential.
	TenantID     string
	ClientID     string
	ClientSecret string
	// BillingAccount is the billing account whose pricesheet is downloaded.
	BillingAccount string
	// OfferID filters pricesheet rows; rows for other offers are skipped.
	OfferID string
	// ConvertMeterInfo maps one pricesheet meter to zero or more pricings.
	ConvertMeterInfo func(info commerce.MeterInfo) (map[string]*AzurePricing, error)
}
+
+func (d *PriceSheetDownloader) GetPricing(ctx context.Context) (map[string]*AzurePricing, error) {
+	log.Infof("requesting pricesheet download link")
+	url, err := d.getDownloadURL(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("getting download URL: %w", err)
+	}
+	log.Infof("downloading pricesheet from %q", url)
+	data, err := d.saveData(ctx, url, "pricesheet")
+	if err != nil {
+		return nil, fmt.Errorf("saving pricesheet from %q: %w", url, err)
+	}
+	defer data.Close()
+
+	prices, err := d.readPricesheet(ctx, data)
+	if err != nil {
+		return nil, fmt.Errorf("reading pricesheet: %w", err)
+	}
+	log.Infof("loaded %d pricings from pricesheet", len(prices))
+	return prices, nil
+}
+
// getDownloadURL authenticates with a client-secret credential, starts the
// pricesheet generation for the current billing period, and polls (every
// 30s) until the service returns a signed download URL.
func (d *PriceSheetDownloader) getDownloadURL(ctx context.Context) (string, error) {
	cred, err := azidentity.NewClientSecretCredential(d.TenantID, d.ClientID, d.ClientSecret, nil)
	if err != nil {
		return "", fmt.Errorf("creating credential: %w", err)
	}
	client, err := NewPriceSheetClient(d.BillingAccount, cred, nil)
	if err != nil {
		return "", fmt.Errorf("creating pricesheet client: %w", err)
	}
	poller, err := client.BeginDownloadByBillingPeriod(ctx, currentBillingPeriod())
	if err != nil {
		return "", fmt.Errorf("beginning pricesheet download: %w", err)
	}
	// Sheet generation is a long-running server-side operation; poll at a
	// relaxed interval until it completes or ctx is cancelled.
	resp, err := poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{
		Frequency: 30 * time.Second,
	})
	if err != nil {
		return "", fmt.Errorf("polling for pricesheet: %w", err)
	}
	return resp.Properties.DownloadURL, nil
}
+
+func (d PriceSheetDownloader) saveData(ctx context.Context, url, tempName string) (io.ReadCloser, error) {
+	// Download file from URL in response.
+	out, err := os.CreateTemp("", tempName)
+	if err != nil {
+		return nil, fmt.Errorf("creating %s temp file: %w", tempName, err)
+	}
+
+	resp, err := http.Get(url)
+	if err != nil {
+		return nil, fmt.Errorf("downloading: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("unexpected HTTP status %d", resp.StatusCode)
+	}
+
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		return nil, fmt.Errorf("reading response: %w", err)
+	}
+
+	_, err = out.Seek(0, io.SeekStart)
+	if err != nil {
+		return nil, fmt.Errorf("seeking to start of file: %w", err)
+	}
+
+	return &removeOnClose{File: out}, nil
+}
+
+type removeOnClose struct {
+	*os.File
+}
+
+func (r *removeOnClose) Close() error {
+	err := r.File.Close()
+	if err != nil {
+		return err
+	}
+	return os.Remove(r.Name())
+}
+
// readPricesheet parses a downloaded pricesheet CSV into AzurePricing
// entries. Rows for other offers and "Savings Plan" price types are
// skipped; rows whose meter info cannot be built or converted are logged
// and skipped. An error is returned only for structural problems (bad
// header, read failure) or when no row matched at all.
//
// NOTE(review): ctx is currently unused in this method.
func (d *PriceSheetDownloader) readPricesheet(ctx context.Context, data io.Reader) (map[string]*AzurePricing, error) {
	// Avoid double-buffering.
	buf, ok := (data).(*bufio.Reader)
	if !ok {
		buf = bufio.NewReader(data)
	}

	// The CSV file starts with two lines before the header without
	// commas (so different numbers of fields as far as the CSV parser
	// is concerned). Skip them before making the CSV reader so we
	// still get the benefit of the row length checks after the
	// header.
	for i := 0; i < 2; i++ {
		_, err := buf.ReadBytes('\n')
		if err != nil {
			return nil, fmt.Errorf("skipping preamble line %d: %w", i, err)
		}
	}
	reader := csv.NewReader(buf)
	// ReuseRecord is safe here: each row is fully consumed before the next
	// Read (makeMeterInfo copies the fields it needs into new strings).
	reader.ReuseRecord = true

	header, err := reader.Read()
	if err != nil {
		return nil, fmt.Errorf("reading header: %w", err)
	}
	if err := checkPricesheetHeader(header); err != nil {
		return nil, err
	}

	units := make(map[string]bool)

	results := make(map[string]*AzurePricing)
	// lines approximates the physical line number for error messages,
	// starting after the two preamble lines.
	// NOTE(review): the first data row is physical line 4 but is reported
	// as line 3 (the header is not counted) — off by one, cosmetic only.
	lines := 2
	for {
		row, err := reader.Read()
		if err == io.EOF {
			break
		}
		lines++
		if err != nil {
			return nil, fmt.Errorf("reading line %d: %w", lines, err)
		}

		// Skip savings plan - we should be reporting based on the
		// consumption price because we don't know whether the user is
		// using a savings plan or over their threshold.
		if row[pricesheetPriceType] == "Savings Plan" || row[pricesheetOfferID] != d.OfferID {
			continue
		}

		// TODO: Creating a meter info for each record will cause a
		// lot of GC churn - is it worth reusing one meter info instead?
		meterInfo, err := makeMeterInfo(row)
		if err != nil {
			log.Warnf("making meter info (line %d): %v", lines, err)
			continue
		}

		pricings, err := d.ConvertMeterInfo(meterInfo)
		if err != nil {
			log.Warnf("converting meter to pricings (line %d): %v", lines, err)
			continue
		}

		if pricings != nil {
			units[*meterInfo.Unit] = true
		}

		for key, pricing := range pricings {
			results[key] = pricing
		}
	}

	if len(results) == 0 {
		return nil, fmt.Errorf("no matching pricing from price sheet")
	}

	// Keep track of units seen so we can detect if there are any that
	// need handling.
	allUnits := make([]string, 0, len(units))
	for unit := range units {
		allUnits = append(allUnits, unit)
	}
	sort.Strings(allUnits)
	log.Infof("all units in pricesheet: %s", strings.Join(allUnits, ", "))

	return results, nil
}
+
+func checkPricesheetHeader(header []string) error {
+	if len(header) < len(pricesheetCols) {
+		return fmt.Errorf("too few header columns: got %d, expected %d", len(header), len(pricesheetCols))
+	}
+	for col, name := range pricesheetCols {
+		if !strings.EqualFold(header[col], name) {
+			return fmt.Errorf("unexpected header at col %d %q, expected %q", col, header[col], name)
+		}
+	}
+	return nil
+}
+
// makeMeterInfo builds a commerce.MeterInfo from one pricesheet CSV row,
// normalising the unit price so the unit matches the rate-card form (e.g.
// "10 Hours" becomes a per-"1 Hour" price). Returns an error only when the
// unit price column is not a valid float.
func makeMeterInfo(row []string) (commerce.MeterInfo, error) {
	price, err := strconv.ParseFloat(row[pricesheetUnitPrice], 64)
	if err != nil {
		return commerce.MeterInfo{}, fmt.Errorf("parsing unit price: %w", err)
	}
	newPrice, unit := normalisePrice(price, row[pricesheetUnit])
	// MeterRates uses the rate-card convention of a "0"-keyed base rate.
	return commerce.MeterInfo{
		MeterName:        ptr(row[pricesheetMeterName]),
		MeterCategory:    ptr(row[pricesheetMeterCategory]),
		MeterSubCategory: ptr(row[pricesheetMeterSubCategory]),
		Unit:             &unit,
		MeterRegion:      ptr(row[pricesheetMeterRegion]),
		MeterRates:       map[string]*float64{"0": &newPrice},
	}, nil
}
+
// pricesheetCols lists the expected leading header columns of a pricesheet
// CSV, in order; checkPricesheetHeader validates against this list.
var pricesheetCols = []string{
	"Meter ID",
	"Meter name",
	"Meter category",
	"Meter sub-category",
	"Meter region",
	"Unit",
	"Unit of measure",
	"Part number",
	"Unit price",
	"Currency code",
	"Included quantity",
	"Offer Id",
	"Term",
	"Price type",
}

// Column indexes into a pricesheet row, matching pricesheetCols above.
const (
	pricesheetMeterID          = 0
	pricesheetMeterName        = 1
	pricesheetMeterCategory    = 2
	pricesheetMeterSubCategory = 3
	pricesheetMeterRegion      = 4
	pricesheetUnit             = 5
	pricesheetUnitPrice        = 8
	pricesheetCurrencyCode     = 9
	pricesheetOfferID          = 11
	pricesheetPriceType        = 13
)
+
+func currentBillingPeriod() string {
+	return time.Now().Format("200601")
+}
+
// ptr returns a pointer to a fresh copy of v; handy for building structs
// whose fields are pointers to literals.
func ptr[T any](v T) *T {
	out := new(T)
	*out = v
	return out
}
+
// conversions lists all the units seen from the price sheet for
// prices we're interested in with factors to the corresponding units
// in the rate card.
var conversions = map[string]struct {
	divisor float64
	unit    string
}{
	"1 /Month":       {divisor: 1, unit: "1 /Month"},
	"1 Hour":         {divisor: 1, unit: "1 Hour"},
	"1 PiB/Hour":     {divisor: 1_000_000, unit: "1 GiB/Hour"},
	"10 /Month":      {divisor: 10, unit: "1 /Month"},
	"10 Hours":       {divisor: 10, unit: "1 Hour"},
	"100 /Month":     {divisor: 100, unit: "1 /Month"},
	"100 GB/Month":   {divisor: 100, unit: "1 GB/Month"},
	"100 Hours":      {divisor: 100, unit: "1 Hour"},
	"100 TiB/Hour":   {divisor: 100_000, unit: "1 GiB/Hour"},
	"1000 Hours":     {divisor: 1000, unit: "1 Hour"},
	"10000 Hours":    {divisor: 10_000, unit: "1 Hour"},
	"100000 /Hour":   {divisor: 100_000, unit: "1 /Hour"},
	"1000000 /Hour":  {divisor: 1_000_000, unit: "1 /Hour"},
	"10000000 /Hour": {divisor: 10_000_000, unit: "1 /Hour"},
}

// normalisePrice converts a pricesheet (price, unit) pair into its
// rate-card equivalent by dividing the price by the unit's quantity
// factor. Unknown units pass through unchanged.
func normalisePrice(price float64, unit string) (float64, string) {
	conv, ok := conversions[unit]
	if !ok {
		return price, unit
	}
	return price / conv.divisor, conv.unit
}

+ 99 - 0
pkg/cloud/azure/pricesheetdownloader_test.go

@@ -0,0 +1,99 @@
+package azure
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/profiles/2020-09-01/commerce/mgmt/commerce"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/stretchr/testify/require"
+)
+
// TestDownloader exercises readPricesheet against the in-file fixture CSV:
// the happy path (normalised units, skipped savings-plan/other-offer rows),
// header validation errors, and the "no matching prices" error.
func TestDownloader(t *testing.T) {
	d := PriceSheetDownloader{
		TenantID:         "test-tenant-id",
		ClientID:         "test-client-id",
		ClientSecret:     "test-client-secret",
		BillingAccount:   "test-billing-account",
		OfferID:          "my-offer-id",
		ConvertMeterInfo: convertMeter,
	}

	t.Run("read prices", func(t *testing.T) {
		results, err := d.readPricesheet(context.Background(), strings.NewReader(pricesheetData))
		require.NoError(t, err)

		// Units and prices are normalised.
		// Info for saving plans and other offers is skipped.
		expected := map[string]*AzurePricing{
			"DC96as_v4 1 Hour": {Node: &models.Node{Cost: "10.505"}},
			"DC2as_v4 1 Hour":  {Node: &models.Node{Cost: "0.219"}},
			"VM1 1 Hour":       {Node: &models.Node{Cost: "1.0"}},
			"VM2 1 Hour":       {Node: &models.Node{Cost: "2.0"}},
		}
		require.Equal(t, expected, results)
	})

	t.Run("bad header", func(t *testing.T) {
		// Two leading newlines stand in for the preamble lines.
		data := "\n\nMeter ID,Meter name,Meter category,Something else,,,,,,,,,,,,,,\n"
		_, err := d.readPricesheet(context.Background(), strings.NewReader(data))
		require.ErrorContains(t, err, `unexpected header at col 3 "Something else", expected "Meter sub-category"`)
	})

	t.Run("short header", func(t *testing.T) {
		data := "\n\nMeter ID, Meter name, Meter category, Meter sub-category\n"
		_, err := d.readPricesheet(context.Background(), strings.NewReader(data))
		require.ErrorContains(t, err, "too few header columns: got 4, expected 14")
	})

	t.Run("no matching prices", func(t *testing.T) {
		// A converter that drops every row must surface an error.
		d := PriceSheetDownloader{
			TenantID:       "test-tenant-id",
			ClientID:       "test-client-id",
			ClientSecret:   "test-client-secret",
			BillingAccount: "test-billing-account",
			OfferID:        "my-offer-id",
			ConvertMeterInfo: func(commerce.MeterInfo) (map[string]*AzurePricing, error) {
				return nil, nil
			},
		}
		_, err := d.readPricesheet(context.Background(), strings.NewReader(pricesheetData))
		require.ErrorContains(t, err, "no matching pricing from price sheet")
	})
}
+
// convertMeter is the test ConvertMeterInfo implementation. Special meter
// names drive the edge cases: "skip-this" yields no pricings,
// "multiple-prices" yields two, and "error" fails; all other meters map to
// a single "<name> <unit>" pricing keyed entry.
func convertMeter(info commerce.MeterInfo) (map[string]*AzurePricing, error) {
	switch *info.MeterName {
	case "skip-this":
		return nil, nil
	case "multiple-prices":
		return map[string]*AzurePricing{
			"VM1 1 Hour": {Node: &models.Node{Cost: "1.0"}},
			"VM2 1 Hour": {Node: &models.Node{Cost: "2.0"}},
		}, nil
	case "error":
		return nil, fmt.Errorf("there was an error handling this row!")
	default:
		return map[string]*AzurePricing{
			*info.MeterName + " " + *info.Unit: {
				Node: &models.Node{Cost: fmt.Sprintf("%0.3f", *info.MeterRates["0"])},
			},
		}, nil
	}
}
+
// pricesheetData is a fixture pricesheet: two preamble lines, the header,
// then rows covering matching/other offers, a savings-plan row, and the
// special meter names handled by convertMeter.
const pricesheetData = `Price Sheet Report for billing period - 202304

Meter ID,Meter name,Meter category,Meter sub-category,Meter region,Unit,Unit of measure,Part number,Unit price,Currency code,Included quantity,Offer Id,Term,Price type
d4236f8f-3ba6-5a9a-8c6b-14556538c44c,DC96as_v4,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70822,105.050000000000000,USD,0.00,my-offer-id,,Consumption
d4236f8f-3ba6-5a9a-8c6b-14556538c44c,DC96as_v4,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70831,60.890000000000000,USD,0.00,other-offer-id,,Consumption
e47a2c4c-4dc4-55d5-a8d7-ec5b1dcc9c08,DC2as_v4,Virtual Machines,DCasv4 Series,US East,100 Hours,100 Hours,AAF-70890,21.900000000000000,USD,0.000,my-offer-id,,Consumption
e47a2c4c-4dc4-55d5-a8d7-ec5b1dcc9c08,DC2as_v4,Virtual Machines,DCasv4 Series,US East,100 Hours,100 Hours,AAF-70886,12.700000000000000,USD,0.000,other-offer-id,,Consumption
cb8d72c0-2b02-5b41-9ac9-2809c04f17ff,DC16as_v4,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70911,17.510000000000000,USD,0.00,my-offer-id,,Savings Plan
cb8d72c0-2b02-5b41-9ac9-2809c04f17ff,DC16as_v4,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70910,10.150000000000000,USD,0.00,other-offer-id,,Consumption
d4236f8f-3ba6-5a9a-8c6b-14556538c44c,skip-this,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70822,105.050000000000000,USD,0.00,my-offer-id,,Consumption
d4236f8f-3ba6-5a9a-8c6b-14556538c44c,multiple-prices,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70822,105.050000000000000,USD,0.00,my-offer-id,,Consumption
d4236f8f-3ba6-5a9a-8c6b-14556538c44c,error,Virtual Machines,DCasv4 Series,US East,10 Hours,10 Hours,AAF-70822,105.050000000000000,USD,0.00,my-offer-id,,Consumption
`

+ 1649 - 0
pkg/cloud/azure/provider.go

@@ -0,0 +1,1649 @@
+package azure
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/preview/commerce/mgmt/2015-06-01-preview/commerce"
+	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
+	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/azure/auth"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+
+	v1 "k8s.io/api/core/v1"
+)
+
const (
	// Storage-class identifiers used as the second component of PV pricing keys.
	AzureFilePremiumStorageClass     = "premium_smb"
	AzureFileStandardStorageClass    = "standard_smb"
	AzureDiskPremiumSSDStorageClass  = "premium_ssd"
	AzureDiskStandardSSDStorageClass = "standard_ssd"
	AzureDiskStandardStorageClass    = "standard_hdd"
	// Default AKS node label/value marking spot (scale-set priority) nodes.
	defaultSpotLabel      = "kubernetes.azure.com/scalesetpriority"
	defaultSpotLabelValue = "spot"
	// Update-type tag for Azure storage configuration changes.
	AzureStorageUpdateType = "AzureStorage"
)
+
var (
	// regionCodeMappings maps the (lowercased) first word of a rate card meter
	// region display name to the region-family prefix used in Azure region IDs,
	// e.g. "ja" -> "japan", "us" -> "us". Consumed by toRegionID.
	regionCodeMappings = map[string]string{
		"ap": "asia",
		"au": "australia",
		"br": "brazil",
		"ca": "canada",
		"eu": "europe",
		"fr": "france",
		"in": "india",
		"ja": "japan",
		"kr": "korea",
		"uk": "uk",
		"us": "us",
		"za": "southafrica",
		"no": "norway",
		"ch": "switzerland",
		"de": "germany",
		"ue": "uae",
	}

	// VM family matchers used by getMachineTypeVariants to derive the sibling
	// SKU spellings a machine type may be billed under.
	//mtBasic, _     = regexp.Compile("^BASIC.A\\d+[_Promo]*$")
	//mtStandardA, _ = regexp.Compile("^A\\d+[_Promo]*$")
	mtStandardB, _ = regexp.Compile(`^Standard_B\d+m?[_v\d]*[_Promo]*$`)
	mtStandardD, _ = regexp.Compile(`^Standard_D\d[_v\d]*[_Promo]*$`)
	mtStandardE, _ = regexp.Compile(`^Standard_E\d+i?[_v\d]*[_Promo]*$`)
	mtStandardF, _ = regexp.Compile(`^Standard_F\d+[_v\d]*[_Promo]*$`)
	mtStandardG, _ = regexp.Compile(`^Standard_G\d+[_v\d]*[_Promo]*$`)
	mtStandardL, _ = regexp.Compile(`^Standard_L\d+[_v\d]*[_Promo]*$`)
	mtStandardM, _ = regexp.Compile(`^Standard_M\d+[m|t|l]*s[_v\d]*[_Promo]*$`)
	mtStandardN, _ = regexp.Compile(`^Standard_N[C|D|V]\d+r?[_v\d]*[_Promo]*$`)

	// azureSubRegex extracts the subscription ID from a node provider ID:
	// azure:///subscriptions/0badafdf-1234-abcd-wxyz-123456789/...
	//  => 0badafdf-1234-abcd-wxyz-123456789
	azureSubRegex = regexp.MustCompile("azure:///subscriptions/([^/]*)/*")
)
+
// azureRegions is the full list of Azure location names, used to seed region
// lookups. List obtained by installing the Azure CLI tool "az", described here:
// https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux?pivots=apt
// logging into an Azure account, and running command `az account list-locations`.
// Includes geography aliases (e.g. "asia", "global") and government regions.
var azureRegions = []string{
	"eastus",
	"eastus2",
	"southcentralus",
	"westus2",
	"westus3",
	"australiaeast",
	"southeastasia",
	"northeurope",
	"swedencentral",
	"uksouth",
	"westeurope",
	"centralus",
	"northcentralus",
	"westus",
	"southafricanorth",
	"centralindia",
	"eastasia",
	"japaneast",
	"jioindiawest",
	"koreacentral",
	"canadacentral",
	"francecentral",
	"germanywestcentral",
	"norwayeast",
	"switzerlandnorth",
	"uaenorth",
	"brazilsouth",
	"centralusstage",
	"eastusstage",
	"eastus2stage",
	"northcentralusstage",
	"southcentralusstage",
	"westusstage",
	"westus2stage",
	"asia",
	"asiapacific",
	"australia",
	"brazil",
	"canada",
	"europe",
	"france",
	"germany",
	"global",
	"india",
	"japan",
	"korea",
	"norway",
	"southafrica",
	"switzerland",
	"uae",
	"uk",
	"unitedstates",
	"eastasiastage",
	"southeastasiastage",
	"centraluseuap",
	"eastus2euap",
	"westcentralus",
	"southafricawest",
	"australiacentral",
	"australiacentral2",
	"australiasoutheast",
	"japanwest",
	"jioindiacentral",
	"koreasouth",
	"southindia",
	"westindia",
	"canadaeast",
	"francesouth",
	"germanynorth",
	"norwaywest",
	"switzerlandwest",
	"ukwest",
	"uaecentral",
	"brazilsoutheast",
	"usgovarizona",
	"usgoviowa",
	"usgovvirginia",
	"usgovtexas",
}
+
// regionParts holds the whitespace-separated, lowercased words of an Azure
// meter region display name (e.g. "US East 2" -> ["us", "east", "2"]).
type regionParts []string

// String concatenates the parts with no separator, yielding the compact form
// used in Azure region IDs (e.g. "useast2"). Note that toRegionID relies on
// this method both explicitly and via %s formatting of regionParts slices.
func (r regionParts) String() string {
	// strings.Join replaces the previous quadratic += loop.
	return strings.Join(r, "")
}
+
+func getRegions(service string, subscriptionsClient subscriptions.Client, providersClient resources.ProvidersClient, subscriptionID string) (map[string]string, error) {
+
+	allLocations := make(map[string]string)
+	supLocations := make(map[string]string)
+
+	// retrieve all locations for the subscription id (some of them may not be supported by the required provider)
+	if locations, err := subscriptionsClient.ListLocations(context.TODO(), subscriptionID); err == nil {
+		// fill up the map: DisplayName - > Name
+		for _, loc := range *locations.Value {
+			allLocations[*loc.DisplayName] = *loc.Name
+		}
+	} else {
+		return nil, err
+	}
+
+	// identify supported locations for the namespace and resource type
+	const (
+		providerNamespaceForCompute = "Microsoft.Compute"
+		resourceTypeForCompute      = "locations/vmSizes"
+		providerNamespaceForAks     = "Microsoft.ContainerService"
+		resourceTypeForAks          = "managedClusters"
+	)
+
+	switch service {
+	case "aks":
+		if providers, err := providersClient.Get(context.TODO(), providerNamespaceForAks, ""); err == nil {
+			for _, pr := range *providers.ResourceTypes {
+				if *pr.ResourceType == resourceTypeForAks {
+					for _, displName := range *pr.Locations {
+						if loc, ok := allLocations[displName]; ok {
+							supLocations[loc] = displName
+						} else {
+							log.Warnf("unsupported cloud region %q", loc)
+						}
+					}
+					break
+				}
+			}
+		} else {
+			return nil, err
+		}
+		return supLocations, nil
+	default:
+		if providers, err := providersClient.Get(context.TODO(), providerNamespaceForCompute, ""); err == nil {
+			for _, pr := range *providers.ResourceTypes {
+				if *pr.ResourceType == resourceTypeForCompute {
+					for _, displName := range *pr.Locations {
+						if loc, ok := allLocations[displName]; ok {
+							supLocations[loc] = displName
+						} else {
+							log.Warnf("unsupported cloud region %q", loc)
+						}
+					}
+					break
+				}
+			}
+		} else {
+			return nil, err
+		}
+
+		return supLocations, nil
+	}
+}
+
+func getRetailPrice(region string, skuName string, currencyCode string, spot bool) (string, error) {
+	pricingURL := "https://prices.azure.com/api/retail/prices?$skip=0"
+
+	if currencyCode != "" {
+		pricingURL += fmt.Sprintf("&currencyCode='%s'", currencyCode)
+	}
+
+	var filterParams []string
+
+	if region != "" {
+		regionParam := fmt.Sprintf("armRegionName eq '%s'", region)
+		filterParams = append(filterParams, regionParam)
+	}
+
+	if skuName != "" {
+		skuNameParam := fmt.Sprintf("armSkuName eq '%s'", skuName)
+		filterParams = append(filterParams, skuNameParam)
+	}
+
+	if len(filterParams) > 0 {
+		filterParamsEscaped := url.QueryEscape(strings.Join(filterParams[:], " and "))
+		pricingURL += fmt.Sprintf("&$filter=%s", filterParamsEscaped)
+	}
+
+	log.Infof("starting download retail price payload from \"%s\"", pricingURL)
+	resp, err := http.Get(pricingURL)
+
+	if err != nil {
+		return "", fmt.Errorf("bogus fetch of \"%s\": %v", pricingURL, err)
+	}
+
+	if resp.StatusCode < 200 && resp.StatusCode > 299 {
+		return "", fmt.Errorf("retail price responded with error status code %d", resp.StatusCode)
+	}
+
+	pricingPayload := AzureRetailPricing{}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("Error getting response: %v", err)
+	}
+
+	jsonErr := json.Unmarshal(body, &pricingPayload)
+	if jsonErr != nil {
+		return "", fmt.Errorf("Error unmarshalling data: %v", jsonErr)
+	}
+
+	retailPrice := ""
+	for _, item := range pricingPayload.Items {
+		if item.Type == "Consumption" && !strings.Contains(item.ProductName, "Windows") {
+			// if spot is true SkuName should contain "spot, if it is false it should not
+			if spot == strings.Contains(strings.ToLower(item.SkuName), " spot") {
+				retailPrice = fmt.Sprintf("%f", item.RetailPrice)
+			}
+		}
+	}
+
+	log.DedupedInfof(5, "done parsing retail price payload from \"%s\"\n", pricingURL)
+
+	if retailPrice == "" {
+		return retailPrice, fmt.Errorf("Couldn't find price for product \"%s\" in \"%s\" region", skuName, region)
+	}
+
+	return retailPrice, nil
+}
+
+func toRegionID(meterRegion string, regions map[string]string) (string, error) {
+	var rp regionParts = strings.Split(strings.ToLower(meterRegion), " ")
+	regionCode := regionCodeMappings[rp[0]]
+	lastPart := rp[len(rp)-1]
+	var regionIds []string
+	if regionID, ok := regionIdByDisplayName[meterRegion]; ok {
+		regionIds = []string{
+			regionID,
+		}
+	} else if _, err := strconv.Atoi(lastPart); err == nil {
+		regionIds = []string{
+			fmt.Sprintf("%s%s%s", regionCode, rp[1:len(rp)-1], lastPart),
+			fmt.Sprintf("%s%s%s", rp[1:len(rp)-1], regionCode, lastPart),
+		}
+	} else {
+		regionIds = []string{
+			fmt.Sprintf("%s%s", regionCode, rp[1:]),
+			fmt.Sprintf("%s%s", rp[1:], regionCode),
+		}
+	}
+	for _, regionID := range regionIds {
+		if checkRegionID(regionID, regions) {
+			return regionID, nil
+		}
+	}
+	return "", fmt.Errorf("Couldn't find region %q", meterRegion)
+}
+
// azure has very inconsistent naming standards between display names from the
// rate card api and display names from the regions api. This map connects the
// irregular display names from the ratecard api directly to the appropriate
// region ID, bypassing the heuristics in toRegionID.
var regionIdByDisplayName = map[string]string{
	"US Gov AZ": "usgovarizona",
	"US Gov TX": "usgovtexas",
	"US Gov":    "usgovvirginia",
}
+
// checkRegionID reports whether regionID is a key of the regions map.
func checkRegionID(regionID string, regions map[string]string) bool {
	// Direct O(1) map lookup replaces the previous linear scan over keys.
	_, ok := regions[regionID]
	return ok
}
+
// AzureRetailPricing struct for unmarshalling the Azure Retail pricing api
// JSON response (https://prices.azure.com/api/retail/prices).
// NOTE(review): NextPageLink suggests the API paginates; getRetailPrice only
// reads the first page — confirm whether follow-up pages are ever needed.
type AzureRetailPricing struct {
	BillingCurrency    string                         `json:"BillingCurrency"`
	CustomerEntityId   string                         `json:"CustomerEntityId"`
	CustomerEntityType string                         `json:"CustomerEntityType"`
	Items              []AzureRetailPricingAttributes `json:"Items"`
	NextPageLink       string                         `json:"NextPageLink"`
	Count              int                            `json:"Count"`
}
+
// AzureRetailPricingAttributes struct for unmarshalling a single item of the
// Azure Retail pricing api JSON response. Field names mirror the API's JSON
// keys; getRetailPrice filters on Type, ProductName, SkuName and reads
// RetailPrice.
type AzureRetailPricingAttributes struct {
	CurrencyCode         string     `json:"currencyCode"`
	TierMinimumUnits     float32    `json:"tierMinimumUnits"`
	RetailPrice          float32    `json:"retailPrice"`
	UnitPrice            float32    `json:"unitPrice"`
	ArmRegionName        string     `json:"armRegionName"`
	Location             string     `json:"location"`
	EffectiveStartDate   *time.Time `json:"effectiveStartDate"`
	EffectiveEndDate     *time.Time `json:"effectiveEndDate"`
	MeterId              string     `json:"meterId"`
	MeterName            string     `json:"meterName"`
	ProductId            string     `json:"productId"`
	SkuId                string     `json:"skuId"`
	ProductName          string     `json:"productName"`
	SkuName              string     `json:"skuName"`
	ServiceName          string     `json:"serviceName"`
	ServiceId            string     `json:"serviceId"`
	ServiceFamily        string     `json:"serviceFamily"`
	UnitOfMeasure        string     `json:"unitOfMeasure"`
	Type                 string     `json:"type"`
	IsPrimaryMeterRegion bool       `json:"isPrimaryMeterRegion"`
	ArmSkuName           string     `json:"armSkuName"`
}
+
// AzurePricing either contains a Node or PV pricing entry; exactly one of the
// two fields is populated, depending on whether the source meter described a
// virtual machine or a storage product.
type AzurePricing struct {
	Node *models.Node
	PV   *models.PV
}
+
// Azure is the cloud-provider implementation backed by the Azure rate card
// API and, optionally, the billing-account price sheet API.
type Azure struct {
	// Pricing maps a "region,instanceType,usageType" (or "region,storageClass")
	// key to its pricing entry; written by DownloadPricingData.
	Pricing map[string]*AzurePricing
	// DownloadPricingDataLock guards Pricing, pricingSource and the pricing
	// error fields, which are updated both synchronously and from the price
	// sheet goroutine spawned by DownloadPricingData.
	DownloadPricingDataLock sync.RWMutex
	Clientset               clustercache.ClusterCache
	Config                  models.ProviderConfig
	ServiceAccountChecks    *models.ServiceAccountChecks
	ClusterAccountID        string
	ClusterRegion           string

	// pricingSource records which API populated Pricing (rate card or price sheet).
	pricingSource          string
	rateCardPricingError   error
	priceSheetPricingError error
	// loadedAzureSecret caches whether the install-time auth secret was read,
	// even on failure; azureSecret is the cached result.
	loadedAzureSecret bool
	azureSecret       *AzureServiceKey
	// Same caching scheme for the storage-config secret.
	loadedAzureStorageConfigSecret bool
	azureStorageConfig             *AzureStorageConfig
}
+
// PricingSourceSummary returns the pricing source summary for the provider.
// The summary represents what was _parsed_ from the pricing source, not
// everything that was _available_ in the pricing source.
// NOTE(review): reads az.Pricing without taking DownloadPricingDataLock —
// confirm callers tolerate a concurrent refresh swapping the map.
func (az *Azure) PricingSourceSummary() interface{} {
	return az.Pricing
}
+
// azureKey implements models.Key for an Azure node: it derives the pricing
// lookup key and GPU attributes from the node's labels.
type azureKey struct {
	// Labels are the node's Kubernetes labels.
	Labels map[string]string
	// GPULabel / GPULabelValue identify GPU nodes (defaults "accelerator"/"nvidia",
	// overridable via custom pricing config; see GetKey).
	GPULabel      string
	GPULabelValue string
}
+
+func (k *azureKey) Features() string {
+	r, _ := util.GetRegion(k.Labels)
+	region := strings.ToLower(r)
+	instance, _ := util.GetInstanceType(k.Labels)
+	usageType := "ondemand"
+	return fmt.Sprintf("%s,%s,%s", region, instance, usageType)
+}
+
// GPUCount always returns 0 here; the Azure GPU count is instead derived from
// the instance-type name by GetGPUCount, which returns a string.
func (k *azureKey) GPUCount() int {
	return 0
}
+
+// GPUType returns value of GPULabel if present
+func (k *azureKey) GPUType() string {
+	if t, ok := k.Labels[k.GPULabel]; ok {
+		return t
+	}
+	return ""
+}
+
+func (k *azureKey) isValidGPUNode() bool {
+	return k.GPUType() == k.GPULabelValue && k.GetGPUCount() != "0"
+}
+
// ID is unused for Azure keys and always returns the empty string.
func (k *azureKey) ID() string {
	return ""
}
+
+func (k *azureKey) GetGPUCount() string {
+	instance, _ := util.GetInstanceType(k.Labels)
+	// Double digits that could get matches lower in logic
+	if strings.Contains(instance, "NC64") {
+		return "4"
+	}
+	if strings.Contains(instance, "ND96") ||
+		strings.Contains(instance, "ND40") {
+		return "8"
+	}
+
+	// Ordered asc because of some series have different gpu counts on different versions
+	if strings.Contains(instance, "NC6") ||
+		strings.Contains(instance, "NC4") ||
+		strings.Contains(instance, "NC8") ||
+		strings.Contains(instance, "NC16") ||
+		strings.Contains(instance, "ND6") ||
+		strings.Contains(instance, "NV12s") ||
+		strings.Contains(instance, "NV6") {
+		return "1"
+	}
+
+	if strings.Contains(instance, "NC12") ||
+		strings.Contains(instance, "ND12") ||
+		strings.Contains(instance, "NV24s") ||
+		strings.Contains(instance, "NV12") {
+		return "2"
+	}
+	if strings.Contains(instance, "NC24") ||
+		strings.Contains(instance, "ND24") ||
+		strings.Contains(instance, "NV48s") ||
+		strings.Contains(instance, "NV24") {
+		return "4"
+	}
+	return "0"
+}
+
// AzureStorageConfig Represents an azure storage config
// Deprecated: v1.104 Use StorageConfiguration instead
type AzureStorageConfig struct {
	SubscriptionId string `json:"azureSubscriptionID"`
	AccountName    string `json:"azureStorageAccount"`
	AccessKey      string `json:"azureStorageAccessKey"`
	ContainerName  string `json:"azureStorageContainer"`
	ContainerPath  string `json:"azureContainerPath"`
	AzureCloud     string `json:"azureCloud"`
}

// IsEmpty returns true if all fields in config are empty, false if not.
func (asc *AzureStorageConfig) IsEmpty() bool {
	// All fields are strings, so the struct is comparable: empty means equal
	// to the zero value.
	return *asc == (AzureStorageConfig{})
}
+
// AzureAppKey represents an azure app key as produced by
// `az ad sp create-for-rbac`.
type AzureAppKey struct {
	AppID       string `json:"appId"`
	DisplayName string `json:"displayName"`
	Name        string `json:"name"`
	Password    string `json:"password"`
	Tenant      string `json:"tenant"`
}

// AzureServiceKey service key for a specific subscription
// Deprecated: v1.104 Use ServiceKey instead
type AzureServiceKey struct {
	SubscriptionID string       `json:"subscriptionId"`
	ServiceKey     *AzureAppKey `json:"serviceKey"`
}

// IsValid performs a validity check on the service key: a subscription ID and
// a service key with app ID, password and tenant must all be present.
func (ask *AzureServiceKey) IsValid() bool {
	if ask.SubscriptionID == "" || ask.ServiceKey == nil {
		return false
	}
	key := ask.ServiceKey
	return key.AppID != "" && key.Password != "" && key.Tenant != ""
}
+
+// Loads the azure authentication via configuration or a secret set at install time.
+func (az *Azure) getAzureRateCardAuth(forceReload bool, cp *models.CustomPricing) (subscriptionID, clientID, clientSecret, tenantID string) {
+	// 1. Check for secret (secret values will always be used if they are present)
+	s, _ := az.loadAzureAuthSecret(forceReload)
+	if s != nil && s.IsValid() {
+		subscriptionID = s.SubscriptionID
+		clientID = s.ServiceKey.AppID
+		clientSecret = s.ServiceKey.Password
+		tenantID = s.ServiceKey.Tenant
+		return
+	}
+	// 2. Check config values (set though endpoint)
+	if cp.AzureSubscriptionID != "" && cp.AzureClientID != "" && cp.AzureClientSecret != "" && cp.AzureTenantID != "" {
+		subscriptionID = cp.AzureSubscriptionID
+		clientID = cp.AzureClientID
+		clientSecret = cp.AzureClientSecret
+		tenantID = cp.AzureTenantID
+		return
+	}
+	// 3. Check if AzureSubscriptionID is set in config (set though endpoint)
+	// MSI credentials will be attempted if the subscription ID is set, but clientID, clientSecret and tenantID are not
+	if cp.AzureSubscriptionID != "" {
+		subscriptionID = cp.AzureSubscriptionID
+		return
+	}
+	// 4. Empty values
+	return "", "", "", ""
+
+}
+
+// GetAzureStorageConfig retrieves storage config from secret and sets default values
+func (az *Azure) GetAzureStorageConfig(forceReload bool, cp *models.CustomPricing) (*AzureStorageConfig, error) {
+	// default subscription id
+	defaultSubscriptionID := cp.AzureSubscriptionID
+
+	// 1. Check Config for storage set up
+	asc := &AzureStorageConfig{
+		SubscriptionId: cp.AzureStorageSubscriptionID,
+		AccountName:    cp.AzureStorageAccount,
+		AccessKey:      cp.AzureStorageAccessKey,
+		ContainerName:  cp.AzureStorageContainer,
+		ContainerPath:  cp.AzureContainerPath,
+		AzureCloud:     cp.AzureCloud,
+	}
+
+	// check for required fields
+	if asc != nil && asc.AccessKey != "" && asc.AccountName != "" && asc.ContainerName != "" && asc.SubscriptionId != "" {
+		az.ServiceAccountChecks.Set("hasStorage", &models.ServiceAccountCheck{
+			Message: "Azure Storage Config exists",
+			Status:  true,
+		})
+		return asc, nil
+	}
+
+	// 2. Check for secret
+	asc, err := az.loadAzureStorageConfig(forceReload)
+	if err != nil {
+		log.Errorf("Error, %s", err.Error())
+	} else if asc != nil {
+		// To support already configured users, subscriptionID may not be set in secret in which case, the subscriptionID
+		// for the rate card API is used
+		if asc.SubscriptionId == "" {
+			asc.SubscriptionId = defaultSubscriptionID
+		}
+		// check for required fields
+		if asc.AccessKey != "" && asc.AccountName != "" && asc.ContainerName != "" && asc.SubscriptionId != "" {
+			az.ServiceAccountChecks.Set("hasStorage", &models.ServiceAccountCheck{
+				Message: "Azure Storage Config exists",
+				Status:  true,
+			})
+
+			return asc, nil
+		}
+	}
+
+	az.ServiceAccountChecks.Set("hasStorage", &models.ServiceAccountCheck{
+		Message: "Azure Storage Config exists",
+		Status:  false,
+	})
+	return nil, fmt.Errorf("azure storage config not found")
+
+}
+
+// Load once and cache the result (even on failure). This is an install time secret, so
+// we don't expect the secret to change. If it does, however, we can force reload using
+// the input parameter.
+func (az *Azure) loadAzureAuthSecret(force bool) (*AzureServiceKey, error) {
+	if !force && az.loadedAzureSecret {
+		return az.azureSecret, nil
+	}
+	az.loadedAzureSecret = true
+
+	exists, err := fileutil.FileExists(models.AuthSecretPath)
+	if !exists || err != nil {
+		return nil, fmt.Errorf("Failed to locate service account file: %s", models.AuthSecretPath)
+	}
+
+	result, err := os.ReadFile(models.AuthSecretPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var ask AzureServiceKey
+	err = json.Unmarshal(result, &ask)
+	if err != nil {
+		return nil, err
+	}
+
+	az.azureSecret = &ask
+	return &ask, nil
+}
+
+// Load once and cache the result (even on failure). This is an install time secret, so
+// we don't expect the secret to change. If it does, however, we can force reload using
+// the input parameter.
+func (az *Azure) loadAzureStorageConfig(force bool) (*AzureStorageConfig, error) {
+	if !force && az.loadedAzureStorageConfigSecret {
+		return az.azureStorageConfig, nil
+	}
+	az.loadedAzureStorageConfigSecret = true
+
+	exists, err := fileutil.FileExists(models.StorageConfigSecretPath)
+	if !exists || err != nil {
+		return nil, fmt.Errorf("Failed to locate azure storage config file: %s", models.StorageConfigSecretPath)
+	}
+
+	result, err := os.ReadFile(models.StorageConfigSecretPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var asc AzureStorageConfig
+	err = json.Unmarshal(result, &asc)
+	if err != nil {
+		return nil, err
+	}
+
+	az.azureStorageConfig = &asc
+	return &asc, nil
+}
+
+func (az *Azure) GetKey(labels map[string]string, n *v1.Node) models.Key {
+	cfg, err := az.GetConfig()
+	if err != nil {
+		log.Infof("Error loading azure custom pricing information")
+	}
+	// azure defaults, see https://docs.microsoft.com/en-us/azure/aks/gpu-cluster
+	gpuLabel := "accelerator"
+	gpuLabelValue := "nvidia"
+	if cfg.GpuLabel != "" {
+		gpuLabel = cfg.GpuLabel
+	}
+	if cfg.GpuLabelValue != "" {
+		gpuLabelValue = cfg.GpuLabelValue
+	}
+	return &azureKey{
+		Labels:        labels,
+		GPULabel:      gpuLabel,
+		GPULabelValue: gpuLabelValue,
+	}
+}
+
// createString concatenates its arguments into a single string.
func createString(keys ...string) string {
	return strings.Join(keys, "")
}
+
// transformMachineType prefixes the raw machine-type names with their tier:
// "Basic_" when the meter sub-category mentions Basic, otherwise "Standard_".
// A two-element input yields both Standard variants; otherwise only the first
// element is used.
func transformMachineType(subCategory string, mt []string) []string {
	if strings.Contains(subCategory, "Basic") {
		return []string{"Basic_" + mt[0]}
	}
	if len(mt) == 2 {
		return []string{"Standard_" + mt[0], "Standard_" + mt[1]}
	}
	return []string{"Standard_" + mt[0]}
}
+
// addSuffix inserts each suffix after the second underscore-separated part of
// mt, preserving any remaining parts as a trailing segment. For example
// addSuffix("Standard_D2_v3", "s") yields ["Standard_D2s_v3"].
// Assumes mt has at least two "_"-separated parts — TODO confirm with callers.
func addSuffix(mt string, suffixes ...string) []string {
	parts := strings.Split(mt, "_")
	tail := ""
	if len(parts) > 2 {
		tail = "_" + strings.Join(parts[2:], "_")
	}
	prefix := parts[0] + "_" + parts[1]

	result := make([]string, len(suffixes))
	for i, suffix := range suffixes {
		result[i] = prefix + suffix + tail
	}
	return result
}
+
// getMachineTypeVariants expands a canonical machine type into the sibling
// SKU spellings Azure may bill it under (premium-storage "s" suffixes,
// DS/GS-series aliases, constrained-core "-N" variants, and M-series memory
// variants). The switch is order-sensitive: each family matcher handles its
// SKUs and returns; unmatched types yield an empty slice.
func getMachineTypeVariants(mt string) []string {
	switch {
	case mtStandardB.MatchString(mt):
		// B-series only gains the "s" suffix.
		return []string{createString(mt, "s")}
	case mtStandardD.MatchString(mt):
		var result []string
		result = append(result, addSuffix(mt, "s")[0])
		// D-series is also billed as DS-series, including constrained-core variants.
		dsType := strings.Replace(mt, "Standard_D", "Standard_DS", -1)
		result = append(result, dsType)
		result = append(result, addSuffix(dsType, "-1", "-2", "-4", "-8")...)
		return result
	case mtStandardE.MatchString(mt):
		return addSuffix(mt, "s", "-2s", "-4s", "-8s", "-16s", "-32s")
	case mtStandardF.MatchString(mt):
		return addSuffix(mt, "s")
	case mtStandardG.MatchString(mt):
		var result []string
		// G-series is also billed as GS-series, including constrained-core variants.
		gsType := strings.Replace(mt, "Standard_G", "Standard_GS", -1)
		result = append(result, gsType)
		return append(result, addSuffix(gsType, "-4", "-8", "-16")...)
	case mtStandardL.MatchString(mt):
		return addSuffix(mt, "s")
	case mtStandardM.MatchString(mt) && strings.HasSuffix(mt, "ms"):
		// M-series memory-optimized: expand the constrained-core "ms" variants.
		base := strings.TrimSuffix(mt, "ms")
		return addSuffix(base, "-2ms", "-4ms", "-8ms", "-16ms", "-32ms", "-64ms")
	case mtStandardM.MatchString(mt) && (strings.HasSuffix(mt, "ls") || strings.HasSuffix(mt, "ts")):
		// ls/ts M-series SKUs have no known billing variants.
		return []string{}
	case mtStandardM.MatchString(mt) && strings.HasSuffix(mt, "s"):
		base := strings.TrimSuffix(mt, "s")
		return addSuffix(base, "", "m")
	case mtStandardN.MatchString(mt):
		return addSuffix(mt, "s")
	}
	return []string{}
}
+
+func (az *Azure) GetManagementPlatform() (string, error) {
+	nodes := az.Clientset.GetAllNodes()
+
+	if len(nodes) > 0 {
+		n := nodes[0]
+		providerID := n.Spec.ProviderID
+		if strings.Contains(providerID, "aks") {
+			return "aks", nil
+		}
+	}
+	return "", nil
+}
+
// DownloadPricingData populates az.Pricing from the Azure rate card API
// ("best guess" prices) and, when a billing account is configured, kicks off
// an asynchronous refresh from the price sheet API. Holds
// DownloadPricingDataLock for the duration of the synchronous work; errors
// are recorded in rateCardPricingError / priceSheetPricingError.
func (az *Azure) DownloadPricingData() error {
	az.DownloadPricingDataLock.Lock()
	defer az.DownloadPricingDataLock.Unlock()

	config, err := az.GetConfig()
	if err != nil {
		az.rateCardPricingError = err
		return err
	}

	// Environment variables override the stored billing account / offer ID.
	envBillingAccount := env.GetAzureBillingAccount()
	if envBillingAccount != "" {
		config.AzureBillingAccount = envBillingAccount
	}
	envOfferID := env.GetAzureOfferID()
	if envOfferID != "" {
		config.AzureOfferDurableID = envOfferID
	}

	// Load the service provider keys
	subscriptionID, clientID, clientSecret, tenantID := az.getAzureRateCardAuth(false, config)
	config.AzureSubscriptionID = subscriptionID
	config.AzureClientID = clientID
	config.AzureClientSecret = clientSecret
	config.AzureTenantID = tenantID

	var authorizer autorest.Authorizer

	// Pick public/china/government cloud endpoints from the cluster region.
	azureEnv := determineCloudByRegion(az.ClusterRegion)

	// Prefer explicit client credentials when fully configured.
	if config.AzureClientID != "" && config.AzureClientSecret != "" && config.AzureTenantID != "" {
		credentialsConfig := NewClientCredentialsConfig(config.AzureClientID, config.AzureClientSecret, config.AzureTenantID, azureEnv)
		a, err := credentialsConfig.Authorizer()
		if err != nil {
			az.rateCardPricingError = err
			return err
		}
		authorizer = a
	}

	// Otherwise fall back to environment-based auth, then file-based auth.
	if authorizer == nil {
		a, err := auth.NewAuthorizerFromEnvironment()
		authorizer = a
		if err != nil {
			a, err := auth.NewAuthorizerFromFile(azureEnv.ResourceManagerEndpoint)
			if err != nil {
				az.rateCardPricingError = err
				return err
			}
			authorizer = a
		}
	}

	sClient := subscriptions.NewClientWithBaseURI(azureEnv.ResourceManagerEndpoint)
	sClient.Authorizer = authorizer

	rcClient := commerce.NewRateCardClientWithBaseURI(azureEnv.ResourceManagerEndpoint, config.AzureSubscriptionID)
	rcClient.Authorizer = authorizer

	providersClient := resources.NewProvidersClientWithBaseURI(azureEnv.ResourceManagerEndpoint, config.AzureSubscriptionID)
	providersClient.Authorizer = authorizer

	rateCardFilter := fmt.Sprintf("OfferDurableId eq '%s' and Currency eq '%s' and Locale eq 'en-US' and RegionInfo eq '%s'", config.AzureOfferDurableID, config.CurrencyCode, config.AzureBillingRegion)

	log.Infof("Using ratecard query %s", rateCardFilter)
	result, err := rcClient.Get(context.TODO(), rateCardFilter)
	if err != nil {
		log.Warnf("Error in pricing download query from API")
		az.rateCardPricingError = err
		return err
	}

	regions, err := getRegions("compute", sClient, providersClient, config.AzureSubscriptionID)
	if err != nil {
		log.Warnf("Error in pricing download regions from API")
		az.rateCardPricingError = err
		return err
	}

	baseCPUPrice := config.CPU
	allPrices := make(map[string]*AzurePricing)

	// Convert every rate card meter into node/PV pricing entries; meters that
	// fail conversion are logged and skipped rather than aborting the download.
	for _, v := range *result.Meters {
		pricings, err := convertMeterToPricings(v, regions, baseCPUPrice)
		if err != nil {
			log.Warnf("converting meter to pricings: %s", err.Error())
			continue
		}
		for key, pricing := range pricings {
			allPrices[key] = pricing
		}
	}
	addAzureFilePricing(allPrices, regions)

	az.Pricing = allPrices
	az.pricingSource = rateCardPricingSource
	az.rateCardPricingError = nil

	// If we've got a billing account set, kick off downloading the custom pricing data.
	if config.AzureBillingAccount != "" {
		downloader := PriceSheetDownloader{
			TenantID:       config.AzureTenantID,
			ClientID:       config.AzureClientID,
			ClientSecret:   config.AzureClientSecret,
			BillingAccount: config.AzureBillingAccount,
			OfferID:        config.AzureOfferDurableID,
			ConvertMeterInfo: func(meterInfo commerce.MeterInfo) (map[string]*AzurePricing, error) {
				return convertMeterToPricings(meterInfo, regions, baseCPUPrice)
			},
		}
		// The price sheet can take 5 minutes to generate, so we don't
		// want to hang onto the lock while we're waiting for it.
		go func() {
			ctx := context.Background()
			allPrices, err := downloader.GetPricing(ctx)

			// Re-acquire the lock before swapping in the price sheet data.
			az.DownloadPricingDataLock.Lock()
			defer az.DownloadPricingDataLock.Unlock()
			if err != nil {
				log.Errorf("Error downloading Azure price sheet: %s", err)
				az.priceSheetPricingError = err
				return
			}
			addAzureFilePricing(allPrices, regions)
			az.Pricing = allPrices
			az.pricingSource = priceSheetPricingSource
			az.priceSheetPricingError = nil
		}()
	}

	return nil
}
+
// convertMeterToPricings translates one rate card meter into pricing entries.
// Storage meters yield a single PV entry keyed "region,storageClass"; VM
// meters yield Node entries keyed "region,instanceType,usageType" for every
// machine-type variant. Meters with unknown regions, Windows sub-categories,
// or non-VM/non-storage categories return (nil, nil) deliberately — they are
// skipped, not errors. An error is returned only when rate info is missing.
func convertMeterToPricings(info commerce.MeterInfo, regions map[string]string, baseCPUPrice string) (map[string]*AzurePricing, error) {
	meterName := *info.MeterName
	meterRegion := *info.MeterRegion
	meterCategory := *info.MeterCategory
	meterSubCategory := *info.MeterSubCategory

	region, err := toRegionID(meterRegion, regions)
	if err != nil {
		// Skip this meter if we don't recognize the region.
		return nil, nil
	}

	if strings.Contains(meterSubCategory, "Windows") {
		// This meter doesn't correspond to any pricings.
		return nil, nil
	}

	if strings.Contains(meterCategory, "Storage") {
		if strings.Contains(meterSubCategory, "HDD") || strings.Contains(meterSubCategory, "SSD") || strings.Contains(meterSubCategory, "Premium Files") {
			// Representative meter names identify the storage class; only
			// these specific sizes are sampled for per-GB pricing.
			var storageClass string = ""
			if strings.Contains(meterName, "P4 ") {
				storageClass = AzureDiskPremiumSSDStorageClass
			} else if strings.Contains(meterName, "E4 ") {
				storageClass = AzureDiskStandardSSDStorageClass
			} else if strings.Contains(meterName, "S4 ") {
				storageClass = AzureDiskStandardStorageClass
			} else if strings.Contains(meterName, "LRS Provisioned") {
				storageClass = AzureFilePremiumStorageClass
			}

			if storageClass != "" {
				var priceInUsd float64

				if len(info.MeterRates) < 1 {
					return nil, fmt.Errorf("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *info.MeterSubCategory, "region": region})
				}
				for _, rate := range info.MeterRates {
					priceInUsd += *rate
				}
				// rate is in disk per month, resolve price per hour, then GB per hour
				// (the sampled disks above are 32 GB).
				pricePerHour := priceInUsd / 730.0 / 32.0
				priceStr := fmt.Sprintf("%f", pricePerHour)

				key := region + "," + storageClass
				log.Debugf("Adding PV.Key: %s, Cost: %s", key, priceStr)
				return map[string]*AzurePricing{
					key: {
						PV: &models.PV{
							Cost:   priceStr,
							Region: region,
						},
					},
				}, nil
			}
		}
	}

	if !strings.Contains(meterCategory, "Virtual Machines") {
		return nil, nil
	}

	// "Low Priority" meters map to preemptible usage; everything else is on-demand.
	usageType := ""
	if !strings.Contains(meterName, "Low Priority") {
		usageType = "ondemand"
	} else {
		usageType = "preemptible"
	}

	// A meter name may cover several instance types separated by "/"; Promo
	// sub-categories get a " Promo" suffix before normalization.
	var instanceTypes []string
	name := strings.TrimSuffix(meterName, " Low Priority")
	instanceType := strings.Split(name, "/")
	for _, it := range instanceType {
		if strings.Contains(meterSubCategory, "Promo") {
			it = it + " Promo"
		}
		instanceTypes = append(instanceTypes, strings.Replace(it, " ", "_", 1))
	}

	instanceTypes = transformMachineType(meterSubCategory, instanceTypes)
	if strings.Contains(name, "Expired") {
		instanceTypes = []string{}
	}

	var priceInUsd float64

	if len(info.MeterRates) < 1 {
		return nil, fmt.Errorf("missing rate info %+v", map[string]interface{}{"MeterSubCategory": *info.MeterSubCategory, "region": region})
	}
	for _, rate := range info.MeterRates {
		priceInUsd += *rate
	}
	priceStr := fmt.Sprintf("%f", priceInUsd)
	// Emit one Node pricing entry per resolved instance type.
	results := make(map[string]*AzurePricing)
	for _, instanceType := range instanceTypes {

		key := fmt.Sprintf("%s,%s,%s", region, instanceType, usageType)
		pricing := &AzurePricing{
			Node: &models.Node{
				Cost:         priceStr,
				BaseCPUPrice: baseCPUPrice,
				UsageType:    usageType,
			},
		}
		results[key] = pricing
	}
	return results, nil

}
+
+func addAzureFilePricing(prices map[string]*AzurePricing, regions map[string]string) {
+	// There is no easy way of supporting Standard Azure-File, because it's billed per used GB
+	// this will set the price to "0" as a workaround to not spam with `Persistent Volume pricing not found for` error
+	// check https://github.com/opencost/opencost/issues/159 for more information (same problem on AWS)
+	zeroPrice := "0.0"
+	for region := range regions {
+		key := region + "," + AzureFileStandardStorageClass
+		log.Debugf("Adding PV.Key: %s, Cost: %s", key, zeroPrice)
+		prices[key] = &AzurePricing{
+			PV: &models.PV{
+				Cost:   zeroPrice,
+				Region: region,
+			},
+		}
+	}
+}
+
+// determineCloudByRegion uses region name to pick the correct Cloud Environment for the azure provider to use
+func determineCloudByRegion(region string) azure.Environment {
+	lcRegion := strings.ToLower(region)
+	if strings.Contains(lcRegion, "china") {
+		return azure.ChinaCloud
+	}
+	if strings.Contains(lcRegion, "gov") || strings.Contains(lcRegion, "dod") {
+		return azure.USGovernmentCloud
+	}
+	// Default to public cloud
+	return azure.PublicCloud
+}
+
+// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials.
+func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string, env azure.Environment) auth.ClientCredentialsConfig {
+	return auth.ClientCredentialsConfig{
+		ClientID:     clientID,
+		ClientSecret: clientSecret,
+		TenantID:     tenantID,
+		Resource:     env.ResourceManagerEndpoint,
+		AADEndpoint:  env.ActiveDirectoryEndpoint,
+	}
+}
+
+func (az *Azure) addPricing(features string, azurePricing *AzurePricing) {
+	if az.Pricing == nil {
+		az.Pricing = map[string]*AzurePricing{}
+	}
+	az.Pricing[features] = azurePricing
+}
+
+// AllNodePricing returns the Azure pricing objects stored
+func (az *Azure) AllNodePricing() (interface{}, error) {
+	az.DownloadPricingDataLock.RLock()
+	defer az.DownloadPricingDataLock.RUnlock()
+	return az.Pricing, nil
+}
+
+// NodePricing returns Azure pricing data for a single node
+func (az *Azure) NodePricing(key models.Key) (*models.Node, error) {
+	az.DownloadPricingDataLock.RLock()
+	defer az.DownloadPricingDataLock.RUnlock()
+	pricingDataExists := true
+	if az.Pricing == nil {
+		pricingDataExists = false
+		log.DedupedWarningf(1, "Unable to download Azure pricing data")
+	}
+
+	azKey, ok := key.(*azureKey)
+	if !ok {
+		return nil, fmt.Errorf("azure: NodePricing: key is of type %T", key)
+	}
+	config, _ := az.GetConfig()
+
+	// Spot Node
+	if slv, ok := azKey.Labels[config.SpotLabel]; ok && slv == config.SpotLabelValue && config.SpotLabel != "" && config.SpotLabelValue != "" {
+		features := strings.Split(azKey.Features(), ",")
+		region := features[0]
+		instance := features[1]
+		spotFeatures := fmt.Sprintf("%s,%s,%s", region, instance, "spot")
+		if n, ok := az.Pricing[spotFeatures]; ok {
+			log.DedupedInfof(5, "Returning pricing for node %s: %+v from key %s", azKey, n, spotFeatures)
+			if azKey.isValidGPUNode() {
+				n.Node.GPU = "1" // TODO: support multiple GPUs
+			}
+			return n.Node, nil
+		}
+		log.Infof("[Info] found spot instance, trying to get retail price for %s: %s, ", spotFeatures, azKey)
+		spotCost, err := getRetailPrice(region, instance, config.CurrencyCode, true)
+		if err != nil {
+			log.DedupedWarningf(5, "failed to retrieve spot retail pricing")
+		} else {
+			gpu := ""
+			if azKey.isValidGPUNode() {
+				gpu = "1"
+			}
+			spotNode := &models.Node{
+				Cost:      spotCost,
+				UsageType: "spot",
+				GPU:       gpu,
+			}
+			az.addPricing(spotFeatures, &AzurePricing{
+				Node: spotNode,
+			})
+			return spotNode, nil
+		}
+	}
+
+	// Use the downloaded pricing data if possible. Otherwise, use default
+	// configured pricing data.
+	if pricingDataExists {
+		if n, ok := az.Pricing[azKey.Features()]; ok {
+			log.Debugf("Returning pricing for node %s: %+v from key %s", azKey, n, azKey.Features())
+			if azKey.isValidGPUNode() {
+				n.Node.GPU = azKey.GetGPUCount()
+			}
+			return n.Node, nil
+		}
+		log.DedupedWarningf(5, "No pricing data found for node %s from key %s", azKey, azKey.Features())
+	}
+	c, err := az.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("No default pricing data available")
+	}
+
+	// GPU Node
+	if azKey.isValidGPUNode() {
+		return &models.Node{
+			VCPUCost:         c.CPU,
+			RAMCost:          c.RAM,
+			UsesBaseCPUPrice: true,
+			GPUCost:          c.GPU,
+			GPU:              azKey.GetGPUCount(),
+		}, nil
+	}
+
+	// Serverless Node. This is an Azure Container Instance, and no pods can be
+	// scheduled to this node. Azure does not charge for this node. Set costs to
+	// zero.
+	if azKey.Labels["kubernetes.io/hostname"] == "virtual-node-aci-linux" {
+		return &models.Node{
+			VCPUCost: "0",
+			RAMCost:  "0",
+		}, nil
+	}
+
+	// Regular Node
+	return &models.Node{
+		VCPUCost:         c.CPU,
+		RAMCost:          c.RAM,
+		UsesBaseCPUPrice: true,
+	}, nil
+}
+
+// Stubbed NetworkPricing for Azure. Pull directly from azure.json for now
+func (az *Azure) NetworkPricing() (*models.Network, error) {
+	cpricing, err := az.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	znec, err := strconv.ParseFloat(cpricing.ZoneNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	rnec, err := strconv.ParseFloat(cpricing.RegionNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	inec, err := strconv.ParseFloat(cpricing.InternetNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &models.Network{
+		ZoneNetworkEgressCost:     znec,
+		RegionNetworkEgressCost:   rnec,
+		InternetNetworkEgressCost: inec,
+	}, nil
+}
+
+// LoadBalancerPricing on Azure, LoadBalancer services correspond to public IPs. For now the pricing of LoadBalancer
+// services will be that of a standard static public IP https://azure.microsoft.com/en-us/pricing/details/ip-addresses/.
+// Azure still has load balancers which follow the standard pricing scheme based on rules
+// https://azure.microsoft.com/en-us/pricing/details/load-balancer/, they are created on a per-cluster basis.
+func (azr *Azure) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	return &models.LoadBalancer{
+		Cost: 0.005,
+	}, nil
+}
+
// azurePvKey identifies the pricing entry for an Azure persistent volume.
// It implements models.PVKey.
type azurePvKey struct {
	Labels                 map[string]string // PV labels; used to resolve the region
	StorageClass           string            // the PV's storage class name
	StorageClassParameters map[string]string // storage class parameters (e.g. skuName)
	DefaultRegion          string            // fallback region when labels carry none
	ProviderId             string            // Azure disk name, when backed by AzureDisk
}
+
+func (az *Azure) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	providerID := ""
+	if pv.Spec.AzureDisk != nil {
+		providerID = pv.Spec.AzureDisk.DiskName
+	}
+	return &azurePvKey{
+		Labels:                 pv.Labels,
+		StorageClass:           pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		DefaultRegion:          defaultRegion,
+		ProviderId:             providerID,
+	}
+}
+
// ID returns the provider-assigned identifier (the Azure disk name, or ""
// when the PV is not disk-backed).
func (key *azurePvKey) ID() string {
	return key.ProviderId
}
+
// GetStorageClass returns the PV's Kubernetes storage class name.
func (key *azurePvKey) GetStorageClass() string {
	return key.StorageClass
}
+
+func (key *azurePvKey) Features() string {
+	storageClass := key.StorageClassParameters["storageaccounttype"]
+	storageSKU := key.StorageClassParameters["skuName"]
+	if storageClass != "" {
+		if strings.EqualFold(storageClass, "Premium_LRS") {
+			storageClass = AzureDiskPremiumSSDStorageClass
+		} else if strings.EqualFold(storageClass, "StandardSSD_LRS") {
+			storageClass = AzureDiskStandardSSDStorageClass
+		} else if strings.EqualFold(storageClass, "Standard_LRS") {
+			storageClass = AzureDiskStandardStorageClass
+		}
+	} else {
+		if strings.EqualFold(storageSKU, "Premium_LRS") {
+			storageClass = AzureFilePremiumStorageClass
+		} else if strings.EqualFold(storageSKU, "Standard_LRS") {
+			storageClass = AzureFileStandardStorageClass
+		}
+	}
+	if region, ok := util.GetRegion(key.Labels); ok {
+		return region + "," + storageClass
+	}
+
+	return key.DefaultRegion + "," + storageClass
+}
+
// GetAddresses is not implemented for Azure; it returns nil with no error.
func (*Azure) GetAddresses() ([]byte, error) {
	return nil, nil
}
+
// GetDisks returns the subscription's managed disks serialized as JSON.
func (az *Azure) GetDisks() ([]byte, error) {
	disks, err := az.getDisks()
	if err != nil {
		return nil, err
	}

	return json.Marshal(disks)
}
+
+func (az *Azure) getDisks() ([]*compute.Disk, error) {
+	config, err := az.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// Load the service provider keys
+	subscriptionID, clientID, clientSecret, tenantID := az.getAzureRateCardAuth(false, config)
+	config.AzureSubscriptionID = subscriptionID
+	config.AzureClientID = clientID
+	config.AzureClientSecret = clientSecret
+	config.AzureTenantID = tenantID
+
+	var authorizer autorest.Authorizer
+
+	azureEnv := determineCloudByRegion(az.ClusterRegion)
+
+	if config.AzureClientID != "" && config.AzureClientSecret != "" && config.AzureTenantID != "" {
+		credentialsConfig := NewClientCredentialsConfig(config.AzureClientID, config.AzureClientSecret, config.AzureTenantID, azureEnv)
+		a, err := credentialsConfig.Authorizer()
+		if err != nil {
+			az.rateCardPricingError = err
+			return nil, err
+		}
+		authorizer = a
+	}
+
+	if authorizer == nil {
+		a, err := auth.NewAuthorizerFromEnvironment()
+		authorizer = a
+		if err != nil {
+			a, err := auth.NewAuthorizerFromFile(azureEnv.ResourceManagerEndpoint)
+			if err != nil {
+				az.rateCardPricingError = err
+				return nil, err
+			}
+			authorizer = a
+		}
+	}
+	client := compute.NewDisksClient(config.AzureSubscriptionID)
+	client.Authorizer = authorizer
+
+	ctx := context.TODO()
+
+	var disks []*compute.Disk
+
+	diskPage, err := client.List(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("error getting disks: %v", err)
+	}
+
+	for diskPage.NotDone() {
+		for _, d := range diskPage.Values() {
+			d := d
+			disks = append(disks, &d)
+		}
+		err := diskPage.NextWithContext(context.Background())
+		if err != nil {
+			return nil, fmt.Errorf("error getting next page: %v", err)
+		}
+	}
+
+	return disks, nil
+}
+
+func (az *Azure) isDiskOrphaned(disk *compute.Disk) bool {
+	//TODO: needs better algorithm
+	return disk.DiskState == "Unattached" || disk.DiskState == "Reserved"
+}
+
+func (az *Azure) GetOrphanedResources() ([]models.OrphanedResource, error) {
+	disks, err := az.getDisks()
+	if err != nil {
+		return nil, err
+	}
+
+	var orphanedResources []models.OrphanedResource
+
+	for _, d := range disks {
+		if az.isDiskOrphaned(d) {
+			cost, err := az.findCostForDisk(d)
+			if err != nil {
+				return nil, err
+			}
+
+			diskName := ""
+			if d.Name != nil {
+				diskName = *d.Name
+			}
+
+			diskRegion := ""
+			if d.Location != nil {
+				diskRegion = *d.Location
+			}
+
+			var diskSize int64
+			if d.DiskSizeGB != nil {
+				diskSize = int64(*d.DiskSizeGB)
+			}
+
+			desc := map[string]string{}
+			for k, v := range d.Tags {
+				if v == nil {
+					desc[k] = ""
+				} else {
+					desc[k] = *v
+				}
+			}
+
+			or := models.OrphanedResource{
+				Kind:        "disk",
+				Region:      diskRegion,
+				Description: desc,
+				Size:        &diskSize,
+				DiskName:    diskName,
+				MonthlyCost: &cost,
+			}
+			orphanedResources = append(orphanedResources, or)
+		}
+	}
+
+	return orphanedResources, nil
+}
+
+func (az *Azure) findCostForDisk(d *compute.Disk) (float64, error) {
+	if d == nil {
+		return 0.0, fmt.Errorf("disk is empty")
+	}
+	storageClass := string(d.Sku.Name)
+	if strings.EqualFold(storageClass, "Premium_LRS") {
+		storageClass = AzureDiskPremiumSSDStorageClass
+	} else if strings.EqualFold(storageClass, "StandardSSD_LRS") {
+		storageClass = AzureDiskStandardSSDStorageClass
+	} else if strings.EqualFold(storageClass, "Standard_LRS") {
+		storageClass = AzureDiskStandardStorageClass
+	}
+
+	key := *d.Location + "," + storageClass
+
+	diskPricePerGBHour, err := strconv.ParseFloat(az.Pricing[key].PV.Cost, 64)
+	if err != nil {
+		return 0.0, fmt.Errorf("error converting to float: %s", err)
+	}
+	cost := diskPricePerGBHour * timeutil.HoursPerMonth * float64(*d.DiskSizeGB)
+
+	return cost, nil
+}
+
+func (az *Azure) ClusterInfo() (map[string]string, error) {
+	remoteEnabled := env.IsRemoteEnabled()
+
+	m := make(map[string]string)
+	m["name"] = "Azure Cluster #1"
+	c, err := az.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	if c.ClusterName != "" {
+		m["name"] = c.ClusterName
+	}
+	m["provider"] = kubecost.AzureProvider
+	m["account"] = az.ClusterAccountID
+	m["region"] = az.ClusterRegion
+	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
+	m["id"] = env.GetClusterID()
+	return m, nil
+
+}
+
// UpdateConfigFromConfigMap applies the given key/value map to the custom
// pricing configuration.
func (az *Azure) UpdateConfigFromConfigMap(a map[string]string) (*models.CustomPricing, error) {
	return az.Config.UpdateFromMap(a)
}
+
+func (az *Azure) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
+	return az.Config.Update(func(c *models.CustomPricing) error {
+		if updateType == AzureStorageUpdateType {
+			asc := &AzureStorageConfig{}
+			err := json.NewDecoder(r).Decode(&asc)
+			if err != nil {
+				return fmt.Errorf("error decoding AzureStorageConfig: %s", err)
+			}
+
+			c.AzureStorageSubscriptionID = asc.SubscriptionId
+			c.AzureStorageAccount = asc.AccountName
+			if asc.AccessKey != "" {
+				c.AzureStorageAccessKey = asc.AccessKey
+			}
+			c.AzureStorageContainer = asc.ContainerName
+			c.AzureContainerPath = asc.ContainerPath
+			c.AzureCloud = asc.AzureCloud
+		} else {
+			// This will block if not in a goroutine. It calls GetConfig(), which
+			// in turn calls GetCustomPricingData, which acquires the same lock
+			// that is acquired by az.Config.Update, which is the function to
+			// which this function gets passed, and subsequently called. Booo.
+			defer func() {
+				go az.DownloadPricingData()
+			}()
+
+			a := make(map[string]interface{})
+			err := json.NewDecoder(r).Decode(&a)
+			if err != nil {
+				return fmt.Errorf("error decoding AzureStorageConfig: %s", err)
+			}
+
+			for k, v := range a {
+				// Just so we consistently supply / receive the same values, uppercase the first letter.
+				kUpper := utils.ToTitle.String(k)
+				vstr, ok := v.(string)
+				if ok {
+					err := models.SetCustomPricingField(c, kUpper, vstr)
+					if err != nil {
+						return fmt.Errorf("error setting custom pricing field on AzureStorageConfig: %s", err)
+					}
+				} else {
+					return fmt.Errorf("type error while updating config for %s", kUpper)
+				}
+			}
+		}
+
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
+			if err != nil {
+				return fmt.Errorf("error updating cluster metadata: %s", err)
+			}
+		}
+
+		return nil
+	})
+}
+
+func (az *Azure) GetConfig() (*models.CustomPricing, error) {
+	c, err := az.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	if c.Discount == "" {
+		c.Discount = "0%"
+	}
+	if c.NegotiatedDiscount == "" {
+		c.NegotiatedDiscount = "0%"
+	}
+	if c.CurrencyCode == "" {
+		c.CurrencyCode = "USD"
+	}
+	if c.AzureBillingRegion == "" {
+		c.AzureBillingRegion = "US"
+	}
+	// Default to pay-as-you-go Durable offer id
+	if c.AzureOfferDurableID == "" {
+		c.AzureOfferDurableID = "MS-AZR-0003p"
+	}
+	if c.ShareTenancyCosts == "" {
+		c.ShareTenancyCosts = models.DefaultShareTenancyCost
+	}
+	if c.SpotLabel == "" {
+		c.SpotLabel = defaultSpotLabel
+	}
+	if c.SpotLabelValue == "" {
+		c.SpotLabelValue = defaultSpotLabelValue
+	}
+	return c, nil
+}
+
// ApplyReservedInstancePricing is a no-op on Azure; reserved-instance
// pricing is not currently applied to node costs.
func (az *Azure) ApplyReservedInstancePricing(nodes map[string]*models.Node) {

}
+
+func (az *Azure) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	az.DownloadPricingDataLock.RLock()
+	defer az.DownloadPricingDataLock.RUnlock()
+
+	pricing, ok := az.Pricing[pvk.Features()]
+	if !ok {
+		log.Debugf("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		return &models.PV{}, nil
+	}
+	return pricing.PV, nil
+}
+
// GetLocalStorageQuery is not implemented for Azure; it returns an empty
// Prometheus query string.
func (az *Azure) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
	return ""
}
+
// ServiceAccountStatus returns the current status of the provider's
// service-account checks.
func (az *Azure) ServiceAccountStatus() *models.ServiceAccountStatus {
	return az.ServiceAccountChecks.GetStatus()
}
+
// Display names of the supported Azure pricing sources, used as keys and
// names in PricingSourceStatus.
const (
	rateCardPricingSource   = "Rate Card API"
	priceSheetPricingSource = "Price Sheet API"
)
+
+// PricingSourceStatus returns the status of the rate card api
+func (az *Azure) PricingSourceStatus() map[string]*models.PricingSource {
+	az.DownloadPricingDataLock.Lock()
+	defer az.DownloadPricingDataLock.Unlock()
+	sources := make(map[string]*models.PricingSource)
+	errMsg := ""
+	if az.rateCardPricingError != nil {
+		errMsg = az.rateCardPricingError.Error()
+	}
+	rcps := &models.PricingSource{
+		Name:    rateCardPricingSource,
+		Enabled: az.pricingSource == rateCardPricingSource,
+		Error:   errMsg,
+	}
+	if rcps.Error != "" {
+		rcps.Available = false
+	} else if len(az.Pricing) == 0 {
+		rcps.Error = "No Pricing Data Available"
+		rcps.Available = false
+	} else {
+		rcps.Available = true
+	}
+
+	errMsg = ""
+	if az.priceSheetPricingError != nil {
+		errMsg = az.priceSheetPricingError.Error()
+	}
+	psps := &models.PricingSource{
+		Name:    priceSheetPricingSource,
+		Enabled: az.pricingSource == priceSheetPricingSource,
+		Error:   errMsg,
+	}
+	if psps.Error != "" {
+		psps.Available = false
+	} else if len(az.Pricing) == 0 {
+		psps.Error = "No Pricing Data Available"
+		psps.Available = false
+	} else if env.GetAzureBillingAccount() == "" {
+		psps.Error = "No Azure Billing Account ID"
+		psps.Available = false
+	} else {
+		psps.Available = true
+	}
+	sources[rateCardPricingSource] = rcps
+	sources[priceSheetPricingSource] = psps
+	return sources
+}
+
// ClusterManagementPricing is not implemented for Azure; it reports a zero
// management cost.
func (*Azure) ClusterManagementPricing() (string, float64, error) {
	return "", 0.0, nil
}
+
// CombinedDiscountForNode combines the default and negotiated discounts
// multiplicatively: 1 - (1-d)(1-n). instanceType and isPreemptible are
// currently ignored on Azure.
func (az *Azure) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
	return 1.0 - ((1.0 - defaultDiscount) * (1.0 - negotiatedDiscount))
}
+
+func (az *Azure) Regions() []string {
+
+	regionOverrides := env.GetRegionOverrideList()
+
+	if len(regionOverrides) > 0 {
+		log.Debugf("Overriding Azure regions with configured region list: %+v", regionOverrides)
+		return regionOverrides
+	}
+
+	return azureRegions
+}
+
+func ParseAzureSubscriptionID(id string) string {
+	match := azureSubRegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+	// Return empty string if an account could not be parsed from provided string
+	return ""
+}

+ 97 - 0
pkg/cloud/azure/provider_test.go

@@ -0,0 +1,97 @@
+package azure
+
+import (
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/services/preview/commerce/mgmt/2015-06-01-preview/commerce"
+	"github.com/stretchr/testify/require"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+)
+
+func TestParseAzureSubscriptionID(t *testing.T) {
+	cases := []struct {
+		input    string
+		expected string
+	}{
+		{
+			input:    "azure:///subscriptions/0badafdf-1234-abcd-wxyz-123456789/...",
+			expected: "0badafdf-1234-abcd-wxyz-123456789",
+		},
+		{
+			input:    "azure:/subscriptions/0badafdf-1234-abcd-wxyz-123456789/...",
+			expected: "",
+		},
+		{
+			input:    "azure:///subscriptions//",
+			expected: "",
+		},
+		{
+			input:    "",
+			expected: "",
+		},
+	}
+
+	for _, test := range cases {
+		result := ParseAzureSubscriptionID(test.input)
+		if result != test.expected {
+			t.Errorf("Input: %s, Expected: %s, Actual: %s", test.input, test.expected, result)
+		}
+	}
+}
+
// TestConvertMeterToPricings exercises convertMeterToPricings across the
// three meter categories it distinguishes: Windows VMs (skipped), storage
// (PV pricing), and Linux VM pairs (per-instance node pricing).
func TestConvertMeterToPricings(t *testing.T) {
	regions := map[string]string{
		"useast":             "US East",
		"japanwest":          "Japan West",
		"australiasoutheast": "Australia Southeast",
		"norwaywest":         "Norway West",
	}
	baseCPUPrice := "0.30000"

	// meterInfo builds a minimal commerce.MeterInfo with a single rate.
	meterInfo := func(category, subcategory, name, region string, rate float64) commerce.MeterInfo {
		return commerce.MeterInfo{
			MeterCategory:    &category,
			MeterSubCategory: &subcategory,
			MeterName:        &name,
			MeterRegion:      &region,
			MeterRates:       map[string]*float64{"0": &rate},
		}
	}

	// Windows meters produce no pricing entries.
	t.Run("windows", func(t *testing.T) {
		info := meterInfo("Virtual Machines", "D2 Series Windows", "D2s v3", "AU Southeast", 0.3)
		results, err := convertMeterToPricings(info, regions, baseCPUPrice)
		require.NoError(t, err)
		require.Nil(t, results)
	})

	// Storage meters yield a PV price converted from per-disk-month to
	// per-GB-hour (2000 / 730 / 32 ≈ 0.085616).
	t.Run("storage", func(t *testing.T) {
		info := meterInfo("Storage", "Some SSD type", "P4 are good", "US East", 2000)
		results, err := convertMeterToPricings(info, regions, baseCPUPrice)
		require.NoError(t, err)

		expected := map[string]*AzurePricing{
			"useast,premium_ssd": {
				PV: &models.PV{Cost: "0.085616", Region: "useast"},
			},
		}
		require.Equal(t, expected, results)
	})

	// A "Low Priority" VM meter with a paired name ("E96a v4/E96as v4")
	// expands into one preemptible node price per instance type.
	t.Run("virtual machines", func(t *testing.T) {
		info := meterInfo("Virtual Machines", "Eav4/Easv4 Series", "E96a v4/E96as v4 Low Priority", "JA West", 10)
		results, err := convertMeterToPricings(info, regions, baseCPUPrice)
		require.NoError(t, err)

		expected := map[string]*AzurePricing{
			"japanwest,Standard_E96a_v4,preemptible": {
				Node: &models.Node{Cost: "10.000000", BaseCPUPrice: "0.30000", UsageType: "preemptible"},
			},
			"japanwest,Standard_E96as_v4,preemptible": {
				Node: &models.Node{Cost: "10.000000", BaseCPUPrice: "0.30000", UsageType: "preemptible"},
			},
		}
		require.Equal(t, expected, results)
	})
}

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/BOM.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+,,,2022-11-03,,,,,,,,,,,,,,,,,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv

@@ -0,0 +1,2 @@
+InvoiceSectionName,AccountName,AccountOwnerId,SubscriptionId,SubscriptionName,ResourceGroup,ResourceLocation,Date,ProductName,MeterCategory,MeterSubCategory,MeterId,MeterName,MeterRegion,UnitOfMeasure,Quantity,EffectivePrice,CostInBillingCurrency,CostCenter,ConsumedService,ResourceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ResourceName,ReservationId,ReservationName,UnitPrice,ProductOrderId,ProductOrderName,Term,PublisherType,PublisherName,ChargeType,Frequency,PricingModel,AvailabilityZone,BillingAccountId,BillingAccountName,BillingCurrencyCode,BillingPeriodStartDate,BillingPeriodEndDate,BillingProfileId,BillingProfileName,InvoiceSectionId,IsAzureCreditEligible,PartNumber,PayGPrice,PlanName,ServiceFamily,CostAllocationRuleName
+Unassigned,Azure Service,email@email.com,11111111-12ab-34dc-56ef-123456abcdef,Example-Subscription,Example-Resource-Group,canadacentral,02/02/2021,Virtual Machines Ev3/ESv3 Series - E4 v3/E4s v3 - CA Central,Virtual Machines,Ev3/ESv3 Series,3dbc3a0c-32b6-4c4d-adbb-3ee577aaba4d,E4 v3/E4s v3,CA Central,10 Hours,10,1.2,0,,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-defaultpool-12345678-vmss,"""createOperationID"": ""11111111-12ab-34dc-56ef-123456abcdef"",""creationSource"": ""vmssclient-aks-defaultpool-12345678-vmss"",""orchestrator"": ""Kubernetes:1.19.9"",""poolName"": ""defaultpool"",""resourceNameSuffix"": ""12345678""",MS-AZR-0017P,"{""UsageType"":""ComputeHR"",""ImageType"":""Canonical"",""ServiceType"":""Standard_E4s_v3"",""VMName"":""aks-defaultpool-12345678-vmss_2"",""VMProperties"":null,""VCPUs"":4,""CPUs"":0,""ReservationOrderId"":""11111111-12ab-34dc-56ef-123456abcdef"",""ReservationId"":""4f18e7c9-9ae8-4251-886b-8bd942a41bdf"",""ConsumptionMeter"":""11111111-12ab-34dc-56ef-123456abcdef"",""RINormalizationRatio"":2.0}",,Canonical,aks-defaultpool-12345678-vmss,11111111-12ab-34dc-56ef-123456abcdef,ExampleReservationName,0.1,b13f2808-a13e-49a3-a899-06d83b8f5d32,"Reserved VM Instance, Standard_E2s_v3, CA Central, 3 Years",36,Azure,,Usage,UsageBased,,,12345678,Example Company,CAD,05/01/2021,05/31/2021,12345678,Example Company,,TRUE,ABC-12345,0,,Compute,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv

@@ -0,0 +1,2 @@
+billingAccountName,partnerName,resellerName,resellerMpnId,customerTenantId,customerName,costCenter,billingPeriodEndDate,billingPeriodStartDate,servicePeriodEndDate,servicePeriodStartDate,date,serviceFamily,productOrderId,productOrderName,consumedService,meterId,meterName,meterCategory,meterSubCategory,meterRegion,ProductId,ProductName,SubscriptionId,subscriptionName,publisherType,publisherId,publisherName,resourceGroupName,ResourceId,resourceLocation,location,effectivePrice,quantity,unitOfMeasure,chargeType,billingCurrency,pricingCurrency,costInBillingCurrency,costInUsd,exchangeRatePricingToBilling,exchangeRateDate,serviceInfo1,serviceInfo2,additionalInfo,tags,PayGPrice,frequency,term,reservationId,reservationName,pricingModel
+,PartnerName,,,11111111-1111-1111-1111-123456789012,Customer Name,,,,02/01/2021,02/01/2021,02/02/2021,Networking,11111111-1111-1111-1111-123456789012,Azure plan,Microsoft.Network,11111111-1111-1111-1111-123456789012,Dynamic Public IP,Virtual Network,IP Addresses,,DZH318Z0BNXN0032,IP Addresses - Basic,11111111-1111-1111-1111-123456789012,Microsoft Azure,Azure,,Microsoft,databricks,/subscriptions/11111111-1111-1111-1111-123456789012/resourceGroups/testspot/providers/Microsoft.Storage/storageAccounts/storename,WESTUS,US West,0.004,3,1 Hour,Usage,USD,USD,0.012,0.012,1,3/1/21,,,,"{  ""ClusterId"": ""0103-212455-stash756"",  ""ServiceType"": ""DataAnalysis"",  ""ClusterName"": ""SrgExtractsPartDeux"",  ""databricks-instance-name"": ""0c1ef59764casdf0c0e094e1cc"",  ""Creator"": ""email@email.com"",  ""Vendor"": ""Databricks"",  ""DatabricksEnvironment"": ""workerenv-6448504491843616""}",0.004,UsageBased,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/German.csv

@@ -0,0 +1,2 @@
+Abonnement-GUID (SubscriptionGuid),Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),UsageDateTime (UsageDateTime),Kategorie der Verbrauchseinheit (MeterCategory),MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),PreTaxCost (PreTaxCost),Genutzter Dienst (ConsumedService),ResourceType (ResourceType),InstanceId (InstanceId),Tags (Tags),OfferId (OfferId),Zusätzliche Informationen (AdditionalInfo),Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/YA.csv

@@ -0,0 +1,2 @@
+subscriptionId,Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),date,meterCategory,MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),costInBillingCurrency,consumedService,ResourceType (ResourceType),InstanceName,tags,OfferId (OfferId),additionalInfo,Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,02/02/2021,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2"

+ 88 - 0
pkg/cloud/azure/resources/billingexports/values/Template.csv

@@ -0,0 +1,88 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001301934407093,0.001301934407093,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.0828,0.0828,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.09,0.09,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0015896,0.0015896,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0000362,0.0000362,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.01236783717759,0.01236783717759,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000204,0.00000204,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0821376,0.0821376,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000060000000000000000000,0.000000060000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000140000000000000000,0.000000140000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000020000000000000000000,0.000000020000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0013522,0.0013522,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000160000000000000000,0.000000160000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000100000000000000000,0.000000100000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001686412831768,0.001686412831768,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.125,0.125,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0004494,0.0004494,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000154,0.00000154,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.15,0.15,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000040000000000000000000,0.000000040000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0002082,0.0002082,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000177,0.000177,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0107136,0.0107136,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0032604,0.0032604,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0072,0.0072,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.00000445,0.00000445,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.00992768780794,0.00992768780794,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.14,0.14,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000191,0.000191,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0014714,0.0014714,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0144,0.0144,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.015,0.015,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000280000000000000000,0.000000280000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }"

+ 170 - 0
pkg/cloud/azure/storagebillingparser.go

@@ -0,0 +1,170 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// AzureStorageBillingParser accesses billing data stored in CSV files in Azure Storage
+type AzureStorageBillingParser struct {
+	// StorageConnection supplies the storage account/container/path settings
+	// and the container and blob-download helpers used to fetch export CSVs.
+	StorageConnection
+}
+
+// Equals reports whether config is an *AzureStorageBillingParser whose
+// underlying StorageConnection matches this one.
+func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+	if other, ok := config.(*AzureStorageBillingParser); ok {
+		return asbp.StorageConnection.Equals(&other.StorageConnection)
+	}
+	return false
+}
+
+// AzureBillingResultFunc is invoked once per parsed billing row; returning a
+// non-nil error aborts the parse.
+type AzureBillingResultFunc func(*BillingRowValues) error
+
+// ParseBillingData downloads the most recent billing export blob for each
+// month in the [start, end] range and feeds every parsed row to resultFn.
+// The returned ConnectionStatus records where in the pipeline a failure (if
+// any) occurred.
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+	if err := asbp.Validate(); err != nil {
+		return cloud.InvalidConfiguration, err
+	}
+
+	containerURL, err := asbp.getContainer()
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+
+	ctx := context.Background()
+	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+
+	for _, blobName := range blobNames {
+		blobBytes, err := asbp.DownloadBlob(blobName, containerURL, ctx)
+		if err != nil {
+			return cloud.FailedConnection, err
+		}
+		if err := asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn); err != nil {
+			return cloud.ParseError, err
+		}
+	}
+	return cloud.SuccessfulConnection, nil
+}
+
+func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {
+	headers, err := reader.Read()
+	if err != nil {
+		return err
+	}
+	abp, err := NewBillingParseSchema(headers)
+	if err != nil {
+		return err
+	}
+	for {
+		var record, err = reader.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		abv := abp.ParseRow(start, end, record)
+		if abv == nil {
+			continue
+		}
+
+		err = resultFn(abv)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getMostRecentBlobs lists every blob in the container and, for each month in
+// the [start, end] range, selects the most recently created blob whose name
+// contains that month's "YYYYMMDD-YYYYMMDD" substring (and the configured
+// Path, when one is set). The winners are returned in month order.
+func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, containerURL *azblob.ContainerURL, ctx context.Context) ([]string, error) {
+	log.Infof("Azure Storage: retrieving most recent reports from: %v - %v", start, end)
+
+	// Get list of month substrings for months contained in the start to end range
+	monthStrs, err := asbp.getMonthStrings(start, end)
+	if err != nil {
+		return nil, err
+	}
+	mostRecentBlobs := make(map[string]azblob.BlobItemInternal)
+	for marker := (azblob.Marker{}); marker.NotDone(); {
+		// Get a result segment starting with the blob indicated by the current Marker.
+		listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+		if err != nil {
+			return nil, err
+		}
+
+		// ListBlobs returns the start of the next segment; you MUST use this to get
+		// the next segment (after processing the current result segment).
+		marker = listBlob.NextMarker
+
+		// Using the list of month strings, find the most recent blob for each month in the range
+		for _, blobInfo := range listBlob.Segment.BlobItems {
+			for _, month := range monthStrs {
+				if !strings.Contains(blobInfo.Name, month) {
+					continue
+				}
+				// If Container Path configuration exists, check if it is in the blobs name
+				if asbp.Path != "" && !strings.Contains(blobInfo.Name, asbp.Path) {
+					continue
+				}
+
+				if prevBlob, ok := mostRecentBlobs[month]; ok {
+					prevTime := prevBlob.Properties.CreationTime
+					currTime := blobInfo.Properties.CreationTime
+					// Keep the previous blob when the candidate has no creation
+					// time or is older; the nil checks guard against dereferencing
+					// a missing CreationTime (*time.Time) from the service.
+					if currTime == nil || (prevTime != nil && prevTime.After(*currTime)) {
+						continue
+					}
+				}
+				mostRecentBlobs[month] = blobInfo
+			}
+		}
+	}
+
+	// Move the most recent blob for each month out of the map into a list
+	// ordered by month.
+	var blobNames []string
+	for _, month := range monthStrs {
+		if blob, ok := mostRecentBlobs[month]; ok {
+			blobNames = append(blobNames, blob.Name)
+		}
+	}
+
+	return blobNames, nil
+}
+
+// getMonthStrings returns a "YYYYMMDD-YYYYMMDD" (first day - last day) string
+// for each calendar month touched by the [start, end] range. An end in the
+// future is clamped to now, since no report can exist past the present.
+func (asbp *AzureStorageBillingParser) getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	// After clamping, a start in the future would leave end before start; the
+	// loop below only walks forward from start, so it would never reach endStr
+	// and would spin forever. Reject that case explicitly.
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must not be in the future")
+	}
+	var monthStrs []string
+	monthStr := asbp.timeToMonthString(start)
+	endStr := asbp.timeToMonthString(end)
+	monthStrs = append(monthStrs, monthStr)
+	currMonth := start.AddDate(0, 0, -start.Day()+1) // first day of start's month
+	for monthStr != endStr {
+		currMonth = currMonth.AddDate(0, 1, 0)
+		monthStr = asbp.timeToMonthString(currMonth)
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+func (asbp *AzureStorageBillingParser) timeToMonthString(input time.Time) string {
+	format := "20060102"
+	startOfMonth := input.AddDate(0, 0, -input.Day()+1)
+	endOfMonth := input.AddDate(0, 1, -input.Day())
+	return startOfMonth.Format(format) + "-" + endOfMonth.Format(format)
+}

+ 204 - 0
pkg/cloud/azure/storagebillingparser_test.go

@@ -0,0 +1,204 @@
+package azure
+
+import (
+	"testing"
+	"time"
+)
+
+// TestAzureStorageBillingParser_getMonthStrings checks month-substring
+// generation for single- and multi-month ranges.
+func TestAzureStorageBillingParser_getMonthStrings(t *testing.T) {
+	asbp := AzureStorageBillingParser{}
+	loc, _ := time.LoadLocation("UTC")
+	testCases := map[string]struct {
+		start    time.Time
+		end      time.Time
+		expected []string
+	}{
+		"Single Month": {
+			start:    time.Date(2021, 2, 1, 0, 0, 0, 0, loc),
+			end:      time.Date(2021, 2, 3, 0, 0, 0, 0, loc),
+			expected: []string{"20210201-20210228"},
+		},
+		"Two Month": {
+			start:    time.Date(2021, 2, 1, 0, 0, 0, 0, loc),
+			end:      time.Date(2021, 3, 3, 0, 0, 0, 0, loc),
+			expected: []string{"20210201-20210228", "20210301-20210331"},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			months, err := asbp.getMonthStrings(tc.start, tc.end)
+			if err != nil {
+				t.Errorf("Could not retrieve month strings %v", err)
+			}
+
+			if len(months) != len(tc.expected) {
+				t.Errorf("Did not create the expected number of month strings. Expected: %d, Actual: %d", len(tc.expected), len(months))
+			}
+
+			for i, monthStr := range months {
+				if monthStr != tc.expected[i] {
+					t.Errorf("Incorrect month string at index %d. Expected: %s, Actual: %s", i, tc.expected[i], monthStr)
+				}
+			}
+		})
+	}
+}
+
+// TestAzureStorageBillingParser_parseCSV parses fixture billing-export CSVs
+// and verifies every field of the resulting BillingRowValues.
+func TestAzureStorageBillingParser_parseCSV(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected []BillingRowValues
+	}{
+		"Virtual Machine": {
+			input: "VirtualMachine.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456billing",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+		"Missing Brackets": {
+			input: "MissingBrackets.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456abcdef",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+	}
+	asbp := &AzureStorageBillingParser{}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := &TestCSVRetriever{
+				CSVName: valueCasesPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+
+			// Collect each parsed row via the result callback.
+			var actual []*BillingRowValues
+			resultFn := func(abv *BillingRowValues) error {
+				actual = append(actual, abv)
+				return nil
+			}
+
+			err = asbp.parseCSV(start, end, reader, resultFn)
+			if err != nil {
+				t.Errorf("Error generating BillingRowValues: %s", err.Error())
+			}
+
+			if len(actual) != len(tc.expected) {
+				t.Errorf("Actual output length did not match expected. Expected: %d, Actual: %d", len(tc.expected), len(actual))
+			}
+
+			for i, this := range actual {
+				assertBillingRowValuesEqual(t, i, this, tc.expected[i])
+			}
+		})
+	}
+}
+
+// assertBillingRowValuesEqual reports a test error for every field of actual
+// that does not match expected; i identifies the row for the error messages.
+func assertBillingRowValuesEqual(t *testing.T, i int, actual *BillingRowValues, expected BillingRowValues) {
+	t.Helper()
+
+	if !actual.Date.Equal(expected.Date) {
+		t.Errorf("Parsed data at index %d has incorrect Date value. Expected: %s, Actual: %s", i, expected.Date.String(), actual.Date.String())
+	}
+
+	if actual.MeterCategory != expected.MeterCategory {
+		t.Errorf("Parsed data at index %d has incorrect MeterCategory value. Expected: %s, Actual: %s", i, expected.MeterCategory, actual.MeterCategory)
+	}
+
+	if actual.SubscriptionID != expected.SubscriptionID {
+		t.Errorf("Parsed data at index %d has incorrect SubscriptionID value. Expected: %s, Actual: %s", i, expected.SubscriptionID, actual.SubscriptionID)
+	}
+
+	if actual.InvoiceEntityID != expected.InvoiceEntityID {
+		t.Errorf("Parsed data at index %d has incorrect InvoiceEntityID value. Expected: %s, Actual: %s", i, expected.InvoiceEntityID, actual.InvoiceEntityID)
+	}
+
+	if actual.InstanceID != expected.InstanceID {
+		t.Errorf("Parsed data at index %d has incorrect InstanceID value. Expected: %s, Actual: %s", i, expected.InstanceID, actual.InstanceID)
+	}
+
+	if actual.Service != expected.Service {
+		t.Errorf("Parsed data at index %d has incorrect Service value. Expected: %s, Actual: %s", i, expected.Service, actual.Service)
+	}
+
+	if actual.Cost != expected.Cost {
+		t.Errorf("Parsed data at index %d has incorrect Cost value. Expected: %f, Actual: %f", i, expected.Cost, actual.Cost)
+	}
+
+	if actual.NetCost != expected.NetCost {
+		t.Errorf("Parsed data at index %d has incorrect NetCost value. Expected: %f, Actual: %f", i, expected.NetCost, actual.NetCost)
+	}
+
+	if len(actual.Tags) != len(expected.Tags) {
+		t.Errorf("Parsed data at index %d did not have the expected number of tags. Expected: %d, Actual: %d", i, len(expected.Tags), len(actual.Tags))
+	}
+
+	for key, actualTag := range actual.Tags {
+		expectedTag, ok := expected.Tags[key]
+		if !ok {
+			t.Errorf("Parsed data at index %d has unexpected entry in Tags with key: %s", i, key)
+			continue
+		}
+		if actualTag != expectedTag {
+			t.Errorf("Parsed data at index %d has unexpected value in Tags for key: %s. Expected: %s, Actual: %s", i, key, expectedTag, actualTag)
+		}
+	}
+
+	if len(actual.AdditionalInfo) != len(expected.AdditionalInfo) {
+		t.Errorf("Parsed data at index %d did not have the expected number of AdditionalInfo entries. Expected: %d, Actual: %d", i, len(expected.AdditionalInfo), len(actual.AdditionalInfo))
+	}
+
+	for key, actualAI := range actual.AdditionalInfo {
+		expectedAI, ok := expected.AdditionalInfo[key]
+		if !ok {
+			t.Errorf("Parsed data at index %d has unexpected entry in AdditionalInfo with key: %s", i, key)
+			continue
+		}
+		if actualAI != expectedAI {
+			t.Errorf("Parsed data at index %d has unexpected value in AdditionalInfo for key: %s. Expected: %v, Actual: %v", i, key, expectedAI, actualAI)
+		}
+	}
+}

+ 179 - 0
pkg/cloud/azure/storageconfiguration.go

@@ -0,0 +1,179 @@
+package azure
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// StorageConfiguration holds the values needed to reach an Azure Storage
// container (presumably the one holding a cost/billing export — confirm with
// callers) plus the Authorizer that supplies credentials for the account.
type StorageConfiguration struct {
	SubscriptionID string     `json:"subscriptionID"` // Azure subscription owning the storage account
	Account        string     `json:"account"`        // storage account name
	Container      string     `json:"container"`      // blob container name
	Path           string     `json:"path"`           // optional path prefix within the container
	Cloud          string     `json:"cloud"`          // Azure cloud environment; "gov" selects the US Gov blob endpoint
	Authorizer     Authorizer `json:"authorizer"`     // credential provider; see SelectAuthorizerByType
}
+
// Validate ensures that an Authorizer is present and valid, and that the
// required SubscriptionID, Account, and Container fields are set. Path and
// Cloud are optional. It returns an error describing the first missing field.
func (sc *StorageConfiguration) Validate() error {

	if sc.Authorizer == nil {
		return fmt.Errorf("StorageConfiguration: missing authorizer")
	}

	// Delegate credential-specific validation to the Authorizer.
	err := sc.Authorizer.Validate()
	if err != nil {
		return err
	}

	// NOTE(review): "Subcription" is a typo, but the unit tests assert this
	// exact message — correct both together if this is ever fixed.
	if sc.SubscriptionID == "" {
		return fmt.Errorf("StorageConfiguration: missing Subcription ID")
	}

	if sc.Account == "" {
		return fmt.Errorf("StorageConfiguration: missing Account")
	}

	if sc.Container == "" {
		return fmt.Errorf("StorageConfiguration: missing Container")
	}

	return nil
}
+
+func (sc *StorageConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*StorageConfiguration)
+	if !ok {
+		return false
+	}
+
+	if sc.Authorizer != nil {
+		if !sc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if sc.SubscriptionID != thatConfig.SubscriptionID {
+		return false
+	}
+
+	if sc.Account != thatConfig.Account {
+		return false
+	}
+
+	if sc.Container != thatConfig.Container {
+		return false
+	}
+
+	if sc.Path != thatConfig.Path {
+		return false
+	}
+
+	if sc.Cloud != thatConfig.Cloud {
+		return false
+	}
+
+	return true
+}
+
+func (sc *StorageConfiguration) Sanitize() config.Config {
+	return &StorageConfiguration{
+		SubscriptionID: sc.SubscriptionID,
+		Account:        sc.Account,
+		Container:      sc.Container,
+		Path:           sc.Path,
+		Cloud:          sc.Cloud,
+		Authorizer:     sc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (sc *StorageConfiguration) Key() string {
+	key := fmt.Sprintf("%s/%s", sc.SubscriptionID, sc.Container)
+	// append container path to key if it exists
+	if sc.Path != "" {
+		key = fmt.Sprintf("%s/%s", key, sc.Path)
+	}
+	return key
+}
+
+func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.SubscriptionID = subscriptionID
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Account = account
+
+	container, err := config.GetInterfaceValue[string](fmap, "container")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Container = container
+
+	path, err := config.GetInterfaceValue[string](fmap, "path")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Path = path
+
+	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Cloud = cloud
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+	if asc.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	authorizer = &AccessKey{
+		AccessKey: asc.AccessKey,
+		Account:   asc.AccountName,
+	}
+
+	return &StorageConfiguration{
+		SubscriptionID: asc.SubscriptionId,
+		Account:        asc.AccountName,
+		Container:      asc.ContainerName,
+		Path:           asc.ContainerPath,
+		Cloud:          asc.AzureCloud,
+		Authorizer:     authorizer,
+	}
+}

+ 446 - 0
pkg/cloud/azure/storageconfiguration_test.go

@@ -0,0 +1,446 @@
+package azure
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// TestStorageConfiguration_Validate is a table-driven test asserting that
// Validate returns nil for complete configs (Path and Cloud are optional)
// and the exact expected error message for each missing required field.
func TestStorageConfiguration_Validate(t *testing.T) {
	testCases := map[string]struct {
		config   StorageConfiguration
		expected error // nil means the config is expected to be valid
	}{
		"valid config Azure AccessKey": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: nil,
		},
		"access key invalid": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					Account: "account",
				},
			},
			expected: fmt.Errorf("AccessKey: missing access key"),
		},
		"missing authorizer": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer:     nil,
			},
			expected: fmt.Errorf("StorageConfiguration: missing authorizer"),
		},
		// NOTE(review): "Subcription" matches the typo in Validate itself.
		"missing subscriptionID": {
			config: StorageConfiguration{
				SubscriptionID: "",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: fmt.Errorf("StorageConfiguration: missing Subcription ID"),
		},
		"missing account": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: fmt.Errorf("StorageConfiguration: missing Account"),
		},
		"missing container": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: fmt.Errorf("StorageConfiguration: missing Container"),
		},
		"missing path": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: nil,
		},
		"missing cloud": {
			config: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: nil,
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := testCase.config.Validate()
			// Compare error strings rather than error values so distinct
			// fmt.Errorf instances with identical messages are treated as equal.
			actualString := "nil"
			if actual != nil {
				actualString = actual.Error()
			}
			expectedString := "nil"
			if testCase.expected != nil {
				expectedString = testCase.expected.Error()
			}
			if actualString != expectedString {
				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
			}
		})
	}
}
+
// TestStorageConfiguration_Equals is a table-driven test covering Equals:
// full match, nil-authorizer combinations on either side, a mismatch in each
// individual field, and comparison against a different config.Config type.
func TestStorageConfiguration_Equals(t *testing.T) {
	testCases := map[string]struct {
		left     StorageConfiguration
		right    config.Config
		expected bool
	}{
		"matching config": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: true,
		},

		// nil authorizers on both sides are considered equal
		"missing both authorizer": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer:     nil,
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer:     nil,
			},
			expected: true,
		},
		"missing left authorizer": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer:     nil,
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		"missing right authorizer": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer:     nil,
			},
			expected: false,
		},
		"different subscriptionID": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID2",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		"different account": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account2",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		"different container": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container2",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		"different path": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path2",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		"different cloud": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud2",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			expected: false,
		},
		// comparing against a non-StorageConfiguration config must be false
		"different config": {
			left: StorageConfiguration{
				SubscriptionID: "subscriptionID",
				Account:        "account",
				Container:      "container",
				Path:           "path",
				Cloud:          "cloud",
				Authorizer: &AccessKey{
					AccessKey: "accessKey",
					Account:   "account",
				},
			},
			right: &AccessKey{
				AccessKey: "accessKey",
				Account:   "account",
			},
			expected: false,
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := testCase.left.Equals(testCase.right)
			if actual != testCase.expected {
				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
			}
		})
	}
}
+
+func TestStorageConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config StorageConfiguration
+	}{
+		"Empty Config": {
+			config: StorageConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+		},
+		"AccessKey Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &StorageConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 77 - 0
pkg/cloud/azure/storageconnection.go

@@ -0,0 +1,77 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
// StorageConnection provides access to Azure Storage. It embeds a
// StorageConfiguration and adds connection behavior (container lookup and
// blob download) on top of it.
type StorageConnection struct {
	StorageConfiguration
}
+
+func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*StorageConnection)
+	if !ok {
+		return false
+	}
+
+	return sc.StorageConfiguration.Equals(&thatConfig.StorageConfiguration)
+}
+
// getContainer builds an azblob.ContainerURL for the configured account and
// container, using credentials obtained from the Authorizer and the
// cloud-appropriate blob endpoint template.
func (sc *StorageConnection) getContainer() (*azblob.ContainerURL, error) {

	credential, err := sc.Authorizer.GetBlobCredentials()
	if err != nil {
		return nil, err
	}

	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	// From the Azure portal, get your storage account blob service URL endpoint.
	// NOTE(review): the url.Parse error is deliberately discarded; this assumes
	// Account/Container have been validated upstream — confirm with callers.
	URL, _ := url.Parse(
		fmt.Sprintf(sc.getBlobURLTemplate(), sc.Account, sc.Container))

	// Create a ContainerURL object that wraps the container URL and a request
	// pipeline to make requests.
	containerURL := azblob.NewContainerURL(*URL, p)
	return &containerURL, nil
}
+
+// getBlobURLTemplate returns the correct BlobUrl for whichever Cloud storage account is specified by the AzureCloud configuration
+// defaults to the Public Cloud template
+func (sc *StorageConnection) getBlobURLTemplate() string {
+	// Use gov cloud blob url if gov is detected in AzureCloud
+	if strings.Contains(strings.ToLower(sc.Cloud), "gov") {
+		return "https://%s.blob.core.usgovcloudapi.net/%s"
+	}
+	// default to Public Cloud template
+	return "https://%s.blob.core.windows.net/%s"
+}
+
// DownloadBlob fetches the named blob from the given container and returns
// its full contents in memory.
// NOTE(review): ctx should conventionally be the first parameter; changing
// the signature now would break existing callers.
func (sc *StorageConnection) DownloadBlob(blobName string, containerURL *azblob.ContainerURL, ctx context.Context) ([]byte, error) {
	log.Infof("Azure Storage: retrieving blob: %v", blobName)

	blobURL := containerURL.NewBlobURL(blobName)
	downloadResponse, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return nil, err
	}
	// NOTE: automatically retries are performed if the connection fails
	bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})

	// read the body into a buffer; the whole blob is held in memory, so very
	// large exports will be expensive here
	downloadedData := bytes.Buffer{}
	_, err = downloadedData.ReadFrom(bodyStream)
	if err != nil {
		return nil, err
	}
	return downloadedData.Bytes(), nil
}

+ 53 - 0
pkg/cloud/config/authorizer.go

@@ -0,0 +1,53 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AuthorizerTypeProperty is the property where the id of an Authorizer should be placed in its custom MarshalJSON function
const AuthorizerTypeProperty = "authorizerType"

// Authorizer is a Config that can also marshal itself to JSON; implementations
// are expected to include AuthorizerTypeProperty in their output so the
// concrete type can be recovered by AuthorizerFromInterface.
type Authorizer interface {
	Config
	json.Marshaler
}
+
// AuthorizerSelectorFn implementations of this function should be a simple switch
// and acts as a register for the Authorizer types. The returned Authorizer should be empty
// except for its default type property; other values are marshalled into it
// by AuthorizerFromInterface.
type AuthorizerSelectorFn[T Authorizer] func(string) (T, error)
+
+// AuthorizerFromInterface this generic function provides Authorizer unmarshalling for all providers
+func AuthorizerFromInterface[T Authorizer](f any, authSelectFn AuthorizerSelectorFn[T]) (T, error) {
+	var emptyAuth T
+	if f == nil {
+		return emptyAuth, nil
+	}
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not cast interface as map")
+	}
+
+	authType, err := GetInterfaceValue[string](fmap, AuthorizerTypeProperty)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not retrieve type property: %w", err)
+	}
+	authorizer, err := authSelectFn(authType)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: %w", err)
+	}
+
+	// convert the interface back to a []Byte so that it can be unmarshalled into the correct type
+	fBin, err := json.Marshal(f)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not marshal value %v: %w", f, err)
+	}
+
+	err = json.Unmarshal(fBin, authorizer)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: failed to unmarshal into Authorizer type %T from value %v: %w", authorizer, f, err)
+	}
+	return authorizer, nil
+}

+ 37 - 0
pkg/cloud/config/config.go

@@ -0,0 +1,37 @@
+package config
+
+import (
+	"fmt"
+)
+
// Redacted is the placeholder substituted for secret values by Sanitize
// implementations.
const Redacted = "REDACTED"

// Config allows for nested configurations which encapsulate their functionality to be validated and compared easily
type Config interface {
	Validate() error
	Sanitize() Config // returns a copy with secret values replaced by Redacted
	Equals(Config) bool
}

// KeyedConfig is a top level Config which uses its public values as a unique identifier allowing duplicates to be identified
type KeyedConfig interface {
	Config
	Key() string
}

// KeyedConfigWatcher supplies the current set of keyed configurations.
type KeyedConfigWatcher interface {
	GetConfigs() []KeyedConfig
}
+
// GetInterfaceValue extracts the value stored under key in fmap and asserts
// it to type T. It returns an error when the key is absent or when the value
// does not have type T.
func GetInterfaceValue[T any](fmap map[string]interface{}, key string) (T, error) {
	var value T
	interfaceValue, ok := fmap[key]
	if !ok {
		// was "FromInterface: ..." — corrected to name this function consistently
		// with the type-mismatch error below
		return value, fmt.Errorf("GetInterfaceValue: missing '%s' property", key)
	}
	typedValue, ok := interfaceValue.(T)
	if !ok {
		return value, fmt.Errorf("GetInterfaceValue: property '%s' had expected type '%T' but did not match", key, value)
	}
	return typedValue, nil
}

+ 42 - 0
pkg/cloud/connectionstatus.go

@@ -0,0 +1,42 @@
+package cloud
+
// ConnectionStatus communicates the status of a cloud connection in a way that is general enough to apply to each
// Cloud Provider, but still give actionable information on how to trouble shoot one the four failing statuses.
type ConnectionStatus string

// NOTE(review): only InitialStatus is explicitly typed ConnectionStatus; the
// remaining constants are untyped strings (assignable to ConnectionStatus,
// but also to plain string). Typing them all would be stricter but could
// break existing string-typed uses.
const (
	// InitialStatus is the zero value of CloudConnectionStatus and means that cloud connection is untested. Once
	// CloudConnection Status has been changed in should not return to this value. This status is assigned on creation
	// to the cloud provider
	InitialStatus ConnectionStatus = "No Connection"

	// InvalidConfiguration means that Cloud Configuration is missing required values to connect to cloud provider.
	// This status is assigned during failures in the provider implementation of getCloudConfig()
	InvalidConfiguration = "Invalid Configuration"

	// FailedConnection means that all required Cloud Configuration values are filled in, but a connection with the
	// Cloud Provider cannot be established. This is indicative of a typo in one of the Cloud Configuration values or an
	// issue in how the connection was set up in the Cloud Provider's Console. The assignment of this status varies
	// between implementations, but should happen if an error is thrown when an interaction with an object from
	// the Cloud Service Provider's sdk occurs.
	FailedConnection = "Failed Connection"

	// ParseError indicates an issue with our functions which parse responses
	ParseError = "Parse Error"

	// MissingData means that the Cloud Integration is properly configured, but the cloud provider is not returning
	// billing/cost and usage data. This status is indicative of the billing/cost and usage data export of the Cloud Provider
	// being incorrectly set up or the export being set up in the last 48 hours and not having started populating data yet.
	// This status is set when a query has been successfully made but the results come back empty. If the cloud provider,
	// already has a SUCCESSFUL_CONNECTION status then this status should not be set, because this indicates that the specific
	// query made may have been empty.
	MissingData = "Data Missing"

	// SuccessfulConnection means that the Cloud Integration is properly configured and returning data. This status is
	// set on any successful query where data is returned
	SuccessfulConnection = "Connection Successful"
)

// String returns the status as a plain string.
func (cs ConnectionStatus) String() string {
	return string(cs)
}

+ 132 - 0
pkg/cloud/gcp/authorizer.go

@@ -0,0 +1,132 @@
+package gcp
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"google.golang.org/api/option"
+)
+
// Authorizer type identifiers stored under config.AuthorizerTypeProperty in
// marshalled JSON; see SelectAuthorizerByType.
const ServiceAccountKeyAuthorizerType = "GCPServiceAccountKey"
const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"

// Authorizer provide a []option.ClientOption which is used in when creating clients in the GCP SDK
type Authorizer interface {
	config.Authorizer
	CreateGCPClientOptions() ([]option.ClientOption, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case ServiceAccountKeyAuthorizerType:
+		return &ServiceAccountKey{}, nil
+	case WorkloadIdentityAuthorizerType:
+		return &WorkloadIdentity{}, nil
+	default:
+		return nil, fmt.Errorf("GCP: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// ServiceAccountKey authorizes GCP clients with a service account key,
// stored as the decoded key-file fields. The map values are secrets and are
// redacted by Sanitize.
type ServiceAccountKey struct {
	Key map[string]string `json:"key"`
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 2)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap["key"] = gkc.Key
+	return json.Marshal(fmap)
+}
+
+func (gkc *ServiceAccountKey) Validate() error {
+	if gkc.Key == nil || len(gkc.Key) == 0 {
+		return fmt.Errorf("ServiceAccountKey: missing Key")
+	}
+
+	return nil
+}
+
+func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*ServiceAccountKey)
+	if !ok {
+		return false
+	}
+
+	if len(gkc.Key) != len(thatConfig.Key) {
+		return false
+	}
+
+	for k, v := range gkc.Key {
+		if thatConfig.Key[k] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (gkc *ServiceAccountKey) Sanitize() config.Config {
+	redactedMap := make(map[string]string, len(gkc.Key))
+	for key, _ := range gkc.Key {
+		redactedMap[key] = config.Redacted
+	}
+	return &ServiceAccountKey{
+		Key: redactedMap,
+	}
+}
+
// CreateGCPClientOptions validates the key and converts it into a
// credentials-JSON client option for GCP SDK clients.
func (gkc *ServiceAccountKey) CreateGCPClientOptions() ([]option.ClientOption, error) {
	err := gkc.Validate()
	if err != nil {
		return nil, err
	}

	// Re-marshal the key map back into the JSON service-account-key format
	// expected by option.WithCredentialsJSON.
	b, err := json.Marshal(gkc.Key)
	if err != nil {
		return nil, fmt.Errorf("Key: failed to marshal Key: %s", err.Error())
	}
	clientOption := option.WithCredentialsJSON(b)

	// The creation of the BigQuery Client is where FAILED_CONNECTION CloudConnectionStatus is recorded for GCP
	return []option.ClientOption{clientOption}, nil
}
+
// WorkloadIdentity passes an empty slice of client options which causes the GCP SDK to check for the workload identity in the environment
type WorkloadIdentity struct{}

// MarshalJSON custom json marshalling function; the type has no fields, so
// only the authorizer type property is emitted.
func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
	fmap := make(map[string]any, 1)
	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
	return json.Marshal(fmap)
}

// Validate always succeeds: workload identity needs no configured values.
func (wi *WorkloadIdentity) Validate() error {
	return nil
}
+
+func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*WorkloadIdentity)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
// Sanitize returns a fresh WorkloadIdentity; there are no secrets to redact.
func (wi *WorkloadIdentity) Sanitize() config.Config {
	return &WorkloadIdentity{}
}

// CreateGCPClientOptions returns an empty option slice so the GCP SDK falls
// back to ambient credentials (workload identity) from the environment.
func (wi *WorkloadIdentity) CreateGCPClientOptions() ([]option.ClientOption, error) {
	return []option.ClientOption{}, nil
}

+ 172 - 0
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -0,0 +1,172 @@
+package gcp
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// BigQueryConfiguration identifies the BigQuery table queried for billing
// data and the Authorizer used to create BigQuery clients.
type BigQueryConfiguration struct {
	ProjectID  string     `json:"projectID"`  // GCP project containing the dataset
	Dataset    string     `json:"dataset"`    // BigQuery dataset name
	Table      string     `json:"table"`      // table within the dataset
	Authorizer Authorizer `json:"authorizer"` // credential provider; see SelectAuthorizerByType
}
+
// Validate ensures an Authorizer is present and valid and that ProjectID,
// Dataset, and Table are all set, returning an error for the first missing item.
func (bqc *BigQueryConfiguration) Validate() error {

	// NOTE(review): "configurer" should likely read "authorizer", but the unit
	// tests assert this exact message — change both together if corrected.
	if bqc.Authorizer == nil {
		return fmt.Errorf("BigQueryConfig: missing configurer")
	}

	err := bqc.Authorizer.Validate()
	if err != nil {
		return fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: %s", err.Error())
	}

	if bqc.ProjectID == "" {
		return fmt.Errorf("BigQueryConfig: missing ProjectID")
	}

	if bqc.Dataset == "" {
		return fmt.Errorf("BigQueryConfig: missing Dataset")
	}

	if bqc.Table == "" {
		return fmt.Errorf("BigQueryConfig: missing Table")
	}

	return nil
}
+
+func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BigQueryConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bqc.Authorizer != nil {
+		if !bqc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bqc.ProjectID != thatConfig.ProjectID {
+		return false
+	}
+
+	if bqc.Dataset != thatConfig.Dataset {
+		return false
+	}
+
+	if bqc.Table != thatConfig.Table {
+		return false
+	}
+
+	return true
+}
+
+func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+	return &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    bqc.Dataset,
+		Table:      bqc.Table,
+		Authorizer: bqc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
// Key uses the Usage Project Id as the Provider Key for GCP, combined with
// the fully qualified "<dataset>.<table>" name.
func (bqc *BigQueryConfiguration) Key() string {
	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
}

// GetBillingDataDataset returns the table reference as "<dataset>.<table>",
// the form used in BigQuery SQL and in legacy BigQueryConfig values.
func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
}
+
// GetBigQueryClient creates a BigQuery client for ProjectID using client
// options supplied by the Authorizer.
func (bqc *BigQueryConfiguration) GetBigQueryClient(ctx context.Context) (*bigquery.Client, error) {
	clientOpts, err := bqc.Authorizer.CreateGCPClientOptions()
	if err != nil {
		return nil, err
	}
	return bigquery.NewClient(ctx, bqc.ProjectID, clientOpts...)
}
+
+// UnmarshalJSON assumes data is save as an BigQueryConfigurationDTO
+func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.ProjectID = projectID
+
+	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.Dataset = dataset
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.Table = table
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Authorizer = authorizer
+	return nil
+}
+
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+	if bqc.IsEmpty() {
+		return nil
+	}
+
+	BillingDataDataset := strings.Split(bqc.BillingDataDataset, ".")
+	dataset := BillingDataDataset[0]
+	var table string
+	if len(BillingDataDataset) > 1 {
+		table = BillingDataDataset[1]
+	}
+
+	bigQueryConfiguration := &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    dataset,
+		Table:      table,
+		Authorizer: &WorkloadIdentity{}, // Default to WorkloadIdentity
+	}
+
+	if len(bqc.Key) != 0 {
+		bigQueryConfiguration.Authorizer = &ServiceAccountKey{
+			Key: bqc.Key,
+		}
+	}
+
+	return bigQueryConfiguration
+}

+ 388 - 0
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -0,0 +1,388 @@
+package gcp
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// TestBigQueryConfiguration_Validate exercises Validate across both valid
// authorizer shapes (service account key, workload identity) and each
// individually missing or invalid field, comparing error messages textually.
func TestBigQueryConfiguration_Validate(t *testing.T) {
	testCases := map[string]struct {
		config   BigQueryConfiguration
		expected error
	}{
		"valid config GCP Key": {
			config: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: nil,
		},
		"valid config WorkloadIdentity": {
			config: BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: &WorkloadIdentity{},
			},
			expected: nil,
		},
		"access Key invalid": {
			config: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: nil,
				},
			},
			expected: fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: ServiceAccountKey: missing Key"),
		},
		"missing configurer": {
			config: BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: nil,
			},
			expected: fmt.Errorf("BigQueryConfig: missing configurer"),
		},
		"missing projectID": {
			config: BigQueryConfiguration{
				ProjectID: "",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: fmt.Errorf("BigQueryConfig: missing ProjectID"),
		},
		"missing dataset": {
			config: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: fmt.Errorf("BigQueryConfig: missing Dataset"),
		},
		"missing table": {
			config: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: fmt.Errorf("BigQueryConfig: missing Table"),
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := testCase.config.Validate()
			// Compare by message text so nil and non-nil errors can share one path.
			actualString := "nil"
			if actual != nil {
				actualString = actual.Error()
			}
			expectedString := "nil"
			if testCase.expected != nil {
				expectedString = testCase.expected.Error()
			}
			if actualString != expectedString {
				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
			}
		})
	}
}
+
// TestBigQueryConfiguration_Equals verifies Equals for matching configs,
// each field differing individually, nil/mismatched authorizers on either
// side, and an entirely different config.Config implementation.
func TestBigQueryConfiguration_Equals(t *testing.T) {
	testCases := map[string]struct {
		left     BigQueryConfiguration
		right    config.Config
		expected bool
	}{
		"matching config": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: true,
		},
		"different configurer": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: &WorkloadIdentity{},
			},
			expected: false,
		},
		"missing both configurer": {
			left: BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: nil,
			},
			right: &BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: nil,
			},
			expected: true,
		},
		"missing left configurer": {
			left: BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: nil,
			},
			right: &BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: &WorkloadIdentity{},
			},
			expected: false,
		},
		"missing right configurer": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID:  "projectID",
				Dataset:    "dataset",
				Table:      "table",
				Authorizer: nil,
			},
			expected: false,
		},
		"different projectID": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID: "projectID2",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: false,
		},
		"different dataset": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset2",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: false,
		},
		"different table": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table2",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			expected: false,
		},
		"different config": {
			left: BigQueryConfiguration{
				ProjectID: "projectID",
				Dataset:   "dataset",
				Table:     "table",
				Authorizer: &ServiceAccountKey{
					Key: map[string]string{
						"Key":  "Key",
						"key1": "key2",
					},
				},
			},
			right: &ServiceAccountKey{

				Key: map[string]string{
					"Key":  "Key",
					"key1": "key2",
				},
			},
			expected: false,
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			actual := testCase.left.Equals(testCase.right)
			if actual != testCase.expected {
				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
			}
		})
	}
}
+
+func TestBigQueryConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BigQueryConfiguration
+	}{
+		"Empty Config": {
+			config: BigQueryConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+		},
+		"ServiceAccountKeyConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+		},
+		"WorkLoadIdentityConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BigQueryConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 110 - 0
pkg/cloud/gcp/bigqueryquerier.go

@@ -0,0 +1,110 @@
+package gcp
+
+import (
+	"context"
+	"regexp"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
// BigQueryQuerier embeds a BigQueryConfiguration and adds the ability to run
// SQL queries against the configured BigQuery project.
type BigQueryQuerier struct {
	BigQueryConfiguration
}
+
+func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BigQueryQuerier)
+	if !ok {
+		return false
+	}
+
+	return bqq.BigQueryConfiguration.Equals(&thatConfig.BigQueryConfiguration)
+}
+
// QueryBigQuery executes queryStr against BigQuery using a client built from
// the embedded configuration and returns the resulting row iterator.
// NOTE(review): the client is not closed here — presumably the returned
// RowIterator continues paging through it; confirm the intended client
// lifecycle with GetBigQueryClient's implementation.
func (bqq *BigQueryQuerier) QueryBigQuery(ctx context.Context, queryStr string) (*bigquery.RowIterator, error) {
	client, err := bqq.GetBigQueryClient(ctx)
	if err != nil {
		return nil, err
	}

	query := client.Query(queryStr)
	return query.Read(ctx)
}
+
+func GCPSelectCategory(service, description string) string {
+	s := strings.ToLower(service)
+	d := strings.ToLower(description)
+
+	// Network descriptions
+	if strings.Contains(d, "download") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "network") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "ingress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "egress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "static ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "external ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "load balanced") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "licensing fee") {
+		return kubecost.OtherCategory
+	}
+
+	// Storage Descriptions
+	if strings.Contains(d, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd capacity") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd iops") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd snapshot") {
+		return kubecost.StorageCategory
+	}
+
+	// Service Defaults
+	if strings.Contains(s, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "compute") {
+		return kubecost.ComputeCategory
+	}
+	if strings.Contains(s, "sql") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "bigquery") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "kubernetes") {
+		return kubecost.ManagementCategory
+	} else if strings.Contains(s, "pub/sub") {
+		return kubecost.NetworkCategory
+	}
+
+	return kubecost.OtherCategory
+}
+
// parseProviderIDRx captures the final path segment of a provider ID, e.g.
// "gke-cluster-3-default-pool-xxxx-yy" from
// "projects/###/instances/gke-cluster-3-default-pool-xxxx-yy".
// A raw string literal avoids the double-escaped "\\/" (staticcheck S1007);
// the pattern is unchanged ("/" needs no escaping in Go regexp).
var parseProviderIDRx = regexp.MustCompile(`^.+/(.+)?`)

// GCPParseProviderID returns the last "/"-separated segment of id. If id
// contains no "/" the input is returned unchanged; a trailing "/" yields "".
func GCPParseProviderID(id string) string {
	match := parseProviderIDRx.FindStringSubmatch(id)
	if len(match) == 0 {
		return id
	}
	return match[len(match)-1]
}

+ 1632 - 0
pkg/cloud/gcp/provider.go

@@ -0,0 +1,1632 @@
+package gcp
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/fileutil"
+	"github.com/opencost/opencost/pkg/util/json"
+	"github.com/opencost/opencost/pkg/util/timeutil"
+	"github.com/rs/zerolog"
+
+	"cloud.google.com/go/bigquery"
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/compute/v1"
+	v1 "k8s.io/api/core/v1"
+)
+
// GKE_GPU_TAG is the node label GKE sets to advertise the accelerator type
// attached to a node.
const GKE_GPU_TAG = "cloud.google.com/gke-accelerator"

// BigqueryUpdateType identifies config updates whose payload is a BigQueryConfig.
const BigqueryUpdateType = "bigqueryupdate"

const (
	// GCPHourlyPublicIPCost is a hard-coded hourly cost estimate for a public
	// IP address, used when live pricing is not consulted.
	GCPHourlyPublicIPCost = 0.01

	// Hard-coded monthly per-GB disk cost estimates by disk class.
	GCPMonthlyBasicDiskCost = 0.04
	GCPMonthlySSDDiskCost   = 0.17
	GCPMonthlyGP2DiskCost   = 0.1

	// Node labels GKE sets on preemptible and spot nodes, respectively.
	GKEPreemptibleLabel = "cloud.google.com/gke-preemptible"
	GKESpotLabel        = "cloud.google.com/gke-spot"
)

// List obtained by installing the `gcloud` CLI tool,
// logging into gcp account, and running command
// `gcloud compute regions list`
var gcpRegions = []string{
	"asia-east1",
	"asia-east2",
	"asia-northeast1",
	"asia-northeast2",
	"asia-northeast3",
	"asia-south1",
	"asia-south2",
	"asia-southeast1",
	"asia-southeast2",
	"australia-southeast1",
	"australia-southeast2",
	"europe-central2",
	"europe-north1",
	"europe-west1",
	"europe-west2",
	"europe-west3",
	"europe-west4",
	"europe-west6",
	"northamerica-northeast1",
	"northamerica-northeast2",
	"southamerica-east1",
	"us-central1",
	"us-east1",
	"us-east4",
	"us-west1",
	"us-west2",
	"us-west3",
	"us-west4",
}

var (
	// nvidiaGPURegex captures the GPU model name (e.g. "Nvidia Tesla V100")
	// from a pricing description.
	nvidiaGPURegex = regexp.MustCompile("(Nvidia Tesla [^ ]+) ")
	// gceRegex captures the project ID from a GCE provider ID:
	// gce://guestbook-12345/...
	//  => guestbook-12345
	gceRegex = regexp.MustCompile("gce://([^/]*)/*")
)
+
// GCP implements a provider interface for GCP. It caches downloaded SKU
// pricing in Pricing (reads/writes presumably coordinated through
// DownloadPricingDataLock — confirm against DownloadPricingData), and carries
// cluster identity metadata (region, account, project, provisioner) plus the
// provider configuration used to resolve custom pricing.
type GCP struct {
	Pricing                 map[string]*GCPPricing
	Clientset               clustercache.ClusterCache
	APIKey                  string
	BaseCPUPrice            string
	ProjectID               string
	BillingDataDataset      string
	DownloadPricingDataLock sync.RWMutex
	ReservedInstances       []*GCPReservedInstance
	Config                  models.ProviderConfig
	ServiceKeyProvided      bool
	ValidPricingKeys        map[string]bool
	MetadataClient          *metadata.Client
	clusterManagementPrice  float64
	ClusterRegion           string
	ClusterAccountID        string
	ClusterProjectID        string
	clusterProvisioner      string
}
+
// gcpAllocation is a BigQuery row shape (note the bigquery.NullString fields)
// for billing-export results keyed by a single aggregator/environment pair.
type gcpAllocation struct {
	Aggregator  bigquery.NullString
	Environment bigquery.NullString
	Service     string
	Cost        float64
}

// multiKeyGCPAllocation is a BigQuery row shape for results keyed by a
// combined multi-key label string.
type multiKeyGCPAllocation struct {
	Keys    bigquery.NullString
	Service string
	Cost    float64
}
+
// GetLocalStorageQuery returns the cost of local storage for the given window. Setting rate=true
// returns hourly spend. Setting used=true only tracks used storage, not total.
func (gcp *GCP) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
	// TODO Set to the price for the appropriate storage class. It's not trivial to determine the local storage disk type
	// See https://cloud.google.com/compute/disks-image-pricing#persistentdisk
	localStorageCost := 0.04

	baseMetric := "container_fs_limit_bytes"
	if used {
		baseMetric = "container_fs_usage_bytes"
	}

	fmtOffset := timeutil.DurationToPromOffsetString(offset)

	// Samples are taken at 1m resolution; /60 converts minute-samples to
	// hours, /730 approximates hours-per-month, and the /1024 chain converts
	// bytes to GiB before applying the per-GiB-month price.
	fmtCumulativeQuery := `sum(
		sum_over_time(%s{device!="tmpfs", id="/"}[%s:1m]%s)
	) by (%s) / 60 / 730 / 1024 / 1024 / 1024 * %f`

	fmtMonthlyQuery := `sum(
		avg_over_time(%s{device!="tmpfs", id="/"}[%s:1m]%s)
	) by (%s) / 1024 / 1024 / 1024 * %f`

	fmtQuery := fmtCumulativeQuery
	if rate {
		fmtQuery = fmtMonthlyQuery
	}
	fmtWindow := timeutil.DurationString(window)

	return fmt.Sprintf(fmtQuery, baseMetric, fmtWindow, fmtOffset, env.GetPromClusterLabel(), localStorageCost)
}
+
+func (gcp *GCP) GetConfig() (*models.CustomPricing, error) {
+	c, err := gcp.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	if c.Discount == "" {
+		c.Discount = "30%"
+	}
+	if c.NegotiatedDiscount == "" {
+		c.NegotiatedDiscount = "0%"
+	}
+	if c.CurrencyCode == "" {
+		c.CurrencyCode = "USD"
+	}
+	if c.ShareTenancyCosts == "" {
+		c.ShareTenancyCosts = models.DefaultShareTenancyCost
+	}
+	return c, nil
+}
+
// BigQueryConfig contain the required config and credentials to access OOC resources for GCP
// Deprecated: v1.104 Use BigQueryConfiguration instead
type BigQueryConfig struct {
	ProjectID          string            `json:"projectID"`
	BillingDataDataset string            `json:"billingDataDataset"`
	Key                map[string]string `json:"key"`
}

// IsEmpty returns true if all fields in config are empty, false if not.
func (bqc *BigQueryConfig) IsEmpty() bool {
	// len on a nil map is 0, so the previous explicit nil check was
	// redundant (staticcheck S1009).
	return bqc.ProjectID == "" &&
		bqc.BillingDataDataset == "" &&
		len(bqc.Key) == 0
}
+func (gcp *GCP) GetManagementPlatform() (string, error) {
+	nodes := gcp.Clientset.GetAllNodes()
+
+	if len(nodes) > 0 {
+		n := nodes[0]
+		version := n.Status.NodeInfo.KubeletVersion
+		if strings.Contains(version, "gke") {
+			return "gke", nil
+		}
+	}
+	return "", nil
+}
+
// Attempts to load a GCP auth secret and copy the contents to the key file.
// Best-effort: if key.json already exists under the config path, or the
// mounted secret is missing/unreadable, the function logs and returns
// without error.
func (*GCP) loadGCPAuthSecret() {
	path := env.GetConfigPathWithDefault("/models/")

	keyPath := path + "key.json"
	keyExists, _ := fileutil.FileExists(keyPath)
	if keyExists {
		log.Info("GCP Auth Key already exists, no need to load from secret")
		return
	}

	exists, err := fileutil.FileExists(models.AuthSecretPath)
	if !exists || err != nil {
		errMessage := "Secret does not exist"
		if err != nil {
			errMessage = err.Error()
		}

		log.Warnf("Failed to load auth secret, or was not mounted: %s", errMessage)
		return
	}

	result, err := os.ReadFile(models.AuthSecretPath)
	if err != nil {
		log.Warnf("Failed to load auth secret, or was not mounted: %s", err.Error())
		return
	}

	// Copy the secret contents into place as the GCP service account key.
	err = os.WriteFile(keyPath, result, 0644)
	if err != nil {
		log.Warnf("Failed to copy auth secret to %s: %s", keyPath, err.Error())
	}
}
+
// UpdateConfigFromConfigMap updates the custom pricing configuration from the
// given key/value map and returns the resulting config.
func (gcp *GCP) UpdateConfigFromConfigMap(a map[string]string) (*models.CustomPricing, error) {
	return gcp.Config.UpdateFromMap(a)
}
+
// UpdateConfig applies a JSON-encoded configuration update read from r,
// dispatching on updateType:
//   - BigqueryUpdateType: decodes a BigQueryConfig; a provided service key is
//     written to key.json under the config path and ServiceKeyProvided is set.
//   - aws.AthenaInfoUpdateType: decodes AWS Athena settings into the config.
//   - anything else: decodes a map of custom pricing field names to string
//     values and sets each field on the config.
//
// When remote writes are enabled, cluster metadata is refreshed after a
// successful update.
func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
	return gcp.Config.Update(func(c *models.CustomPricing) error {
		if updateType == BigqueryUpdateType {
			a := BigQueryConfig{}
			err := json.NewDecoder(r).Decode(&a)
			if err != nil {
				return err
			}

			c.ProjectID = a.ProjectID
			c.BillingDataDataset = a.BillingDataDataset

			// Persist a provided service account key to disk for later use.
			if len(a.Key) > 0 {
				j, err := json.Marshal(a.Key)
				if err != nil {
					return err
				}

				path := env.GetConfigPathWithDefault("/models/")

				keyPath := path + "key.json"
				err = os.WriteFile(keyPath, j, 0644)
				if err != nil {
					return err
				}
				gcp.ServiceKeyProvided = true
			}
		} else if updateType == aws.AthenaInfoUpdateType {
			a := aws.AwsAthenaInfo{}
			err := json.NewDecoder(r).Decode(&a)
			if err != nil {
				return err
			}
			c.AthenaBucketName = a.AthenaBucketName
			c.AthenaRegion = a.AthenaRegion
			c.AthenaDatabase = a.AthenaDatabase
			c.AthenaTable = a.AthenaTable
			c.AthenaWorkgroup = a.AthenaWorkgroup
			c.ServiceKeyName = a.ServiceKeyName
			c.ServiceKeySecret = a.ServiceKeySecret
			c.AthenaProjectID = a.AccountID
		} else {
			a := make(map[string]interface{})
			err := json.NewDecoder(r).Decode(&a)
			if err != nil {
				return err
			}
			for k, v := range a {
				kUpper := utils.ToTitle.String(k) // Just so we consistently supply / receive the same values, uppercase the first letter.
				vstr, ok := v.(string)
				if ok {
					err := models.SetCustomPricingField(c, kUpper, vstr)
					if err != nil {
						return err
					}
				} else {
					// Only string values are supported for custom pricing fields.
					return fmt.Errorf("type error while updating config for %s", kUpper)
				}
			}
		}

		if env.IsRemoteEnabled() {
			err := utils.UpdateClusterMeta(env.GetClusterID(), c.ClusterName)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
+
+// ClusterInfo returns information on the GKE cluster, as provided by metadata.
+func (gcp *GCP) ClusterInfo() (map[string]string, error) {
+	remoteEnabled := env.IsRemoteEnabled()
+
+	attribute, err := gcp.MetadataClient.InstanceAttributeValue("cluster-name")
+	if err != nil {
+		log.Infof("Error loading metadata cluster-name: %s", err.Error())
+	}
+
+	c, err := gcp.GetConfig()
+	if err != nil {
+		log.Errorf("Error opening config: %s", err.Error())
+	}
+	if c.ClusterName != "" {
+		attribute = c.ClusterName
+	}
+
+	// Use a default name if none has been set until this point
+	if attribute == "" {
+		attribute = "GKE Cluster #1"
+	}
+
+	m := make(map[string]string)
+	m["name"] = attribute
+	m["provider"] = kubecost.GCPProvider
+	m["region"] = gcp.ClusterRegion
+	m["account"] = gcp.ClusterAccountID
+	m["project"] = gcp.ClusterProjectID
+	m["provisioner"] = gcp.clusterProvisioner
+	m["id"] = env.GetClusterID()
+	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
+	return m, nil
+}
+
// ClusterManagementPricing returns the cluster provisioner name and the
// management fee recorded for this cluster.
func (gcp *GCP) ClusterManagementPricing() (string, float64, error) {
	return gcp.clusterProvisioner, gcp.clusterManagementPrice, nil
}
+
+func (gcp *GCP) getAllAddresses() (*compute.AddressAggregatedList, error) {
+	projID, err := gcp.MetadataClient.ProjectID()
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := google.DefaultClient(context.TODO(),
+		"https://www.googleapis.com/auth/compute.readonly")
+	if err != nil {
+		return nil, err
+	}
+
+	svc, err := compute.New(client)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := svc.Addresses.AggregatedList(projID).Do()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (gcp *GCP) GetAddresses() ([]byte, error) {
+	res, err := gcp.getAllAddresses()
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(res)
+}
+
+func (gcp *GCP) isAddressOrphaned(address *compute.Address) bool {
+	// Consider address orphaned if it has 0 users
+	return len(address.Users) == 0
+}
+
+func (gcp *GCP) getAllDisks() (*compute.DiskAggregatedList, error) {
+	projID, err := gcp.MetadataClient.ProjectID()
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := google.DefaultClient(context.TODO(),
+		"https://www.googleapis.com/auth/compute.readonly")
+	if err != nil {
+		return nil, err
+	}
+
+	svc, err := compute.New(client)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := svc.Disks.AggregatedList(projID).Do()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// GetDisks returns the GCP disks backing PVs. Useful because sometimes k8s will not clean up PVs correctly. Requires a json config in /var/configs with key region.
+func (gcp *GCP) GetDisks() ([]byte, error) {
+	res, err := gcp.getAllDisks()
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(res)
+}
+
+func (gcp *GCP) isDiskOrphaned(disk *compute.Disk) (bool, error) {
+	// Do not consider disk orphaned if it has more than 0 users
+	if len(disk.Users) > 0 {
+		return false, nil
+	}
+
+	// Do not consider disk orphaned if it was used within the last hour
+	threshold := time.Now().Add(time.Duration(-1) * time.Hour)
+	if disk.LastDetachTimestamp != "" {
+		lastUsed, err := time.Parse(time.RFC3339, disk.LastDetachTimestamp)
+		if err != nil {
+			// This can return false since errors are checked before the bool
+			return false, fmt.Errorf("error parsing time: %s", err)
+		}
+		if threshold.Before(lastUsed) {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
// GetOrphanedResources returns the project's apparently-orphaned disks (no
// users, detached for over an hour) and addresses (no users), with monthly
// costs estimated from the hard-coded price constants.
func (gcp *GCP) GetOrphanedResources() ([]models.OrphanedResource, error) {
	disks, err := gcp.getAllDisks()
	if err != nil {
		return nil, err
	}

	addresses, err := gcp.getAllAddresses()
	if err != nil {
		return nil, err
	}

	var orphanedResources []models.OrphanedResource

	for _, diskList := range disks.Items {
		if len(diskList.Disks) == 0 {
			continue
		}

		for _, disk := range diskList.Disks {
			isOrphaned, err := gcp.isDiskOrphaned(disk)
			if err != nil {
				return nil, err
			}
			if isOrphaned {
				cost, err := gcp.findCostForDisk(disk)
				if err != nil {
					return nil, err
				}

				// GCP gives us description as a string formatted as a map[string]string, so we need to
				// deconstruct it back into a map[string]string to match the OR struct
				desc := map[string]string{}
				if disk.Description != "" {
					if err := json.Unmarshal([]byte(disk.Description), &desc); err != nil {
						return nil, fmt.Errorf("error converting string to map: %s", err)
					}
				}

				// Converts https://www.googleapis.com/compute/v1/projects/xxxxx/zones/us-central1-c to us-central1-c
				zone := path.Base(disk.Zone)
				if zone == "." {
					zone = ""
				}

				or := models.OrphanedResource{
					Kind:        "disk",
					Region:      zone,
					Description: desc,
					Size:        &disk.SizeGb,
					DiskName:    disk.Name,
					Url:         disk.SelfLink,
					MonthlyCost: cost,
				}
				orphanedResources = append(orphanedResources, or)
			}
		}
	}

	for _, addressList := range addresses.Items {
		if len(addressList.Addresses) == 0 {
			continue
		}

		for _, address := range addressList.Addresses {
			if gcp.isAddressOrphaned(address) {
				//todo: use GCP pricing
				cost := GCPHourlyPublicIPCost * timeutil.HoursPerMonth

				// Converts https://www.googleapis.com/compute/v1/projects/xxxxx/regions/us-central1 to us-central1
				region := path.Base(address.Region)
				if region == "." {
					region = ""
				}

				or := models.OrphanedResource{
					Kind:   "address",
					Region: region,
					Description: map[string]string{
						"type": address.AddressType,
					},
					Address:     address.Address,
					Url:         address.SelfLink,
					MonthlyCost: &cost,
				}
				orphanedResources = append(orphanedResources, or)
			}
		}
	}

	return orphanedResources, nil
}
+
+func (gcp *GCP) findCostForDisk(disk *compute.Disk) (*float64, error) {
+	//todo: use GCP pricing struct
+	price := GCPMonthlyBasicDiskCost
+	if strings.Contains(disk.Type, "ssd") {
+		price = GCPMonthlySSDDiskCost
+	}
+	if strings.Contains(disk.Type, "gp2") {
+		price = GCPMonthlyGP2DiskCost
+	}
+	cost := price * float64(disk.SizeGb)
+
+	// This isn't much use but I (Nick) think its could be going down the
+	// right path. Disk region isnt returning anything (and if it did its
+	// a url, same with type). Currently the only region stored in the
+	// Pricing struct is uscentral-1, so that would need to be fixed
+	// key := disk.Region + "," + disk.Type
+
+	// priceStr := gcp.Pricing[key].PV.Cost
+	// price, err := strconv.ParseFloat(priceStr, 64)
+	// if err != nil {
+	// 	return nil, err
+	// }
+
+	// cost := price * timeutil.HoursPerMonth * float64(disk.SizeGb)
+	return &cost, nil
+}
+
// GCPPricing represents GCP pricing data for a SKU, as decoded from the
// Cloud Billing Catalog API, augmented with the Node/PV cost models derived
// from it.
type GCPPricing struct {
	Name                string           `json:"name"`
	SKUID               string           `json:"skuId"`
	Description         string           `json:"description"`
	Category            *GCPResourceInfo `json:"category"`
	ServiceRegions      []string         `json:"serviceRegions"`
	PricingInfo         []*PricingInfo   `json:"pricingInfo"`
	ServiceProviderName string           `json:"serviceProviderName"`
	Node                *models.Node     `json:"node"`
	PV                  *models.PV       `json:"pv"`
}

// PricingInfo contains metadata about a cost.
// NOTE(review): EffectiveTime's tag is `json:""`, which (un)marshals under the
// default field name "EffectiveTime" — possibly intended `json:"effectiveTime"`;
// confirm before changing, since it alters the wire format.
type PricingInfo struct {
	Summary                string             `json:"summary"`
	PricingExpression      *PricingExpression `json:"pricingExpression"`
	CurrencyConversionRate float64            `json:"currencyConversionRate"`
	EffectiveTime          string             `json:""`
}

// PricingExpression contains metadata about a cost.
type PricingExpression struct {
	UsageUnit                string         `json:"usageUnit"`
	UsageUnitDescription     string         `json:"usageUnitDescription"`
	BaseUnit                 string         `json:"baseUnit"`
	BaseUnitConversionFactor int64          `json:"-"`
	DisplayQuantity          int            `json:"displayQuantity"`
	TieredRates              []*TieredRates `json:"tieredRates"`
}

// TieredRates contain data about variable pricing.
type TieredRates struct {
	StartUsageAmount int            `json:"startUsageAmount"`
	UnitPrice        *UnitPriceInfo `json:"unitPrice"`
}

// UnitPriceInfo contains data about the actual price being charged.
// Nanos is billionths of a currency unit (price = Units + Nanos * 1e-9).
type UnitPriceInfo struct {
	CurrencyCode string  `json:"currencyCode"`
	Units        string  `json:"units"`
	Nanos        float64 `json:"nanos"`
}

// GCPResourceInfo contains metadata about the node.
type GCPResourceInfo struct {
	ServiceDisplayName string `json:"serviceDisplayName"`
	ResourceFamily     string `json:"resourceFamily"`
	ResourceGroup      string `json:"resourceGroup"`
	UsageType          string `json:"usageType"`
}
+
+func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]models.Key, pvKeys map[string]models.PVKey) (map[string]*GCPPricing, string, error) {
+	gcpPricingList := make(map[string]*GCPPricing)
+	var nextPageToken string
+	dec := json.NewDecoder(r)
+	for {
+		t, err := dec.Token()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, "", fmt.Errorf("Error parsing GCP pricing page: %s", err)
+		}
+		if t == "skus" {
+			_, err := dec.Token() // consumes [
+			if err != nil {
+				return nil, "", err
+			}
+			for dec.More() {
+
+				product := &GCPPricing{}
+				err := dec.Decode(&product)
+				if err != nil {
+					return nil, "", err
+				}
+
+				usageType := strings.ToLower(product.Category.UsageType)
+				instanceType := strings.ToLower(product.Category.ResourceGroup)
+
+				if instanceType == "ssd" && strings.Contains(product.Description, "SSD backed") && !strings.Contains(product.Description, "Regional") { // TODO: support regional
+					lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+					var nanos float64
+					if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+						nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+					} else {
+						continue
+					}
+					hourlyPrice := (nanos * math.Pow10(-9)) / 730
+
+					for _, sr := range product.ServiceRegions {
+						region := sr
+						candidateKey := region + "," + "ssd"
+						if _, ok := pvKeys[candidateKey]; ok {
+							product.PV = &models.PV{
+								Cost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+							}
+							gcpPricingList[candidateKey] = product
+							continue
+						}
+					}
+					continue
+				} else if instanceType == "ssd" && strings.Contains(product.Description, "SSD backed") && strings.Contains(product.Description, "Regional") { // TODO: support regional
+					lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+					var nanos float64
+					if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+						nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+					} else {
+						continue
+					}
+					hourlyPrice := (nanos * math.Pow10(-9)) / 730
+
+					for _, sr := range product.ServiceRegions {
+						region := sr
+						candidateKey := region + "," + "ssd" + "," + "regional"
+						if _, ok := pvKeys[candidateKey]; ok {
+							product.PV = &models.PV{
+								Cost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+							}
+							gcpPricingList[candidateKey] = product
+							continue
+						}
+					}
+					continue
+				} else if instanceType == "pdstandard" && !strings.Contains(product.Description, "Regional") { // TODO: support regional
+					lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+					var nanos float64
+					if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+						nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+					} else {
+						continue
+					}
+					hourlyPrice := (nanos * math.Pow10(-9)) / 730
+					for _, sr := range product.ServiceRegions {
+						region := sr
+						candidateKey := region + "," + "pdstandard"
+						if _, ok := pvKeys[candidateKey]; ok {
+							product.PV = &models.PV{
+								Cost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+							}
+							gcpPricingList[candidateKey] = product
+							continue
+						}
+					}
+					continue
+				} else if instanceType == "pdstandard" && strings.Contains(product.Description, "Regional") { // TODO: support regional
+					lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+					var nanos float64
+					if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+						nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+					} else {
+						continue
+					}
+					hourlyPrice := (nanos * math.Pow10(-9)) / 730
+					for _, sr := range product.ServiceRegions {
+						region := sr
+						candidateKey := region + "," + "pdstandard" + "," + "regional"
+						if _, ok := pvKeys[candidateKey]; ok {
+							product.PV = &models.PV{
+								Cost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+							}
+							gcpPricingList[candidateKey] = product
+							continue
+						}
+					}
+					continue
+				}
+
+				if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "CUSTOM") {
+					instanceType = "custom"
+				}
+
+				if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "N2") && !strings.Contains(strings.ToUpper(product.Description), "PREMIUM") {
+					if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "N2D AMD") {
+						instanceType = "n2dstandard"
+					} else {
+						instanceType = "n2standard"
+					}
+				}
+
+				if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "A2 INSTANCE") {
+					instanceType = "a2"
+				}
+
+				if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "COMPUTE OPTIMIZED") {
+					instanceType = "c2standard"
+				}
+
+				if (instanceType == "ram" || instanceType == "cpu") && strings.Contains(strings.ToUpper(product.Description), "E2 INSTANCE") {
+					instanceType = "e2"
+				}
+				partialCPUMap := make(map[string]float64)
+				partialCPUMap["e2micro"] = 0.25
+				partialCPUMap["e2small"] = 0.5
+				partialCPUMap["e2medium"] = 1
+				/*
+					var partialCPU float64
+					if strings.ToLower(instanceType) == "f1micro" {
+						partialCPU = 0.2
+					} else if strings.ToLower(instanceType) == "g1small" {
+						partialCPU = 0.5
+					}
+				*/
+				var gpuType string
+				for matchnum, group := range nvidiaGPURegex.FindStringSubmatch(product.Description) {
+					if matchnum == 1 {
+						gpuType = strings.ToLower(strings.Join(strings.Split(group, " "), "-"))
+						log.Debug("GPU type found: " + gpuType)
+					}
+				}
+
+				candidateKeys := []string{}
+				if gcp.ValidPricingKeys == nil {
+					gcp.ValidPricingKeys = make(map[string]bool)
+				}
+
+				for _, region := range product.ServiceRegions {
+					switch instanceType {
+					case "e2":
+						candidateKeys = append(candidateKeys, region+","+"e2micro"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"e2small"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"e2medium"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"e2standard"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"e2custom"+","+usageType)
+					case "a2":
+						candidateKeys = append(candidateKeys, region+","+"a2highgpu"+","+usageType)
+						candidateKeys = append(candidateKeys, region+","+"a2megagpu"+","+usageType)
+					default:
+						candidateKey := region + "," + instanceType + "," + usageType
+						candidateKeys = append(candidateKeys, candidateKey)
+					}
+				}
+
+				for _, candidateKey := range candidateKeys {
+					instanceType = strings.Split(candidateKey, ",")[1] // we may have overridden this while generating candidate keys
+					region := strings.Split(candidateKey, ",")[0]
+					candidateKeyGPU := candidateKey + ",gpu"
+					gcp.ValidPricingKeys[candidateKey] = true
+					gcp.ValidPricingKeys[candidateKeyGPU] = true
+					if gpuType != "" {
+						lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+						var nanos float64
+						var unitsBaseCurrency int
+						if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+							nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+							unitsBaseCurrency, err = strconv.Atoi(product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Units)
+							if err != nil {
+								return nil, "", fmt.Errorf("error parsing base unit price for gpu: %w", err)
+							}
+						} else {
+							continue
+						}
+
+						// as per https://cloud.google.com/billing/v1/how-tos/catalog-api
+						// the hourly price is the whole currency price + the fractional currency price
+						hourlyPrice := (nanos * math.Pow10(-9)) + float64(unitsBaseCurrency)
+
+						// GPUs with an hourly price of 0 are reserved versions of GPUs
+						// (E.g., SKU "2013-37B4-22EA")
+						// and are excluded from cost computations
+						if hourlyPrice == 0 {
+							log.Infof("Excluding reserved GPU SKU #%s", product.SKUID)
+							continue
+						}
+
+						for k, key := range inputKeys {
+							if key.GPUType() == gpuType+","+usageType {
+								if region == strings.Split(k, ",")[0] {
+									log.Infof("Matched GPU to node in region \"%s\"", region)
+									log.Debugf("PRODUCT DESCRIPTION: %s", product.Description)
+									matchedKey := key.Features()
+									if pl, ok := gcpPricingList[matchedKey]; ok {
+										pl.Node.GPUName = gpuType
+										pl.Node.GPUCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+										pl.Node.GPU = "1"
+									} else {
+										product.Node = &models.Node{
+											GPUName: gpuType,
+											GPUCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+											GPU:     "1",
+										}
+										gcpPricingList[matchedKey] = product
+									}
+									log.Infof("Added data for " + matchedKey)
+								}
+							}
+						}
+					} else {
+						_, ok := inputKeys[candidateKey]
+						_, ok2 := inputKeys[candidateKeyGPU]
+						if ok || ok2 {
+							lastRateIndex := len(product.PricingInfo[0].PricingExpression.TieredRates) - 1
+							var nanos float64
+							var unitsBaseCurrency int
+							if lastRateIndex > -1 && len(product.PricingInfo) > 0 {
+								nanos = product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Nanos
+								unitsBaseCurrency, err = strconv.Atoi(product.PricingInfo[0].PricingExpression.TieredRates[lastRateIndex].UnitPrice.Units)
+								if err != nil {
+									return nil, "", fmt.Errorf("error parsing base unit price for instance: %w", err)
+								}
+							} else {
+								continue
+							}
+
+							// as per https://cloud.google.com/billing/v1/how-tos/catalog-api
+							// the hourly price is the whole currency price + the fractional currency price
+							hourlyPrice := (nanos * math.Pow10(-9)) + float64(unitsBaseCurrency)
+
+							if hourlyPrice == 0 {
+								continue
+							} else if strings.Contains(strings.ToUpper(product.Description), "RAM") {
+								if instanceType == "custom" {
+									log.Debug("RAM custom sku is: " + product.Name)
+								}
+								if _, ok := gcpPricingList[candidateKey]; ok {
+									gcpPricingList[candidateKey].Node.RAMCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+								} else {
+									product = &GCPPricing{}
+									product.Node = &models.Node{
+										RAMCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+									}
+									partialCPU, pcok := partialCPUMap[instanceType]
+									if pcok {
+										product.Node.VCPU = fmt.Sprintf("%f", partialCPU)
+									}
+									product.Node.UsageType = usageType
+									gcpPricingList[candidateKey] = product
+								}
+								if _, ok := gcpPricingList[candidateKeyGPU]; ok {
+									log.Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
+									gcpPricingList[candidateKeyGPU].Node.RAMCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+								} else {
+									log.Infof("Adding RAM %f for %s", hourlyPrice, candidateKeyGPU)
+									product = &GCPPricing{}
+									product.Node = &models.Node{
+										RAMCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+									}
+									partialCPU, pcok := partialCPUMap[instanceType]
+									if pcok {
+										product.Node.VCPU = fmt.Sprintf("%f", partialCPU)
+									}
+									product.Node.UsageType = usageType
+									gcpPricingList[candidateKeyGPU] = product
+								}
+								break
+							} else {
+								if _, ok := gcpPricingList[candidateKey]; ok {
+									gcpPricingList[candidateKey].Node.VCPUCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+								} else {
+									product = &GCPPricing{}
+									product.Node = &models.Node{
+										VCPUCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+									}
+									partialCPU, pcok := partialCPUMap[instanceType]
+									if pcok {
+										product.Node.VCPU = fmt.Sprintf("%f", partialCPU)
+									}
+									product.Node.UsageType = usageType
+									gcpPricingList[candidateKey] = product
+								}
+								if _, ok := gcpPricingList[candidateKeyGPU]; ok {
+									gcpPricingList[candidateKeyGPU].Node.VCPUCost = strconv.FormatFloat(hourlyPrice, 'f', -1, 64)
+								} else {
+									product = &GCPPricing{}
+									product.Node = &models.Node{
+										VCPUCost: strconv.FormatFloat(hourlyPrice, 'f', -1, 64),
+									}
+									partialCPU, pcok := partialCPUMap[instanceType]
+									if pcok {
+										product.Node.VCPU = fmt.Sprintf("%f", partialCPU)
+									}
+									product.Node.UsageType = usageType
+									gcpPricingList[candidateKeyGPU] = product
+								}
+								break
+							}
+						}
+					}
+				}
+			}
+		}
+		if t == "nextPageToken" {
+			pageToken, err := dec.Token()
+			if err != nil {
+				log.Errorf("Error parsing nextpage token: " + err.Error())
+				return nil, "", err
+			}
+			if pageToken.(string) != "" {
+				nextPageToken = pageToken.(string)
+			} else {
+				nextPageToken = "done"
+			}
+		}
+	}
+	return gcpPricingList, nextPageToken, nil
+}
+
+func (gcp *GCP) parsePages(inputKeys map[string]models.Key, pvKeys map[string]models.PVKey) (map[string]*GCPPricing, error) {
+	var pages []map[string]*GCPPricing
+	c, err := gcp.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	url := "https://cloudbilling.googleapis.com/v1/services/6F81-5844-456A/skus?key=" + gcp.APIKey + "&currencyCode=" + c.CurrencyCode
+	log.Infof("Fetch GCP Billing Data from URL: %s", url)
+	var parsePagesHelper func(string) error
+	parsePagesHelper = func(pageToken string) error {
+		if pageToken == "done" {
+			return nil
+		} else if pageToken != "" {
+			url = url + "&pageToken=" + pageToken
+		}
+		resp, err := http.Get(url)
+		if err != nil {
+			return err
+		}
+		page, token, err := gcp.parsePage(resp.Body, inputKeys, pvKeys)
+		if err != nil {
+			return err
+		}
+		pages = append(pages, page)
+		return parsePagesHelper(token)
+	}
+	err = parsePagesHelper("")
+	if err != nil {
+		return nil, err
+	}
+	returnPages := make(map[string]*GCPPricing)
+	for _, page := range pages {
+		for k, v := range page {
+			if val, ok := returnPages[k]; ok { //keys may need to be merged
+				if val.Node != nil {
+					if val.Node.VCPUCost == "" {
+						val.Node.VCPUCost = v.Node.VCPUCost
+					}
+					if val.Node.RAMCost == "" {
+						val.Node.RAMCost = v.Node.RAMCost
+					}
+					if val.Node.GPUCost == "" {
+						val.Node.GPUCost = v.Node.GPUCost
+						val.Node.GPU = v.Node.GPU
+						val.Node.GPUName = v.Node.GPUName
+					}
+				}
+				if val.PV != nil {
+					if val.PV.Cost == "" {
+						val.PV.Cost = v.PV.Cost
+					}
+				}
+			} else {
+				returnPages[k] = v
+			}
+		}
+	}
+	log.Debugf("ALL PAGES: %+v", returnPages)
+	for k, v := range returnPages {
+		if v.Node != nil {
+			log.Debugf("Returned Page: %s : %+v", k, v.Node)
+		}
+		if v.PV != nil {
+			log.Debugf("Returned Page: %s : %+v", k, v.PV)
+		}
+	}
+	return returnPages, err
+}
+
// DownloadPricingData fetches data from the GCP Pricing API. Requires a
// key -- a kubecost key is provided for quickstart, but should be replaced
// by the user's own key.
func (gcp *GCP) DownloadPricingData() error {
	gcp.DownloadPricingDataLock.Lock()
	defer gcp.DownloadPricingDataLock.Unlock()
	c, err := gcp.Config.GetCustomPricingData()
	if err != nil {
		log.Errorf("Error downloading default pricing data: %s", err.Error())
		return err
	}
	// Load the GCP service-account secret (if any) before touching GCP APIs.
	gcp.loadGCPAuthSecret()

	gcp.BaseCPUPrice = c.CPU
	gcp.ProjectID = c.ProjectID
	gcp.BillingDataDataset = c.BillingDataDataset

	// Build the set of node pricing keys to look up in the pricing API.
	nodeList := gcp.Clientset.GetAllNodes()
	inputkeys := make(map[string]models.Key)

	defaultRegion := "" // Sometimes, PVs may be missing the region label. In that case assume that they are in the same region as the nodes
	for _, n := range nodeList {
		labels := n.GetObjectMeta().GetLabels()
		if _, ok := labels["cloud.google.com/gke-nodepool"]; ok { // The node is part of a GKE nodepool, so you're paying a cluster management cost
			gcp.clusterManagementPrice = 0.10
			gcp.clusterProvisioner = "GKE"
		}
		r, _ := util.GetRegion(labels)
		if r != "" {
			defaultRegion = r
		}
		key := gcp.GetKey(labels, n)
		inputkeys[key.Features()] = key
	}

	// Map storage class name -> parameters; the cluster default class is also
	// registered under "default" and "" so PVs without a class still resolve.
	pvList := gcp.Clientset.GetAllPersistentVolumes()
	storageClasses := gcp.Clientset.GetAllStorageClasses()
	storageClassMap := make(map[string]map[string]string)
	for _, storageClass := range storageClasses {
		params := storageClass.Parameters
		storageClassMap[storageClass.ObjectMeta.Name] = params
		if storageClass.GetAnnotations()["storageclass.kubernetes.io/is-default-class"] == "true" || storageClass.GetAnnotations()["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
			storageClassMap["default"] = params
			storageClassMap[""] = params
		}
	}

	// Build the set of PV pricing keys; PVs whose storage class is unknown
	// are skipped with a deduplicated warning.
	pvkeys := make(map[string]models.PVKey)
	for _, pv := range pvList {
		params, ok := storageClassMap[pv.Spec.StorageClassName]
		if !ok {
			log.DedupedWarningf(5, "Unable to find params for storageClassName %s", pv.Name)
			continue
		}
		key := gcp.GetPVKey(pv, params, defaultRegion)
		pvkeys[key.Features()] = key
	}

	// Reserved instance lookup failures are non-fatal; pricing proceeds
	// without reserved data.
	reserved, err := gcp.getReservedInstances()
	if err != nil {
		log.Errorf("Failed to lookup reserved instance data: %s", err.Error())
	} else {
		gcp.ReservedInstances = reserved

		if zerolog.GlobalLevel() <= zerolog.DebugLevel {
			log.Debugf("Found %d reserved instances", len(reserved))
			for _, r := range reserved {
				log.Debugf("%s", r)
			}
		}
	}

	pages, err := gcp.parsePages(inputkeys, pvkeys)

	if err != nil {
		return err
	}
	gcp.Pricing = pages
	return nil
}
+
+func (gcp *GCP) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	gcp.DownloadPricingDataLock.RLock()
+	defer gcp.DownloadPricingDataLock.RUnlock()
+	pricing, ok := gcp.Pricing[pvk.Features()]
+	if !ok {
+		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		return &models.PV{}, nil
+	}
+	return pricing.PV, nil
+}
+
+// Stubbed NetworkPricing for GCP. Pull directly from gcp.json for now
+func (gcp *GCP) NetworkPricing() (*models.Network, error) {
+	cpricing, err := gcp.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	znec, err := strconv.ParseFloat(cpricing.ZoneNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	rnec, err := strconv.ParseFloat(cpricing.RegionNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	inec, err := strconv.ParseFloat(cpricing.InternetNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &models.Network{
+		ZoneNetworkEgressCost:     znec,
+		RegionNetworkEgressCost:   rnec,
+		InternetNetworkEgressCost: inec,
+	}, nil
+}
+
+func (gcp *GCP) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	fffrc := 0.025
+	afrc := 0.010
+	lbidc := 0.008
+
+	numForwardingRules := 1.0
+	dataIngressGB := 0.0
+
+	var totalCost float64
+	if numForwardingRules < 5 {
+		totalCost = fffrc*numForwardingRules + lbidc*dataIngressGB
+	} else {
+		totalCost = fffrc*5 + afrc*(numForwardingRules-5) + lbidc*dataIngressGB
+	}
+	return &models.LoadBalancer{
+		Cost: totalCost,
+	}, nil
+}
+
// String constants used when interpreting GCP region commitment (reserved
// instance) records returned by the compute API.
const (
	GCPReservedInstanceResourceTypeRAM string = "MEMORY"
	GCPReservedInstanceResourceTypeCPU string = "VCPU"
	GCPReservedInstanceStatusActive    string = "ACTIVE"
	GCPReservedInstancePlanOneYear     string = "TWELVE_MONTH"
	GCPReservedInstancePlanThreeYear   string = "THIRTY_SIX_MONTH"
)

// GCPReservedInstancePlan holds the per-hour CPU and RAM rates for one
// commitment plan duration.
type GCPReservedInstancePlan struct {
	Name    string
	CPUCost float64
	RAMCost float64
}

// GCPReservedInstance represents a single region commitment: the reserved
// CPU and RAM amounts, the plan pricing it, and its validity window.
type GCPReservedInstance struct {
	ReservedRAM int64
	ReservedCPU int64
	Plan        *GCPReservedInstancePlan
	StartDate   time.Time
	EndDate     time.Time
	Region      string
}
+
+func (r *GCPReservedInstance) String() string {
+	return fmt.Sprintf("[CPU: %d, RAM: %d, Region: %s, Start: %s, End: %s]", r.ReservedCPU, r.ReservedRAM, r.Region, r.StartDate.String(), r.EndDate.String())
+}
+
// GCPReservedCounter tracks how much of one reserved instance's CPU and RAM
// capacity remains unassigned while reservations are spread across nodes.
type GCPReservedCounter struct {
	RemainingCPU int64
	RemainingRAM int64
	Instance     *GCPReservedInstance
}

// newReservedCounter creates a counter with the instance's full reserved
// capacity still remaining.
func newReservedCounter(instance *GCPReservedInstance) *GCPReservedCounter {
	return &GCPReservedCounter{
		RemainingCPU: instance.ReservedCPU,
		RemainingRAM: instance.ReservedRAM,
		Instance:     instance,
	}
}
+
// Two available Reservation plans for GCP, 1-year and 3-year, keyed by the
// commitment plan name the API reports, with hourly CPU and RAM rates.
var gcpReservedInstancePlans map[string]*GCPReservedInstancePlan = map[string]*GCPReservedInstancePlan{
	GCPReservedInstancePlanOneYear: {
		Name:    GCPReservedInstancePlanOneYear,
		CPUCost: 0.019915,
		RAMCost: 0.002669,
	},
	GCPReservedInstancePlanThreeYear: {
		Name:    GCPReservedInstancePlanThreeYear,
		CPUCost: 0.014225,
		RAMCost: 0.001907,
	},
}
+
// ApplyReservedInstancePricing distributes the project's currently-valid
// reserved instance (commitment) capacity across the given nodes, region by
// region. Each node is matched to a K8s node by name to find its region; its
// Reserved allocation is then filled from that region's remaining counters.
func (gcp *GCP) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
	numReserved := len(gcp.ReservedInstances)

	// Early return if no reserved instance data loaded
	if numReserved == 0 {
		log.Debug("[Reserved] No Reserved Instances")
		return
	}

	now := time.Now()

	// Group reservations that are valid right now into per-region counters.
	counters := make(map[string][]*GCPReservedCounter)
	for _, r := range gcp.ReservedInstances {
		if now.Before(r.StartDate) || now.After(r.EndDate) {
			log.Infof("[Reserved] Skipped Reserved Instance due to dates")
			continue
		}

		_, ok := counters[r.Region]
		counter := newReservedCounter(r)
		if !ok {
			counters[r.Region] = []*GCPReservedCounter{counter}
		} else {
			counters[r.Region] = append(counters[r.Region], counter)
		}
	}

	gcpNodes := make(map[string]*v1.Node)
	currentNodes := gcp.Clientset.GetAllNodes()

	// Create a node name -> node map
	for _, gcpNode := range currentNodes {
		gcpNodes[gcpNode.GetName()] = gcpNode
	}

	// go through all provider nodes using k8s nodes for region
	for nodeName, node := range nodes {
		// Reset reserved allocation to prevent double allocation
		node.Reserved = nil

		kNode, ok := gcpNodes[nodeName]
		if !ok {
			log.Debugf("[Reserved] Could not find K8s Node with name: %s", nodeName)
			continue
		}

		nodeRegion, ok := util.GetRegion(kNode.Labels)
		if !ok {
			log.Debug("[Reserved] Could not find node region")
			continue
		}

		reservedCounters, ok := counters[nodeRegion]
		if !ok {
			log.Debugf("[Reserved] Could not find counters for region: %s", nodeRegion)
			continue
		}

		node.Reserved = &models.ReservedInstanceData{
			ReservedCPU: 0,
			ReservedRAM: 0,
		}

		// Drain the region's counters into this node: each counter covers as
		// much of the node's still-unreserved CPU/RAM as it has left. Note
		// the last counter touched sets the node's CPUCost/RAMCost rates.
		for _, reservedCounter := range reservedCounters {
			if reservedCounter.RemainingCPU != 0 {
				nodeCPU, _ := strconv.ParseInt(node.VCPU, 10, 64)
				nodeCPU -= node.Reserved.ReservedCPU
				node.Reserved.CPUCost = reservedCounter.Instance.Plan.CPUCost

				if reservedCounter.RemainingCPU >= nodeCPU {
					reservedCounter.RemainingCPU -= nodeCPU
					node.Reserved.ReservedCPU += nodeCPU
				} else {
					node.Reserved.ReservedCPU += reservedCounter.RemainingCPU
					reservedCounter.RemainingCPU = 0
				}
			}

			if reservedCounter.RemainingRAM != 0 {
				nodeRAMF, _ := strconv.ParseFloat(node.RAMBytes, 64)
				nodeRAM := int64(nodeRAMF)
				nodeRAM -= node.Reserved.ReservedRAM
				node.Reserved.RAMCost = reservedCounter.Instance.Plan.RAMCost

				if reservedCounter.RemainingRAM >= nodeRAM {
					reservedCounter.RemainingRAM -= nodeRAM
					node.Reserved.ReservedRAM += nodeRAM
				} else {
					node.Reserved.ReservedRAM += reservedCounter.RemainingRAM
					reservedCounter.RemainingRAM = 0
				}
			}
		}
	}
}
+
+func (gcp *GCP) getReservedInstances() ([]*GCPReservedInstance, error) {
+	var results []*GCPReservedInstance
+
+	ctx := context.Background()
+	computeService, err := compute.NewService(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	commitments, err := computeService.RegionCommitments.AggregatedList(gcp.ProjectID).Do()
+	if err != nil {
+		return nil, err
+	}
+
+	for regionKey, commitList := range commitments.Items {
+		for _, commit := range commitList.Commitments {
+			if commit.Status != GCPReservedInstanceStatusActive {
+				continue
+			}
+
+			var vcpu int64 = 0
+			var ram int64 = 0
+			for _, resource := range commit.Resources {
+				switch resource.Type {
+				case GCPReservedInstanceResourceTypeRAM:
+					ram = resource.Amount * 1024 * 1024
+				case GCPReservedInstanceResourceTypeCPU:
+					vcpu = resource.Amount
+				default:
+					log.Debugf("Failed to handle resource type: %s", resource.Type)
+				}
+			}
+
+			var region string
+			regionStr := strings.Split(regionKey, "/")
+			if len(regionStr) == 2 {
+				region = regionStr[1]
+			}
+
+			timeLayout := "2006-01-02T15:04:05Z07:00"
+			startTime, err := time.Parse(timeLayout, commit.StartTimestamp)
+			if err != nil {
+				log.Warnf("Failed to parse start date: %s", commit.StartTimestamp)
+				continue
+			}
+
+			endTime, err := time.Parse(timeLayout, commit.EndTimestamp)
+			if err != nil {
+				log.Warnf("Failed to parse end date: %s", commit.EndTimestamp)
+				continue
+			}
+
+			// Look for a plan based on the name. Default to One Year if it fails
+			plan, ok := gcpReservedInstancePlans[commit.Plan]
+			if !ok {
+				plan = gcpReservedInstancePlans[GCPReservedInstancePlanOneYear]
+			}
+
+			results = append(results, &GCPReservedInstance{
+				Region:      region,
+				ReservedRAM: ram,
+				ReservedCPU: vcpu,
+				Plan:        plan,
+				StartDate:   startTime,
+				EndDate:     endTime,
+			})
+		}
+	}
+
+	return results, nil
+}
+
// pvKey identifies a persistent volume for pricing lookups: its provider
// disk ID, its labels (used to find the region), its storage class and class
// parameters, and a fallback region for PVs missing the region label.
type pvKey struct {
	ProviderID             string
	Labels                 map[string]string
	StorageClass           string
	StorageClassParameters map[string]string
	DefaultRegion          string
}

// ID returns the backing GCE persistent disk name ("" when the volume is not
// GCE-backed).
func (key *pvKey) ID() string {
	return key.ProviderID
}

// GetStorageClass returns the volume's storage class name.
func (key *pvKey) GetStorageClass() string {
	return key.StorageClass
}
+
+func (gcp *GCP) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	providerID := ""
+	if pv.Spec.GCEPersistentDisk != nil {
+		providerID = pv.Spec.GCEPersistentDisk.PDName
+	}
+	return &pvKey{
+		ProviderID:             providerID,
+		Labels:                 pv.Labels,
+		StorageClass:           pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		DefaultRegion:          defaultRegion,
+	}
+}
+
+func (key *pvKey) Features() string {
+	// TODO: regional cluster pricing.
+	storageClass := key.StorageClassParameters["type"]
+	if storageClass == "pd-ssd" {
+		storageClass = "ssd"
+	} else if storageClass == "pd-standard" {
+		storageClass = "pdstandard"
+	}
+	replicationType := ""
+	if rt, ok := key.StorageClassParameters["replication-type"]; ok {
+		if rt == "regional-pd" {
+			replicationType = ",regional"
+		}
+	}
+	region, _ := util.GetRegion(key.Labels)
+	if region == "" {
+		region = key.DefaultRegion
+	}
+	return region + "," + storageClass + replicationType
+}
+
// gcpKey identifies a node for pricing lookups via its labels.
type gcpKey struct {
	Labels map[string]string
}

// GetKey maps node labels to the key information needed to retrieve pricing
// data for that node.
func (gcp *GCP) GetKey(labels map[string]string, n *v1.Node) models.Key {
	return &gcpKey{
		Labels: labels,
	}
}

// ID always returns ""; GCP node keys are identified by Features() instead.
func (gcp *gcpKey) ID() string {
	return ""
}

// GPUCount always returns 0 for GCP node keys; GPU information is conveyed
// through GPUType() and the Features() key suffix instead.
func (k *gcpKey) GPUCount() int {
	return 0
}
+
+func (gcp *gcpKey) GPUType() string {
+	if t, ok := gcp.Labels[GKE_GPU_TAG]; ok {
+		usageType := getUsageType(gcp.Labels)
+		log.Debugf("GPU of type: \"%s\" found", t)
+		return t + "," + usageType
+	}
+	return ""
+}
+
// parseGCPInstanceTypeLabel normalizes a GCP instance type label (e.g.
// "n1-standard-2") into the condensed family key used for pricing lookups
// (e.g. "n1standard"). Labels without a dash map to "unknown".
func parseGCPInstanceTypeLabel(it string) string {
	parts := strings.Split(it, "-")

	// GKE nodes are labeled with the GCP instance type, but tools like K3s
	// label nodes with values such as "k3s"; bail out before slicing so we
	// never index past the end of the split.
	if len(parts) < 2 {
		return "unknown"
	}

	family := strings.ToLower(parts[0] + parts[1])
	switch family {
	case "n1highmem", "n1highcpu":
		// Priced the same as n1standard. TODO: support n1ultrahighmem.
		return "n1standard"
	case "n2highmem", "n2highcpu":
		return "n2standard"
	case "e2highmem", "e2highcpu":
		return "e2standard"
	case "n2dhighmem", "n2dhighcpu":
		return "n2dstandard"
	}
	if strings.HasPrefix(family, "custom") {
		return "custom" // the suffix of custom machine types does not matter
	}
	return family
}
+
+// GetKey maps node labels to information needed to retrieve pricing data
+func (gcp *gcpKey) Features() string {
+	var instanceType string
+	it, _ := util.GetInstanceType(gcp.Labels)
+	if it == "" {
+		log.DedupedErrorf(1, "Missing or Unknown 'node.kubernetes.io/instance-type' node label")
+		instanceType = "unknown"
+	} else {
+		instanceType = parseGCPInstanceTypeLabel(it)
+	}
+
+	r, _ := util.GetRegion(gcp.Labels)
+	region := strings.ToLower(r)
+	usageType := getUsageType(gcp.Labels)
+
+	if _, ok := gcp.Labels[GKE_GPU_TAG]; ok {
+		return region + "," + instanceType + "," + usageType + "," + "gpu"
+	}
+
+	return region + "," + instanceType + "," + usageType
+}
+
// AllNodePricing returns the full cached GCP pricing map, guarded by the
// pricing-data read lock.
func (gcp *GCP) AllNodePricing() (interface{}, error) {
	gcp.DownloadPricingDataLock.RLock()
	defer gcp.DownloadPricingDataLock.RUnlock()
	return gcp.Pricing, nil
}
+
// getPricing looks up the cached pricing entry for a node key under the
// pricing-data read lock.
func (gcp *GCP) getPricing(key models.Key) (*GCPPricing, bool) {
	gcp.DownloadPricingDataLock.RLock()
	defer gcp.DownloadPricingDataLock.RUnlock()
	n, ok := gcp.Pricing[key.Features()]
	return n, ok
}
// isValidPricingKey reports whether the key was registered while parsing the
// pricing catalog, under the pricing-data read lock.
func (gcp *GCP) isValidPricingKey(key models.Key) bool {
	gcp.DownloadPricingDataLock.RLock()
	defer gcp.DownloadPricingDataLock.RUnlock()
	_, ok := gcp.ValidPricingKeys[key.Features()]
	return ok
}
+
+// NodePricing returns GCP pricing data for a single node
+func (gcp *GCP) NodePricing(key models.Key) (*models.Node, error) {
+	if n, ok := gcp.getPricing(key); ok {
+		log.Debugf("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
+		n.Node.BaseCPUPrice = gcp.BaseCPUPrice
+		return n.Node, nil
+	} else if ok := gcp.isValidPricingKey(key); ok {
+		err := gcp.DownloadPricingData()
+		if err != nil {
+			return nil, fmt.Errorf("Download pricing data failed: %s", err.Error())
+		}
+		if n, ok := gcp.getPricing(key); ok {
+			log.Debugf("Returning pricing for node %s: %+v from SKU %s", key, n.Node, n.Name)
+			n.Node.BaseCPUPrice = gcp.BaseCPUPrice
+			return n.Node, nil
+		}
+		log.Warnf("no pricing data found for %s: %s", key.Features(), key)
+		return nil, fmt.Errorf("Warning: no pricing data found for %s", key)
+	}
+	return nil, fmt.Errorf("Warning: no pricing data found for %s", key)
+}
+
// ServiceAccountStatus returns a status with an empty check list; no
// service-account checks are performed for GCP here.
func (gcp *GCP) ServiceAccountStatus() *models.ServiceAccountStatus {
	return &models.ServiceAccountStatus{
		Checks: []*models.ServiceAccountCheck{},
	}
}
+
// PricingSourceStatus returns an empty map; per-source pricing status is not
// tracked for GCP.
func (gcp *GCP) PricingSourceStatus() map[string]*models.PricingSource {
	return make(map[string]*models.PricingSource)
}
+
+func (gcp *GCP) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
+	class := strings.Split(instanceType, "-")[0]
+	return 1.0 - ((1.0 - sustainedUseDiscount(class, defaultDiscount, isPreemptible)) * (1.0 - negotiatedDiscount))
+}
+
+func (gcp *GCP) Regions() []string {
+
+	regionOverrides := env.GetRegionOverrideList()
+
+	if len(regionOverrides) > 0 {
+		log.Debugf("Overriding GCP regions with configured region list: %+v", regionOverrides)
+		return regionOverrides
+	}
+
+	return gcpRegions
+}
+
// sustainedUseDiscount returns the sustained-use discount fraction for a
// machine class. Preemptible nodes earn no discount; e2/f1/g1 classes are
// excluded, and n2/n2d classes receive a fixed 20%.
func sustainedUseDiscount(class string, defaultDiscount float64, isPreemptible bool) float64 {
	if isPreemptible {
		return 0.0
	}
	switch class {
	case "e2", "f1", "g1":
		return 0.0
	case "n2", "n2d":
		return 0.2
	default:
		return defaultDiscount
	}
}
+
+func ParseGCPProjectID(id string) string {
+	// gce://guestbook-12345/...
+	//  => guestbook-12345
+	match := gceRegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+	// Return empty string if an account could not be parsed from provided string
+	return ""
+}
+
+func getUsageType(labels map[string]string) string {
+	if t, ok := labels[GKEPreemptibleLabel]; ok && t == "true" {
+		return "preemptible"
+	} else if t, ok := labels[GKESpotLabel]; ok && t == "true" {
+		// https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms
+		return "preemptible"
+	} else if t, ok := labels[models.KarpenterCapacityTypeLabel]; ok && t == models.KarpenterCapacitySpotTypeValue {
+		return "preemptible"
+	}
+	return "ondemand"
+}
+
// PricingSourceSummary returns the pricing source summary for the provider.
// The summary represents what was _parsed_ from the pricing source, not
// everything that was _available_ in the pricing source.
func (gcp *GCP) PricingSourceSummary() interface{} {
	return gcp.Pricing
}

+ 369 - 0
pkg/cloud/gcp/provider_test.go

@@ -0,0 +1,369 @@
+package gcp
+
+import (
+	"bytes"
+	"io/ioutil"
+	"reflect"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+)
+
+// TestParseGCPInstanceTypeLabel checks instance-type label normalization,
+// including custom and unknown machine types.
+func TestParseGCPInstanceTypeLabel(t *testing.T) {
+	tests := map[string]string{
+		"n1-standard-2":        "n1standard",
+		"e2-medium":            "e2medium",
+		"k3s":                  "unknown",
+		"custom-n1-standard-2": "custom",
+		"n2d-highmem-8":        "n2dstandard",
+	}
+
+	for input, want := range tests {
+		if got := parseGCPInstanceTypeLabel(input); got != want {
+			t.Errorf("Input: %s, Expected: %s, Actual: %s", input, want, got)
+		}
+	}
+}
+
+// TestParseGCPProjectID checks project-ID extraction from provider-ID
+// strings, including malformed and empty inputs.
+func TestParseGCPProjectID(t *testing.T) {
+	tests := map[string]string{
+		"gce://guestbook-12345/...": "guestbook-12345",
+		"gce:/guestbook-12345/...":  "",
+		"asdfa":                     "",
+		"":                          "",
+	}
+
+	for input, want := range tests {
+		if got := ParseGCPProjectID(input); got != want {
+			t.Errorf("Input: %s, Expected: %s, Actual: %s", input, want, got)
+		}
+	}
+}
+
+// TestGetUsageType checks preemptible/ondemand classification for the GKE
+// preemptible, GKE spot, and Karpenter capacity-type labels.
+func TestGetUsageType(t *testing.T) {
+	cases := []struct {
+		labels map[string]string
+		want   string
+	}{
+		{labels: map[string]string{GKEPreemptibleLabel: "true"}, want: "preemptible"},
+		{labels: map[string]string{GKESpotLabel: "true"}, want: "preemptible"},
+		{labels: map[string]string{models.KarpenterCapacityTypeLabel: models.KarpenterCapacitySpotTypeValue}, want: "preemptible"},
+		{labels: map[string]string{"someotherlabel": "true"}, want: "ondemand"},
+		{labels: map[string]string{}, want: "ondemand"},
+	}
+
+	for _, tc := range cases {
+		if got := getUsageType(tc.labels); got != tc.want {
+			t.Errorf("Input: %v, Expected: %s, Actual: %s", tc.labels, tc.want, got)
+		}
+	}
+}
+
+// tests basic parsing of GCP pricing API responses
+// Load a reader object on a portion of a GCP api response
+// Confirm that the resting *GCP object contains the correctly parsed pricing info
+func TestParsePage(t *testing.T) {
+
+	// Three SKUs from the Compute Engine service: an A100 GPU, A2 RAM, and
+	// A2 CPU, all multi-regional across us-central1/us-east1/us-west1.
+	gcpSkuString := `
+	{
+		"skus": [
+			{
+				"name": "services/6F81-5844-456A/skus/039F-D0DA-4055",
+				"skuId": "039F-D0DA-4055",
+				"description": "Nvidia Tesla A100 GPU running in Americas",
+				"category": {
+				  "serviceDisplayName": "Compute Engine",
+				  "resourceFamily": "Compute",
+				  "resourceGroup": "GPU",
+				  "usageType": "OnDemand"
+				},
+				"serviceRegions": [
+				  "us-central1",
+				  "us-east1",
+				  "us-west1"
+				],
+				"pricingInfo": [
+				  {
+					"summary": "",
+					"pricingExpression": {
+					  "usageUnit": "h",
+					  "displayQuantity": 1,
+					  "tieredRates": [
+						{
+						  "startUsageAmount": 0,
+						  "unitPrice": {
+							"currencyCode": "USD",
+							"units": "2",
+							"nanos": 933908000
+						  }
+						}
+					  ],
+					  "usageUnitDescription": "hour",
+					  "baseUnit": "s",
+					  "baseUnitDescription": "second",
+					  "baseUnitConversionFactor": 3600
+					},
+					"currencyConversionRate": 1,
+					"effectiveTime": "2023-03-24T10:52:50.681Z"
+				  }
+				],
+				"serviceProviderName": "Google",
+				"geoTaxonomy": {
+				  "type": "MULTI_REGIONAL",
+				  "regions": [
+					"us-central1",
+					"us-east1",
+					"us-west1"
+				  ]
+				}
+			},
+			{
+				"name": "services/6F81-5844-456A/skus/2390-DCAF-DA38",
+				"skuId": "2390-DCAF-DA38",
+				"description": "A2 Instance Ram running in Americas",
+				"category": {
+				  "serviceDisplayName": "Compute Engine",
+				  "resourceFamily": "Compute",
+				  "resourceGroup": "RAM",
+				  "usageType": "OnDemand"
+				},
+				"serviceRegions": [
+				  "us-central1",
+				  "us-east1",
+				  "us-west1"
+				],
+				"pricingInfo": [
+				  {
+					"summary": "",
+					"pricingExpression": {
+					  "usageUnit": "GiBy.h",
+					  "displayQuantity": 1,
+					  "tieredRates": [
+						{
+						  "startUsageAmount": 0,
+						  "unitPrice": {
+							"currencyCode": "USD",
+							"units": "0",
+							"nanos": 4237000
+						  }
+						}
+					  ],
+					  "usageUnitDescription": "gibibyte hour",
+					  "baseUnit": "By.s",
+					  "baseUnitDescription": "byte second",
+					  "baseUnitConversionFactor": 3865470566400
+					},
+					"currencyConversionRate": 1,
+					"effectiveTime": "2023-03-24T10:52:50.681Z"
+				  }
+				],
+				"serviceProviderName": "Google",
+				"geoTaxonomy": {
+				  "type": "MULTI_REGIONAL",
+				  "regions": [
+					"us-central1",
+					"us-east1",
+					"us-west1"
+				  ]
+				}
+			},
+			{
+				"name": "services/6F81-5844-456A/skus/2922-40C5-B19F",
+				"skuId": "2922-40C5-B19F",
+				"description": "A2 Instance Core running in Americas",
+				"category": {
+				  "serviceDisplayName": "Compute Engine",
+				  "resourceFamily": "Compute",
+				  "resourceGroup": "CPU",
+				  "usageType": "OnDemand"
+				},
+				"serviceRegions": [
+				  "us-central1",
+				  "us-east1",
+				  "us-west1"
+				],
+				"pricingInfo": [
+				  {
+					"summary": "",
+					"pricingExpression": {
+					  "usageUnit": "h",
+					  "displayQuantity": 1,
+					  "tieredRates": [
+						{
+						  "startUsageAmount": 0,
+						  "unitPrice": {
+							"currencyCode": "USD",
+							"units": "0",
+							"nanos": 31611000
+						  }
+						}
+					  ],
+					  "usageUnitDescription": "hour",
+					  "baseUnit": "s",
+					  "baseUnitDescription": "second",
+					  "baseUnitConversionFactor": 3600
+					},
+					"currencyConversionRate": 1,
+					"effectiveTime": "2023-03-24T10:52:50.681Z"
+				  }
+				],
+				"serviceProviderName": "Google",
+				"geoTaxonomy": {
+				  "type": "MULTI_REGIONAL",
+				  "regions": [
+					"us-central1",
+					"us-east1",
+					"us-west1"
+				  ]
+				}
+			}
+		],
+			"nextPageToken": "APKCS1HVa0YpwgyTFbqbJ1eGwzKZmsPwLqzMZPTSNia5ck1Hc54Tx_Kz3oBxwSnRIdGVxXoSPdf-XlDpyNBf4QuxKcIEgtrQ1LDLWAgZowI0ns7HjrGta2s="
+		}
+	`
+	reader := ioutil.NopCloser(bytes.NewBufferString(gcpSkuString))
+
+	testGcp := &GCP{}
+
+	// One node key for an a2-highgpu-1g instance carrying a single
+	// nvidia-tesla-a100 in us-central1; no PV keys.
+	inputKeys := map[string]models.Key{
+		"us-central1,a2highgpu,ondemand,gpu": &gcpKey{
+			Labels: map[string]string{
+				"node.kubernetes.io/instance-type": "a2-highgpu-1g",
+				"cloud.google.com/gke-gpu":         "true",
+				"cloud.google.com/gke-accelerator": "nvidia-tesla-a100",
+				"topology.kubernetes.io/region":    "us-central1",
+			},
+		},
+	}
+
+	pvKeys := map[string]models.PVKey{}
+
+	actualPrices, token, err := testGcp.parsePage(reader, inputKeys, pvKeys)
+	if err != nil {
+		t.Fatalf("got error parsing page: %v", err)
+	}
+
+	const expectedToken = "APKCS1HVa0YpwgyTFbqbJ1eGwzKZmsPwLqzMZPTSNia5ck1Hc54Tx_Kz3oBxwSnRIdGVxXoSPdf-XlDpyNBf4QuxKcIEgtrQ1LDLWAgZowI0ns7HjrGta2s="
+	if token != expectedToken {
+		t.Fatalf("error parsing GCP next page token, parsed %s but expected %s", token, expectedToken)
+	}
+
+	expectedActualPrices := map[string]*GCPPricing{
+		"us-central1,a2highgpu,ondemand,gpu": {
+			Name:        "services/6F81-5844-456A/skus/039F-D0DA-4055",
+			SKUID:       "039F-D0DA-4055",
+			Description: "Nvidia Tesla A100 GPU running in Americas",
+			Category: &GCPResourceInfo{
+				ServiceDisplayName: "Compute Engine",
+				ResourceFamily:     "Compute",
+				ResourceGroup:      "GPU",
+				UsageType:          "OnDemand",
+			},
+			ServiceRegions: []string{"us-central1", "us-east1", "us-west1"},
+			PricingInfo: []*PricingInfo{
+				{
+					Summary: "",
+					PricingExpression: &PricingExpression{
+						UsageUnit:            "h",
+						UsageUnitDescription: "hour",
+						BaseUnit:             "s",
+						// NOTE(review): the fixture says 3600, but the expected
+						// parse result is 0 — presumably this field is not
+						// decoded by parsePage; confirm intent.
+						BaseUnitConversionFactor: 0,
+						DisplayQuantity:          1,
+						TieredRates: []*TieredRates{
+							{
+								StartUsageAmount: 0,
+								UnitPrice: &UnitPriceInfo{
+									CurrencyCode: "USD",
+									Units:        "2",
+									Nanos:        933908000,
+								},
+							},
+						},
+					},
+					CurrencyConversionRate: 1,
+					EffectiveTime:          "2023-03-24T10:52:50.681Z",
+				},
+			},
+			ServiceProviderName: "Google",
+			Node: &models.Node{
+				VCPUCost:         "0.031611",
+				RAMCost:          "0.004237",
+				UsesBaseCPUPrice: false,
+				GPU:              "1",
+				GPUName:          "nvidia-tesla-a100",
+				GPUCost:          "2.933908",
+			},
+		},
+		"us-central1,a2highgpu,ondemand": {
+			Node: &models.Node{
+				VCPUCost:         "0.031611",
+				RAMCost:          "0.004237",
+				UsesBaseCPUPrice: false,
+				UsageType:        "ondemand",
+			},
+		},
+	}
+
+	if !reflect.DeepEqual(actualPrices, expectedActualPrices) {
+		t.Fatalf("error parsing GCP prices. parsed %v but expected %v", actualPrices, expectedActualPrices)
+	}
+}

+ 438 - 0
pkg/cloud/provider/csvprovider.go

@@ -0,0 +1,438 @@
+package cloud
+
+import (
+	"encoding/csv"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/util"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/opencost/opencost/pkg/log"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/jszwec/csvutil"
+)
+
+// refreshMinutes is the interval between automatic pricing-data reloads.
+const refreshMinutes = 60
+
+// CSVProvider serves pricing data loaded from a CSV file (local path or
+// s3:// URI), falling back to the embedded CustomProvider for everything
+// it does not override.
+type CSVProvider struct {
+	*CustomProvider
+	CSVLocation             string            // local path or s3:// URI of the pricing CSV
+	Pricing                 map[string]*price // exact node prices keyed by "[region,]instanceID"
+	NodeClassPricing        map[string]float64 // running average price keyed by "region,instanceType,node"
+	NodeClassCount          map[string]float64 // number of rows folded into each class average
+	NodeMapField            string            // field path used to derive a node's instance ID
+	PricingPV               map[string]*price // PV prices keyed like Pricing
+	PVMapField              string            // field path used to derive a PV's instance ID
+	GPUClassPricing         map[string]*price // GPU prices keyed by lowercased instance ID
+	GPUMapFields            []string // Fields in a node's labels that represent the GPU class.
+	UsesRegion              bool     // true when any CSV row carried a Region value
+	DownloadPricingDataLock sync.RWMutex
+}
+// price is one row of the pricing CSV; csv tags match the header columns.
+type price struct {
+	EndTimestamp      string `csv:"EndTimestamp"`
+	InstanceID        string `csv:"InstanceID"`
+	Region            string `csv:"Region"`
+	AssetClass        string `csv:"AssetClass"` // "node", "pv", or "gpu"
+	InstanceIDField   string `csv:"InstanceIDField"` // field path used to extract the ID from cluster objects
+	InstanceType      string `csv:"InstanceType"`
+	MarketPriceHourly string `csv:"MarketPriceHourly"`
+	Version           string `csv:"Version"`
+}
+
+// GetCsv opens the pricing CSV at the given local filesystem path.
+func GetCsv(location string) (io.Reader, error) {
+	f, err := os.Open(location)
+	return f, err
+}
+
+// DownloadPricingData (re)loads pricing from the configured CSV location
+// (local path or s3:// URI), replacing the provider's in-memory pricing
+// tables, and reschedules itself to run again after refreshMinutes.
+//
+// Rows are bucketed by AssetClass: "pv", "node", or "gpu"; unrecognized
+// classes are treated as nodes. Node rows additionally feed a running
+// average price per "region,instanceType,node" class, used for class-based
+// fallback matching in NodePricing.
+func (c *CSVProvider) DownloadPricingData() error {
+	c.DownloadPricingDataLock.Lock()
+	defer time.AfterFunc(refreshMinutes*time.Minute, func() { c.DownloadPricingData() })
+	defer c.DownloadPricingDataLock.Unlock()
+	pricing := make(map[string]*price)
+	nodeclasspricing := make(map[string]float64)
+	nodeclasscount := make(map[string]float64)
+	pvpricing := make(map[string]*price)
+	gpupricing := make(map[string]*price)
+	c.GPUMapFields = make([]string, 0, 1)
+	// setPricing installs the parsed tables (possibly empty on error paths,
+	// which intentionally clears any stale data).
+	setPricing := func() {
+		c.Pricing = pricing
+		c.NodeClassPricing = nodeclasspricing
+		c.NodeClassCount = nodeclasscount
+		c.PricingPV = pvpricing
+		c.GPUClassPricing = gpupricing
+	}
+	header, err := csvutil.Header(price{}, "csv")
+	if err != nil {
+		return err
+	}
+	fieldsPerRecord := len(header)
+	var csvr io.Reader
+	var csverr error
+	if strings.HasPrefix(c.CSVLocation, "s3://") {
+		region := env.GetCSVRegion()
+		conf := aws.NewConfig().WithRegion(region).WithCredentialsChainVerboseErrors(true)
+		endpoint := env.GetCSVEndpoint()
+		if endpoint != "" {
+			conf = conf.WithEndpoint(endpoint)
+		}
+		s3Client := s3.New(session.New(conf))
+		bucketAndKey := strings.Split(strings.TrimPrefix(c.CSVLocation, "s3://"), "/")
+		if len(bucketAndKey) == 2 {
+			out, err := s3Client.GetObject(&s3.GetObjectInput{
+				Bucket: aws.String(bucketAndKey[0]),
+				Key:    aws.String(bucketAndKey[1]),
+			})
+			csverr = err
+			// Only dereference the response on success: out may be nil when
+			// GetObject returns an error, which previously panicked here.
+			if err == nil {
+				csvr = out.Body
+			}
+		} else {
+			setPricing()
+			return fmt.Errorf("Invalid s3 URI: %s", c.CSVLocation)
+		}
+	} else {
+		csvr, csverr = GetCsv(c.CSVLocation)
+	}
+	if csverr != nil {
+		// A missing/unreadable CSV is non-fatal: publish empty tables and retry
+		// on the next scheduled refresh.
+		log.Infof("Error reading csv at %s: %s", c.CSVLocation, csverr)
+		setPricing()
+		return nil
+	}
+	csvReader := csv.NewReader(csvr)
+	csvReader.Comma = ','
+	csvReader.FieldsPerRecord = fieldsPerRecord
+
+	dec, err := csvutil.NewDecoder(csvReader, header...)
+	if err != nil {
+		setPricing()
+		return err
+	}
+	for {
+		p := price{}
+		err := dec.Decode(&p)
+		csvParseErr, isCsvParseErr := err.(*csv.ParseError)
+		if err == io.EOF {
+			break
+		} else if err == csvutil.ErrFieldCount || (isCsvParseErr && csvParseErr.Err == csv.ErrFieldCount) {
+			// Lines with the wrong column count: tolerate single-field comment
+			// lines starting with '#', skip everything else.
+			rec := dec.Record()
+			if len(rec) != 1 {
+				log.Infof("Expected %d price info fields but received %d: %s", fieldsPerRecord, len(rec), rec)
+				continue
+			}
+			if strings.HasPrefix(rec[0], "#") {
+				continue
+			}
+			log.Infof("skipping non-CSV line: %s", rec)
+			continue
+		} else if err != nil {
+			log.Infof("Error during spot info decode: %+v", err)
+			continue
+		}
+		log.Infof("Found price info %+v", p)
+		key := strings.ToLower(p.InstanceID)
+		if p.Region != "" { // strip the casing from region and add to key.
+			key = fmt.Sprintf("%s,%s", strings.ToLower(p.Region), strings.ToLower(p.InstanceID))
+			c.UsesRegion = true
+		}
+		if p.AssetClass == "pv" {
+			pvpricing[key] = &p
+			c.PVMapField = p.InstanceIDField
+		} else if p.AssetClass == "node" {
+			pricing[key] = &p
+			classKey := p.Region + "," + p.InstanceType + "," + p.AssetClass
+			cost, err := strconv.ParseFloat(p.MarketPriceHourly, 64)
+			if err != nil {
+				// Previously this branch was empty, silently dropping the row
+				// from the class average.
+				log.Infof("Error parsing MarketPriceHourly `%s` for %s: %s", p.MarketPriceHourly, key, err)
+			} else {
+				if _, ok := nodeclasspricing[classKey]; ok {
+					// Fold this row into the running average. BUGFIX: the
+					// averaged price was previously written into
+					// nodeclasscount instead of nodeclasspricing, so the
+					// class price never updated and the count was corrupted.
+					oldPrice := nodeclasspricing[classKey]
+					oldCount := nodeclasscount[classKey]
+					nodeclasspricing[classKey] = ((oldPrice * oldCount) + cost) / (oldCount + 1.0)
+					nodeclasscount[classKey]++
+				} else {
+					nodeclasspricing[classKey] = cost
+					nodeclasscount[classKey] = 1
+				}
+			}
+
+			c.NodeMapField = p.InstanceIDField
+		} else if p.AssetClass == "gpu" {
+			gpupricing[key] = &p
+			c.GPUMapFields = append(c.GPUMapFields, strings.ToLower(p.InstanceIDField))
+		} else {
+			log.Infof("Unrecognized asset class %s, defaulting to node", p.AssetClass)
+			pricing[key] = &p
+			c.NodeMapField = p.InstanceIDField
+		}
+	}
+	if len(pricing) > 0 {
+		setPricing()
+	} else {
+		// NOTE(review): a CSV containing only PV/GPU rows never commits its
+		// tables here — confirm whether that is intended.
+		log.DedupedWarningf(5, "No data received from csv at %s", c.CSVLocation)
+	}
+	return nil
+}
+
+// csvKey identifies a node for CSV pricing lookups.
+type csvKey struct {
+	Labels     map[string]string // node labels used for feature/GPU matching
+	ProviderID string            // resolved instance ID (optionally region-prefixed)
+	GPULabel   []string          // label keys that may carry the GPU class
+	GPU        int64             // number of GPUs attached to the node
+}
+
+// Features returns the "region,instanceType,node" class key for this node.
+func (k *csvKey) Features() string {
+	instanceType, _ := util.GetInstanceType(k.Labels)
+	region, _ := util.GetRegion(k.Labels)
+	return strings.Join([]string{region, instanceType, "node"}, ",")
+}
+
+// GPUCount returns the number of GPUs attached to the node.
+func (k *csvKey) GPUCount() int {
+	return int(k.GPU)
+}
+
+// GPUType returns the value of the first configured GPU mapping label
+// present on the node, or "" when none match.
+func (k *csvKey) GPUType() string {
+	for _, label := range k.GPULabel {
+		if gpuType, found := k.Labels[label]; found {
+			return gpuType
+		}
+	}
+	return ""
+}
+// ID returns the resolved provider instance ID for exact-match lookups.
+func (k *csvKey) ID() string {
+	return k.ProviderID
+}
+
+// NodePricing resolves the price of the node identified by key, matching in
+// order of decreasing specificity: exact provider ID (possibly region
+// qualified), provider ID without the region, and finally a class match on
+// the node's "region,instanceType,node" features. Any GPUs present add the
+// per-GPU class price on top of the node cost. BUGFIX: the fallbacks
+// previously ran unconditionally, so a class match silently overwrote an
+// exact provider-ID match (and logged "Unable to find provider ID" even
+// when it had been found); each fallback now runs only when the more
+// specific match failed.
+func (c *CSVProvider) NodePricing(key models.Key) (*models.Node, error) {
+	c.DownloadPricingDataLock.RLock()
+	defer c.DownloadPricingDataLock.RUnlock()
+	var node *models.Node
+	if p, ok := c.Pricing[key.ID()]; ok {
+		node = &models.Node{
+			Cost:        p.MarketPriceHourly,
+			PricingType: models.CsvExact,
+		}
+	}
+	if node == nil {
+		s := strings.Split(key.ID(), ",") // Try without a region to be sure
+		if len(s) == 2 {
+			if p, ok := c.Pricing[s[1]]; ok {
+				node = &models.Node{
+					Cost:        p.MarketPriceHourly,
+					PricingType: models.CsvExact,
+				}
+			}
+		}
+	}
+	if node == nil {
+		classKey := key.Features() // Use node attributes to try and do a class match
+		if cost, ok := c.NodeClassPricing[classKey]; ok {
+			log.Infof("Unable to find provider ID `%s`, using features:`%s`", key.ID(), key.Features())
+			node = &models.Node{
+				Cost:        fmt.Sprintf("%f", cost),
+				PricingType: models.CsvClass,
+			}
+		}
+	}
+
+	if node != nil {
+		// Add per-GPU pricing on top of the node's base cost.
+		if t := key.GPUType(); t != "" {
+			t = strings.ToLower(t)
+			count := key.GPUCount()
+			node.GPU = strconv.Itoa(count)
+			hourly := 0.0
+			if p, ok := c.GPUClassPricing[t]; ok {
+				var err error
+				hourly, err = strconv.ParseFloat(p.MarketPriceHourly, 64)
+				if err != nil {
+					log.Errorf("Unable to parse %s as float", p.MarketPriceHourly)
+				}
+			}
+			totalCost := hourly * float64(count)
+			node.GPUCost = fmt.Sprintf("%f", totalCost)
+			nc, err := strconv.ParseFloat(node.Cost, 64)
+			if err != nil {
+				log.Errorf("Unable to parse %s as float", node.Cost)
+			}
+			node.Cost = fmt.Sprintf("%f", nc+totalCost)
+		}
+		return node, nil
+	} else {
+		return nil, fmt.Errorf("Unable to find Node matching `%s`:`%s`", key.ID(), key.Features())
+	}
+}
+
+// NodeValueFromMapField resolves a node's instance-ID value from the
+// dotted field path m (e.g. "spec.providerID", "metadata.name",
+// "metadata.labels.<key>"), prefixed with "<region>," when useRegion is
+// set. Unsupported paths log an error and return "".
+func NodeValueFromMapField(m string, n *v1.Node, useRegion bool) string {
+	mf := strings.Split(m, ".")
+	toReturn := ""
+	if useRegion {
+		if region, ok := util.GetRegion(n.Labels); ok {
+			toReturn = region + ","
+		} else {
+			log.Errorf("Getting region based on labels failed")
+		}
+	}
+	if len(mf) == 2 && mf[0] == "spec" && mf[1] == "providerID" {
+		// Prefer the regex capture group (group 2) when the provider ID matches.
+		for matchNum, group := range provIdRx.FindStringSubmatch(n.Spec.ProviderID) {
+			if matchNum == 2 {
+				return toReturn + group
+			}
+		}
+		// Azure provider IDs are lowercased without the scheme prefix.
+		if strings.HasPrefix(n.Spec.ProviderID, "azure://") {
+			vmOrScaleSet := strings.ToLower(strings.TrimPrefix(n.Spec.ProviderID, "azure://"))
+			return toReturn + vmOrScaleSet
+		}
+		return toReturn + n.Spec.ProviderID
+	} else if len(mf) > 1 && mf[0] == "metadata" {
+		if mf[1] == "name" {
+			return toReturn + n.Name
+		} else if mf[1] == "labels" {
+			lkey := strings.Join(mf[2:len(mf)], ".")
+			return toReturn + n.Labels[lkey]
+		} else if mf[1] == "annotations" {
+			// NOTE(review): annotation key segments are joined with "" while
+			// label segments are joined with "." — confirm this asymmetry is
+			// intended; annotation keys containing dots will not resolve.
+			akey := strings.Join(mf[2:len(mf)], "")
+			return toReturn + n.Annotations[akey]
+		} else {
+			log.Errorf("Unsupported InstanceIDField %s in CSV For Node", m)
+			return ""
+		}
+	} else {
+		log.Errorf("Unsupported InstanceIDField %s in CSV For Node", m)
+		return ""
+	}
+}
+
+// PVValueFromMapField resolves a persistent volume's instance-ID value from
+// the dotted field path m (metadata.name/labels/annotations,
+// spec.capacity.storage, or spec.storageClassName). Unsupported paths log
+// and return "".
+func PVValueFromMapField(m string, n *v1.PersistentVolume) string {
+	mf := strings.Split(m, ".")
+	if len(mf) > 1 && mf[0] == "metadata" {
+		if mf[1] == "name" {
+			return n.Name
+		} else if mf[1] == "labels" {
+			// NOTE(review): label key segments here are joined with "" (unlike
+			// NodeValueFromMapField, which joins with ".") — confirm intended.
+			lkey := strings.Join(mf[2:len(mf)], "")
+			return n.Labels[lkey]
+		} else if mf[1] == "annotations" {
+			akey := strings.Join(mf[2:len(mf)], "")
+			return n.Annotations[akey]
+		} else {
+			log.Errorf("Unsupported InstanceIDField %s in CSV For PV", m)
+			return ""
+		}
+	} else if len(mf) > 2 && mf[0] == "spec" {
+		if mf[1] == "capacity" && mf[2] == "storage" {
+			skey := n.Spec.Capacity["storage"]
+			return skey.String()
+		} else {
+			// NOTE(review): uses log.Infof("[ERROR] ...") where sibling
+			// branches use log.Errorf — likely a leftover inconsistency.
+			log.Infof("[ERROR] Unsupported InstanceIDField %s in CSV For PV", m)
+			return ""
+		}
+	} else if len(mf) > 1 && mf[0] == "spec" {
+		if mf[1] == "storageClassName" {
+			return n.Spec.StorageClassName
+		} else {
+			log.Infof("[ERROR] Unsupported InstanceIDField %s in CSV For PV", m)
+			return ""
+		}
+	} else {
+		log.Errorf("Unsupported InstanceIDField %s in CSV For PV", m)
+		return ""
+	}
+}
+
+// GetKey builds the pricing lookup key for node n, resolving its provider
+// ID via the configured NodeMapField and counting attached NVIDIA GPUs.
+func (c *CSVProvider) GetKey(l map[string]string, n *v1.Node) models.Key {
+	var gpuCount int64
+	if qty, ok := n.Status.Capacity["nvidia.com/gpu"]; ok { // TODO: support non-nvidia GPUs
+		gpuCount = qty.Value()
+	}
+	return &csvKey{
+		ProviderID: NodeValueFromMapField(c.NodeMapField, n, c.UsesRegion),
+		Labels:     l,
+		GPULabel:   c.GPUMapFields,
+		GPU:        gpuCount,
+	}
+}
+
+// csvPVKey identifies a persistent volume for CSV pricing lookups.
+type csvPVKey struct {
+	Labels                 map[string]string
+	ProviderID             string // resolved via PVMapField; used as the Features() lookup key
+	StorageClassName       string
+	StorageClassParameters map[string]string
+	Name                   string
+	DefaultRegion          string
+}
+
+// ID is unused for CSV PV keys; lookups go through Features().
+func (key *csvPVKey) ID() string {
+	return ""
+}
+
+// GetStorageClass returns the PV's storage class name.
+func (key *csvPVKey) GetStorageClass() string {
+	return key.StorageClassName
+}
+
+// Features returns the resolved provider ID, which keys PricingPV lookups.
+func (key *csvPVKey) Features() string {
+	return key.ProviderID
+}
+
+// GetPVKey builds the pricing lookup key for a persistent volume, resolving
+// its provider ID via the configured PVMapField.
+func (c *CSVProvider) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	return &csvPVKey{
+		Labels:                 pv.Labels,
+		ProviderID:             PVValueFromMapField(c.PVMapField, pv),
+		StorageClassName:       pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		Name:                   pv.Name,
+		DefaultRegion:          defaultRegion,
+	}
+}
+
+// PVPricing looks up the hourly price for a persistent volume by its
+// resolved provider ID; an empty PV is returned when no row matches.
+func (c *CSVProvider) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	c.DownloadPricingDataLock.RLock()
+	defer c.DownloadPricingDataLock.RUnlock()
+	if pricing, found := c.PricingPV[pvk.Features()]; found {
+		return &models.PV{
+			Cost: pricing.MarketPriceHourly,
+		}, nil
+	}
+	log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+	return &models.PV{}, nil
+}
+
+// ServiceAccountStatus reports no checks; CSV pricing needs no service account.
+func (c *CSVProvider) ServiceAccountStatus() *models.ServiceAccountStatus {
+	return &models.ServiceAccountStatus{
+		Checks: []*models.ServiceAccountCheck{},
+	}
+}
+
+// ClusterManagementPricing reports no management fee for CSV-priced clusters.
+func (*CSVProvider) ClusterManagementPricing() (string, float64, error) {
+	return "", 0.0, nil
+}
+
+// CombinedDiscountForNode stacks the default and negotiated discounts
+// multiplicatively; instance type and preemptibility are ignored for CSV
+// pricing.
+func (c *CSVProvider) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
+	remaining := (1.0 - defaultDiscount) * (1.0 - negotiatedDiscount)
+	return 1.0 - remaining
+}
+
+// Regions returns no regions; CSV pricing is not region-enumerable.
+func (c *CSVProvider) Regions() []string {
+	return []string{}
+}
+
+// PricingSourceSummary returns the node pricing table parsed from the CSV.
+func (c *CSVProvider) PricingSourceSummary() interface{} {
+	return c.Pricing
+}

+ 406 - 0
pkg/cloud/provider/customprovider.go

@@ -0,0 +1,406 @@
+package provider
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+// NodePrice holds per-resource hourly price strings for one pricing bucket.
+type NodePrice struct {
+	CPU string
+	RAM string
+	GPU string
+}
+
+// CustomProvider serves pricing from user-supplied custom pricing config
+// rather than a cloud billing API.
+type CustomProvider struct {
+	Clientset               clustercache.ClusterCache
+	Pricing                 map[string]*NodePrice // buckets: "default", "default,spot", "default,gpu"
+	SpotLabel               string // node label marking spot instances
+	SpotLabelValue          string
+	GPULabel                string // node label carrying the GPU type
+	GPULabelValue           string
+	ClusterRegion           string
+	ClusterAccountID        string
+	DownloadPricingDataLock sync.RWMutex
+	Config                  models.ProviderConfig
+}
+
+// volTypes is a bidirectional mapping between EBS usage types
+// (EBS:VolumeUsage.*) and storage class volume types (gp2, io1, ...),
+// used to translate storage classes into pricing keys and back.
+var volTypes = map[string]string{
+	"EBS:VolumeUsage.gp2":    "gp2",
+	"EBS:VolumeUsage.gp3":    "gp3",
+	"EBS:VolumeUsage":        "standard",
+	"EBS:VolumeUsage.sc1":    "sc1",
+	"EBS:VolumeP-IOPS.piops": "io1",
+	"EBS:VolumeUsage.st1":    "st1",
+	"EBS:VolumeUsage.piops":  "io1",
+	"gp2":                    "EBS:VolumeUsage.gp2",
+	"gp3":                    "EBS:VolumeUsage.gp3",
+	"standard":               "EBS:VolumeUsage",
+	"sc1":                    "EBS:VolumeUsage.sc1",
+	"io1":                    "EBS:VolumeUsage.piops",
+	"st1":                    "EBS:VolumeUsage.st1",
+}
+
+// customPVKey identifies a persistent volume for custom pricing lookups.
+type customPVKey struct {
+	Labels                 map[string]string
+	StorageClassParameters map[string]string
+	StorageClassName       string
+	Name                   string
+	DefaultRegion          string
+	ProviderID             string
+}
+
+// PricingSourceSummary returns the pricing source summary for the provider.
+// The summary represents what was _parsed_ from the pricing source, not what
+// was returned from the relevant API.
+func (cp *CustomProvider) PricingSourceSummary() interface{} {
+	return cp.Pricing
+}
+
+// customProviderKey pairs a node's labels with the provider's spot/GPU
+// label configuration so pricing features can be derived from them.
+type customProviderKey struct {
+	SpotLabel      string
+	SpotLabelValue string
+	GPULabel       string
+	GPULabelValue  string
+	Labels         map[string]string
+}
+
+// ClusterManagementPricing reports no management fee for custom pricing.
+func (*CustomProvider) ClusterManagementPricing() (string, float64, error) {
+	return "", 0.0, nil
+}
+
+// GetLocalStorageQuery returns no query; local storage is not priced here.
+func (*CustomProvider) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
+	return ""
+}
+
+// GetConfig returns the provider's custom pricing configuration.
+func (cp *CustomProvider) GetConfig() (*models.CustomPricing, error) {
+	return cp.Config.GetCustomPricingData()
+}
+
+// GetManagementPlatform reports no management platform for custom clusters.
+func (*CustomProvider) GetManagementPlatform() (string, error) {
+	return "", nil
+}
+
+// ApplyReservedInstancePricing is a no-op; custom pricing has no RI concept.
+func (*CustomProvider) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
+
+}
+
+// UpdateConfigFromConfigMap applies configmap key/values to the pricing config.
+func (cp *CustomProvider) UpdateConfigFromConfigMap(a map[string]string) (*models.CustomPricing, error) {
+	return cp.Config.UpdateFromMap(a)
+}
+
+// UpdateConfig decodes a JSON document of string-valued config fields from
+// r, applies it to the custom pricing config, and triggers a pricing
+// refresh on return. updateType is currently unused.
+func (cp *CustomProvider) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
+	// Parse config updates from reader
+	updates := make(map[string]interface{})
+	if err := json.NewDecoder(r).Decode(&updates); err != nil {
+		return nil, err
+	}
+
+	// Update Config
+	c, err := cp.Config.Update(func(c *models.CustomPricing) error {
+		for k, v := range updates {
+			kUpper := utils.ToTitle.String(k) // Just so we consistently supply / receive the same values, uppercase the first letter.
+			vstr, ok := v.(string)
+			if !ok {
+				return fmt.Errorf("type error while updating config for %s", kUpper)
+			}
+			if err := models.SetCustomPricingField(c, kUpper, vstr); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	defer cp.DownloadPricingData()
+	return c, nil
+}
+
+// ClusterInfo reports identifying metadata for the cluster: configured name
+// (when set), provider kind, region, account, and cluster ID.
+func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
+	conf, err := cp.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	info := map[string]string{
+		"provider": kubecost.CustomProvider,
+		"region":   cp.ClusterRegion,
+		"account":  cp.ClusterAccountID,
+		"id":       env.GetClusterID(),
+	}
+	if conf.ClusterName != "" {
+		info["name"] = conf.ClusterName
+	}
+	return info, nil
+}
+
+// GetAddresses is unsupported for custom pricing; returns nothing.
+func (*CustomProvider) GetAddresses() ([]byte, error) {
+	return nil, nil
+}
+
+// GetDisks is unsupported for custom pricing; returns nothing.
+func (*CustomProvider) GetDisks() ([]byte, error) {
+	return nil, nil
+}
+
+// GetOrphanedResources is not implemented for custom pricing.
+func (*CustomProvider) GetOrphanedResources() ([]models.OrphanedResource, error) {
+	return nil, errors.New("not implemented")
+}
+
+// AllNodePricing returns the full in-memory pricing table under a read lock.
+func (cp *CustomProvider) AllNodePricing() (interface{}, error) {
+	cp.DownloadPricingDataLock.RLock()
+	defer cp.DownloadPricingDataLock.RUnlock()
+
+	return cp.Pricing, nil
+}
+
+// NodePricing resolves pricing for the given node key, falling back to the
+// "default" bucket when no exact feature match exists. Nodes carrying a
+// GPU label are priced against the ",gpu" variant of the key.
+func (cp *CustomProvider) NodePricing(key models.Key) (*models.Node, error) {
+	cp.DownloadPricingDataLock.RLock()
+	defer cp.DownloadPricingDataLock.RUnlock()
+
+	k := key.Features()
+	var gpuCount string
+	if _, ok := cp.Pricing[k]; !ok {
+		// Default is saying that there is no pricing info for the cluster and we should fall back to the default values.
+		// An interesting case is if the default values weren't loaded.
+		k = "default"
+	}
+	if key.GPUType() != "" {
+		k += ",gpu"    // TODO: support multiple custom gpu types.
+		gpuCount = "1" // TODO: support more than one gpu.
+	}
+
+	node := &models.Node{
+		VCPUCost: "0.0",
+		RAMCost:  "0.0",
+		GPUCost:  "0.0",
+		GPU:      gpuCount,
+	}
+	if pricing, ok := cp.Pricing[k]; ok {
+		node.VCPUCost = pricing.CPU
+		node.RAMCost = pricing.RAM
+		node.GPUCost = pricing.GPU
+	} else {
+		log.Warnf("No pricing found for key=%s, setting values to 0", k)
+	}
+	return node, nil
+}
+
+// DownloadPricingData loads the default, spot and GPU price points from the
+// custom pricing config into the in-memory pricing table.
+func (cp *CustomProvider) DownloadPricingData() error {
+	cp.DownloadPricingDataLock.Lock()
+	defer cp.DownloadPricingDataLock.Unlock()
+
+	if cp.Pricing == nil {
+		cp.Pricing = make(map[string]*NodePrice)
+	}
+	p, err := cp.Config.GetCustomPricingData()
+	if err != nil {
+		return err
+	}
+	cp.SpotLabel = p.SpotLabel
+	cp.SpotLabelValue = p.SpotLabelValue
+	cp.GPULabel = p.GpuLabel
+	cp.GPULabelValue = p.GpuLabelValue
+	cp.Pricing["default"] = &NodePrice{CPU: p.CPU, RAM: p.RAM}
+	cp.Pricing["default,spot"] = &NodePrice{CPU: p.SpotCPU, RAM: p.SpotRAM}
+	cp.Pricing["default,gpu"] = &NodePrice{CPU: p.CPU, RAM: p.RAM, GPU: p.GPU}
+	return nil
+}
+
+// GetKey wraps a node's labels with the provider's spot/GPU label
+// configuration so pricing features can be derived from them.
+func (cp *CustomProvider) GetKey(labels map[string]string, n *v1.Node) models.Key {
+	return &customProviderKey{
+		Labels:         labels,
+		SpotLabel:      cp.SpotLabel,
+		SpotLabelValue: cp.SpotLabelValue,
+		GPULabel:       cp.GPULabel,
+		GPULabelValue:  cp.GPULabelValue,
+	}
+}
+
+// ExternalAllocations represents tagged assets outside the scope of kubernetes.
+// "start" and "end" are dates of the format YYYY-MM-DD
+// "aggregator" is the tag used to determine how to allocate those assets, ie namespace, pod, etc.
+// Not implemented for the custom provider; always returns nothing.
+func (*CustomProvider) ExternalAllocations(start string, end string, aggregator []string, filterType string, filterValue string, crossCluster bool) ([]*models.OutOfClusterAllocation, error) {
+	return nil, nil // TODO: transform the QuerySQL lines into the new OutOfClusterAllocation Struct
+}
+
+// QuerySQL is unsupported for custom pricing; returns nothing.
+func (*CustomProvider) QuerySQL(query string) ([]byte, error) {
+	return nil, nil
+}
+
+// PVPricing prices every persistent volume at the flat configured storage rate.
+func (cp *CustomProvider) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	cpricing, err := cp.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	pv := &models.PV{Cost: cpricing.Storage}
+	return pv, nil
+}
+
+// NetworkPricing parses the configured zone/region/internet egress rates
+// into a Network pricing struct; any unparseable rate is an error.
+func (cp *CustomProvider) NetworkPricing() (*models.Network, error) {
+	cpricing, err := cp.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	zoneEgress, err := strconv.ParseFloat(cpricing.ZoneNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	regionEgress, err := strconv.ParseFloat(cpricing.RegionNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+	internetEgress, err := strconv.ParseFloat(cpricing.InternetNetworkEgress, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return &models.Network{
+		ZoneNetworkEgressCost:     zoneEgress,
+		RegionNetworkEgressCost:   regionEgress,
+		InternetNetworkEgressCost: internetEgress,
+	}, nil
+}
+
+func (cp *CustomProvider) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	cpricing, err := cp.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	fffrc, err := strconv.ParseFloat(cpricing.FirstFiveForwardingRulesCost, 64)
+	if err != nil {
+		return nil, err
+	}
+	afrc, err := strconv.ParseFloat(cpricing.AdditionalForwardingRuleCost, 64)
+	if err != nil {
+		return nil, err
+	}
+	lbidc, err := strconv.ParseFloat(cpricing.LBIngressDataCost, 64)
+	if err != nil {
+		return nil, err
+	}
+	var totalCost float64
+	numForwardingRules := 1.0 // hard-code at 1 for now
+	dataIngressGB := 0.0      // hard-code at 0 for now
+
+	if numForwardingRules < 5 {
+		totalCost = fffrc*numForwardingRules + lbidc*dataIngressGB
+	} else {
+		totalCost = fffrc*5 + afrc*(numForwardingRules-5) + lbidc*dataIngressGB
+	}
+	return &models.LoadBalancer{
+		Cost: totalCost,
+	}, nil
+}
+
+// GetPVKey constructs the pricing key for a persistent volume from its
+// labels, storage class, parameters, and the cluster default region.
+func (*CustomProvider) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	return &customPVKey{
+		Labels:                 pv.Labels,
+		StorageClassName:       pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		Name:                   pv.Name, // previously unset, so Features() logged an empty volume name
+		DefaultRegion:          defaultRegion,
+	}
+}
+
+// ID returns the PV's provider ID (empty unless explicitly populated).
+func (key *customPVKey) ID() string {
+	return key.ProviderID
+}
+
+// GetStorageClass returns the PV's storage class name.
+func (key *customPVKey) GetStorageClass() string {
+	return key.StorageClassName
+}
+
+// Features returns a comma separated "region,usageType" key for a given PV,
+// translating the storage-class volume type (e.g. gp2) to its usage-type
+// form (EBS:VolumeUsage.gp2) via the volTypes table.
+// (@pokom): This was imported from aws which caused a cyclical dependency. This _should_ be refactored to be specific to a custom pvkey
+func (key *customPVKey) Features() string {
+	storageClass := key.StorageClassParameters["type"]
+	if storageClass == "standard" {
+		storageClass = "gp2"
+	}
+	// Storage class names are generally EBS volume types (gp2)
+	// Keys in Pricing are based on UsageTypes (EBS:VolumeType.gp2)
+	// Converts between the 2
+	region, found := util.GetRegion(key.Labels)
+	if !found {
+		region = key.DefaultRegion
+	}
+	class, found := volTypes[storageClass]
+	if !found {
+		log.Debugf("No voltype mapping for %s's storageClass: %s", key.Name, storageClass)
+	}
+	return region + "," + class
+}
+
+// GPUCount always reports zero; custom keys do not track GPU counts.
+func (k *customProviderKey) GPUCount() int {
+	return 0
+}
+
+// GPUType returns the value of the configured GPU label, or "" when absent.
+func (cpk *customProviderKey) GPUType() string {
+	if gpuType, ok := cpk.Labels[cpk.GPULabel]; ok {
+		return gpuType
+	}
+	return ""
+}
+
+// ID is unused for custom provider keys; lookups go through Features().
+func (cpk *customProviderKey) ID() string {
+	return ""
+}
+
+// Features maps a node to the spot or default pricing bucket based on the
+// configured spot label/value pair.
+func (cpk *customProviderKey) Features() string {
+	labelValue := cpk.Labels[cpk.SpotLabel]
+	if labelValue != "" && labelValue == cpk.SpotLabelValue {
+		return "default,spot"
+	}
+	return "default" // TODO: multiple custom pricing support.
+}
+
+// ServiceAccountStatus reports no checks; no service account is required.
+func (cp *CustomProvider) ServiceAccountStatus() *models.ServiceAccountStatus {
+	return &models.ServiceAccountStatus{
+		Checks: []*models.ServiceAccountCheck{},
+	}
+}
+
+// PricingSourceStatus reports no pricing sources for the custom provider.
+func (cp *CustomProvider) PricingSourceStatus() map[string]*models.PricingSource {
+	return make(map[string]*models.PricingSource)
+}
+
+// CombinedDiscountForNode stacks the default and negotiated discounts
+// multiplicatively; instance type and preemptibility are ignored.
+func (cp *CustomProvider) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
+	remaining := (1.0 - defaultDiscount) * (1.0 - negotiatedDiscount)
+	return 1.0 - remaining
+}
+
+// Regions returns no regions; custom pricing is not region-enumerable.
+func (cp *CustomProvider) Regions() []string {
+	return []string{}
+}

+ 342 - 0
pkg/cloud/provider/provider.go

@@ -0,0 +1,342 @@
+package provider
+
+import (
+	"errors"
+	"net"
+	"net/http"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/scaleway"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/util"
+
+	"cloud.google.com/go/compute/metadata"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/httputil"
+	"github.com/opencost/opencost/pkg/util/watcher"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+// ClusterName returns the name defined in cluster info, defaulting to the
+// CLUSTER_ID environment variable
+func ClusterName(p models.Provider) string {
+	info, err := p.ClusterInfo()
+	if err != nil {
+		// Cluster info unavailable — fall back to the configured cluster ID.
+		return env.GetClusterID()
+	}
+
+	name, ok := info["name"]
+	if !ok {
+		// Cluster info has no "name" key — same fallback.
+		return env.GetClusterID()
+	}
+
+	return name
+}
+
+// CustomPricesEnabled returns the boolean equivalent of the cloud provider's custom prices flag,
+// indicating whether or not the cluster is using custom pricing.
+func CustomPricesEnabled(p models.Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		return false
+	}
+	// TODO:CLEANUP what is going on with this?
+	// NOTE(review): this mutates the returned config as a side effect; if
+	// GetConfig hands back the cached pointer the default sticks — confirm.
+	if config.NegotiatedDiscount == "" {
+		config.NegotiatedDiscount = "0%"
+	}
+
+	return config.CustomPricesEnabled == "true"
+}
+
+// ConfigWatcherFor returns a new ConfigWatcher instance which watches changes to the "pricing-configs"
+// configmap
+func ConfigWatcherFor(p models.Provider) *watcher.ConfigMapWatcher {
+	return &watcher.ConfigMapWatcher{
+		ConfigMapName: env.GetPricingConfigmapName(),
+		// Push every configmap change straight into the provider's config.
+		WatchFunc: func(name string, data map[string]string) error {
+			_, err := p.UpdateConfigFromConfigMap(data)
+			return err
+		},
+	}
+}
+
+// AllocateIdleByDefault returns true if the application settings specify to allocate idle by default
+func AllocateIdleByDefault(p models.Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		// Config unavailable — default to not allocating idle.
+		return false
+	}
+
+	return config.DefaultIdle == "true"
+}
+
+// SharedNamespaces returns a list of names of shared namespaces, as defined in the application settings
+func SharedNamespaces(p models.Provider) []string {
+	namespaces := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return namespaces
+	}
+	if config.SharedNamespaces == "" {
+		return namespaces
+	}
+	// trim spaces so that "kube-system, kubecost" is equivalent to "kube-system,kubecost"
+	for _, ns := range strings.Split(config.SharedNamespaces, ",") {
+		namespaces = append(namespaces, strings.Trim(ns, " "))
+	}
+
+	return namespaces
+}
+
+// SharedLabels returns the configured set of shared labels as a parallel tuple of keys to values; e.g.
+// for app:kubecost,type:staging this returns (["app", "type"], ["kubecost", "staging"]) in order to
+// match the signature of the NewSharedResourceInfo
+func SharedLabels(p models.Provider) ([]string, []string) {
+	names := []string{}
+	values := []string{}
+
+	config, err := p.GetConfig()
+	if err != nil {
+		return names, values
+	}
+
+	if config.SharedLabelNames == "" || config.SharedLabelValues == "" {
+		return names, values
+	}
+
+	ks := strings.Split(config.SharedLabelNames, ",")
+	vs := strings.Split(config.SharedLabelValues, ",")
+	if len(ks) != len(vs) {
+		// Mis-configured settings: refuse to pair names with the wrong values.
+		log.Warnf("Shared labels have mis-matched lengths: %d names, %d values", len(ks), len(vs))
+		return names, values
+	}
+
+	for i := range ks {
+		// Trim spaces so "app, type" behaves like "app,type".
+		names = append(names, strings.Trim(ks[i], " "))
+		values = append(values, strings.Trim(vs[i], " "))
+	}
+
+	return names, values
+}
+
+// ShareTenancyCosts returns true if the application settings specify to share
+// tenancy costs by default.
+func ShareTenancyCosts(p models.Provider) bool {
+	config, err := p.GetConfig()
+	if err != nil {
+		// Config unavailable — default to not sharing tenancy costs.
+		return false
+	}
+
+	return config.ShareTenancyCosts == "true"
+}
+
+// NewProvider looks at the nodespec or provider metadata server to decide which provider to instantiate.
+// It returns a CustomProvider when no nodes are visible (valid in ETL readonly
+// mode) or when the detected provider is unsupported.
+func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.ConfigFileManager) (models.Provider, error) {
+	nodes := cache.GetAllNodes()
+	if len(nodes) == 0 {
+		log.Infof("Could not locate any nodes for cluster.") // valid in ETL readonly mode
+		return &CustomProvider{
+			Clientset: cache,
+			Config:    NewProviderConfig(config, "default.json"),
+		}, nil
+	}
+
+	cp := getClusterProperties(nodes[0])
+	// Build the ProviderConfig once and hand the same instance to the chosen
+	// provider: each NewProviderConfig call registers another change handler
+	// on the same underlying config file, so re-constructing it per branch
+	// would duplicate handlers and caches.
+	providerConfig := NewProviderConfig(config, cp.configFileName)
+	// If ClusterAccount is set apply it to the cluster properties
+	if providerConfig.customPricing != nil && providerConfig.customPricing.ClusterAccountID != "" {
+		cp.accountID = providerConfig.customPricing.ClusterAccountID
+	}
+
+	switch cp.provider {
+	case kubecost.CSVProvider:
+		log.Infof("Using CSV Provider with CSV at %s", env.GetCSVPath())
+		return &CSVProvider{
+			CSVLocation: env.GetCSVPath(),
+			CustomProvider: &CustomProvider{
+				Clientset:        cache,
+				ClusterRegion:    cp.region,
+				ClusterAccountID: cp.accountID,
+				Config:           providerConfig,
+			},
+		}, nil
+	case kubecost.GCPProvider:
+		log.Info("Found ProviderID starting with \"gce\", using GCP Provider")
+		if apiKey == "" {
+			return nil, errors.New("Supply a GCP Key to start getting data")
+		}
+		return &gcp.GCP{
+			Clientset:        cache,
+			APIKey:           apiKey,
+			Config:           providerConfig,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
+			ClusterProjectID: cp.projectID,
+			// Short dial/request timeouts: the metadata server is link-local
+			// and should answer quickly or not at all.
+			MetadataClient: metadata.NewClient(
+				&http.Client{
+					Transport: httputil.NewUserAgentTransport("kubecost", &http.Transport{
+						Dial: (&net.Dialer{
+							Timeout:   2 * time.Second,
+							KeepAlive: 30 * time.Second,
+						}).Dial,
+					}),
+					Timeout: 5 * time.Second,
+				}),
+		}, nil
+	case kubecost.AWSProvider:
+		log.Info("Found ProviderID starting with \"aws\", using AWS Provider")
+		return &aws.AWS{
+			Clientset:            cache,
+			Config:               providerConfig,
+			ClusterRegion:        cp.region,
+			ClusterAccountID:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
+		}, nil
+	case kubecost.AzureProvider:
+		log.Info("Found ProviderID starting with \"azure\", using Azure Provider")
+		return &azure.Azure{
+			Clientset:            cache,
+			Config:               providerConfig,
+			ClusterRegion:        cp.region,
+			ClusterAccountID:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
+		}, nil
+	case kubecost.AlibabaProvider:
+		log.Info("Found ProviderID starting with \"alibaba\", using Alibaba Cloud Provider")
+		return &alibaba.Alibaba{
+			Clientset:            cache,
+			Config:               providerConfig,
+			ClusterRegion:        cp.region,
+			ClusterAccountId:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
+		}, nil
+	case kubecost.ScalewayProvider:
+		log.Info("Found ProviderID starting with \"scaleway\", using Scaleway Provider")
+		return &scaleway.Scaleway{
+			Clientset:        cache,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
+			Config:           providerConfig,
+		}, nil
+
+	default:
+		log.Info("Unsupported provider, falling back to default")
+		return &CustomProvider{
+			Clientset:        cache,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
+			Config:           providerConfig,
+		}, nil
+	}
+}
+
+// clusterProperties captures what was inferred about the cluster from its
+// first node: the provider kind, which config file to use, and identifiers.
+type clusterProperties struct {
+	provider       string
+	configFileName string
+	region         string
+	accountID      string
+	projectID      string
+}
+
+// getClusterProperties inspects a single node's provider ID, labels, and
+// kubelet version to decide which cloud provider the cluster runs on,
+// defaulting to "DEFAULT"/default.json when nothing matches.
+func getClusterProperties(node *v1.Node) clusterProperties {
+	providerID := strings.ToLower(node.Spec.ProviderID)
+	region, _ := util.GetRegion(node.Labels)
+	cp := clusterProperties{
+		provider:       "DEFAULT",
+		configFileName: "default.json",
+		region:         region,
+		accountID:      "",
+		projectID:      "",
+	}
+	// The second conditional is mainly if you're running opencost outside of GCE, say in a local environment.
+	if metadata.OnGCE() || strings.HasPrefix(providerID, "gce") {
+		cp.provider = kubecost.GCPProvider
+		cp.configFileName = "gcp.json"
+		cp.projectID = gcp.ParseGCPProjectID(providerID)
+	} else if strings.HasPrefix(providerID, "aws") {
+		cp.provider = kubecost.AWSProvider
+		cp.configFileName = "aws.json"
+	} else if strings.HasPrefix(providerID, "azure") {
+		cp.provider = kubecost.AzureProvider
+		cp.configFileName = "azure.json"
+		cp.accountID = azure.ParseAzureSubscriptionID(providerID)
+	} else if strings.HasPrefix(providerID, "scaleway") { // the scaleway provider ID looks like scaleway://instance/<instance_id>
+		cp.provider = kubecost.ScalewayProvider
+		cp.configFileName = "scaleway.json"
+	} else if strings.Contains(node.Status.NodeInfo.KubeletVersion, "aliyun") { // provider ID is not prefix with any distinct keyword like other providers
+		cp.provider = kubecost.AlibabaProvider
+		cp.configFileName = "alibaba.json"
+	}
+	// The CSV provider env flag overrides whatever was detected above, but the
+	// detected configFileName/region/IDs are kept.
+	if env.IsUseCSVProvider() {
+		cp.provider = kubecost.CSVProvider
+	}
+
+	return cp
+}
+
+// Compiled once at package scope; raw string literals avoid double-escaping
+// (staticcheck S1007) and leave the patterns byte-identical at runtime.
+var (
+	// It's of the form aws:///us-east-2a/i-0fea4fd46592d050b and we want i-0fea4fd46592d050b, if it exists
+	providerAWSRegex = regexp.MustCompile(`aws://[^/]*/[^/]*/([^/]+)`)
+	// gce://guestbook-227502/us-central1-a/gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
+	//  => gke-niko-n1-standard-2-wljla-8df8e58a-hfy7
+	providerGCERegex = regexp.MustCompile(`gce://[^/]*/[^/]*/([^/]+)`)
+	// Capture "vol-0fc54c5e83b8d2b76" from "aws://us-east-2a/vol-0fc54c5e83b8d2b76"
+	persistentVolumeAWSRegex = regexp.MustCompile(`aws:/[^/]*/[^/]*/([^/]+)`)
+	// Capture "ad9d88195b52a47c89b5055120f28c58" from "ad9d88195b52a47c89b5055120f28c58-1037804914.us-east-2.elb.amazonaws.com"
+	loadBalancerAWSRegex = regexp.MustCompile(`^([^-]+)-.+amazonaws\.com$`)
+)
+
+// ParseID attempts to parse a ProviderId from a string based on formats from the various providers and
+// returns the string as is if it cannot find a match
+func ParseID(id string) string {
+	// match[0] is the full match; match[1] is the captured instance ID.
+	match := providerAWSRegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+
+	match = providerGCERegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+
+	// Return id for Azure Provider, CSV Provider and Custom Provider
+	return id
+}
+
+// ParsePVID attempts to parse a PV ProviderId from a string based on formats from the various providers and
+// returns the string as is if it cannot find a match
+func ParsePVID(id string) string {
+	// match[1] is the captured volume ID (e.g. "vol-0fc54c5e83b8d2b76").
+	match := persistentVolumeAWSRegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+
+	// Return id for GCP Provider, Azure Provider, CSV Provider and Custom Provider
+	return id
+}
+
+// ParseLBID attempts to parse a LB ProviderId from a string based on formats from the various providers and
+// returns the string as is if it cannot find a match
+func ParseLBID(id string) string {
+	// match[1] is the leading hash segment of the AWS ELB hostname.
+	match := loadBalancerAWSRegex.FindStringSubmatch(id)
+	if len(match) >= 2 {
+		return match[1]
+	}
+
+	// Return id for GCP Provider, Azure Provider, CSV Provider and Custom Provider
+	return id
+}

+ 290 - 0
pkg/cloud/provider/providerconfig.go

@@ -0,0 +1,290 @@
+package provider
+
+import (
+	"fmt"
+	"os"
+	gopath "path"
+	"strconv"
+	"sync"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/config"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// closedSourceConfigMount is the mount directory checked by
+// ReturnPricingFromConfigs for pricing defaults before DefaultPricing is used.
+const closedSourceConfigMount = "models/"
+
+// ProviderConfig is a utility class that provides a thread-safe configuration storage/cache for all Provider
+// implementations
+type ProviderConfig struct {
+	lock            *sync.Mutex               // guards customPricing
+	configManager   *config.ConfigFileManager // shared manager used to open the config file
+	configFile      *config.ConfigFile        // the backing pricing config file
+	customPricing   *models.CustomPricing     // cached pricing; nil until first load
+	watcherHandleID config.HandlerID          // handle of the change handler added in NewProviderConfig
+}
+
+// NewProviderConfig creates a new ConfigFile and returns the ProviderConfig
+func NewProviderConfig(configManager *config.ConfigFileManager, fileName string) *ProviderConfig {
+	configFile := configManager.ConfigFileAt(configPathFor(fileName))
+	pc := &ProviderConfig{
+		lock:          new(sync.Mutex),
+		configManager: configManager,
+		configFile:    configFile,
+		customPricing: nil, // loaded lazily on first access
+	}
+
+	// add the provider config func as handler for the config file changes
+	pc.watcherHandleID = configFile.AddChangeHandler(pc.onConfigFileUpdated)
+	return pc
+}
+
+// onConfigFileUpdated handles any time the config file contents are updated, created, or deleted
+func (pc *ProviderConfig) onConfigFileUpdated(changeType config.ChangeType, data []byte) {
+	// TODO: (bolt) Currently this has the side-effect of setting pc.customPricing twice when the update
+	// TODO: (bolt) is made from this ProviderConfig instance. We'll need to implement a way of identifying
+	// TODO: (bolt) when to ignore updates when the change and handler are the same source
+	log.Infof("CustomPricing Config Updated: %s", changeType)
+
+	// NOTE(review): deletion change types are not handled, so the previously
+	// cached pricing survives removal of the file — confirm that is intended.
+	switch changeType {
+	case config.ChangeTypeCreated:
+		fallthrough
+	case config.ChangeTypeModified:
+		pc.lock.Lock()
+		defer pc.lock.Unlock()
+
+		customPricing := new(models.CustomPricing)
+		err := json.Unmarshal(data, customPricing)
+		if err != nil {
+			// Unparseable file contents — cache the defaults instead.
+			log.Infof("Could not decode Custom Pricing file at path %s. Using default.", pc.configFile.Path())
+			customPricing = DefaultPricing()
+		}
+
+		pc.customPricing = customPricing
+		if pc.customPricing.SpotGPU == "" {
+			pc.customPricing.SpotGPU = DefaultPricing().SpotGPU // Migration for users without this value set by default.
+		}
+
+		if pc.customPricing.ShareTenancyCosts == "" {
+			pc.customPricing.ShareTenancyCosts = models.DefaultShareTenancyCost
+		}
+	}
+}
+
+// Non-ThreadSafe logic to load the config file if a cache does not exist. Flag to write
+// the default config if the config file doesn't exist. Callers must hold pc.lock.
+// Returns the cached/loaded pricing, or DefaultPricing() (uncached) with a
+// non-nil error on read/decode failures.
+func (pc *ProviderConfig) loadConfig(writeIfNotExists bool) (*models.CustomPricing, error) {
+	if pc.customPricing != nil {
+		// Cache hit — nothing to load.
+		return pc.customPricing, nil
+	}
+
+	exists, err := pc.configFile.Exists()
+	// File Error other than NotExists
+	if err != nil {
+		log.Infof("Custom Pricing file at path '%s' read error: '%s'", pc.configFile.Path(), err.Error())
+		return DefaultPricing(), err
+	}
+
+	// File Doesn't Exist
+	if !exists {
+		log.Infof("Could not find Custom Pricing file at path '%s'", pc.configFile.Path())
+		pc.customPricing = DefaultPricing()
+		// If config file is not present use the contents from mount models/ as pricing data
+		// in closed source rather than from DefaultPricing as first source of truth.
+		// since most images will already have a mount, to avail this facility user needs to delete the
+		// config file manually from configpath else default pricing still holds good.
+		fileName := filenameInConfigPath(pc.configFile.Path())
+		defaultPricing, err := ReturnPricingFromConfigs(fileName)
+		if err == nil {
+			pc.customPricing = defaultPricing
+		}
+
+		// Only write the file if flag enabled
+		if writeIfNotExists {
+			cj, err := json.Marshal(pc.customPricing)
+			if err != nil {
+				return pc.customPricing, err
+			}
+
+			err = pc.configFile.Write(cj)
+			if err != nil {
+				log.Infof("Could not write Custom Pricing file to path '%s'", pc.configFile.Path())
+				return pc.customPricing, err
+			}
+		}
+
+		return pc.customPricing, nil
+	}
+
+	// File Exists - Read all contents of file, unmarshal json
+	byteValue, err := pc.configFile.Read()
+	if err != nil {
+		log.Infof("Could not read Custom Pricing file at path %s", pc.configFile.Path())
+		// If read fails, we don't want to cache default, assuming that the file is valid
+		return DefaultPricing(), err
+	}
+
+	var customPricing models.CustomPricing
+	err = json.Unmarshal(byteValue, &customPricing)
+	if err != nil {
+		// Same as above: leave the cache empty so a later fix to the file is picked up.
+		log.Infof("Could not decode Custom Pricing file at path %s", pc.configFile.Path())
+		return DefaultPricing(), err
+	}
+
+	pc.customPricing = &customPricing
+	if pc.customPricing.SpotGPU == "" {
+		pc.customPricing.SpotGPU = DefaultPricing().SpotGPU // Migration for users without this value set by default.
+	}
+
+	if pc.customPricing.ShareTenancyCosts == "" {
+		pc.customPricing.ShareTenancyCosts = models.DefaultShareTenancyCost
+	}
+
+	return pc.customPricing, nil
+}
+
+// ThreadSafe method for retrieving the custom pricing config.
+// Writes a default config file to disk if none exists yet.
+func (pc *ProviderConfig) GetCustomPricingData() (*models.CustomPricing, error) {
+	pc.lock.Lock()
+	defer pc.lock.Unlock()
+
+	return pc.loadConfig(true)
+}
+
+// ConfigFileManager returns the ConfigFileManager instance used to manage the CustomPricing
+// configuration. In the event of a multi-provider setup, this instance should be used to
+// configure any other configuration providers.
+func (pc *ProviderConfig) ConfigFileManager() *config.ConfigFileManager {
+	return pc.configManager
+}
+
+// Update allows a call to manually update the configuration while maintaining proper thread-safety
+// for read/write methods. updateFunc mutates the current config in place; on
+// success the result is cached and written back to the config file.
+func (pc *ProviderConfig) Update(updateFunc func(*models.CustomPricing) error) (*models.CustomPricing, error) {
+	pc.lock.Lock()
+	defer pc.lock.Unlock()
+
+	// Load Config, set flag to _not_ write if failure to find file.
+	// We're about to write the updated values, so we don't want to double write.
+	// The load error is intentionally dropped: loadConfig always returns a
+	// usable config (defaults at worst).
+	c, _ := pc.loadConfig(false)
+
+	// Execute Update - On error, return the in-memory config but don't update cache
+	// explicitly
+	err := updateFunc(c)
+	if err != nil {
+		return c, err
+	}
+
+	// Cache Update (possible the ptr already references the cached value)
+	pc.customPricing = c
+
+	cj, err := json.Marshal(c)
+	if err != nil {
+		return c, err
+	}
+	err = pc.configFile.Write(cj)
+
+	if err != nil {
+		return c, err
+	}
+
+	return c, nil
+}
+
+// UpdateFromMap is a thread-safe update of the config using a string map.
+// Rate fields (CPU, SpotCPU, RAM, SpotRAM, GPU, Storage) are supplied as
+// monthly prices and converted to hourly by dividing by 730.
+func (pc *ProviderConfig) UpdateFromMap(a map[string]string) (*models.CustomPricing, error) {
+	// Run our Update() method using SetCustomPricingField logic
+	return pc.Update(func(c *models.CustomPricing) error {
+		for k, v := range a {
+			// Just so we consistently supply / receive the same values, uppercase the first letter.
+			kUpper := utils.ToTitle.String(k)
+			if kUpper == "CPU" || kUpper == "SpotCPU" || kUpper == "RAM" || kUpper == "SpotRAM" || kUpper == "GPU" || kUpper == "Storage" {
+				val, err := strconv.ParseFloat(v, 64)
+				if err != nil {
+					// Name the field that actually failed instead of always "CPU".
+					return fmt.Errorf("unable to parse %s from string to float: %s", kUpper, err.Error())
+				}
+				// Convert a monthly price into an hourly one (730 hours/month).
+				v = fmt.Sprintf("%f", val/730)
+			}
+
+			err := models.SetCustomPricingField(c, kUpper, v)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
+
+// DefaultPricing should be returned so we can do computation even if no file is supplied.
+// All rates are hourly unless noted otherwise.
+func DefaultPricing() *models.CustomPricing {
+	// https://cloud.google.com/compute/all-pricing
+	return &models.CustomPricing{
+		Provider:    "base",
+		Description: "Default prices based on GCP us-central1",
+
+		// E2 machine types in GCP us-central1 (Iowa)
+		CPU:     "0.021811", // per vCPU hour
+		SpotCPU: "0.006543", // per vCPU hour
+		RAM:     "0.002923", // per G(i?)B hour
+		SpotRAM: "0.000877", // per G(i?)B hour
+
+		// There are many GPU types. This serves as a reasonably-appropriate
+		// estimate within a broad range (0.35 up to 3.93)
+		GPU: "0.95", // per GPU hour
+		// Same story as above.
+		SpotGPU: "0.308", // per GPU hour
+
+		// This is the "Standard provision space" pricing in the "Disk pricing"
+		// table.
+		//
+		// (($.04 / month) per G(i?)B) *
+		//   month/730 hours =
+		//     0.00005479452054794521
+		Storage: "0.00005479452",
+
+		ZoneNetworkEgress:     "0.01",
+		RegionNetworkEgress:   "0.01",
+		InternetNetworkEgress: "0.12",
+		CustomPricesEnabled:   "false",
+		ShareTenancyCosts:     "true",
+	}
+}
+
+// configPathFor returns the configuration directory concatenated with a specific config file name
+func configPathFor(filename string) string {
+	// Default to /models/ when no config path env var is set.
+	path := env.GetConfigPathWithDefault("/models/")
+	return gopath.Join(path, filename)
+}
+
+// filenameInConfigPath gives the config file name in a fully qualified file name
+func filenameInConfigPath(fqfn string) string {
+	// gopath.Split yields (dir, file); only the trailing element is wanted.
+	_, fileName := gopath.Split(fqfn)
+	return fileName
+}
+
+// ReturnPricingFromConfigs is a safe function to return pricing from configs of opensource to the closed source
+// before defaulting it with the above function DefaultPricing. It reads
+// <closedSourceConfigMount>/<filename> and decodes it as CustomPricing,
+// returning an empty CustomPricing plus a descriptive error on any failure.
+func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
+	if _, err := os.Stat(closedSourceConfigMount); os.IsNotExist(err) {
+		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: %s likely running in provider config in opencost itself with err: %v", closedSourceConfigMount, err)
+	}
+	providerConfigFile := gopath.Join(closedSourceConfigMount, filename)
+	if _, err := os.Stat(providerConfigFile); err != nil {
+		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to find file %s with err: %v", providerConfigFile, err)
+	}
+	configFile, err := os.ReadFile(providerConfigFile)
+	if err != nil {
+		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to open file %s with err: %v", providerConfigFile, err)
+	}
+
+	defaultPricing := &models.CustomPricing{}
+	err = json.Unmarshal(configFile, defaultPricing)
+	if err != nil {
+		// This is a decode failure, not an open failure — say so.
+		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to parse file %s with err: %v", providerConfigFile, err)
+	}
+	return defaultPricing, nil
+}

+ 379 - 0
pkg/cloud/scaleway/provider.go

@@ -0,0 +1,379 @@
+package scaleway
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/utils"
+	"github.com/opencost/opencost/pkg/kubecost"
+
+	"github.com/opencost/opencost/pkg/clustercache"
+	"github.com/opencost/opencost/pkg/env"
+	"github.com/opencost/opencost/pkg/util"
+	"github.com/opencost/opencost/pkg/util/json"
+
+	"github.com/opencost/opencost/pkg/log"
+	v1 "k8s.io/api/core/v1"
+
+	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
+	"github.com/scaleway/scaleway-sdk-go/scw"
+)
+
+const (
+	// InstanceAPIPricing names the pricing source backed by the Scaleway instance API.
+	InstanceAPIPricing = "Instance API Pricing"
+)
+
+// ScalewayPricing holds per-zone pricing: server-type details keyed by
+// instance type name, plus a flat PV cost for that zone.
+type ScalewayPricing struct {
+	NodesInfos map[string]*instance.ServerType
+	PVCost     float64
+}
+
+// Scaleway is the models.Provider implementation for Scaleway clusters.
+type Scaleway struct {
+	Clientset               clustercache.ClusterCache
+	Config                  models.ProviderConfig
+	Pricing                 map[string]*ScalewayPricing // keyed by zone string
+	ClusterRegion           string
+	ClusterAccountID        string
+	DownloadPricingDataLock sync.RWMutex // guards Pricing
+}
+
+// PricingSourceSummary returns the pricing source summary for the provider.
+// The summary represents what was _parsed_ from the pricing source, not
+// everything that was _available_ in the pricing source.
+// NOTE(review): exposes the live Pricing map without taking
+// DownloadPricingDataLock — confirm callers treat it as read-only.
+func (c *Scaleway) PricingSourceSummary() interface{} {
+	return c.Pricing
+}
+// DownloadPricingData populates c.Pricing with per-zone server-type pricing
+// from the (unauthenticated) Scaleway instance API plus a hard-coded PV cost
+// table. It is a no-op once Pricing is non-empty.
+func (c *Scaleway) DownloadPricingData() error {
+	c.DownloadPricingDataLock.Lock()
+	defer c.DownloadPricingDataLock.Unlock()
+
+	// TODO wait for an official Pricing API from Scaleway
+	// Let's use a static map and an old API
+
+	if len(c.Pricing) != 0 {
+		// Already initialized
+		return nil
+	}
+
+	// PV pricing per AZ
+	pvPrice := map[string]float64{
+		"fr-par-1": 0.00011,
+		"fr-par-2": 0.00011,
+		"fr-par-3": 0.00032,
+		"nl-ams-1": 0.00008,
+		"nl-ams-2": 0.00008,
+		"pl-waw-1": 0.00011,
+	}
+
+	c.Pricing = make(map[string]*ScalewayPricing)
+
+	// The endpoint we are trying to hit does not have authentication
+	client, err := scw.NewClient(scw.WithoutAuth())
+	if err != nil {
+		return err
+	}
+
+	instanceAPI := instance.NewAPI(client)
+
+	for _, zone := range scw.AllZones {
+		resp, err := instanceAPI.ListServersTypes(&instance.ListServersTypesRequest{Zone: zone})
+		if err != nil {
+			// Best effort per zone: keep going so one bad zone doesn't block all pricing.
+			log.Errorf("Could not get Scaleway pricing data from instance API in zone %s: %+v", zone, err)
+			continue
+		}
+		c.Pricing[zone.String()] = &ScalewayPricing{
+			PVCost:     pvPrice[zone.String()],
+			NodesInfos: map[string]*instance.ServerType{},
+		}
+
+		for name, infos := range resp.Servers {
+			c.Pricing[zone.String()].NodesInfos[name] = infos
+		}
+	}
+
+	return nil
+}
+
+// AllNodePricing returns the entire per-zone pricing map under a read lock.
+func (c *Scaleway) AllNodePricing() (interface{}, error) {
+	c.DownloadPricingDataLock.RLock()
+	defer c.DownloadPricingDataLock.RUnlock()
+	return c.Pricing, nil
+}
+
+// scalewayKey identifies a node for pricing purposes via its labels.
+type scalewayKey struct {
+	Labels map[string]string
+}
+
+// Features returns "<zone>,<instance type>" derived from the node labels;
+// either part may be empty when the corresponding label is missing.
+func (k *scalewayKey) Features() string {
+	instanceType, _ := util.GetInstanceType(k.Labels)
+	zone, _ := util.GetZone(k.Labels)
+
+	return zone + "," + instanceType
+}
+
+// GPUCount always returns 0; GPU counts are not tracked for Scaleway keys.
+func (k *scalewayKey) GPUCount() int {
+	return 0
+}
+
+// GPUType returns the instance type when it is a GPU-bearing family
+// (RENDER-* or GPU-*), otherwise "".
+func (k *scalewayKey) GPUType() string {
+	instanceType, _ := util.GetInstanceType(k.Labels)
+	if strings.HasPrefix(instanceType, "RENDER") || strings.HasPrefix(instanceType, "GPU") {
+		return instanceType
+	}
+	return ""
+}
+// ID returns the empty string; Scaleway keys carry no provider ID.
+func (k *scalewayKey) ID() string {
+	return ""
+}
+
+// NodePricing returns pricing for the node identified by key, resolved from
+// the "<zone>,<instance type>" features string against the downloaded
+// per-zone pricing data.
+func (c *Scaleway) NodePricing(key models.Key) (*models.Node, error) {
+	c.DownloadPricingDataLock.RLock()
+	defer c.DownloadPricingDataLock.RUnlock()
+
+	// There is only the zone and the instance ID in the providerID, hence we must use the features
+	split := strings.Split(key.Features(), ",")
+	// Guard against a malformed features string before indexing split[1].
+	if len(split) >= 2 {
+		if pricing, ok := c.Pricing[split[0]]; ok {
+			if info, ok := pricing.NodesInfos[split[1]]; ok {
+				return &models.Node{
+					Cost:        fmt.Sprintf("%f", info.HourlyPrice),
+					PricingType: models.DefaultPrices,
+					VCPU:        fmt.Sprintf("%d", info.Ncpus),
+					RAM:         fmt.Sprintf("%d", info.RAM),
+					// This is tricky, as instances can have local volumes or not
+					Storage:      fmt.Sprintf("%d", info.PerVolumeConstraint.LSSD.MinSize),
+					GPU:          fmt.Sprintf("%d", info.Gpu),
+					InstanceType: split[1],
+					Region:       split[0],
+					GPUName:      key.GPUType(),
+				}, nil
+			}
+		}
+	}
+	// Error string: typo "thes" fixed and lowercased per Go convention.
+	return nil, fmt.Errorf("unable to find node pricing matching these features `%s`", key.Features())
+}
+
+// LoadBalancerPricing returns a flat hourly LB cost.
+func (c *Scaleway) LoadBalancerPricing() (*models.LoadBalancer, error) {
+	// Different LB types, lets take the cheaper for now, we can't get the type
+	// without a service specifying the type in the annotations
+	return &models.LoadBalancer{
+		Cost: 0.014,
+	}, nil
+}
+
+// NetworkPricing returns zero egress costs across all scopes.
+func (c *Scaleway) NetworkPricing() (*models.Network, error) {
+	// it's free baby!
+	return &models.Network{
+		ZoneNetworkEgressCost:     0,
+		RegionNetworkEgressCost:   0,
+		InternetNetworkEgressCost: 0,
+	}, nil
+}
+
+// GetKey builds a pricing key from node labels; the node object itself is unused.
+func (c *Scaleway) GetKey(l map[string]string, n *v1.Node) models.Key {
+	return &scalewayKey{
+		Labels: l,
+	}
+}
+
+// scalewayPVKey identifies a persistent volume for pricing by zone and
+// storage class.
+type scalewayPVKey struct {
+	Labels                 map[string]string
+	StorageClassName       string
+	StorageClassParameters map[string]string
+	Name                   string
+	Zone                   string
+}
+
+// ID returns the empty string; Scaleway PV keys carry no provider ID.
+func (key *scalewayPVKey) ID() string {
+	return ""
+}
+
+// GetStorageClass returns the storage class name recorded on this PV key.
+func (key *scalewayPVKey) GetStorageClass() string {
+	return key.StorageClassName
+}
+
+// Features returns the zone, which fully determines PV pricing on Scaleway.
+func (key *scalewayPVKey) Features() string {
+	// Only 1 type of PV for now
+	return key.Zone
+}
+
+// GetPVKey builds a PV pricing key for a Scaleway persistent volume, parsing
+// the zone from the CSI volume handle. defaultRegion is currently unused.
+func (c *Scaleway) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
+	// the csi volume handle is the form <az>/<volume-id>
+	zone := ""
+	// Guard against non-CSI volumes, which would otherwise panic on a nil
+	// pv.Spec.CSI dereference; such keys get an empty zone (and thus no pricing).
+	if pv.Spec.CSI != nil {
+		zone = strings.Split(pv.Spec.CSI.VolumeHandle, "/")[0]
+	}
+	return &scalewayPVKey{
+		Labels:                 pv.Labels,
+		StorageClassName:       pv.Spec.StorageClassName,
+		StorageClassParameters: parameters,
+		Name:                   pv.Name,
+		Zone:                   zone,
+	}
+}
+
+// PVPricing returns the per-zone PV cost for the key's zone, or an empty PV
+// (no error) when no pricing is known for that zone.
+func (c *Scaleway) PVPricing(pvk models.PVKey) (*models.PV, error) {
+	c.DownloadPricingDataLock.RLock()
+	defer c.DownloadPricingDataLock.RUnlock()
+
+	pricing, ok := c.Pricing[pvk.Features()]
+	if !ok {
+		log.Infof("Persistent Volume pricing not found for %s: %s", pvk.GetStorageClass(), pvk.Features())
+		return &models.PV{}, nil
+	}
+	return &models.PV{
+		Cost:  fmt.Sprintf("%f", pricing.PVCost),
+		Class: pvk.GetStorageClass(),
+	}, nil
+}
+
+// ServiceAccountStatus returns an empty status; Scaleway performs no
+// service-account checks.
+func (c *Scaleway) ServiceAccountStatus() *models.ServiceAccountStatus {
+	return &models.ServiceAccountStatus{
+		Checks: []*models.ServiceAccountCheck{},
+	}
+}
+
+// ClusterManagementPricing reports no cluster management fee.
+func (*Scaleway) ClusterManagementPricing() (string, float64, error) {
+	return "", 0.0, nil
+}
+
+// CombinedDiscountForNode combines the default and negotiated discounts
+// multiplicatively: 1 - (1-default)*(1-negotiated). instanceType and
+// isPreemptible are ignored.
+func (c *Scaleway) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {
+	return 1.0 - ((1.0 - defaultDiscount) * (1.0 - negotiatedDiscount))
+}
+
+// Regions returns the configured region override list if present, otherwise
+// all known Scaleway zones.
+func (c *Scaleway) Regions() []string {
+
+	regionOverrides := env.GetRegionOverrideList()
+
+	if len(regionOverrides) > 0 {
+		log.Debugf("Overriding Scaleway regions with configured region list: %+v", regionOverrides)
+		return regionOverrides
+	}
+
+	// These are zones but hey, its 2022
+	zones := []string{}
+	for _, zone := range scw.AllZones {
+		zones = append(zones, zone.String())
+	}
+	return zones
+}
+
+// ApplyReservedInstancePricing is a no-op; Scaleway has no reserved-instance pricing to apply.
+func (*Scaleway) ApplyReservedInstancePricing(map[string]*models.Node) {}
+
+// GetAddresses is unimplemented for Scaleway; it returns no data and no error.
+func (*Scaleway) GetAddresses() ([]byte, error) {
+	return nil, nil
+}
+
+// GetDisks is unimplemented for Scaleway; it returns no data and no error.
+func (*Scaleway) GetDisks() ([]byte, error) {
+	return nil, nil
+}
+
+// GetOrphanedResources is not implemented for Scaleway.
+func (*Scaleway) GetOrphanedResources() ([]models.OrphanedResource, error) {
+	return nil, errors.New("not implemented")
+}
+
+// ClusterInfo reports descriptive metadata about the current cluster. The
+// receiver is named c (matching the other methods) rather than scw, which
+// shadowed the imported scaleway-sdk package scw.
+func (c *Scaleway) ClusterInfo() (map[string]string, error) {
+	remoteEnabled := env.IsRemoteEnabled()
+
+	m := make(map[string]string)
+	// Placeholder name, replaced by the configured cluster name when set.
+	m["name"] = "Scaleway Cluster #1"
+	conf, err := c.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+	if conf.ClusterName != "" {
+		m["name"] = conf.ClusterName
+	}
+	m["provider"] = kubecost.ScalewayProvider
+	m["region"] = c.ClusterRegion
+	m["account"] = c.ClusterAccountID
+	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
+	m["id"] = env.GetClusterID()
+	return m, nil
+}
+
+// UpdateConfigFromConfigMap applies configmap key/value pairs to the stored config.
+func (c *Scaleway) UpdateConfigFromConfigMap(a map[string]string) (*models.CustomPricing, error) {
+	return c.Config.UpdateFromMap(a)
+}
+
+// UpdateConfig decodes a JSON document from r into string fields of the
+// custom pricing config and writes it back. updateType is currently unused.
+func (c *Scaleway) UpdateConfig(r io.Reader, updateType string) (*models.CustomPricing, error) {
+	// Refresh pricing after the config is written; the error from this
+	// deferred call is intentionally dropped (best-effort refresh).
+	defer c.DownloadPricingData()
+
+	// The closure parameter is named cfg so it no longer shadows the receiver c.
+	return c.Config.Update(func(cfg *models.CustomPricing) error {
+		a := make(map[string]interface{})
+		err := json.NewDecoder(r).Decode(&a)
+		if err != nil {
+			return err
+		}
+		for k, v := range a {
+			kUpper := utils.ToTitle.String(k) // Just so we consistently supply / receive the same values, uppercase the first letter.
+			vstr, ok := v.(string)
+			if ok {
+				err := models.SetCustomPricingField(cfg, kUpper, vstr)
+				if err != nil {
+					return err
+				}
+			} else {
+				return fmt.Errorf("type error while updating config for %s", kUpper)
+			}
+		}
+
+		if env.IsRemoteEnabled() {
+			err := utils.UpdateClusterMeta(env.GetClusterID(), cfg.ClusterName)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
+// GetConfig returns the custom pricing config with defaults applied:
+// Discount 0%, NegotiatedDiscount 0%, CurrencyCode EUR. The receiver is
+// named c (matching the other methods) rather than scw, which shadowed the
+// imported scaleway-sdk package scw.
+func (c *Scaleway) GetConfig() (*models.CustomPricing, error) {
+	conf, err := c.Config.GetCustomPricingData()
+	if err != nil {
+		return nil, err
+	}
+	if conf.Discount == "" {
+		conf.Discount = "0%"
+	}
+	if conf.NegotiatedDiscount == "" {
+		conf.NegotiatedDiscount = "0%"
+	}
+	if conf.CurrencyCode == "" {
+		conf.CurrencyCode = "EUR"
+	}
+	return conf, nil
+}
+
+// GetLocalStorageQuery returns the empty string: no local-storage query is
+// defined for Scaleway.
+func (*Scaleway) GetLocalStorageQuery(window, offset time.Duration, rate bool, used bool) string {
+	return ""
+}
+
+// GetManagementPlatform detects the Kubernetes management platform from the
+// first node's labels ("kapsule" or "kops"), returning "" when unknown. The
+// receiver is named c (matching the other methods) rather than scw, which
+// shadowed the imported scaleway-sdk package scw.
+func (c *Scaleway) GetManagementPlatform() (string, error) {
+	nodes := c.Clientset.GetAllNodes()
+
+	if len(nodes) > 0 {
+		n := nodes[0]
+		if _, ok := n.Labels["k8s.scaleway.com/kapsule"]; ok {
+			return "kapsule", nil
+		}
+		if _, ok := n.Labels["kops.k8s.io/instancegroup"]; ok {
+			return "kops", nil
+		}
+	}
+	return "", nil
+}
+
+// PricingSourceStatus reports the single Scaleway pricing source (the
+// instance API) as enabled and available.
+func (c *Scaleway) PricingSourceStatus() map[string]*models.PricingSource {
+	return map[string]*models.PricingSource{
+		InstanceAPIPricing: {
+			Name:      InstanceAPIPricing,
+			Enabled:   true,
+			Available: true,
+		},
+	}
+}