Преглед изворни кода

Merge branch 'develop' into parcs-accum-support-II

Alex Meijer пре 3 година
родитељ
комит
81dfc6e693
84 измењених фајлова са 5922 додато и 437 уклоњено
  1. 87 0
      pkg/cloud/alibaba/authorizer.go
  2. 130 0
      pkg/cloud/alibaba/boaconfiguration.go
  3. 289 0
      pkg/cloud/alibaba/boaconfiguration_test.go
  4. 127 0
      pkg/cloud/alibaba/boaquerier.go
  5. 19 18
      pkg/cloud/alibaba/provider.go
  6. 20 20
      pkg/cloud/alibaba/provider_test.go
  7. 233 0
      pkg/cloud/aws/athenaconfiguration.go
  8. 594 0
      pkg/cloud/aws/athenaconfiguration_test.go
  9. 208 0
      pkg/cloud/aws/athenaquerier.go
  10. 251 0
      pkg/cloud/aws/authorizer.go
  11. 67 0
      pkg/cloud/aws/authorizer_test.go
  12. 19 41
      pkg/cloud/aws/provider.go
  13. 6 6
      pkg/cloud/aws/provider_test.go
  14. 134 0
      pkg/cloud/aws/s3configuration.go
  15. 40 0
      pkg/cloud/aws/s3connection.go
  16. 387 0
      pkg/cloud/aws/s3connection_test.go
  17. 181 0
      pkg/cloud/aws/s3selectquerier.go
  18. 80 0
      pkg/cloud/azure/authorizer.go
  19. 322 0
      pkg/cloud/azure/billingexportparser.go
  20. 194 0
      pkg/cloud/azure/billingexportparser_test.go
  21. 0 0
      pkg/cloud/azure/pricesheetclient.go
  22. 0 0
      pkg/cloud/azure/pricesheetdownloader.go
  23. 0 0
      pkg/cloud/azure/pricesheetdownloader_test.go
  24. 3 1
      pkg/cloud/azure/provider.go
  25. 0 0
      pkg/cloud/azure/provider_test.go
  26. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/BOM.csv
  27. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv
  28. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv
  29. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/German.csv
  30. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv
  31. 2 0
      pkg/cloud/azure/resources/billingexports/headersets/YA.csv
  32. 2 0
      pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv
  33. 88 0
      pkg/cloud/azure/resources/billingexports/values/Template.csv
  34. 2 0
      pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv
  35. 170 0
      pkg/cloud/azure/storagebillingparser.go
  36. 204 0
      pkg/cloud/azure/storagebillingparser_test.go
  37. 179 0
      pkg/cloud/azure/storageconfiguration.go
  38. 446 0
      pkg/cloud/azure/storageconfiguration_test.go
  39. 77 0
      pkg/cloud/azure/storageconnection.go
  40. 53 0
      pkg/cloud/config/authorizer.go
  41. 37 0
      pkg/cloud/config/config.go
  42. 42 0
      pkg/cloud/connectionstatus.go
  43. 132 0
      pkg/cloud/gcp/authorizer.go
  44. 172 0
      pkg/cloud/gcp/bigqueryconfiguration.go
  45. 388 0
      pkg/cloud/gcp/bigqueryconfiguration_test.go
  46. 110 0
      pkg/cloud/gcp/bigqueryquerier.go
  47. 21 21
      pkg/cloud/gcp/provider.go
  48. 7 7
      pkg/cloud/gcp/provider_test.go
  49. 5 3
      pkg/cloud/models/models.go
  50. 10 5
      pkg/cloud/provider/csvprovider.go
  51. 63 8
      pkg/cloud/provider/customprovider.go
  52. 26 25
      pkg/cloud/provider/provider.go
  53. 2 3
      pkg/cloud/provider/providerconfig.go
  54. 7 7
      pkg/cloud/scaleway/provider.go
  55. 3 3
      pkg/cmd/agent/agent.go
  56. 7 7
      pkg/costmodel/aggregation.go
  57. 5 5
      pkg/costmodel/allocation_helpers.go
  58. 4 4
      pkg/costmodel/cluster.go
  59. 10 10
      pkg/costmodel/cluster_helpers.go
  60. 118 118
      pkg/costmodel/cluster_helpers_test.go
  61. 1 1
      pkg/costmodel/csv_export_test.go
  62. 1 1
      pkg/costmodel/intervals.go
  63. 44 44
      pkg/costmodel/intervals_test.go
  64. 9 7
      pkg/costmodel/router.go
  65. 1 1
      pkg/costmodel/sql.go
  66. 5 5
      pkg/env/costmodelenv.go
  67. 1 1
      pkg/env/env.go
  68. 2 2
      pkg/kubecost/allocation.go
  69. 1 1
      pkg/kubecost/allocationfilter.go
  70. 1 1
      pkg/kubecost/asset.go
  71. 1 1
      pkg/kubecost/asset_json.go
  72. 4 4
      pkg/kubecost/status.go
  73. 2 2
      pkg/kubecost/summaryallocation.go
  74. 1 1
      pkg/kubecost/summaryallocation_test.go
  75. 1 1
      pkg/log/profiler.go
  76. 5 3
      pkg/metrics/pvmetrics.go
  77. 2 2
      pkg/prom/query.go
  78. 0 1
      pkg/prom/ratelimitedclient_test.go
  79. 1 1
      pkg/prom/result.go
  80. 1 1
      pkg/util/buffer.go
  81. 4 3
      pkg/util/fileutil/fileutil.go
  82. 2 2
      pkg/util/mapper/mapper.go
  83. 1 1
      pkg/util/timeutil/timeutil.go
  84. 38 38
      test/cloud_test.go

+ 87 - 0
pkg/cloud/alibaba/authorizer.go

@@ -0,0 +1,87 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AccessKeyAuthorizerType is the type-discriminator value written by
// AccessKey.MarshalJSON and recognized by SelectAuthorizerByType.
const AccessKeyAuthorizerType = "AlibabaAccessKey"

// Authorizer provide *bssopenapi.Client for Alibaba cloud BOS for Billing related SDK calls.
// It extends the generic config.Authorizer with credential retrieval for the Alibaba SDK.
type Authorizer interface {
	config.Authorizer
	GetCredentials() (auth.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("alibaba: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey holds Alibaba credentials parsed from the service-key.json file.
type AccessKey struct {
	AccessKeyID     string `json:"accessKeyID"`
	AccessKeySecret string `json:"accessKeySecret"`
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKeyID"] = ak.AccessKeyID
+	fmap["accessKeySecret"] = ak.AccessKeySecret
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKeyID == "" {
+		return fmt.Errorf("AccessKey: missing Access key ID")
+	}
+	if ak.AccessKeySecret == "" {
+		return fmt.Errorf("AccessKey: missing Access Key secret")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKeyID != thatConfig.AccessKeyID {
+		return false
+	}
+	if ak.AccessKeySecret != thatConfig.AccessKeySecret {
+		return false
+	}
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		AccessKeyID:     ak.AccessKeyID,
+		AccessKeySecret: config.Redacted,
+	}
+}
+
+// GetCredentials creates a credentials object to authorize the use of service sdk calls
+func (ak *AccessKey) GetCredentials() (auth.Credential, error) {
+	err := ak.Validate()
+	if err != nil {
+		return nil, err
+	}
+	return &credentials.AccessKeyCredential{AccessKeyId: ak.AccessKeyID, AccessKeySecret: ak.AccessKeySecret}, nil
+}

+ 130 - 0
pkg/cloud/alibaba/boaconfiguration.go

@@ -0,0 +1,130 @@
+package alibaba
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// BOAConfiguration is the BSS open API configuration for Alibaba's Billing information.
type BOAConfiguration struct {
	Account    string     `json:"account"`    // Alibaba account ID (see ConvertAlibabaInfoToConfig)
	Region     string     `json:"region"`     // region queried for billing data
	Authorizer Authorizer `json:"authorizer"` // credential provider; concrete type selected in UnmarshalJSON
}
+
+func (bc *BOAConfiguration) Validate() error {
+	// Validate Authorizer
+	if bc.Authorizer == nil {
+		return fmt.Errorf("BOAConfiguration: missing authorizer")
+	}
+
+	err := bc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	// Validate base properties
+	if bc.Region == "" {
+		return fmt.Errorf("BOAConfiguration: missing region")
+	}
+
+	if bc.Account == "" {
+		return fmt.Errorf("BOAConfiguration: missing account")
+	}
+	return nil
+}
+
+func (bc *BOAConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BOAConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bc.Authorizer != nil {
+		if !bc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bc.Account != thatConfig.Account {
+		return false
+	}
+
+	if bc.Region != thatConfig.Region {
+		return false
+	}
+	return true
+}
+
+func (bc *BOAConfiguration) Sanitize() config.Config {
+	return &BOAConfiguration{
+		Account:    bc.Account,
+		Region:     bc.Region,
+		Authorizer: bc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (bc *BOAConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", bc.Account, bc.Region)
+}
+
+func (bc *BOAConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Account = account
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Region = region
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("BOAConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAlibabaInfoToConfig(acc AlibabaInfo) config.KeyedConfig {
+	if acc.IsEmpty() {
+		return nil
+	}
+	var configurer Authorizer
+
+	configurer = &AccessKey{
+		AccessKeyID:     acc.AlibabaServiceKeyName,
+		AccessKeySecret: acc.AlibabaServiceKeySecret,
+	}
+
+	return &BOAConfiguration{
+		Account:    acc.AlibabaAccountID,
+		Region:     acc.AlibabaClusterRegion,
+		Authorizer: configurer,
+	}
+}

+ 289 - 0
pkg/cloud/alibaba/boaconfiguration_test.go

@@ -0,0 +1,289 @@
+package alibaba
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBoaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BOAConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access key ID"),
+		},
+		"access secret invalid": {
+			config: BOAConfiguration{
+				Account: "Account Number",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID: "accessKeyId",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing Access Key secret"),
+		},
+		"missing authorizer": {
+			config: BOAConfiguration{
+				Account:    "Account Number",
+				Region:     "Region",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing authorizer"),
+		},
+		"missing Account": {
+			config: BOAConfiguration{
+				Account: "",
+				Region:  "Region",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing account"),
+		},
+		"missing Region": {
+			config: BOAConfiguration{
+				Account: "Account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "accessKeyID",
+					AccessKeySecret: "accessKeySecret",
+				},
+			},
+			expected: fmt.Errorf("BOAConfiguration: missing region"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BOAConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id2",
+					AccessKeySecret: "secret2",
+				},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different region": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &BOAConfiguration{
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+			right: &AccessKey{
+				AccessKeyID:     "id",
+				AccessKeySecret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBOAConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BOAConfiguration
+	}{
+		"Empty Config": {
+			config: BOAConfiguration{},
+		},
+		"AccessKey": {
+			config: BOAConfiguration{
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					AccessKeyID:     "id",
+					AccessKeySecret: "secret",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BOAConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 127 - 0
pkg/cloud/alibaba/boaquerier.go

@@ -0,0 +1,127 @@
+package alibaba
+
+import (
+	"fmt"
+	"strings"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/bssopenapi"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+)
+
const (
	boaIsNode    = "i-"    // isNode if prefix of instance_id is i-
	boaIsDisk    = "d-"    // isDisk if prefix is disk is d-
	boaIsNetwork = "piece" // usage unit of network resource in Alibaba is Piece
)

// BoaQuerier issues paginated billing queries against Alibaba's BSS Open API
// using the embedded BOAConfiguration for account, region, and credentials.
type BoaQuerier struct {
	BOAConfiguration
}
+
+func (bq *BoaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BoaQuerier)
+	if !ok {
+		return false
+	}
+
+	return bq.BOAConfiguration.Equals(&thatConfig.BOAConfiguration)
+}
+
+// QueryInstanceBill performs the request to the BSS client and get the response for the current page number
+func (bq *BoaQuerier) QueryInstanceBill(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, pageNum int) (*bssopenapi.QueryInstanceBillResponse, error) {
+	log.Debugf("QueryInstanceBill: query for BSS Open API for billing date: %s with pageNum: %d ", billingDate, pageNum)
+	request := bssopenapi.CreateQueryInstanceBillRequest()
+	request.Scheme = invocationScheme
+	request.BillingCycle = billingCycle
+	request.IsBillingItem = requests.NewBoolean(true)
+	request.Granularity = granularity
+	request.BillingDate = billingDate
+	request.PageNum = requests.NewInteger(pageNum)
+	response, err := client.QueryInstanceBill(request)
+	if err != nil {
+		return nil, fmt.Errorf("QueryInstanceBill: Failed to hit the BSS Open API with error for page num %d: %v", pageNum, err)
+	}
+	log.Debugf("QueryInstanceBill: Total Number of total items for billing Date: %s pageNum: %d is %d", billingDate, pageNum, response.Data.TotalCount)
+	return response, nil
+}
+
+// QueryBoaPaginated Calls the API in a paginated fashion. There's no paramter in API that can distinguish if it hasMorePages
+// hence the logic of processedItem <= TotalItem.
+func (bq *BoaQuerier) QueryBoaPaginated(client *bssopenapi.Client, isBillingItem bool, invocationScheme, granularity, billingCycle, billingDate string, fn func(*bssopenapi.QueryInstanceBillResponse) bool) error {
+	pageNum := 1
+	processedItem := 0 // setting default here to hit the API for the first time
+	totalItem := 1
+	for processedItem < totalItem {
+		log.Debugf("QueryBoaPaginated: query for BSS Open API for billing date: %s with pageNum: %d", billingDate, pageNum)
+		response, err := bq.QueryInstanceBill(client, isBillingItem, invocationScheme, granularity, billingCycle, billingDate, pageNum)
+		if err != nil {
+			return fmt.Errorf("QueryBoaPaginated for billing cycle : %s, billing date: %s, page num %d: %v", billingCycle, billingDate, pageNum, err)
+		}
+		fn(response)
+		totalItem = response.Data.TotalCount
+		processedItem += response.Data.PageSize
+		pageNum += 1
+	}
+	return nil
+}
+
+// GetBoaQueryInstanceBillFunc gives the item to the handler function in boaIntegration.go to process
+// computeItem, topNItem and aggregatedItem
+func GetBoaQueryInstanceBillFunc(fn func(bssopenapi.Item) error, billingDate string) func(output *bssopenapi.QueryInstanceBillResponse) bool {
+	processBOAItems := func(output *bssopenapi.QueryInstanceBillResponse) bool {
+		// This could be connection error were unable to fetch response output from Client
+		if output == nil {
+			log.Errorf("BoaQuerier: No Response from the ALibaba BSS Open API client for billing Date: %s", billingDate)
+			return false
+		}
+
+		// These infer that the rest call was successful but the Cloud Usage resource for those days were 0
+		if output.Data.TotalCount == 0 {
+			log.Warnf("BoaQuerier: Total Item Count is 0 for billing Date: %s ", billingDate)
+			return false
+		}
+
+		for _, item := range output.Data.Items.Item {
+			fn(item)
+		}
+		return true
+	}
+	return processBOAItems
+}
+
+// SelectAlibabaCategory processes the Alibaba service to associated Kubecost category
+func SelectAlibabaCategory(item bssopenapi.Item) string {
+	if (item != bssopenapi.Item{}) {
+		// Provider ID has prefix "i-" for node in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsNode) {
+			return kubecost.ComputeCategory
+		}
+		// Provider ID for disk start with "d-" for storage type in Alibaba
+		if strings.HasPrefix(item.InstanceID, boaIsDisk) {
+			return kubecost.StorageCategory
+		}
+		// Network has the highest priority and is based on the usage type of "piece" in Alibaba
+		if item.UsageUnit == boaIsNetwork {
+			return kubecost.NetworkCategory
+		}
+	}
+
+	// Alibaba CUR integration report has service lower case mostly unlike AWS
+	// TO-DO: Can investigate further product codes but bare minimal differentiation for start
+	switch strings.ToLower(item.ProductCode) {
+	case "slb", "eip", "nis", "gtm":
+		return kubecost.NetworkCategory
+	case "ecs", "eds", "sas":
+		return kubecost.ComputeCategory
+	case "ack":
+		return kubecost.ManagementCategory
+	case "ebs", "oss", "scu":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}

+ 19 - 18
pkg/cloud/aliyunprovider.go → pkg/cloud/alibaba/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"errors"
@@ -122,8 +122,9 @@ var alibabaInstanceFamilies = []string{
 }
 
 // AlibabaInfo contains configuration for Alibaba's CUR integration
+// Deprecated: v1.104 Use BOAConfiguration instead
 type AlibabaInfo struct {
-	AlibabaClusterRegion    string `json:"clusterRegion"`
+	AlibabaClusterRegion    string `json:"ClusterRegion"`
 	AlibabaServiceKeyName   string `json:"serviceKeyName"`
 	AlibabaServiceKeySecret string `json:"serviceKeySecret"`
 	AlibabaAccountID        string `json:"accountID"`
@@ -138,6 +139,7 @@ func (ai *AlibabaInfo) IsEmpty() bool {
 }
 
 // AlibabaAccessKey holds Alibaba credentials parsing from the service-key.json file.
+// Deprecated: v1.104 Use AccessKey instead
 type AlibabaAccessKey struct {
 	AccessKeyID     string `json:"alibaba_access_key_id"`
 	SecretAccessKey string `json:"alibaba_secret_access_key"`
@@ -323,15 +325,14 @@ type Alibaba struct {
 	// Lock Needed to provide thread safe
 	DownloadPricingDataLock sync.RWMutex
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
-	*CustomProvider
+	Config                  models.ProviderConfig
+	ServiceAccountChecks    *models.ServiceAccountChecks
+	ClusterAccountId        string
+	ClusterRegion           string
 
 	// The following fields are unexported because of avoiding any leak of secrets of these keys.
 	// Alibaba Access key used specifically in signer interface used to sign API calls
-	serviceAccountChecks *models.ServiceAccountChecks
-	clusterAccountId     string
-	clusterRegion        string
-	accessKey            *credentials.AccessKeyCredential
+	accessKey *credentials.AccessKeyCredential
 	// Map of regionID to sdk.client to call API for that region
 	clients map[string]*sdk.Client
 }
@@ -460,11 +461,11 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 		alibaba.Pricing[lookupKey] = pricingObj
 	}
 
-	// set the first occurance of region from the node
-	if alibaba.clusterRegion == "" {
+	// set the first occurrence of region from the node
+	if alibaba.ClusterRegion == "" {
 		for _, node := range nodeList {
 			if regionID, ok := node.Labels["topology.kubernetes.io/region"]; ok {
-				alibaba.clusterRegion = regionID
+				alibaba.ClusterRegion = regionID
 				break
 			}
 		}
@@ -478,7 +479,7 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 	for _, pv := range pvList {
 		pvRegion := determinePVRegion(pv)
 		if pvRegion == "" {
-			pvRegion = alibaba.clusterRegion
+			pvRegion = alibaba.ClusterRegion
 		}
 		pricingObj := &AlibabaPricing{}
 		slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, pvRegion)
@@ -685,8 +686,8 @@ func (alibaba *Alibaba) ClusterInfo() (map[string]string, error) {
 	m := make(map[string]string)
 	m["name"] = clusterName
 	m["provider"] = kubecost.AlibabaProvider
-	m["project"] = alibaba.clusterAccountId
-	m["region"] = alibaba.clusterRegion
+	m["project"] = alibaba.ClusterAccountId
+	m["region"] = alibaba.ClusterRegion
 	m["id"] = env.GetClusterID()
 	return m, nil
 }
@@ -912,7 +913,7 @@ func (alibaba *Alibaba) GetPVKey(pv *v1.PersistentVolume, parameters map[string]
 	regionID := defaultRegion
 	// If default Region is not passed default it to cluster region ID.
 	if defaultRegion == "" {
-		regionID = alibaba.clusterRegion
+		regionID = alibaba.ClusterRegion
 	}
 	slimK8sDisk := generateSlimK8sDiskFromV1PV(pv, defaultRegion)
 	return &AlibabaPVKey{
@@ -947,7 +948,7 @@ func (alibabaPVKey *AlibabaPVKey) GetStorageClass() string {
 // When supporting subscription and Premptible resources this HTTP call needs to be modified with PriceUnit information
 // When supporting different new type of instances like Compute Optimized, Memory Optimized etc make sure you add the instance type
 // in unit test and check if it works or not to create the ack request and processDescribePriceAndCreateAlibabaPricing function
-// else more paramters need to be pulled from kubernetes node response or gather infromation from elsewhere and function modified.
+// else more parameters need to be pulled from kubernetes node response or gather information from elsewhere and function modified.
 func createDescribePriceACSRequest(i interface{}) (*requests.CommonRequest, error) {
 	request := requests.NewCommonRequest()
 	request.Method = requests.GET
@@ -1315,7 +1316,7 @@ func generateSlimK8sDiskFromV1PV(pv *v1.PersistentVolume, regionID string) *Slim
 		}
 	}
 
-	// Highly unlikely that label pv.Spec.CSI.VolumeAttributes["type"] doesn't exist but if occured default to cloud (most basic disk type)
+	// Highly unlikely that label pv.Spec.CSI.VolumeAttributes["type"] doesn't exist but if occurred default to cloud (most basic disk type)
 	if diskCategory == "" {
 		diskCategory = ALIBABA_DISK_CLOUD_CATEGORY
 	}
@@ -1356,7 +1357,7 @@ func determinePVRegion(pv *v1.PersistentVolume) string {
 
 	if pvZone == "" {
 		// zone and regionID labels are optional in Alibaba PV creation, while PV through UI creation put's a zone PV is associated with and the region
-		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to clusterRegion.
+		// can be determined from this information. If pv is provision via yaml and the block is missing that's the only time it gets defaulted to ClusterRegion.
 		if pv.Spec.NodeAffinity != nil {
 			nodeAffinity := pv.Spec.NodeAffinity
 			if nodeAffinity.Required != nil && nodeAffinity.Required.NodeSelectorTerms != nil {

+ 20 - 20
pkg/cloud/aliyunprovider_test.go → pkg/cloud/alibaba/provider_test.go

@@ -1,4 +1,4 @@
-package cloud
+package alibaba
 
 import (
 	"fmt"
@@ -9,7 +9,7 @@ import (
 	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	v1 "k8s.io/api/core/v1"
-	resource "k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/api/resource"
 )
 
 func TestCreateDescribePriceACSRequest(t *testing.T) {
@@ -594,10 +594,10 @@ func TestDetermineKeyForPricing(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnString, returnErr := determineKeyForPricing(c.testVar)
 			if c.expectedError == nil && returnErr != nil {
-				t.Fatalf("Case name %s: expected error was nil but recieved error %v", c.name, returnErr)
+				t.Fatalf("Case name %s: expected error was nil but received error %v", c.name, returnErr)
 			}
 			if returnString != c.expectedKey {
-				t.Fatalf("Case name %s: determineKeyForPricing recieved %s but expected %s", c.name, returnString, c.expectedKey)
+				t.Fatalf("Case name %s: determineKeyForPricing received %s but expected %s", c.name, returnString, c.expectedKey)
 			}
 		})
 	}
@@ -635,22 +635,22 @@ func TestGenerateSlimK8sNodeFromV1Node(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnSlimK8sNode := generateSlimK8sNodeFromV1Node(c.testNode)
 			if returnSlimK8sNode.InstanceType != c.expectedSlimNode.InstanceType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceType: %s , recieved InstanceType: %s", c.expectedSlimNode.InstanceType, returnSlimK8sNode.InstanceType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceType: %s , received InstanceType: %s", c.expectedSlimNode.InstanceType, returnSlimK8sNode.InstanceType)
 			}
 			if returnSlimK8sNode.RegionID != c.expectedSlimNode.RegionID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected RegionID: %s , recieved RegionID: %s", c.expectedSlimNode.RegionID, returnSlimK8sNode.RegionID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected RegionID: %s , received RegionID: %s", c.expectedSlimNode.RegionID, returnSlimK8sNode.RegionID)
 			}
 			if returnSlimK8sNode.PriceUnit != c.expectedSlimNode.PriceUnit {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected PriceUnit: %s , recieved PriceUnit: %s", c.expectedSlimNode.PriceUnit, returnSlimK8sNode.PriceUnit)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected PriceUnit: %s , received PriceUnit: %s", c.expectedSlimNode.PriceUnit, returnSlimK8sNode.PriceUnit)
 			}
 			if returnSlimK8sNode.MemorySizeInKiB != c.expectedSlimNode.MemorySizeInKiB {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected MemorySizeInKiB: %s , recieved MemorySizeInKiB: %s", c.expectedSlimNode.MemorySizeInKiB, returnSlimK8sNode.MemorySizeInKiB)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected MemorySizeInKiB: %s , received MemorySizeInKiB: %s", c.expectedSlimNode.MemorySizeInKiB, returnSlimK8sNode.MemorySizeInKiB)
 			}
 			if returnSlimK8sNode.OSType != c.expectedSlimNode.OSType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected OSType: %s , recieved OSType: %s", c.expectedSlimNode.OSType, returnSlimK8sNode.OSType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected OSType: %s , received OSType: %s", c.expectedSlimNode.OSType, returnSlimK8sNode.OSType)
 			}
 			if returnSlimK8sNode.InstanceTypeFamily != c.expectedSlimNode.InstanceTypeFamily {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceTypeFamily: %s , recieved InstanceTypeFamily: %s", c.expectedSlimNode.InstanceTypeFamily, returnSlimK8sNode.InstanceTypeFamily)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceTypeFamily: %s , received InstanceTypeFamily: %s", c.expectedSlimNode.InstanceTypeFamily, returnSlimK8sNode.InstanceTypeFamily)
 			}
 		})
 	}
@@ -695,28 +695,28 @@ func TestGenerateSlimK8sDiskFromV1PV(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnSlimK8sDisk := generateSlimK8sDiskFromV1PV(c.testPV, c.inpRegionID)
 			if returnSlimK8sDisk.DiskType != c.expectedSlimDisk.DiskType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskType: %s , recieved DiskType: %s", c.expectedSlimDisk.DiskType, returnSlimK8sDisk.DiskType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskType: %s , received DiskType: %s", c.expectedSlimDisk.DiskType, returnSlimK8sDisk.DiskType)
 			}
 			if returnSlimK8sDisk.RegionID != c.expectedSlimDisk.RegionID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected RegionID: %s , recieved RegionID Type: %s", c.expectedSlimDisk.RegionID, returnSlimK8sDisk.RegionID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected RegionID: %s , received RegionID Type: %s", c.expectedSlimDisk.RegionID, returnSlimK8sDisk.RegionID)
 			}
 			if returnSlimK8sDisk.PriceUnit != c.expectedSlimDisk.PriceUnit {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PriceUnit: %s , recieved PriceUnit Type: %s", c.expectedSlimDisk.PriceUnit, returnSlimK8sDisk.PriceUnit)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PriceUnit: %s , received PriceUnit Type: %s", c.expectedSlimDisk.PriceUnit, returnSlimK8sDisk.PriceUnit)
 			}
 			if returnSlimK8sDisk.SizeInGiB != c.expectedSlimDisk.SizeInGiB {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected SizeInGiB: %s , recieved SizeInGiB Type: %s", c.expectedSlimDisk.SizeInGiB, returnSlimK8sDisk.SizeInGiB)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected SizeInGiB: %s , received SizeInGiB Type: %s", c.expectedSlimDisk.SizeInGiB, returnSlimK8sDisk.SizeInGiB)
 			}
 			if returnSlimK8sDisk.DiskCategory != c.expectedSlimDisk.DiskCategory {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskCategory: %s , recieved DiskCategory Type: %s", c.expectedSlimDisk.DiskCategory, returnSlimK8sDisk.DiskCategory)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskCategory: %s , received DiskCategory Type: %s", c.expectedSlimDisk.DiskCategory, returnSlimK8sDisk.DiskCategory)
 			}
 			if returnSlimK8sDisk.PerformanceLevel != c.expectedSlimDisk.PerformanceLevel {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PerformanceLevel: %s , recieved PerformanceLevel Type: %s", c.expectedSlimDisk.PerformanceLevel, returnSlimK8sDisk.PerformanceLevel)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PerformanceLevel: %s , received PerformanceLevel Type: %s", c.expectedSlimDisk.PerformanceLevel, returnSlimK8sDisk.PerformanceLevel)
 			}
 			if returnSlimK8sDisk.ProviderID != c.expectedSlimDisk.ProviderID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected ProviderID: %s , recieved ProviderID Type: %s", c.expectedSlimDisk.ProviderID, returnSlimK8sDisk.ProviderID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected ProviderID: %s , received ProviderID Type: %s", c.expectedSlimDisk.ProviderID, returnSlimK8sDisk.ProviderID)
 			}
 			if returnSlimK8sDisk.StorageClass != c.expectedSlimDisk.StorageClass {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected StorageClass: %s , recieved StorageClass Type: %s", c.expectedSlimDisk.StorageClass, returnSlimK8sDisk.StorageClass)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected StorageClass: %s , received StorageClass Type: %s", c.expectedSlimDisk.StorageClass, returnSlimK8sDisk.StorageClass)
 			}
 		})
 	}
@@ -753,7 +753,7 @@ func TestGetNumericalValueFromResourceQuantity(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnValue := getNumericalValueFromResourceQuantity(c.inputResourceQuanity)
 			if c.expectedValue != returnValue {
-				t.Fatalf("Case name %s: getNumericalValueFromResourceQuantity recieved %s but expected %s", c.name, returnValue, c.expectedValue)
+				t.Fatalf("Case name %s: getNumericalValueFromResourceQuantity received %s but expected %s", c.name, returnValue, c.expectedValue)
 			}
 		})
 	}
@@ -831,7 +831,7 @@ func TestDeterminePVRegion(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnRegion := determinePVRegion(c.inputPV)
 			if c.expectedRegion != returnRegion {
-				t.Fatalf("Case name %s: determinePVRegion recieved region :%s but expected region: %s", c.name, returnRegion, c.expectedRegion)
+				t.Fatalf("Case name %s: determinePVRegion received region :%s but expected region: %s", c.name, returnRegion, c.expectedRegion)
 			}
 		})
 	}

+ 233 - 0
pkg/cloud/aws/athenaconfiguration.go

@@ -0,0 +1,233 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+// AthenaConfiguration
+type AthenaConfiguration struct {
+	Bucket     string     `json:"bucket"`
+	Region     string     `json:"region"`
+	Database   string     `json:"database"`
+	Table      string     `json:"table"`
+	Workgroup  string     `json:"workgroup"`
+	Account    string     `json:"account"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (ac *AthenaConfiguration) Validate() error {
+
+	// Validate Authorizer
+	if ac.Authorizer == nil {
+		return fmt.Errorf("AthenaConfiguration: missing Authorizer")
+	}
+
+	err := ac.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: %s", err)
+	}
+
+	// Validate base properties
+	if ac.Bucket == "" {
+		return fmt.Errorf("AthenaConfiguration: missing bucket")
+	}
+
+	if ac.Region == "" {
+		return fmt.Errorf("AthenaConfiguration: missing region")
+	}
+
+	if ac.Database == "" {
+		return fmt.Errorf("AthenaConfiguration: missing database")
+	}
+
+	if ac.Table == "" {
+		return fmt.Errorf("AthenaConfiguration: missing table")
+	}
+
+	if ac.Account == "" {
+		return fmt.Errorf("AthenaConfiguration: missing account")
+	}
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AthenaConfiguration)
+	if !ok {
+		return false
+	}
+
+	if ac.Authorizer != nil {
+		if !ac.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if ac.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if ac.Region != thatConfig.Region {
+		return false
+	}
+
+	if ac.Database != thatConfig.Database {
+		return false
+	}
+
+	if ac.Table != thatConfig.Table {
+		return false
+	}
+
+	if ac.Workgroup != thatConfig.Workgroup {
+		return false
+	}
+
+	if ac.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ac *AthenaConfiguration) Sanitize() config.Config {
+	return &AthenaConfiguration{
+		Bucket:     ac.Bucket,
+		Region:     ac.Region,
+		Database:   ac.Database,
+		Table:      ac.Table,
+		Workgroup:  ac.Workgroup,
+		Account:    ac.Account,
+		Authorizer: ac.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (ac *AthenaConfiguration) Key() string {
+	return fmt.Sprintf("%s/%s", ac.Account, ac.Bucket)
+}
+
+func (ac *AthenaConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Bucket = bucket
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Region = region
+
+	database, err := config.GetInterfaceValue[string](fmap, "database")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Database = database
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Table = table
+
+	workgroup, err := config.GetInterfaceValue[string](fmap, "workgroup")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Workgroup = workgroup
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Account = account
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AthenaConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	ac.Authorizer = authorizer
+
+	return nil
+}
+
+func (ac *AthenaConfiguration) GetAthenaClient() (*athena.Client, error) {
+	cfg, err := ac.Authorizer.CreateAWSConfig(ac.Region)
+	if err != nil {
+		return nil, err
+	}
+	cli := athena.NewFromConfig(cfg)
+	return cli, nil
+}
+
+// ConvertAwsAthenaInfoToConfig takes a legacy config and generates a Config based on the presence of properties to match
+// legacy behavior
+func ConvertAwsAthenaInfoToConfig(aai AwsAthenaInfo) config.KeyedConfig {
+	if aai.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	if aai.ServiceKeyName == "" && aai.ServiceKeySecret == "" {
+		authorizer = &ServiceAccount{}
+	} else {
+		authorizer = &AccessKey{
+			ID:     aai.ServiceKeyName,
+			Secret: aai.ServiceKeySecret,
+		}
+	}
+
+	// Wrap Authorizer with AssumeRole if MasterPayerArn is set
+	if aai.MasterPayerARN != "" {
+		authorizer = &AssumeRole{
+			Authorizer: authorizer,
+			RoleARN:    aai.MasterPayerARN,
+		}
+	}
+
+	var config config.KeyedConfig
+	if aai.AthenaTable != "" || aai.AthenaDatabase != "" {
+		config = &AthenaConfiguration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Database:   aai.AthenaDatabase,
+			Table:      aai.AthenaTable,
+			Workgroup:  aai.AthenaWorkgroup,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	} else {
+		config = &S3Configuration{
+			Bucket:     aai.AthenaBucketName,
+			Region:     aai.AthenaRegion,
+			Account:    aai.AccountID,
+			Authorizer: authorizer,
+		}
+	}
+
+	return config
+}

+ 594 - 0
pkg/cloud/aws/athenaconfiguration_test.go

@@ -0,0 +1,594 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestAthenaConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   AthenaConfiguration
+		expected error
+	}{
+		"valid config access key": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: AthenaConfiguration{
+				Bucket:     "",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing bucket"),
+		},
+		"missing region": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing region"),
+		},
+		"missing database": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing database"),
+		},
+		"missing table": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing table"),
+		},
+		"missing workgroup": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"missing account": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("AthenaConfiguration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     AthenaConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket2",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region2",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different database": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database2",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table2",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different workgroup": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup2",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestAthenaConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config AthenaConfiguration
+	}{
+		"Empty Config": {
+			config: AthenaConfiguration{},
+		},
+		"AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Database:   "database",
+				Table:      "table",
+				Workgroup:  "workgroup",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: AthenaConfiguration{
+				Bucket:    "bucket",
+				Region:    "region",
+				Database:  "database",
+				Table:     "table",
+				Workgroup: "workgroup",
+				Account:   "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &AthenaConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 208 - 0
pkg/cloud/aws/athenaquerier.go

@@ -0,0 +1,208 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/athena"
+	"github.com/aws/aws-sdk-go-v2/service/athena/types"
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
+type AthenaQuerier struct {
+	AthenaConfiguration
+}
+
+func (aq *AthenaQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AthenaQuerier)
+	if !ok {
+		return false
+	}
+
+	return aq.AthenaConfiguration.Equals(&thatConfig.AthenaConfiguration)
+}
+
+// QueryAthenaPaginated executes athena query and processes results. An error from this method indicates a
+// FAILED_CONNECTION CloudConnectionStatus and should immediately stop the caller to maintain the correct CloudConnectionStatus
+func (aq *AthenaQuerier) QueryAthenaPaginated(ctx context.Context, query string, fn func(*athena.GetQueryResultsOutput) bool) error {
+
+	queryExecutionCtx := &types.QueryExecutionContext{
+		Database: aws.String(aq.Database),
+	}
+
+	resultConfiguration := &types.ResultConfiguration{
+		OutputLocation: aws.String(aq.Bucket),
+	}
+	startQueryExecutionInput := &athena.StartQueryExecutionInput{
+		QueryString:           aws.String(query),
+		QueryExecutionContext: queryExecutionCtx,
+		ResultConfiguration:   resultConfiguration,
+	}
+
+	// Only set if there is a value, the default input is nil
+	if aq.Workgroup != "" {
+		startQueryExecutionInput.WorkGroup = aws.String(aq.Workgroup)
+	}
+
+	// Create Athena Client
+	cli, err := aq.AthenaConfiguration.GetAthenaClient()
+
+	// Query Athena
+	startQueryExecutionOutput, err := cli.StartQueryExecution(ctx, startQueryExecutionInput)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: start query error: %s", err.Error())
+	}
+	err = waitForQueryToComplete(ctx, cli, startQueryExecutionOutput.QueryExecutionId)
+	if err != nil {
+		return fmt.Errorf("QueryAthenaPaginated: query execution error: %s", err.Error())
+	}
+	queryResultsInput := &athena.GetQueryResultsInput{
+		QueryExecutionId: startQueryExecutionOutput.QueryExecutionId,
+	}
+	getQueryResultsPaginator := athena.NewGetQueryResultsPaginator(cli, queryResultsInput)
+	for getQueryResultsPaginator.HasMorePages() {
+		pg, err := getQueryResultsPaginator.NextPage(ctx)
+		if err != nil {
+			log.Errorf("queryAthenaPaginated: NextPage error: %s", err.Error())
+			continue
+		}
+		fn(pg)
+	}
+	return nil
+}
+
+func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
+	inp := &athena.GetQueryExecutionInput{
+		QueryExecutionId: queryExecutionID,
+	}
+	isQueryStillRunning := true
+	for isQueryStillRunning {
+		qe, err := client.GetQueryExecution(ctx, inp)
+		if err != nil {
+			return err
+		}
+		if qe.QueryExecution.Status.State == "SUCCEEDED" {
+			isQueryStillRunning = false
+			continue
+		}
+		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
+			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
+		}
+		time.Sleep(2 * time.Second)
+	}
+	return nil
+}
+
+// GetAthenaRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetAthenaRowValue(row types.Row, queryColumnIndexes map[string]int, columnName string) string {
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return ""
+	}
+	return stringutil.Bank(*valuePointer)
+}
+
+// getAthenaRowValueFloat retrieve value from athena row based on column names and convert to float if possible
+func GetAthenaRowValueFloat(row types.Row, queryColumnIndexes map[string]int, columnName string) (float64, error) {
+
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: missing column index: %s", columnName)
+	}
+
+	valuePointer := row.Data[columnIndex].VarCharValue
+	if valuePointer == nil {
+		return 0.0, fmt.Errorf("getAthenaRowValueFloat: nil field")
+	}
+
+	cost, err := strconv.ParseFloat(*valuePointer, 64)
+	if err != nil {
+		return cost, fmt.Errorf("getAthenaRowValueFloat: failed to parse %s: '%s': %s", columnName, *valuePointer, err.Error())
+	}
+	return cost, nil
+}
+
+func SelectAWSCategory(isNode, isVol, isNetwork bool, providerID, service string) string {
+	// Network has the highest priority and is based on the usage type ending in "Bytes"
+	if isNetwork {
+		return kubecost.NetworkCategory
+	}
+	// The node and volume conditions are mutually exclusive.
+	// Provider ID has prefix "i-"
+	if isNode {
+		return kubecost.ComputeCategory
+	}
+	// Provider ID has prefix "vol-"
+	if isVol {
+		return kubecost.StorageCategory
+	}
+
+	// Default categories based on service
+	switch strings.ToUpper(service) {
+	case "AWSELB", "AWSGLUE", "AMAZONROUTE53":
+		return kubecost.NetworkCategory
+	case "AMAZONEC2", "AWSLAMBDA", "AMAZONELASTICACHE":
+		return kubecost.ComputeCategory
+	case "AMAZONEKS":
+		// Check if line item is a fargate pod
+		if strings.Contains(providerID, ":pod/") {
+			return kubecost.ComputeCategory
+		}
+		return kubecost.ManagementCategory
+	case "AMAZONS3", "AMAZONATHENA", "AMAZONRDS", "AMAZONDYNAMODB", "AWSSECRETSMANAGER", "AMAZONFSX":
+		return kubecost.StorageCategory
+	default:
+		return kubecost.OtherCategory
+	}
+}
+
+var parseARNRx = regexp.MustCompile("^.+\\/(.+)?") // Capture "a406f7761142e4ef58a8f2ba478d2db2" from "arn:aws:elasticloadbalancing:us-east-1:297945954695:loadbalancer/a406f7761142e4ef58a8f2ba478d2db2"
+
+func ParseARN(id string) string {
+	match := parseARNRx.FindStringSubmatch(id)
+	if len(match) == 0 {
+		if id != "" {
+			log.DedupedInfof(10, "aws.parseARN: failed to parse %s", id)
+		}
+		return id
+	}
+	return match[len(match)-1]
+}
+
+func GetAthenaQueryFunc(fn func(types.Row)) func(*athena.GetQueryResultsOutput) bool {
+	pageNum := 0
+	processItemQueryResults := func(page *athena.GetQueryResultsOutput) bool {
+		if page == nil {
+			log.Errorf("AthenaQuerier: Athena page is nil")
+			return false
+		} else if page.ResultSet == nil {
+			log.Errorf("AthenaQuerier: Athena page.ResultSet is nil")
+			return false
+		}
+		rows := page.ResultSet.Rows
+		if pageNum == 0 {
+			rows = page.ResultSet.Rows[1:len(page.ResultSet.Rows)]
+		}
+
+		for _, row := range rows {
+			fn(row)
+		}
+		pageNum++
+		return true
+	}
+	return processItemQueryResults
+}

+ 251 - 0
pkg/cloud/aws/authorizer.go

@@ -0,0 +1,251 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsconfig "github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+const AccessKeyAuthorizerType = "AWSAccessKey"
+const ServiceAccountAuthorizerType = "AWSServiceAccount"
+const AssumeRoleAuthorizerType = "AWSAssumeRole"
+
+// Authorizer implementations provide aws.Config for AWS SDK calls
+type Authorizer interface {
+	config.Authorizer
+	CreateAWSConfig(string) (aws.Config, error)
+}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(authorizerType string) (Authorizer, error) {
+	switch authorizerType {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	case ServiceAccountAuthorizerType:
+		return &ServiceAccount{}, nil
+	case AssumeRoleAuthorizerType:
+		return &AssumeRole{}, nil
+	}
+	// Unrecognized type strings are a configuration error.
+	return nil, fmt.Errorf("AWS: provider authorizer type '%s' is not valid", authorizerType)
+}
+
+// AccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+type AccessKey struct {
+	ID     string `json:"id"`
+	Secret string `json:"secret"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["id"] = ak.ID
+	fmap["secret"] = ak.Secret
+	return json.Marshal(fmap)
+}
+
+// Retrieve returns a set of awsV2 credentials using the AccessKey's key and secret.
+// This fulfils the awsV2.CredentialsProvider interface contract.
+func (ak *AccessKey) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	return aws.Credentials{
+		AccessKeyID:     ak.ID,
+		SecretAccessKey: ak.Secret,
+	}, nil
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.ID == "" {
+		return fmt.Errorf("AccessKey: missing ID")
+	}
+	if ak.Secret == "" {
+		return fmt.Errorf("AccessKey: missing Secret")
+	}
+	return nil
+}
+
+// Equals reports whether config is an *AccessKey whose ID and Secret both
+// match this one.
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+	return ak.ID == that.ID && ak.Secret == that.Secret
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		ID:     ak.ID,
+		Secret: config.Redacted,
+	}
+}
+
+// CreateAWSConfig creates an AWS SDK V2 Config for the credentials that it contains for the provided region.
+// The AccessKey is validated first so incomplete credentials fail fast.
+func (ak *AccessKey) CreateAWSConfig(region string) (cfg aws.Config, err error) {
+	err = ak.Validate()
+	if err != nil {
+		return cfg, err
+	}
+	// The AWS SDK v2 requires an object fulfilling the CredentialsProvider interface, which AccessKey does
+	cfg, err = awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithCredentialsProvider(ak), awsconfig.WithRegion(region))
+	if err != nil {
+		// Wrap with %w so callers can inspect the underlying SDK error.
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region %s: %w", region, err)
+	}
+	return cfg, nil
+}
+
+// ServiceAccount uses pod annotations along with a service account to authenticate integrations
+type ServiceAccount struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (sa *ServiceAccount) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountAuthorizerType
+	return json.Marshal(fmap)
+}
+
+// Check has nothing to check at this level, connection will fail if Pod Annotation and Service Account are not configured correctly
+func (sa *ServiceAccount) Validate() error {
+	return nil
+}
+
+func (sa *ServiceAccount) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*ServiceAccount)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+func (sa *ServiceAccount) Sanitize() config.Config {
+	return &ServiceAccount{}
+}
+
+func (sa *ServiceAccount) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(region))
+	if err != nil {
+		return cfg, fmt.Errorf("failed to initialize AWS SDK config for region from annotation %s: %s", region, err)
+	}
+	return cfg, nil
+}
+
+// AssumeRole is a wrapper for another Authorizer which adds an assumed role to the configuration
+type AssumeRole struct {
+	Authorizer Authorizer `json:"authorizer"`
+	RoleARN    string     `json:"roleARN"`
+}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (ara *AssumeRole) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AssumeRoleAuthorizerType
+	fmap["roleARN"] = ara.RoleARN
+	fmap["authorizer"] = ara.Authorizer
+	return json.Marshal(fmap)
+}
+
+// UnmarshalJSON is required for AssumeRole because it needs to unmarshal an Authorizer interface
+func (ara *AssumeRole) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	// Guard the assertion: a non-object JSON value would otherwise panic.
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: expected JSON object")
+	}
+
+	roleARN, err := config.GetInterfaceValue[string](fmap, "roleARN")
+	if err != nil {
+		// Error prefix corrected from a copy-pasted "StorageConfiguration".
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.RoleARN = roleARN
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: missing Authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("AssumeRole: UnmarshalJSON: %s", err.Error())
+	}
+	ara.Authorizer = authorizer
+
+	return nil
+}
+
+// CreateAWSConfig builds an aws.Config from the wrapped base Authorizer and
+// then layers on credentials that assume the configured RoleARN via STS.
+func (ara *AssumeRole) CreateAWSConfig(region string) (aws.Config, error) {
+	cfg, err := ara.Authorizer.CreateAWSConfig(region)
+	if err != nil {
+		// Previously this error was discarded; surface it so a broken base
+		// authorizer does not silently produce an unusable config.
+		return cfg, err
+	}
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the RoleARN.
+	stsSvc := sts.NewFromConfig(cfg)
+	creds := stscreds.NewAssumeRoleProvider(stsSvc, ara.RoleARN)
+	cfg.Credentials = aws.NewCredentialsCache(creds)
+	return cfg, nil
+}
+
+// Validate checks that both the base Authorizer and the RoleARN are present
+// and that the base Authorizer is itself valid.
+func (ara *AssumeRole) Validate() error {
+	if ara.Authorizer == nil {
+		return fmt.Errorf("AssumeRole: missing base Authorizer")
+	}
+	err := ara.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if ara.RoleARN == "" {
+		return fmt.Errorf("AssumeRole: missing RoleARN configuration")
+	}
+
+	return nil
+}
+
+// Equals reports whether config is an *AssumeRole with the same RoleARN and
+// an equal (or equally absent) base Authorizer.
+func (ara *AssumeRole) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	that, ok := config.(*AssumeRole)
+	if !ok {
+		return false
+	}
+
+	if ara.RoleARN != that.RoleARN {
+		return false
+	}
+
+	// A nil base Authorizer only matches another nil base Authorizer.
+	if ara.Authorizer == nil {
+		return that.Authorizer == nil
+	}
+	return ara.Authorizer.Equals(that.Authorizer)
+}
+
+// Sanitize returns a copy of the configuration safe for display or
+// serialization, with any secrets held by the base Authorizer redacted.
+func (ara *AssumeRole) Sanitize() config.Config {
+	sanitized := &AssumeRole{
+		RoleARN: ara.RoleARN,
+	}
+	// Guard against a nil base Authorizer (possible before Validate is
+	// called) to avoid a method call on a nil interface, which would panic.
+	if ara.Authorizer != nil {
+		sanitized.Authorizer = ara.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}

+ 67 - 0
pkg/cloud/aws/authorizer_test.go

@@ -0,0 +1,67 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+func TestAuthorizerJSON_Sanitize(t *testing.T) {
+
+	testCases := map[string]struct {
+		input    Authorizer
+		expected Authorizer
+	}{
+		"Access Key": {
+			input: &AccessKey{
+				ID:     "ID",
+				Secret: "Secret",
+			},
+			expected: &AccessKey{
+				ID:     "ID",
+				Secret: config.Redacted,
+			},
+		},
+		"Service Account": {
+			input:    &ServiceAccount{},
+			expected: &ServiceAccount{},
+		},
+		"Master Payer Access Key": {
+			input: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: "Secret",
+				},
+				RoleARN: "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &AccessKey{
+					ID:     "ID",
+					Secret: config.Redacted,
+				},
+				RoleARN: "role arn",
+			},
+		},
+		"Master Payer Service Account": {
+			input: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+			expected: &AssumeRole{
+				Authorizer: &ServiceAccount{},
+				RoleARN:    "role arn",
+			},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// Convert to AuthorizerJSON for sanitization
+			sanitizedAuthorizer := tc.input.Sanitize()
+
+			if !tc.expected.Equals(sanitizedAuthorizer) {
+				t.Error("Authorizer was not as expected after Sanitization")
+			}
+
+		})
+	}
+}

+ 19 - 41
pkg/cloud/awsprovider.go → pkg/cloud/aws/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package aws
 
 import (
 	"bytes"
@@ -60,7 +60,6 @@ const (
 	AWSHourlyPublicIPCost    = 0.005
 	EKSCapacityTypeLabel     = "eks.amazonaws.com/capacityType"
 	EKSCapacitySpotTypeValue = "SPOT"
-	
 )
 
 var (
@@ -179,16 +178,16 @@ type AWS struct {
 	SpotDataPrefix              string
 	ProjectID                   string
 	DownloadPricingDataLock     sync.RWMutex
-	Config                      *ProviderConfig
-	serviceAccountChecks        *models.ServiceAccountChecks
+	Config                      models.ProviderConfig
+	ServiceAccountChecks        *models.ServiceAccountChecks
 	clusterManagementPrice      float64
-	clusterRegion               string
-	clusterAccountID            string
+	ClusterRegion               string
+	ClusterAccountID            string
 	clusterProvisioner          string
-	*CustomProvider
 }
 
 // AWSAccessKey holds AWS credentials and fulfils the awsV2.CredentialsProvider interface
+// Deprecated: v1.104 Use AccessKey instead
 type AWSAccessKey struct {
 	AccessKeyID     string `json:"aws_access_key_id"`
 	SecretAccessKey string `json:"aws_secret_access_key"`
@@ -395,6 +394,7 @@ type AwsSpotFeedInfo struct {
 }
 
 // AwsAthenaInfo contains configuration for CUR integration
+// Deprecated: v1.104 Use AthenaConfiguration instead
 type AwsAthenaInfo struct {
 	AthenaBucketName string `json:"athenaBucketName"`
 	AthenaRegion     string `json:"athenaRegion"`
@@ -619,7 +619,7 @@ func (k *awsKey) ID() string {
 	return ""
 }
 
-// Features will return a comma seperated list of features for the given node
+// Features will return a comma separated list of features for the given node
 // If the node has a spot label, it will be included in the list
 // Otherwise, the list include instance type, operating system, and the region
 func (k *awsKey) Features() string {
@@ -651,7 +651,7 @@ func (k *awsKey) getUsageType(labels map[string]string) string {
 		// We currently write out spot instances as "preemptible" in the pricing data, so these need to match
 		return PreemptibleType
 	}
-	if kLabel, ok := labels[KarpenterCapacityTypeLabel]; ok && kLabel == KarpenterCapacitySpotTypeValue {
+	if kLabel, ok := labels[models.KarpenterCapacityTypeLabel]; ok && kLabel == models.KarpenterCapacitySpotTypeValue {
 		return PreemptibleType
 	}
 	return ""
@@ -1367,7 +1367,7 @@ func (awsProvider *AWS) ClusterInfo() (map[string]string, error) {
 	m["name"] = clusterName
 	m["provider"] = kubecost.AWSProvider
 	m["account"] = clusterAccountID
-	m["region"] = awsProvider.clusterRegion
+	m["region"] = awsProvider.ClusterRegion
 	m["id"] = env.GetClusterID()
 	m["remoteReadEnabled"] = strconv.FormatBool(env.IsRemoteEnabled())
 	m["provisioner"] = awsProvider.clusterProvisioner
@@ -1404,7 +1404,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 
 	// 1. Check config values first (set from frontend UI)
 	if cp.ServiceKeyName != "" && cp.ServiceKeySecret != "" {
-		aws.serviceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  true,
 		})
@@ -1414,7 +1414,7 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 	// 2. Check for secret
 	s, _ := aws.loadAWSAuthSecret(forceReload)
 	if s != nil && s.AccessKeyID != "" && s.SecretAccessKey != "" {
-		aws.serviceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  true,
 		})
@@ -1423,12 +1423,12 @@ func (aws *AWS) getAWSAuth(forceReload bool, cp *models.CustomPricing) (string,
 
 	// 3. Fall back to env vars
 	if env.GetAWSAccessKeyID() == "" || env.GetAWSAccessKeySecret() == "" {
-		aws.serviceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  false,
 		})
 	} else {
-		aws.serviceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("hasKey", &models.ServiceAccountCheck{
 			Message: "AWS ServiceKey exists",
 			Status:  true,
 		})
@@ -1850,28 +1850,6 @@ func (aws *AWS) QueryAthenaPaginated(ctx context.Context, query string, fn func(
 	return nil
 }
 
-func waitForQueryToComplete(ctx context.Context, client *athena.Client, queryExecutionID *string) error {
-	inp := &athena.GetQueryExecutionInput{
-		QueryExecutionId: queryExecutionID,
-	}
-	isQueryStillRunning := true
-	for isQueryStillRunning {
-		qe, err := client.GetQueryExecution(ctx, inp)
-		if err != nil {
-			return err
-		}
-		if qe.QueryExecution.Status.State == "SUCCEEDED" {
-			isQueryStillRunning = false
-			continue
-		}
-		if qe.QueryExecution.Status.State != "RUNNING" && qe.QueryExecution.Status.State != "QUEUED" {
-			return fmt.Errorf("no query results available for query %s", *queryExecutionID)
-		}
-		time.Sleep(2 * time.Second)
-	}
-	return nil
-}
-
 type SavingsPlanData struct {
 	ResourceID     string
 	EffectiveCost  float64
@@ -2155,14 +2133,14 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 	}
 	lso, err := cli.ListObjects(context.TODO(), ls)
 	if err != nil {
-		aws.serviceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
 			Message:        "Bucket List Permissions Available",
 			Status:         false,
 			AdditionalInfo: err.Error(),
 		})
 		return nil, err
 	} else {
-		aws.serviceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
+		aws.ServiceAccountChecks.Set("bucketList", &models.ServiceAccountCheck{
 			Message: "Bucket List Permissions Available",
 			Status:  true,
 		})
@@ -2207,14 +2185,14 @@ func (aws *AWS) parseSpotData(bucket string, prefix string, projectID string, re
 		buf := manager.NewWriteAtBuffer([]byte{})
 		_, err := downloader.Download(context.TODO(), buf, getObj)
 		if err != nil {
-			aws.serviceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
+			aws.ServiceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
 				Message:        "Object Get Permissions Available",
 				Status:         false,
 				AdditionalInfo: err.Error(),
 			})
 			return nil, err
 		} else {
-			aws.serviceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
+			aws.ServiceAccountChecks.Set("objectList", &models.ServiceAccountCheck{
 				Message: "Object Get Permissions Available",
 				Status:  true,
 			})
@@ -2290,7 +2268,7 @@ func (aws *AWS) ApplyReservedInstancePricing(nodes map[string]*models.Node) {
 }
 
 func (aws *AWS) ServiceAccountStatus() *models.ServiceAccountStatus {
-	return aws.serviceAccountChecks.GetStatus()
+	return aws.ServiceAccountChecks.GetStatus()
 }
 
 func (aws *AWS) CombinedDiscountForNode(instanceType string, isPreemptible bool, defaultDiscount, negotiatedDiscount float64) float64 {

+ 6 - 6
pkg/cloud/awsprovider_test.go → pkg/cloud/aws/provider_test.go

@@ -1,4 +1,4 @@
-package cloud
+package aws
 
 import (
 	"bytes"
@@ -64,7 +64,7 @@ func Test_awsKey_getUsageType(t *testing.T) {
 			name: "Karpenter label with a capacityType set to empty string should return empty string",
 			args: args{
 				labels: map[string]string{
-					KarpenterCapacityTypeLabel: "",
+					models.KarpenterCapacityTypeLabel: "",
 				},
 			},
 			want: "",
@@ -73,7 +73,7 @@ func Test_awsKey_getUsageType(t *testing.T) {
 			name: "Karpenter label with capacityType set to a random value should return empty string",
 			args: args{
 				labels: map[string]string{
-					KarpenterCapacityTypeLabel: "TEST_ME",
+					models.KarpenterCapacityTypeLabel: "TEST_ME",
 				},
 			},
 			want: "",
@@ -82,7 +82,7 @@ func Test_awsKey_getUsageType(t *testing.T) {
 			name: "Karpenter label with capacityType set to spot should return spot",
 			args: args{
 				labels: map[string]string{
-					KarpenterCapacityTypeLabel: KarpenterCapacitySpotTypeValue,
+					models.KarpenterCapacityTypeLabel: models.KarpenterCapacitySpotTypeValue,
 				},
 			},
 			want: PreemptibleType,
@@ -326,7 +326,7 @@ func Test_populate_pricing(t *testing.T) {
 			Sku:           "M6UGCCQ3CDJQAA37",
 			OfferTermCode: "JRTCKXETXF",
 			PriceDimensions: map[string]*AWSRateCode{
-				"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7": &AWSRateCode{
+				"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7": {
 					Unit: "GB-Mo",
 					PricePerUnit: AWSCurrencyCode{
 						USD: "0.0800000000",
@@ -465,7 +465,7 @@ func Test_populate_pricing(t *testing.T) {
 			Sku:           "R83VXG9NAPDASEGN",
 			OfferTermCode: "5Y9WH78GDR",
 			PriceDimensions: map[string]*AWSRateCode{
-				"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6": &AWSRateCode{
+				"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6": {
 					Unit: "GB-Mo",
 					PricePerUnit: AWSCurrencyCode{
 						USD: "",

+ 134 - 0
pkg/cloud/aws/s3configuration.go

@@ -0,0 +1,134 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+type S3Configuration struct {
+	Bucket     string     `json:"bucket"`
+	Region     string     `json:"region"`
+	Account    string     `json:"account"`
+	Authorizer Authorizer `json:"authorizer"`
+}
+
+func (s3c *S3Configuration) Validate() error {
+	// Validate Authorizer
+	if s3c.Authorizer == nil {
+		return fmt.Errorf("S3Configuration: missing Authorizer")
+	}
+
+	err := s3c.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("S3Configuration: %s", err)
+	}
+
+	// Validate base properties
+	if s3c.Bucket == "" {
+		return fmt.Errorf("S3Configuration: missing bucket")
+	}
+
+	if s3c.Region == "" {
+		return fmt.Errorf("S3Configuration: missing region")
+	}
+
+	if s3c.Account == "" {
+		return fmt.Errorf("S3Configuration: missing account")
+	}
+
+	return nil
+}
+
+func (s3c *S3Configuration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*S3Configuration)
+	if !ok {
+		return false
+	}
+
+	if s3c.Authorizer != nil {
+		if !s3c.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if s3c.Bucket != thatConfig.Bucket {
+		return false
+	}
+
+	if s3c.Region != thatConfig.Region {
+		return false
+	}
+
+	if s3c.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+// Sanitize returns a copy of the configuration safe for display or
+// serialization, with any secrets held by the Authorizer redacted.
+func (s3c *S3Configuration) Sanitize() config.Config {
+	sanitized := &S3Configuration{
+		Bucket:  s3c.Bucket,
+		Region:  s3c.Region,
+		Account: s3c.Account,
+	}
+	// Guard against a nil Authorizer (possible before Validate is called)
+	// to avoid a method call on a nil interface, which would panic.
+	if s3c.Authorizer != nil {
+		sanitized.Authorizer = s3c.Authorizer.Sanitize().(Authorizer)
+	}
+	return sanitized
+}
+
+func (s3c *S3Configuration) Key() string {
+	return fmt.Sprintf("%s/%s", s3c.Account, s3c.Bucket)
+}
+
+func (s3c *S3Configuration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	bucket, err := config.GetInterfaceValue[string](fmap, "bucket")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Bucket = bucket
+
+	region, err := config.GetInterfaceValue[string](fmap, "region")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Region = region
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Account = account
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("S3Configuration: UnmarshalJSON: %s", err.Error())
+	}
+	s3c.Authorizer = authorizer
+
+	return nil
+}
+
+func (s3c *S3Configuration) CreateAWSConfig() (aws.Config, error) {
+	return s3c.Authorizer.CreateAWSConfig(s3c.Region)
+}

+ 40 - 0
pkg/cloud/aws/s3connection.go

@@ -0,0 +1,40 @@
+package aws
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
+type S3Connection struct {
+	S3Configuration
+}
+
+func (s3c *S3Connection) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3Connection)
+	if !ok {
+		return false
+	}
+
+	return s3c.S3Configuration.Equals(&thatConfig.S3Configuration)
+}
+
+func (s3c *S3Connection) GetS3Client() (*s3.Client, error) {
+	cfg, err := s3c.CreateAWSConfig()
+	if err != nil {
+		return nil, err
+	}
+	return s3.NewFromConfig(cfg), nil
+}
+
+// ListObjects retrieves the object listing for the configured bucket using
+// the provided S3 client.
+func (s3c *S3Connection) ListObjects(cli *s3.Client) (*s3.ListObjectsOutput, error) {
+	input := &s3.ListObjectsInput{
+		Bucket: aws.String(s3c.Bucket),
+	}
+	objs, err := cli.ListObjects(context.TODO(), input)
+	if err != nil {
+		return nil, err
+	}
+	return objs, nil
+}

+ 387 - 0
pkg/cloud/aws/s3connection_test.go

@@ -0,0 +1,387 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestS3Configuration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   S3Configuration
+		expected error
+	}{
+		"valid config access key": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: nil,
+		},
+		"valid config service account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID: "id",
+				},
+			},
+			expected: fmt.Errorf("S3Configuration: AccessKey: missing Secret"),
+		},
+		"missing Authorizer": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("S3Configuration: missing Authorizer"),
+		},
+		"missing bucket": {
+			config: S3Configuration{
+				Bucket:     "",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing bucket"),
+		},
+		"missing region": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing region"),
+		},
+		"missing account": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: fmt.Errorf("S3Configuration: missing account"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     S3Configuration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: true,
+		},
+		"different Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing both Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left Authorizer": {
+			left: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+			expected: false,
+		},
+		"missing right Authorizer": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different bucket": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket2",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different region": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region2",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account2",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+			right: &AccessKey{
+				ID:     "id",
+				Secret: "secret",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestS3Configuration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config S3Configuration
+	}{
+		"Empty Config": {
+			config: S3Configuration{},
+		},
+		"AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AccessKey{
+					ID:     "id",
+					Secret: "secret",
+				},
+			},
+		},
+
+		"ServiceAccount": {
+			config: S3Configuration{
+				Bucket:     "bucket",
+				Region:     "region",
+				Account:    "account",
+				Authorizer: &ServiceAccount{},
+			},
+		},
+		"AssumeRole with AccessKey": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AccessKey{
+						ID:     "id",
+						Secret: "secret",
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+		"AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &ServiceAccount{},
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"RoleArnNil": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: nil,
+					RoleARN:    "12345",
+				},
+			},
+		},
+		"AssumeRole with AssumeRole with ServiceAccount": {
+			config: S3Configuration{
+				Bucket:  "bucket",
+				Region:  "region",
+				Account: "account",
+				Authorizer: &AssumeRole{
+					Authorizer: &AssumeRole{
+						RoleARN:    "12345",
+						Authorizer: &ServiceAccount{},
+					},
+					RoleARN: "12345",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &S3Configuration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 181 - 0
pkg/cloud/aws/s3selectquerier.go

@@ -0,0 +1,181 @@
+package aws
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/stringutil"
+)
+
// S3SelectQuerier executes S3 Select SQL queries against CUR (Cost and Usage
// Report) objects stored in the bucket described by its embedded S3Connection.
type S3SelectQuerier struct {
	S3Connection
}
+
+func (s3sq *S3SelectQuerier) Equals(config config.Config) bool {
+	thatConfig, ok := config.(*S3SelectQuerier)
+	if !ok {
+		return false
+	}
+
+	return s3sq.S3Connection.Equals(&thatConfig.S3Connection)
+}
+
+func (s3sq *S3SelectQuerier) Query(query string, queryKeys []string, cli *s3.Client, fn func(*csv.Reader) error) error {
+	for _, queryKey := range queryKeys {
+		reader, err2 := s3sq.fetchCSVReader(query, queryKey, cli, s3Types.FileHeaderInfoUse)
+		if err2 != nil {
+			return err2
+		}
+		err2 = fn(reader)
+		if err2 != nil {
+			return err2
+		}
+	}
+
+	return nil
+}
+
+// GetQueryKeys returns a list of s3 object names, where the there are 1 object for each month within the range between
+// start and end
+func (s3sq *S3SelectQuerier) GetQueryKeys(start, end time.Time, client *s3.Client) ([]string, error) {
+	objs, err := s3sq.ListObjects(client)
+	if err != nil {
+		return nil, err
+	}
+
+	monthStrings, err := getMonthStrings(start, end)
+	if err != err {
+		return nil, err
+	}
+
+	var queryKeys []string
+	// Find all matching "csv.gz" files per monthString
+	for _, monthStr := range monthStrings {
+		for _, obj := range objs.Contents {
+			if strings.Contains(*obj.Key, monthStr) && strings.HasSuffix(*obj.Key, ".csv.gz") {
+				queryKeys = append(queryKeys, *obj.Key)
+			}
+		}
+	}
+
+	if len(queryKeys) == 0 {
+		return nil, fmt.Errorf("no CUR files for given time range")
+	}
+
+	return queryKeys, nil
+}
+
+func (s3sq *S3SelectQuerier) fetchCSVReader(query string, queryKey string, client *s3.Client, fileHeaderInfo s3Types.FileHeaderInfo) (*csv.Reader, error) {
+	input := &s3.SelectObjectContentInput{
+		Bucket:         aws.String(s3sq.Bucket),
+		Key:            aws.String(queryKey),
+		Expression:     aws.String(query),
+		ExpressionType: s3Types.ExpressionTypeSql,
+		InputSerialization: &s3Types.InputSerialization{
+			CompressionType: s3Types.CompressionTypeGzip,
+			CSV: &s3Types.CSVInput{
+				FileHeaderInfo: fileHeaderInfo,
+			},
+		},
+		OutputSerialization: &s3Types.OutputSerialization{
+			CSV: &s3Types.CSVOutput{},
+		},
+	}
+
+	res, err := client.SelectObjectContent(context.TODO(), input)
+	if err != nil {
+		return nil, err
+	}
+	resStream := res.GetStream()
+	// todo: this needs work
+	results, resultWriter := io.Pipe()
+	go func() {
+		defer resultWriter.Close()
+		defer resStream.Close()
+		resStream.Events()
+		for event := range resStream.Events() {
+			switch e := event.(type) {
+			case *s3Types.SelectObjectContentEventStreamMemberRecords:
+				resultWriter.Write(e.Value.Payload)
+			case *s3Types.SelectObjectContentEventStreamMemberEnd:
+				break
+			}
+
+		}
+	}()
+
+	if err := resStream.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read from SelectObjectContent EventStream, %v", err)
+	}
+
+	return csv.NewReader(results), nil
+}
+
+func getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	dateTemplate := "%d%02d01-%d%02d01/"
+	// set to first of the month
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	nextMonth := currMonth.AddDate(0, 1, 0)
+	monthStr := fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+
+	// Create string for end condition
+	endMonth := end.AddDate(0, 0, -end.Day()+1)
+	endNextMonth := endMonth.AddDate(0, 1, 0)
+	endStr := fmt.Sprintf(dateTemplate, endMonth.Year(), int(endMonth.Month()), endNextMonth.Year(), int(endNextMonth.Month()))
+
+	var monthStrs []string
+	monthStrs = append(monthStrs, monthStr)
+
+	for monthStr != endStr {
+		currMonth = nextMonth
+		nextMonth = nextMonth.AddDate(0, 1, 0)
+		monthStr = fmt.Sprintf(dateTemplate, currMonth.Year(), int(currMonth.Month()), nextMonth.Year(), int(nextMonth.Month()))
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+// GetCSVRowValue retrieve value from athena row based on column names and used stringutil.Bank() to prevent duplicate
+// allocation of strings
+func GetCSVRowValue(row []string, queryColumnIndexes map[string]int, columnName string) string {
+	if row == nil {
+		return ""
+	}
+	columnIndex, ok := queryColumnIndexes[columnName]
+	if !ok {
+		return ""
+	}
+	return stringutil.Bank(row[columnIndex])
+}
+
// GetCSVRowValueFloat retrieves the value for columnName from a CSV row using
// the column-name -> index mapping and parses it as a float64. It returns an
// error when the row is nil, the column is unknown, the mapped index is out
// of range for this row (previously a panic), or the value fails to parse.
func GetCSVRowValueFloat(row []string, queryColumnIndexes map[string]int, columnName string) (float64, error) {
	if row == nil {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: nil row")
	}
	columnIndex, ok := queryColumnIndexes[columnName]
	if !ok {
		return 0.0, fmt.Errorf("getCSVRowValueFloat: missing column index: %s", columnName)
	}
	if columnIndex < 0 || columnIndex >= len(row) {
		// guard against short rows to avoid an index-out-of-range panic
		return 0.0, fmt.Errorf("getCSVRowValueFloat: column index %d out of range for row of length %d", columnIndex, len(row))
	}
	cost, err := strconv.ParseFloat(row[columnIndex], 64)
	if err != nil {
		return cost, fmt.Errorf("getCSVRowValueFloat: failed to parse %s: '%s': %s", columnName, row[columnIndex], err.Error())
	}
	return cost, nil
}

+ 80 - 0
pkg/cloud/azure/authorizer.go

@@ -0,0 +1,80 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud/config"
+)
+
// AccessKeyAuthorizerType is the config type tag identifying the AccessKey Authorizer.
const AccessKeyAuthorizerType = "AzureAccessKey"

// Authorizer extends config.Authorizer with the ability to produce Azure
// blob-storage credentials.
type Authorizer interface {
	config.Authorizer
	GetBlobCredentials() (azblob.Credential, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case AccessKeyAuthorizerType:
+		return &AccessKey{}, nil
+	default:
+		return nil, fmt.Errorf("azure: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// AccessKey is an Authorizer that uses an Azure storage account name and
// shared access key to build blob-storage credentials.
type AccessKey struct {
	AccessKey string `json:"accessKey"`
	Account   string `json:"account"`
}
+
+func (ak *AccessKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 3)
+	fmap[config.AuthorizerTypeProperty] = AccessKeyAuthorizerType
+	fmap["accessKey"] = ak.AccessKey
+	fmap["account"] = ak.Account
+	return json.Marshal(fmap)
+}
+
+func (ak *AccessKey) Validate() error {
+	if ak.AccessKey == "" {
+		return fmt.Errorf("AccessKey: missing access key")
+	}
+	if ak.Account == "" {
+		return fmt.Errorf("AccessKey: missing account")
+	}
+	return nil
+}
+
+func (ak *AccessKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*AccessKey)
+	if !ok {
+		return false
+	}
+
+	if ak.AccessKey != thatConfig.AccessKey {
+		return false
+	}
+	if ak.Account != thatConfig.Account {
+		return false
+	}
+
+	return true
+}
+
+func (ak *AccessKey) Sanitize() config.Config {
+	return &AccessKey{
+		AccessKey: config.Redacted,
+		Account:   ak.Account,
+	}
+}
+
+func (ak *AccessKey) GetBlobCredentials() (azblob.Credential, error) {
+	// Create a default request pipeline using your storage account name and account key.
+	return azblob.NewSharedKeyCredential(ak.Account, ak.AccessKey)
+}

+ 322 - 0
pkg/cloud/azure/billingexportparser.go

@@ -0,0 +1,322 @@
+package azure
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opencost/opencost/pkg/kubecost"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// azureDateLayout is the usage-date format used by standard (pay-as-you-go) exports.
const azureDateLayout = "2006-01-02"
// AzureEnterpriseDateLayout is the usage-date format used by enterprise exports.
const AzureEnterpriseDateLayout = "01/02/2006"

// groupRegex matches each "/segment" group of an Azure resource ID.
var groupRegex = regexp.MustCompile("(/[^/]+)")
+
// BillingRowValues holder for Azure Billing Values
type BillingRowValues struct {
	Date            time.Time         // usage date of the line item
	MeterCategory   string            // e.g. "Virtual Machines", "Storage"
	SubscriptionID  string            // subscription GUID
	InvoiceEntityID string            // billing account ID (falls back to the subscription ID when absent)
	InstanceID      string            // Azure resource ID of the billed instance
	Service         string            // consumed service, e.g. "Microsoft.Compute"
	Tags            map[string]string // resource tags parsed from the export row
	AdditionalInfo  map[string]any    // parsed AdditionalInfo JSON payload
	Cost            float64           // pay-as-you-go cost (falls back to NetCost's column when absent)
	NetCost         float64           // pre-tax / billing-currency cost
}
+
+func (brv *BillingRowValues) IsCompute(category string) bool {
+	if category == kubecost.ComputeCategory {
+		return true
+	}
+
+	if category == kubecost.StorageCategory || category == kubecost.NetworkCategory {
+		if brv.Service == "Microsoft.Compute" {
+			return true
+		}
+	}
+	if category == kubecost.NetworkCategory && brv.MeterCategory == "Virtual Network" {
+		return true
+	}
+	return false
+}
+
// BillingExportParser holds indexes of relevant fields in an Azure Billing
// CSV, in addition to the correct date format for that export flavor.
type BillingExportParser struct {
	Date            int
	MeterCategory   int
	InvoiceEntityID int
	SubscriptionID  int
	InstanceID      int
	Service         int
	Tags            int
	AdditionalInfo  int
	Cost            int
	NetCost         int
	DateFormat      string
}
+
// getParenContentRegEx matches the parenthesized English header name, e.g.
// "SubscriptionGuid" in "Abonnement-GUID (SubscriptionGuid)". A raw string
// literal avoids the double-escaped regex metacharacters.
var getParenContentRegEx = regexp.MustCompile(`\((.*?)\)`)
+
// NewBillingParseSchema builds a BillingExportParser from the header row of
// an Azure billing export CSV. It tolerates the several export flavors
// (pay-as-you-go, enterprise, localized headers, BOM-prefixed files) by
// matching lowercased header names — preferring the English name given in
// parentheses when present — and returns an error when a required column
// cannot be located. Some columns have documented fallbacks: the billing
// account ID falls back to the subscription ID and the Cost column falls
// back to the Net Cost column.
func NewBillingParseSchema(headers []string) (*BillingExportParser, error) {
	// clear BOM from headers
	if len(headers) != 0 {
		headers[0] = strings.TrimPrefix(headers[0], "\xEF\xBB\xBF")
	}

	headerIndexes := map[string]int{}
	for i, header := range headers {
		// Azure Headers in different regions will have english headers in parentheses
		match := getParenContentRegEx.FindStringSubmatch(header)
		if len(match) != 0 {
			header = match[len(match)-1]
		}
		headerIndexes[strings.ToLower(header)] = i
	}

	abp := &BillingExportParser{}

	// Set Date Column and Date Format; the date column name also determines
	// which date layout the export uses
	if i, ok := headerIndexes["usagedatetime"]; ok {
		abp.Date = i
		abp.DateFormat = azureDateLayout
	} else if j, ok2 := headerIndexes["date"]; ok2 {
		abp.Date = j
		abp.DateFormat = AzureEnterpriseDateLayout
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Date field")
	}

	// set Subscription ID
	if i, ok := headerIndexes["subscriptionid"]; ok {
		abp.SubscriptionID = i
	} else if j, ok2 := headerIndexes["subscriptionguid"]; ok2 {
		abp.SubscriptionID = j
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Subscription ID field")
	}

	// Set Billing ID
	if i, ok := headerIndexes["billingaccountid"]; ok {
		abp.InvoiceEntityID = i
	} else if j, ok2 := headerIndexes["billingaccountname"]; ok2 {
		abp.InvoiceEntityID = j
	} else {
		// if no billing ID column is present use subscription ID
		abp.InvoiceEntityID = abp.SubscriptionID
	}

	// Set Instance ID
	if i, ok := headerIndexes["instanceid"]; ok {
		abp.InstanceID = i
	} else if j, ok2 := headerIndexes["instancename"]; ok2 {
		abp.InstanceID = j
	} else if k, ok3 := headerIndexes["resourceid"]; ok3 {
		abp.InstanceID = k
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Instance ID field")
	}

	// Set Meter Category
	if i, ok := headerIndexes["metercategory"]; ok {
		abp.MeterCategory = i
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Meter Category field")
	}

	// Set Tags
	if i, ok := headerIndexes["tags"]; ok {
		abp.Tags = i
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Tags field")
	}

	// Set Additional Info
	if i, ok := headerIndexes["additionalinfo"]; ok {
		abp.AdditionalInfo = i
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Additional Info field")
	}

	// Set Service
	if i, ok := headerIndexes["consumedservice"]; ok {
		abp.Service = i
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Service field")
	}

	// Set Net Cost
	if i, ok := headerIndexes["costinbillingcurrency"]; ok {
		abp.NetCost = i
	} else if j, ok2 := headerIndexes["pretaxcost"]; ok2 {
		abp.NetCost = j
	} else if k, ok3 := headerIndexes["cost"]; ok3 {
		abp.NetCost = k
	} else {
		return nil, fmt.Errorf("NewBillingParseSchema: failed to find Net Cost field")
	}

	// Set Cost
	if i, ok := headerIndexes["paygcostinbillingcurrency"]; ok {
		abp.Cost = i
	} else {
		// if no Cost column is present use Net Cost column
		abp.Cost = abp.NetCost
	}

	return abp, nil
}
+
// ParseRow converts one record of a billing export into BillingRowValues.
// It returns nil (logging the reason) when the row cannot be parsed, and nil
// silently when the usage date falls outside the [start, end) window.
//
// On a date-parse failure the parser flips its DateFormat between the
// standard and enterprise layouts and retries once, keeping the successful
// layout for subsequent rows — note this mutates the receiver.
func (bep *BillingExportParser) ParseRow(start, end time.Time, record []string) *BillingRowValues {
	usageDate, err := time.Parse(bep.DateFormat, record[bep.Date])
	if err != nil {
		// try other format, and switch if successful
		if bep.DateFormat == azureDateLayout {
			bep.DateFormat = AzureEnterpriseDateLayout
		} else {
			bep.DateFormat = azureDateLayout
		}
		usageDate, err = time.Parse(bep.DateFormat, record[bep.Date])
		// If parse still fails then return line
		if err != nil {
			log.Errorf("failed to parse usage date: '%s'", record[bep.Date])
			return nil
		}
	}

	// skip if usage data isn't in subject window
	if usageDate.Before(start) || !usageDate.Before(end) {
		return nil
	}

	cost, err := strconv.ParseFloat(record[bep.Cost], 64)
	if err != nil {
		log.Errorf("failed to parse cost: '%s'", record[bep.Cost])
		return nil
	}

	netCost, err := strconv.ParseFloat(record[bep.NetCost], 64)
	if err != nil {
		log.Errorf("failed to parse net cost: '%s'", record[bep.NetCost])
		return nil
	}

	// AdditionalInfo is a JSON object; some export flavors omit the outer
	// braces, so they are restored before unmarshalling. Parse failures are
	// logged but do not discard the row.
	additionalInfo := make(map[string]any)
	additionalInfoJson := encloseInBrackets(record[bep.AdditionalInfo])
	if additionalInfoJson != "" {
		err = json.Unmarshal([]byte(additionalInfoJson), &additionalInfo)
		if err != nil {
			log.Errorf("Could not parse additional information %s, with Error: %s", additionalInfoJson, err.Error())
		}
	}

	// Tags are parsed the same way; only non-empty string values are kept.
	tags := make(map[string]string)
	tagJson := encloseInBrackets(record[bep.Tags])
	if tagJson != "" {
		tagsAny := make(map[string]any)
		err = json.Unmarshal([]byte(tagJson), &tagsAny)
		if err != nil {
			log.Errorf("Could not parse tags: %v, with Error: %s", tagJson, err.Error())
		}

		for name, value := range tagsAny {
			if valueStr, ok := value.(string); ok && valueStr != "" {
				tags[name] = valueStr
			}
		}
	}

	return &BillingRowValues{
		Date:            usageDate,
		MeterCategory:   record[bep.MeterCategory],
		SubscriptionID:  record[bep.SubscriptionID],
		InvoiceEntityID: record[bep.InvoiceEntityID],
		InstanceID:      record[bep.InstanceID],
		Service:         record[bep.Service],
		Tags:            tags,
		AdditionalInfo:  additionalInfo,
		Cost:            cost,
		NetCost:         netCost,
	}
}
+
// encloseInBrackets wraps a JSON fragment in curly braces unless it is empty
// or already enclosed in them.
func encloseInBrackets(jsonString string) string {
	if jsonString == "" {
		return jsonString
	}
	if strings.HasPrefix(jsonString, "{") && strings.HasSuffix(jsonString, "}") {
		return jsonString
	}
	return "{" + jsonString + "}"
}
+
// AzureSetProviderID derives the provider ID for a billing row. VM rows carry
// a VMName (or VmName) in AdditionalInfo and get an "azure://" resource-ID
// prefix plus, for scale-set members, a "/virtualMachines/<n>" suffix;
// Virtual Network rows with an IpAddress use that address directly. Storage
// rows are matched back to their creating node pool via creationSource tags
// when present, otherwise the last resource-ID segment is used. Everything
// else gets the "azure://" resource-ID form.
func AzureSetProviderID(abv *BillingRowValues) string {
	category := SelectAzureCategory(abv.MeterCategory)
	if value, ok := abv.AdditionalInfo["VMName"]; ok {
		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
	} else if value, ok := abv.AdditionalInfo["VmName"]; ok {
		// some export flavors use the alternate "VmName" capitalization
		return "azure://" + resourceGroupToLowerCase(abv.InstanceID) + getVMNumberForVMSS(fmt.Sprintf("%v", value))
	} else if value2, ook := abv.AdditionalInfo["IpAddress"]; ook && abv.MeterCategory == "Virtual Network" {
		return fmt.Sprintf("%v", value2)
	}

	if category == kubecost.StorageCategory {
		// creationSource tags link a disk back to the node pool that made it
		if value2, ok2 := abv.Tags["creationSource"]; ok2 {
			creationSource := fmt.Sprintf("%v", value2)
			return strings.TrimPrefix(creationSource, "aks-")
		} else if value2, ok2 := abv.Tags["aks-managed-creationSource"]; ok2 {
			creationSource := fmt.Sprintf("%v", value2)
			return strings.TrimPrefix(creationSource, "vmssclient-")
		} else {
			return getSubStringAfterFinalSlash(abv.InstanceID)
		}
	}
	return "azure://" + resourceGroupToLowerCase(abv.InstanceID)
}
+
+func SelectAzureCategory(meterCategory string) string {
+	if meterCategory == "Virtual Machines" {
+		return kubecost.ComputeCategory
+	} else if meterCategory == "Storage" {
+		return kubecost.StorageCategory
+	} else if meterCategory == "Load Balancer" || meterCategory == "Bandwidth" || meterCategory == "Virtual Network" {
+		return kubecost.NetworkCategory
+	} else {
+		return kubecost.OtherCategory
+	}
+}
+
+func resourceGroupToLowerCase(providerID string) string {
+	var sb strings.Builder
+	for matchNum, group := range groupRegex.FindAllString(providerID, -1) {
+		if matchNum == 3 {
+			sb.WriteString(strings.ToLower(group))
+		} else {
+			sb.WriteString(group)
+		}
+	}
+	return sb.String()
+}
+
+// Returns the substring after the final "/" in a string
+func getSubStringAfterFinalSlash(id string) string {
+	index := strings.LastIndex(id, "/")
+	if index == -1 {
+		log.DedupedInfof(5, "azure.getSubStringAfterFinalSlash: failed to parse %s", id)
+		return id
+	}
+	return id[index+1:]
+}
+
// getVMNumberForVMSS extracts the instance number from a VMSS member name of
// the form "<scale-set>_<n>" and returns it as a "/virtualMachines/<n>" path
// suffix; names without an underscore yield "".
func getVMNumberForVMSS(vmName string) string {
	if parts := strings.Split(vmName, "_"); len(parts) > 1 {
		return "/virtualMachines/" + parts[1]
	}
	return ""
}

+ 194 - 0
pkg/cloud/azure/billingexportparser_test.go

@@ -0,0 +1,194 @@
+package azure
+
+import (
+	"encoding/csv"
+	"os"
+	"testing"
+	"time"
+)
+
+const billingExportPath = "./resources/billingexports/"
+const headerSetPath = billingExportPath + "headersets/"
+const valueCasesPath = billingExportPath + "values/"
+
+type TestCSVRetriever struct {
+	CSVName string
+}
+
+func (tcr TestCSVRetriever) getCSVReaders(start, end time.Time) ([]*csv.Reader, error) {
+	csvFile, err := os.Open(tcr.CSVName)
+	if err != nil {
+		return nil, err
+	}
+	reader := csv.NewReader(csvFile)
+	return append([]*csv.Reader{}, reader), nil
+}
+
+func Test_NewBillingExportParser(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected BillingExportParser
+	}{
+		"English Headers": {
+			input: "PayAsYouGo.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"Enterprise Camel Headers": {
+			input: "EnterpriseCamel.csv",
+			expected: BillingExportParser{
+				Date:            11,
+				MeterCategory:   18,
+				InvoiceEntityID: 0,
+				SubscriptionID:  23,
+				InstanceID:      29,
+				Service:         15,
+				Tags:            45,
+				AdditionalInfo:  44,
+				Cost:            38,
+				NetCost:         38,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"Enterprise Headers": {
+			input: "Enterprise.csv",
+			expected: BillingExportParser{
+				Date:            7,
+				MeterCategory:   9,
+				InvoiceEntityID: 39,
+				SubscriptionID:  3,
+				InstanceID:      20,
+				Service:         19,
+				Tags:            21,
+				AdditionalInfo:  23,
+				Cost:            17,
+				NetCost:         17,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"German Headers": {
+			input: "German.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+		"YA Headers": {
+			input: "YA.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      AzureEnterpriseDateLayout,
+			},
+		},
+		"BOM Prefixed Headers": {
+			input: "BOM.csv",
+			expected: BillingExportParser{
+				Date:            3,
+				MeterCategory:   4,
+				InvoiceEntityID: 0,
+				SubscriptionID:  0,
+				InstanceID:      14,
+				Service:         12,
+				Tags:            15,
+				AdditionalInfo:  17,
+				Cost:            11,
+				NetCost:         11,
+				DateFormat:      azureDateLayout,
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := TestCSVRetriever{
+				CSVName: headerSetPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+			headers, _ := reader.Read()
+			abp, err := NewBillingParseSchema(headers)
+			if err != nil {
+				t.Errorf("failed to create Azure Billing Parser from headers with error: %s", err.Error())
+			}
+
+			if abp.DateFormat != tc.expected.DateFormat {
+				t.Errorf("Azure Billing Parser does not have expected DateFormat index. Expected: %s, Actual: %s", tc.expected.DateFormat, abp.DateFormat)
+			}
+
+			if abp.Date != tc.expected.Date {
+				t.Errorf("Azure Billing Parser does not have expected Date index. Expected: %d, Actual: %d", tc.expected.Date, abp.Date)
+			}
+
+			if abp.MeterCategory != tc.expected.MeterCategory {
+				t.Errorf("Azure Billing Parser does not have expected MeterCategory index. Expected: %d, Actual: %d", tc.expected.MeterCategory, abp.MeterCategory)
+			}
+
+			if abp.InvoiceEntityID != tc.expected.InvoiceEntityID {
+				t.Errorf("Azure Billing Parser does not have expected InvoiceEntityID index. Expected: %d, Actual: %d", tc.expected.InvoiceEntityID, abp.InvoiceEntityID)
+			}
+
+			if abp.SubscriptionID != tc.expected.SubscriptionID {
+				t.Errorf("Azure Billing Parser does not have expected SubscriptionID index. Expected: %d, Actual: %d", tc.expected.SubscriptionID, abp.SubscriptionID)
+			}
+
+			if abp.InstanceID != tc.expected.InstanceID {
+				t.Errorf("Azure Billing Parser does not have expected InstanceID index. Expected: %d, Actual: %d", tc.expected.InstanceID, abp.InstanceID)
+			}
+
+			if abp.Service != tc.expected.Service {
+				t.Errorf("Azure Billing Parser does not have expected Service index. Expected: %d, Actual: %d", tc.expected.Service, abp.Service)
+			}
+
+			if abp.Tags != tc.expected.Tags {
+				t.Errorf("Azure Billing Parser does not have expected Tags index. Expected: %d, Actual: %d", tc.expected.Tags, abp.Tags)
+			}
+
+			if abp.AdditionalInfo != tc.expected.AdditionalInfo {
+				t.Errorf("Azure Billing Parser does not have expected AdditionalInfo index. Expected: %d, Actual: %d", tc.expected.AdditionalInfo, abp.AdditionalInfo)
+			}
+
+			if abp.Cost != tc.expected.Cost {
+				t.Errorf("Azure Billing Parser does not have expected Cost index. Expected: %d, Actual: %d", tc.expected.Cost, abp.Cost)
+			}
+
+			if abp.NetCost != tc.expected.NetCost {
+				t.Errorf("Azure Billing Parser does not have expected NetCost index. Expected: %d, Actual: %d", tc.expected.NetCost, abp.NetCost)
+			}
+		})
+	}
+}

+ 0 - 0
pkg/cloud/azure/client.go → pkg/cloud/azure/pricesheetclient.go


+ 0 - 0
pkg/cloud/azure/downloader.go → pkg/cloud/azure/pricesheetdownloader.go


+ 0 - 0
pkg/cloud/azure/downloader_test.go → pkg/cloud/azure/pricesheetdownloader_test.go


+ 3 - 1
pkg/cloud/azure/azureprovider.go → pkg/cloud/azure/provider.go

@@ -489,6 +489,7 @@ func (k *azureKey) GetGPUCount() string {
 }
 
 // AzureStorageConfig Represents an azure storage config
+// Deprecated: v1.104 Use StorageConfiguration instead
 type AzureStorageConfig struct {
 	SubscriptionId string `json:"azureSubscriptionID"`
 	AccountName    string `json:"azureStorageAccount"`
@@ -517,7 +518,8 @@ type AzureAppKey struct {
 	Tenant      string `json:"tenant"`
 }
 
-// Azure service key for a specific subscription
+// AzureServiceKey service key for a specific subscription
+// Deprecated: v1.104 Use ServiceKey instead
 type AzureServiceKey struct {
 	SubscriptionID string       `json:"subscriptionId"`
 	ServiceKey     *AzureAppKey `json:"serviceKey"`

+ 0 - 0
pkg/cloud/azure/azureprovider_test.go → pkg/cloud/azure/provider_test.go


+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/BOM.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+,,,2022-11-03,,,,,,,,,,,,,,,,,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/Enterprise.csv

@@ -0,0 +1,2 @@
+InvoiceSectionName,AccountName,AccountOwnerId,SubscriptionId,SubscriptionName,ResourceGroup,ResourceLocation,Date,ProductName,MeterCategory,MeterSubCategory,MeterId,MeterName,MeterRegion,UnitOfMeasure,Quantity,EffectivePrice,CostInBillingCurrency,CostCenter,ConsumedService,ResourceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ResourceName,ReservationId,ReservationName,UnitPrice,ProductOrderId,ProductOrderName,Term,PublisherType,PublisherName,ChargeType,Frequency,PricingModel,AvailabilityZone,BillingAccountId,BillingAccountName,BillingCurrencyCode,BillingPeriodStartDate,BillingPeriodEndDate,BillingProfileId,BillingProfileName,InvoiceSectionId,IsAzureCreditEligible,PartNumber,PayGPrice,PlanName,ServiceFamily,CostAllocationRuleName
+Unassigned,Azure Service,email@email.com,11111111-12ab-34dc-56ef-123456abcdef,Example-Subscription,Example-Resource-Group,canadacentral,02/02/2021,Virtual Machines Ev3/ESv3 Series - E4 v3/E4s v3 - CA Central,Virtual Machines,Ev3/ESv3 Series,3dbc3a0c-32b6-4c4d-adbb-3ee577aaba4d,E4 v3/E4s v3,CA Central,10 Hours,10,1.2,0,,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-defaultpool-12345678-vmss,"""createOperationID"": ""11111111-12ab-34dc-56ef-123456abcdef"",""creationSource"": ""vmssclient-aks-defaultpool-12345678-vmss"",""orchestrator"": ""Kubernetes:1.19.9"",""poolName"": ""defaultpool"",""resourceNameSuffix"": ""12345678""",MS-AZR-0017P,"{""UsageType"":""ComputeHR"",""ImageType"":""Canonical"",""ServiceType"":""Standard_E4s_v3"",""VMName"":""aks-defaultpool-12345678-vmss_2"",""VMProperties"":null,""VCPUs"":4,""CPUs"":0,""ReservationOrderId"":""11111111-12ab-34dc-56ef-123456abcdef"",""ReservationId"":""4f18e7c9-9ae8-4251-886b-8bd942a41bdf"",""ConsumptionMeter"":""11111111-12ab-34dc-56ef-123456abcdef"",""RINormalizationRatio"":2.0}",,Canonical,aks-defaultpool-12345678-vmss,11111111-12ab-34dc-56ef-123456abcdef,ExampleReservationName,0.1,b13f2808-a13e-49a3-a899-06d83b8f5d32,"Reserved VM Instance, Standard_E2s_v3, CA Central, 3 Years",36,Azure,,Usage,UsageBased,,,12345678,Example Company,CAD,05/01/2021,05/31/2021,12345678,Example Company,,TRUE,ABC-12345,0,,Compute,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/EnterpriseCamel.csv

@@ -0,0 +1,2 @@
+billingAccountName,partnerName,resellerName,resellerMpnId,customerTenantId,customerName,costCenter,billingPeriodEndDate,billingPeriodStartDate,servicePeriodEndDate,servicePeriodStartDate,date,serviceFamily,productOrderId,productOrderName,consumedService,meterId,meterName,meterCategory,meterSubCategory,meterRegion,ProductId,ProductName,SubscriptionId,subscriptionName,publisherType,publisherId,publisherName,resourceGroupName,ResourceId,resourceLocation,location,effectivePrice,quantity,unitOfMeasure,chargeType,billingCurrency,pricingCurrency,costInBillingCurrency,costInUsd,exchangeRatePricingToBilling,exchangeRateDate,serviceInfo1,serviceInfo2,additionalInfo,tags,PayGPrice,frequency,term,reservationId,reservationName,pricingModel
+,PartnerName,,,11111111-1111-1111-1111-123456789012,Customer Name,,,,02/01/2021,02/01/2021,02/02/2021,Networking,11111111-1111-1111-1111-123456789012,Azure plan,Microsoft.Network,11111111-1111-1111-1111-123456789012,Dynamic Public IP,Virtual Network,IP Addresses,,DZH318Z0BNXN0032,IP Addresses - Basic,11111111-1111-1111-1111-123456789012,Microsoft Azure,Azure,,Microsoft,databricks,/subscriptions/11111111-1111-1111-1111-123456789012/resourceGroups/testspot/providers/Microsoft.Storage/storageAccounts/storename,WESTUS,US West,0.004,3,1 Hour,Usage,USD,USD,0.012,0.012,1,3/1/21,,,,"{  ""ClusterId"": ""0103-212455-stash756"",  ""ServiceType"": ""DataAnalysis"",  ""ClusterName"": ""SrgExtractsPartDeux"",  ""databricks-instance-name"": ""0c1ef59764casdf0c0e094e1cc"",  ""Creator"": ""email@email.com"",  ""Vendor"": ""Databricks"",  ""DatabricksEnvironment"": ""workerenv-6448504491843616""}",0.004,UsageBased,,,,

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/German.csv

@@ -0,0 +1,2 @@
+Abonnement-GUID (SubscriptionGuid),Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),UsageDateTime (UsageDateTime),Kategorie der Verbrauchseinheit (MeterCategory),MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),PreTaxCost (PreTaxCost),Genutzter Dienst (ConsumedService),ResourceType (ResourceType),InstanceId (InstanceId),Tags (Tags),OfferId (OfferId),Zusätzliche Informationen (AdditionalInfo),Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/PayAsYouGo.csv

@@ -0,0 +1,2 @@
+SubscriptionGuid,ResourceGroup,ResourceLocation,UsageDateTime,MeterCategory,MeterSubcategory,MeterId,MeterName,MeterRegion,UsageQuantity,ResourceRate,PreTaxCost,ConsumedService,ResourceType,InstanceId,Tags,OfferId,AdditionalInfo,ServiceInfo1,ServiceInfo2,ServiceName,ServiceTier,Currency,UnitOfMeasure
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,2021-02-02,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/headersets/YA.csv

@@ -0,0 +1,2 @@
+subscriptionId,Ressourcengruppe (ResourceGroup),Ressourcenstandort (ResourceLocation),date,meterCategory,MeterSubcategory (MeterSubcategory),ID der Verbrauchseinheit (MeterId),Name der Verbrauchseinheit (MeterName),Region der Verbrauchseinheit (MeterRegion),UsageQuantity (UsageQuantity),Ressourcensatz (ResourceRate),costInBillingCurrency,consumedService,ResourceType (ResourceType),InstanceName,tags,OfferId (OfferId),additionalInfo,Dienstinformation 1 (ServiceInfo1),Dienstinformation 2 (ServiceInfo2),ServiceName,ServiceTier,Currency,Maßeinheit (UnitOfMeasure)
+11111111-12ab-34dc-56ef-123456abcdef,Example-Resource-Group,US East,02/02/2021,Load Balancer,Standard,27827eb0-7f60-4928-940b-f5fe15e7a4cb,Included LB Rules and Outbound Rules,,3,0.025,0.075,Microsoft.Network,Microsoft.Network/loadBalancers,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,,,,,Load Balancer,Std Load Balancer,USD,100 Hours

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/MissingBrackets.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456abcdef,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""","""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2"

+ 88 - 0
pkg/cloud/azure/resources/billingexports/values/Template.csv

@@ -0,0 +1,88 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Log Analytics,0,0,microsoft.operationalinsights,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourcegroups/defaultresourcegroup-eus/providers/microsoft.operationalinsights/workspaces/defaultworkspace-11111111-12ab-34dc-56ef-123456abcdef-eus,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001301934407093,0.001301934407093,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.0828,0.0828,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-agentpool-23456789-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.09,0.09,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0015896,0.0015896,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0000362,0.0000362,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.01236783717759,0.01236783717759,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000204,0.00000204,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0000045,0.0000045,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0821376,0.0821376,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000060000000000000000000,0.000000060000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000140000000000000000,0.000000140000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000020000000000000000000,0.000000020000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0013522,0.0013522,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000160000000000000000,0.000000160000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000100000000000000000,0.000000100000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.001686412831768,0.001686412831768,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.005,0.005,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,3.504,3.504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.125,0.125,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0004494,0.0004494,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-agentpool-45678901-0,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""e5c201c1-7acd-43c3-af5e-3480998c0776"",  ""CRPVMId"": ""0255b3e6-f280-4cb3-9664-ccbe86990e85""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.00000154,0.00000154,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.15,0.15,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/bc6b73c3-5689-4f72-9a15-103d0c48d98f,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Machines,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": null,  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0,  ""ReservationOrderId"": ""689aadb1-13ea-40bb-a8f9-e705dbe57543"",  ""ReservationId"": ""770228a7-62da-4155-802b-0422e1c62efc"",  ""ConsumptionMeter"": ""14fc9a21-4919-4cb1-b495-5666966556bc""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ec16b946-8778-49a4-8b9b-283bc90319ed"",  ""CRPVMId"": ""5163cb2c-2a32-4421-ab69-2a75ca69cf16""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0.000000040000000000000000000,0.000000040000000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""BY1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0002082,0.0002082,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd03,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-pushgateway"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd03"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000177,0.000177,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-192133aks-nodepool1-1921336OS__1_0a5e4b97e5ca4c2ab46328ca392a02f5,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0107136,0.0107136,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0032604,0.0032604,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd04,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd04"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.0013392,0.0013392,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd06,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd06"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Machines,0.146,0.146,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss"",""orchestrator"":""Kubernetes:1.15.7"",""poolName"":""nodepool1""}","{  ""UsageType"": ""ComputeHR"",  ""ImageType"": ""Canonical"",  ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VMProperties"": ""Microsoft.AKS.Compute.AKS.Linux.Billing"",  ""VCPUs"": 2,  ""CPUs"": 0}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0072,0.0072,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-aef001b536d4711ea86115a2af700dc9,"{""service"":""kubecost/kubecost-frontend-test""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.00000445,0.00000445,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.00992768780794,0.00992768780794,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0102672,0.0102672,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd00,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd00"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Virtual Network,0.14,0.14,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a4969d597c5674b4480ec987cc6b24a1,"{""service"":""kubecost/kubecost-frontend"",""kubernetes-cluster-name"":""kubernetes""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.052568064,0.052568064,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd07,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd07"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd08,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-cost-analyzer"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd08"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.000191,0.000191,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.0014714,0.0014714,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd01,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd01"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Load Balancer,0.575,0.575,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.0144,0.0144,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/kubernetes-a173cf24babf311e98b7f8e5ecb03810,"{""service"":""kubecost/kubecost-frontend""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Virtual Network,0.015,0.015,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/publicIPAddresses/7b21b77b-4ed1-474b-b068-6ab6d1ecf549,"{""owner"":""kubernetes"",""type"":""aks-slb-managed-outbound-ip""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Bandwidth,0,0,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-23456789-vmss,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes"",""poolName"":""agentpool""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""External"",  ""ContainerId"": ""1c8bb337-451e-487c-ac06-9f83cf69751f"",  ""CRPVMId"": ""2936d707-afda-4ba7-9166-9cac60faba7c""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.67455504,0.67455504,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-nodepool1-34567890-0_OsDisk_1_c523fe080d784f55a7cd3868bf989fde,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""nodepool1""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-01,Storage,0.00003615,0.00003615,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd02,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-server"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd02"",""created-by"":""kubernetes-azure-dd""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Bandwidth,0.000000280000000000000000,0.000000280000000000000000,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-34567890-0,"{""resourceNameSuffix"":""34567890"",""aksEngineVersion"":""v0.47.0-aks-gomod-55-aks"",""creationSource"":""aks-aks-nodepool1-34567890-0"",""orchestrator"":""Kubernetes"",""poolName"":""nodepool1""}","{  ""ResourceType"": ""Bandwidth"",  ""PipelineType"": ""v2"",  ""DataTransferDirection"": ""DataTrOut"",  ""DataCenter"": ""MNZ20"",  ""NetworkBucket"": ""CH1"",  ""ContainerId"": ""ff90fab7-1094-4325-89db-9c12a140131a"",  ""CRPVMId"": ""93b04f9b-4950-42cc-a42e-d72bc852d1e4""}"
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-229217aks-agentpool-2292178OS__1_7fcada7aa38e4d5ca6d15257b8998b7a,"{""resourceNameSuffix"":""23456789"",""aksEngineVersion"":""v0.47.0-aks-gomod-81-aks"",""creationSource"":""aks-aks-agentpool-23456789-vmss"",""orchestrator"":""Kubernetes:1.16.10"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Load Balancer,0.075,0.075,Microsoft.Network,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Network/loadBalancers/kubernetes,,
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.08798544,0.08798544,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/aks-agentpool-45678901-0_OsDisk_1_6bb726d077d84b238780857a380772ea,"{""resourceNameSuffix"":""45678901"",""aksEngineVersion"":""v0.35.3-aks"",""creationSource"":""aks-aks-agentpool-45678901-0"",""orchestrator"":""Kubernetes:1.12.8"",""poolName"":""agentpool""}",
+11111111-12ab-34dc-56ef-123456abcdef,0bd50fdf-c923-4e1e-850c-196dd3dcc123,2021-02-02,Storage,0.006856704,0.006856704,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/disks/kubernetes-dynamic-pvc-1234abcd-ab12-cd34-ef56-123456abcd05,"{""kubernetes.io-created-for-pvc-namespace"":""kubecost"",""kubernetes.io-created-for-pvc-name"":""kubecost-prometheus-alertmanager"",""kubernetes.io-created-for-pv-name"":""pvc-1234abcd-ab12-cd34-ef56-123456abcd05"",""created-by"":""kubernetes-azure-dd""}",

+ 2 - 0
pkg/cloud/azure/resources/billingexports/values/VirtualMachine.csv

@@ -0,0 +1,2 @@
+subscriptionid,billingaccountid,UsageDateTime,MeterCategory,costinbillingcurrency,paygcostinbillingcurrency,ConsumedService,InstanceId,Tags,AdditionalInfo
+11111111-12ab-34dc-56ef-123456abcdef,11111111-12ab-34dc-56ef-123456billing,2021-02-01,Virtual Machines,4,5,Microsoft.Compute,/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss,"{""resourceNameSuffix"":""12345678"",""aksEngineVersion"":""aks-release-v0.47.0-1-aks"",""creationSource"":""aks-aks-nodepool1-12345678-vmss""}","{ ""ServiceType"": ""Standard_DS2_v2"",  ""VMName"": ""aks-nodepool1-12345678-vmss_0"",  ""VCPUs"": 2  }"

+ 170 - 0
pkg/cloud/azure/storagebillingparser.go

@@ -0,0 +1,170 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/opencost/opencost/pkg/cloud"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
+// AzureStorageBillingParser accesses billing data stored in CSV files in Azure Storage
+type AzureStorageBillingParser struct {
+	StorageConnection
+}
+
+func (asbp *AzureStorageBillingParser) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*AzureStorageBillingParser)
+	if !ok {
+		return false
+	}
+	return asbp.StorageConnection.Equals(&thatConfig.StorageConnection)
+}
+
+type AzureBillingResultFunc func(*BillingRowValues) error
+
+func (asbp *AzureStorageBillingParser) ParseBillingData(start, end time.Time, resultFn AzureBillingResultFunc) (cloud.ConnectionStatus, error) {
+	err := asbp.Validate()
+	if err != nil {
+		return cloud.InvalidConfiguration, err
+	}
+
+	containerURL, err := asbp.getContainer()
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+	ctx := context.Background()
+	blobNames, err := asbp.getMostRecentBlobs(start, end, containerURL, ctx)
+	if err != nil {
+		return cloud.FailedConnection, err
+	}
+	for _, blobName := range blobNames {
+		blobBytes, err2 := asbp.DownloadBlob(blobName, containerURL, ctx)
+		if err2 != nil {
+			return cloud.FailedConnection, err2
+		}
+		err2 = asbp.parseCSV(start, end, csv.NewReader(bytes.NewReader(blobBytes)), resultFn)
+		if err2 != nil {
+			return cloud.ParseError, err2
+		}
+
+	}
+	return cloud.SuccessfulConnection, nil
+}
+
+func (asbp *AzureStorageBillingParser) parseCSV(start, end time.Time, reader *csv.Reader, resultFn AzureBillingResultFunc) error {
+	headers, err := reader.Read()
+	if err != nil {
+		return err
+	}
+	abp, err := NewBillingParseSchema(headers)
+	if err != nil {
+		return err
+	}
+	for {
+		var record, err = reader.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		abv := abp.ParseRow(start, end, record)
+		if abv == nil {
+			continue
+		}
+
+		err = resultFn(abv)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (asbp *AzureStorageBillingParser) getMostRecentBlobs(start, end time.Time, containerURL *azblob.ContainerURL, ctx context.Context) ([]string, error) {
+	log.Infof("Azure Storage: retrieving most recent reports from: %v - %v", start, end)
+
+	// Get list of month substrings for months contained in the start to end range
+	monthStrs, err := asbp.getMonthStrings(start, end)
+	if err != nil {
+		return nil, err
+	}
+	mostResentBlobs := make(map[string]azblob.BlobItemInternal)
+	for marker := (azblob.Marker{}); marker.NotDone(); {
+		// Get a result segment starting with the blob indicated by the current Marker.
+		listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+		if err != nil {
+			return nil, err
+		}
+
+		// ListBlobs returns the start of the next segment; you MUST use this to get
+		// the next segment (after processing the current result segment).
+		marker = listBlob.NextMarker
+
+		// Using the list of months strings find the most resent blob for each month in the range
+		for _, blobInfo := range listBlob.Segment.BlobItems {
+			for _, month := range monthStrs {
+				if strings.Contains(blobInfo.Name, month) {
+					// If Container Path configuration exists, check if it is in the blobs name
+					if asbp.Path != "" && !strings.Contains(blobInfo.Name, asbp.Path) {
+						continue
+					}
+
+					if prevBlob, ok := mostResentBlobs[month]; ok {
+						if prevBlob.Properties.CreationTime.After(*blobInfo.Properties.CreationTime) {
+							continue
+						}
+					}
+					mostResentBlobs[month] = blobInfo
+				}
+			}
+		}
+	}
+
+	// convert blob names into blob urls and move from map into ordered list of blob names
+	var blobNames []string
+	for _, month := range monthStrs {
+		if blob, ok := mostResentBlobs[month]; ok {
+			blobNames = append(blobNames, blob.Name)
+		}
+	}
+
+	return blobNames, nil
+}
+
+func (asbp *AzureStorageBillingParser) getMonthStrings(start, end time.Time) ([]string, error) {
+	if start.After(end) {
+		return []string{}, fmt.Errorf("start date must be before end date")
+	}
+	if end.After(time.Now()) {
+		end = time.Now()
+	}
+	var monthStrs []string
+	monthStr := asbp.timeToMonthString(start)
+	endStr := asbp.timeToMonthString(end)
+	monthStrs = append(monthStrs, monthStr)
+	currMonth := start.AddDate(0, 0, -start.Day()+1)
+	for monthStr != endStr {
+		currMonth = currMonth.AddDate(0, 1, 0)
+		monthStr = asbp.timeToMonthString(currMonth)
+		monthStrs = append(monthStrs, monthStr)
+	}
+
+	return monthStrs, nil
+}
+
+func (asbp *AzureStorageBillingParser) timeToMonthString(input time.Time) string {
+	format := "20060102"
+	startOfMonth := input.AddDate(0, 0, -input.Day()+1)
+	endOfMonth := input.AddDate(0, 1, -input.Day())
+	return startOfMonth.Format(format) + "-" + endOfMonth.Format(format)
+}

+ 204 - 0
pkg/cloud/azure/storagebillingparser_test.go

@@ -0,0 +1,204 @@
+package azure
+
+import (
+	"testing"
+	"time"
+)
+
+func TestAzureStorageBillingParser_getMonthStrings(t *testing.T) {
+	asbp := AzureStorageBillingParser{}
+	loc, _ := time.LoadLocation("UTC")
+	testCases := map[string]struct {
+		start    time.Time
+		end      time.Time
+		expected []string
+	}{
+		"Single Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 2, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+			},
+		},
+		"Two Month": {
+			start: time.Date(2021, 2, 1, 00, 00, 00, 00, loc),
+			end:   time.Date(2021, 3, 3, 00, 00, 00, 00, loc),
+			expected: []string{
+				"20210201-20210228",
+				"20210301-20210331",
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			months, err := asbp.getMonthStrings(tc.start, tc.end)
+			if err != nil {
+				t.Errorf("Could not retrieve month strings %v", err)
+			}
+
+			if len(months) != len(tc.expected) {
+				t.Errorf("Did not create the expected number of month strings. Expected: %d, Actual: %d", len(tc.expected), len(months))
+			}
+
+			for i, monthStr := range months {
+				if monthStr != tc.expected[i] {
+					t.Errorf("Incorrect month string at index %d. Expected: %s, Actual: %s", i, tc.expected[i], monthStr)
+				}
+			}
+		})
+	}
+}
+
+func TestAzureStorageBillingParser_parseCSV(t *testing.T) {
+	loc, _ := time.LoadLocation("UTC")
+	start := time.Date(2021, 2, 1, 00, 00, 00, 00, loc)
+	end := time.Date(2021, 2, 3, 00, 00, 00, 00, loc)
+	tests := map[string]struct {
+		input    string
+		expected []BillingRowValues
+	}{
+		"Virtual Machine": {
+			input: "VirtualMachine.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456billing",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+		"Missing Brackets": {
+			input: "MissingBrackets.csv",
+			expected: []BillingRowValues{
+				{
+					Date:            start,
+					MeterCategory:   "Virtual Machines",
+					SubscriptionID:  "11111111-12ab-34dc-56ef-123456abcdef",
+					InvoiceEntityID: "11111111-12ab-34dc-56ef-123456abcdef",
+					InstanceID:      "/subscriptions/11111111-12ab-34dc-56ef-123456abcdef/resourceGroups/Example-Resource-Group/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-12345678-vmss",
+					Service:         "Microsoft.Compute",
+					Tags: map[string]string{
+						"resourceNameSuffix": "12345678",
+						"aksEngineVersion":   "aks-release-v0.47.0-1-aks",
+						"creationSource":     "aks-aks-nodepool1-12345678-vmss",
+					},
+					AdditionalInfo: map[string]any{
+						"ServiceType": "Standard_DS2_v2",
+						"VMName":      "aks-nodepool1-12345678-vmss_0",
+						"VCPUs":       2.0,
+					},
+					Cost:    5,
+					NetCost: 4,
+				},
+			},
+		},
+	}
+	asbp := &AzureStorageBillingParser{}
+	for name, tc := range tests {
+		t.Run(name, func(t *testing.T) {
+			csvRetriever := &TestCSVRetriever{
+				CSVName: valueCasesPath + tc.input,
+			}
+			csvs, err := csvRetriever.getCSVReaders(start, end)
+			if err != nil {
+				t.Errorf("Failed to read specified CSV: %s", err.Error())
+			}
+			reader := csvs[0]
+
+			var actual []*BillingRowValues
+			resultFn := func(abv *BillingRowValues) error {
+				actual = append(actual, abv)
+				return nil
+			}
+
+			err = asbp.parseCSV(start, end, reader, resultFn)
+			if err != nil {
+				t.Errorf("Error generating BillingRowValues: %s", err.Error())
+			}
+
+			if len(actual) != len(tc.expected) {
+				t.Errorf("Actual output length did not match expected. Expected: %d, Actual: %d", len(tc.expected), len(actual))
+			}
+
+			for i, this := range actual {
+				that := tc.expected[i]
+
+				if !this.Date.Equal(that.Date) {
+					t.Errorf("Parsed data at index %d has incorrect Date value. Expected: %s, Actual: %s", i, this.Date.String(), that.Date.String())
+				}
+
+				if this.MeterCategory != that.MeterCategory {
+					t.Errorf("Parsed data at index %d has incorrect MeterCategroy value. Expected: %s, Actual: %s", i, this.MeterCategory, that.MeterCategory)
+				}
+
+				if this.SubscriptionID != that.SubscriptionID {
+					t.Errorf("Parsed data at index %d has incorrect SubscriptionID value. Expected: %s, Actual: %s", i, this.SubscriptionID, that.SubscriptionID)
+				}
+
+				if this.InvoiceEntityID != that.InvoiceEntityID {
+					t.Errorf("Parsed data at index %d has incorrect InvoiceEntityID value. Expected: %s, Actual: %s", i, this.InvoiceEntityID, that.InvoiceEntityID)
+				}
+
+				if this.InstanceID != that.InstanceID {
+					t.Errorf("Parsed data at index %d has incorrect InstanceID value. Expected: %s, Actual: %s", i, this.InstanceID, that.InstanceID)
+				}
+
+				if this.Service != that.Service {
+					t.Errorf("Parsed data at index %d has incorrect Service value. Expected: %s, Actual: %s", i, this.Service, that.Service)
+				}
+
+				if this.Cost != that.Cost {
+					t.Errorf("Parsed data at index %d has incorrect Cost value. Expected: %f, Actual: %f", i, this.Cost, that.Cost)
+				}
+
+				if this.NetCost != that.NetCost {
+					t.Errorf("Parsed data at index %d has incorrect NetCost value. Expected: %f, Actual: %f", i, this.NetCost, that.NetCost)
+				}
+
+				if len(this.Tags) != len(that.Tags) {
+					t.Errorf("Parsed data at index %d did not have the expected number of tags. Expected: %d, Actual: %d", i, len(that.Tags), len(this.Tags))
+				}
+
+				for key, thisTag := range this.Tags {
+					thatTag, ok := that.Tags[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d is has unexpected entry in Tags with key: %s", i, key)
+					}
+
+					if thisTag != thatTag {
+						t.Errorf("Parsed data at index %d is has unexpected value in Tags for key: %s. Expected: %s, Actual: %s", i, key, thatTag, thisTag)
+					}
+				}
+
+				for key, thisAI := range this.AdditionalInfo {
+					thatAI, ok := that.AdditionalInfo[key]
+					if !ok {
+						t.Errorf("Parsed data at index %d is has unexpected entry in Additional Inforamation with key: %s", i, key)
+					}
+
+					if thisAI != thatAI {
+						t.Errorf("Parsed data at index %d is has unexpected value in Tags for key: %s. Expected: %v, Actual: %v", i, key, thisAI, thatAI)
+					}
+				}
+			}
+
+		})
+
+	}
+}

+ 179 - 0
pkg/cloud/azure/storageconfiguration.go

@@ -0,0 +1,179 @@
+package azure
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// StorageConfiguration holds the identifiers and credentials needed to read
// billing export data from a container in an Azure Storage account.
type StorageConfiguration struct {
	SubscriptionID string     `json:"subscriptionID"`
	Account        string     `json:"account"`
	Container      string     `json:"container"`
	Path           string     `json:"path"`
	Cloud          string     `json:"cloud"`
	Authorizer     Authorizer `json:"authorizer"`
}
+
+// Check ensures that all required fields are set, and throws an error if they are not
+func (sc *StorageConfiguration) Validate() error {
+
+	if sc.Authorizer == nil {
+		return fmt.Errorf("StorageConfiguration: missing authorizer")
+	}
+
+	err := sc.Authorizer.Validate()
+	if err != nil {
+		return err
+	}
+
+	if sc.SubscriptionID == "" {
+		return fmt.Errorf("StorageConfiguration: missing Subcription ID")
+	}
+
+	if sc.Account == "" {
+		return fmt.Errorf("StorageConfiguration: missing Account")
+	}
+
+	if sc.Container == "" {
+		return fmt.Errorf("StorageConfiguration: missing Container")
+	}
+
+	return nil
+}
+
+func (sc *StorageConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*StorageConfiguration)
+	if !ok {
+		return false
+	}
+
+	if sc.Authorizer != nil {
+		if !sc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if sc.SubscriptionID != thatConfig.SubscriptionID {
+		return false
+	}
+
+	if sc.Account != thatConfig.Account {
+		return false
+	}
+
+	if sc.Container != thatConfig.Container {
+		return false
+	}
+
+	if sc.Path != thatConfig.Path {
+		return false
+	}
+
+	if sc.Cloud != thatConfig.Cloud {
+		return false
+	}
+
+	return true
+}
+
+func (sc *StorageConfiguration) Sanitize() config.Config {
+	return &StorageConfiguration{
+		SubscriptionID: sc.SubscriptionID,
+		Account:        sc.Account,
+		Container:      sc.Container,
+		Path:           sc.Path,
+		Cloud:          sc.Cloud,
+		Authorizer:     sc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
+func (sc *StorageConfiguration) Key() string {
+	key := fmt.Sprintf("%s/%s", sc.SubscriptionID, sc.Container)
+	// append container path to key if it exists
+	if sc.Path != "" {
+		key = fmt.Sprintf("%s/%s", key, sc.Path)
+	}
+	return key
+}
+
+func (sc *StorageConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	subscriptionID, err := config.GetInterfaceValue[string](fmap, "subscriptionID")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.SubscriptionID = subscriptionID
+
+	account, err := config.GetInterfaceValue[string](fmap, "account")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Account = account
+
+	container, err := config.GetInterfaceValue[string](fmap, "container")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Container = container
+
+	path, err := config.GetInterfaceValue[string](fmap, "path")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Path = path
+
+	cloud, err := config.GetInterfaceValue[string](fmap, "cloud")
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Cloud = cloud
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	sc.Authorizer = authorizer
+
+	return nil
+}
+
+func ConvertAzureStorageConfigToConfig(asc AzureStorageConfig) config.KeyedConfig {
+	if asc.IsEmpty() {
+		return nil
+	}
+
+	var authorizer Authorizer
+	authorizer = &AccessKey{
+		AccessKey: asc.AccessKey,
+		Account:   asc.AccountName,
+	}
+
+	return &StorageConfiguration{
+		SubscriptionID: asc.SubscriptionId,
+		Account:        asc.AccountName,
+		Container:      asc.ContainerName,
+		Path:           asc.ContainerPath,
+		Cloud:          asc.AzureCloud,
+		Authorizer:     authorizer,
+	}
+}

+ 446 - 0
pkg/cloud/azure/storageconfiguration_test.go

@@ -0,0 +1,446 @@
+package azure
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestStorageConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   StorageConfiguration
+		expected error
+	}{
+		"valid config Azure AccessKey": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+		"access key invalid": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					Account: "account",
+				},
+			},
+			expected: fmt.Errorf("AccessKey: missing access key"),
+		},
+		"missing authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing authorizer"),
+		},
+		"missing subscriptionID": {
+			config: StorageConfiguration{
+				SubscriptionID: "",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Subcription ID"),
+		},
+		"missing account": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Account"),
+		},
+		"missing container": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: fmt.Errorf("StorageConfiguration: missing Container"),
+		},
+		"missing path": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+		"missing cloud": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: nil,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestStorageConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     StorageConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: true,
+		},
+
+		"missing both authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: true,
+		},
+		"missing left authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"missing right authorizer": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+			expected: false,
+		},
+		"different subscriptionID": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID2",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different account": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account2",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different container": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container2",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different path": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path2",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different cloud": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud2",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+			right: &AccessKey{
+				AccessKey: "accessKey",
+				Account:   "account",
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestStorageConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config StorageConfiguration
+	}{
+		"Empty Config": {
+			config: StorageConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer:     nil,
+			},
+		},
+		"AccessKey Authorizer": {
+			config: StorageConfiguration{
+				SubscriptionID: "subscriptionID",
+				Account:        "account",
+				Container:      "container",
+				Path:           "path",
+				Cloud:          "cloud",
+				Authorizer: &AccessKey{
+					AccessKey: "accessKey",
+					Account:   "account",
+				},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &StorageConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 77 - 0
pkg/cloud/azure/storageconnection.go

@@ -0,0 +1,77 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+)
+
// StorageConnection provides access to Azure Storage blobs using the
// connection details and Authorizer held in its embedded StorageConfiguration.
type StorageConnection struct {
	StorageConfiguration
}
+
+func (sc *StorageConnection) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*StorageConnection)
+	if !ok {
+		return false
+	}
+
+	return sc.StorageConfiguration.Equals(&thatConfig.StorageConfiguration)
+}
+
+func (sc *StorageConnection) getContainer() (*azblob.ContainerURL, error) {
+
+	credential, err := sc.Authorizer.GetBlobCredentials()
+	if err != nil {
+		return nil, err
+	}
+
+	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+
+	// From the Azure portal, get your storage account blob service URL endpoint.
+	URL, _ := url.Parse(
+		fmt.Sprintf(sc.getBlobURLTemplate(), sc.Account, sc.Container))
+
+	// Create a ContainerURL object that wraps the container URL and a request
+	// pipeline to make requests.
+	containerURL := azblob.NewContainerURL(*URL, p)
+	return &containerURL, nil
+}
+
+// getBlobURLTemplate returns the correct BlobUrl for whichever Cloud storage account is specified by the AzureCloud configuration
+// defaults to the Public Cloud template
+func (sc *StorageConnection) getBlobURLTemplate() string {
+	// Use gov cloud blob url if gov is detected in AzureCloud
+	if strings.Contains(strings.ToLower(sc.Cloud), "gov") {
+		return "https://%s.blob.core.usgovcloudapi.net/%s"
+	}
+	// default to Public Cloud template
+	return "https://%s.blob.core.windows.net/%s"
+}
+
+func (sc *StorageConnection) DownloadBlob(blobName string, containerURL *azblob.ContainerURL, ctx context.Context) ([]byte, error) {
+	log.Infof("Azure Storage: retrieving blob: %v", blobName)
+
+	blobURL := containerURL.NewBlobURL(blobName)
+	downloadResponse, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
+	if err != nil {
+		return nil, err
+	}
+	// NOTE: automatically retries are performed if the connection fails
+	bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
+
+	// read the body into a buffer
+	downloadedData := bytes.Buffer{}
+	_, err = downloadedData.ReadFrom(bodyStream)
+	if err != nil {
+		return nil, err
+	}
+	return downloadedData.Bytes(), nil
+}

+ 53 - 0
pkg/cloud/config/authorizer.go

@@ -0,0 +1,53 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// AuthorizerTypeProperty is the property where the id of an Authorizer should be placed in its custom MarshalJSON function.
const AuthorizerTypeProperty = "authorizerType"

// Authorizer is a Config that also controls its own JSON representation, so
// the concrete type can be recovered during unmarshalling via the
// AuthorizerTypeProperty discriminator.
type Authorizer interface {
	Config
	json.Marshaler
}

// AuthorizerSelectorFn implementations should be a simple switch and act as a
// register for the Authorizer types; the returned Authorizer should be empty
// except for its default type property and will have other values marshalled into it.
type AuthorizerSelectorFn[T Authorizer] func(string) (T, error)
+
+// AuthorizerFromInterface this generic function provides Authorizer unmarshalling for all providers
+func AuthorizerFromInterface[T Authorizer](f any, authSelectFn AuthorizerSelectorFn[T]) (T, error) {
+	var emptyAuth T
+	if f == nil {
+		return emptyAuth, nil
+	}
+	fmap, ok := f.(map[string]interface{})
+	if !ok {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not cast interface as map")
+	}
+
+	authType, err := GetInterfaceValue[string](fmap, AuthorizerTypeProperty)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not retrieve type property: %w", err)
+	}
+	authorizer, err := authSelectFn(authType)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: %w", err)
+	}
+
+	// convert the interface back to a []Byte so that it can be unmarshalled into the correct type
+	fBin, err := json.Marshal(f)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: could not marshal value %v: %w", f, err)
+	}
+
+	err = json.Unmarshal(fBin, authorizer)
+	if err != nil {
+		return emptyAuth, fmt.Errorf("AuthorizerFromInterface: failed to unmarshal into Authorizer type %T from value %v: %w", authorizer, f, err)
+	}
+	return authorizer, nil
+}

+ 37 - 0
pkg/cloud/config/config.go

@@ -0,0 +1,37 @@
+package config
+
+import (
+	"fmt"
+)
+
// Redacted is the placeholder written in place of secret values by Sanitize implementations.
const Redacted = "REDACTED"

// Config allows for nested configurations which encapsulate their functionality to be validated and compared easily.
type Config interface {
	Validate() error
	Sanitize() Config
	Equals(Config) bool
}

// KeyedConfig is a top level Config which uses its public values as a unique identifier allowing duplicates to be identified.
type KeyedConfig interface {
	Config
	Key() string
}

// KeyedConfigWatcher supplies the current set of keyed configurations.
type KeyedConfigWatcher interface {
	GetConfigs() []KeyedConfig
}
+
// GetInterfaceValue retrieves fmap[key] and asserts it to type T, returning an
// error when the key is absent or the value has a different dynamic type.
func GetInterfaceValue[T any](fmap map[string]interface{}, key string) (T, error) {
	var value T
	interfaceValue, ok := fmap[key]
	if !ok {
		// Prefix now matches the function name (was "FromInterface", a
		// copy-paste remnant inconsistent with the second error below).
		return value, fmt.Errorf("GetInterfaceValue: missing '%s' property", key)
	}
	typedValue, ok := interfaceValue.(T)
	if !ok {
		return value, fmt.Errorf("GetInterfaceValue: property '%s' had expected type '%T' but did not match", key, value)
	}
	return typedValue, nil
}

+ 42 - 0
pkg/cloud/connectionstatus.go

@@ -0,0 +1,42 @@
+package cloud
+
// ConnectionStatus communicates the status of a cloud connection in a way that is general enough to apply to each
// Cloud Provider, but still gives actionable information on how to troubleshoot one of the four failing statuses.
type ConnectionStatus string

// All constants are now explicitly typed as ConnectionStatus; in the original
// only InitialStatus was, leaving the rest as untyped string constants.
const (
	// InitialStatus is the zero value of CloudConnectionStatus and means that the cloud connection is untested. Once
	// the ConnectionStatus has been changed it should not return to this value. This status is assigned on creation
	// to the cloud provider.
	InitialStatus ConnectionStatus = "No Connection"

	// InvalidConfiguration means that Cloud Configuration is missing required values to connect to the cloud provider.
	// This status is assigned during failures in the provider implementation of getCloudConfig().
	InvalidConfiguration ConnectionStatus = "Invalid Configuration"

	// FailedConnection means that all required Cloud Configuration values are filled in, but a connection with the
	// Cloud Provider cannot be established. This is indicative of a typo in one of the Cloud Configuration values or an
	// issue in how the connection was set up in the Cloud Provider's Console. The assignment of this status varies
	// between implementations, but should happen if an error is thrown when an interaction with an object from
	// the Cloud Service Provider's sdk occurs.
	FailedConnection ConnectionStatus = "Failed Connection"

	// ParseError indicates an issue with our functions which parse responses.
	ParseError ConnectionStatus = "Parse Error"

	// MissingData means that the Cloud Integration is properly configured, but the cloud provider is not returning
	// billing/cost and usage data. This status is indicative of the billing/cost and usage data export of the Cloud Provider
	// being incorrectly set up or the export being set up in the last 48 hours and not having started populating data yet.
	// This status is set when a query has been successfully made but the results come back empty. If the cloud provider
	// already has a SUCCESSFUL_CONNECTION status then this status should not be set, because this indicates that the specific
	// query made may have been empty.
	MissingData ConnectionStatus = "Data Missing"

	// SuccessfulConnection means that the Cloud Integration is properly configured and returning data. This status is
	// set on any successful query where data is returned.
	SuccessfulConnection ConnectionStatus = "Connection Successful"
)

// String implements fmt.Stringer.
func (cs ConnectionStatus) String() string {
	return string(cs)
}

+ 132 - 0
pkg/cloud/gcp/authorizer.go

@@ -0,0 +1,132 @@
+package gcp
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"google.golang.org/api/option"
+)
+
// ServiceAccountKeyAuthorizerType identifies the ServiceAccountKey Authorizer in serialized configs.
const ServiceAccountKeyAuthorizerType = "GCPServiceAccountKey"

// WorkloadIdentityAuthorizerType identifies the WorkloadIdentity Authorizer in serialized configs.
const WorkloadIdentityAuthorizerType = "GCPWorkloadIdentity"

// Authorizer provides a []option.ClientOption which is used when creating clients in the GCP SDK.
type Authorizer interface {
	config.Authorizer
	CreateGCPClientOptions() ([]option.ClientOption, error)
}
+
+// SelectAuthorizerByType is an implementation of AuthorizerSelectorFn and acts as a register for Authorizer types
+func SelectAuthorizerByType(typeStr string) (Authorizer, error) {
+	switch typeStr {
+	case ServiceAccountKeyAuthorizerType:
+		return &ServiceAccountKey{}, nil
+	case WorkloadIdentityAuthorizerType:
+		return &WorkloadIdentity{}, nil
+	default:
+		return nil, fmt.Errorf("GCP: provider authorizer type '%s' is not valid", typeStr)
+	}
+}
+
// ServiceAccountKey authorizes GCP clients using service account key material
// held as a map of the key file's properties.
type ServiceAccountKey struct {
	Key map[string]string `json:"key"`
}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (gkc *ServiceAccountKey) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 2)
+	fmap[config.AuthorizerTypeProperty] = ServiceAccountKeyAuthorizerType
+	fmap["key"] = gkc.Key
+	return json.Marshal(fmap)
+}
+
+func (gkc *ServiceAccountKey) Validate() error {
+	if gkc.Key == nil || len(gkc.Key) == 0 {
+		return fmt.Errorf("ServiceAccountKey: missing Key")
+	}
+
+	return nil
+}
+
+func (gkc *ServiceAccountKey) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*ServiceAccountKey)
+	if !ok {
+		return false
+	}
+
+	if len(gkc.Key) != len(thatConfig.Key) {
+		return false
+	}
+
+	for k, v := range gkc.Key {
+		if thatConfig.Key[k] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (gkc *ServiceAccountKey) Sanitize() config.Config {
+	redactedMap := make(map[string]string, len(gkc.Key))
+	for key, _ := range gkc.Key {
+		redactedMap[key] = config.Redacted
+	}
+	return &ServiceAccountKey{
+		Key: redactedMap,
+	}
+}
+
+func (gkc *ServiceAccountKey) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	err := gkc.Validate()
+	if err != nil {
+		return nil, err
+	}
+
+	b, err := json.Marshal(gkc.Key)
+	if err != nil {
+		return nil, fmt.Errorf("Key: failed to marshal Key: %s", err.Error())
+	}
+	clientOption := option.WithCredentialsJSON(b)
+
+	// The creation of the BigQuery Client is where FAILED_CONNECTION CloudConnectionStatus is recorded for GCP
+	return []option.ClientOption{clientOption}, nil
+}
+
+// WorkloadIdentity passes an empty slice of client options which causes the GCP SDK to check for the workload identity in the environment
+type WorkloadIdentity struct{}
+
+// MarshalJSON custom json marshalling functions, sets properties as tagged in struct and sets the authorizer type property
+func (wi *WorkloadIdentity) MarshalJSON() ([]byte, error) {
+	fmap := make(map[string]any, 1)
+	fmap[config.AuthorizerTypeProperty] = WorkloadIdentityAuthorizerType
+	return json.Marshal(fmap)
+}
+
+func (wi *WorkloadIdentity) Validate() error {
+	return nil
+}
+
+func (wi *WorkloadIdentity) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	_, ok := config.(*WorkloadIdentity)
+	if !ok {
+		return false
+	}
+
+	return true
+}
+
+func (wi *WorkloadIdentity) Sanitize() config.Config {
+	return &WorkloadIdentity{}
+}
+
+func (wi *WorkloadIdentity) CreateGCPClientOptions() ([]option.ClientOption, error) {
+	return []option.ClientOption{}, nil
+}

+ 172 - 0
pkg/cloud/gcp/bigqueryconfiguration.go

@@ -0,0 +1,172 @@
+package gcp
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
// BigQueryConfiguration identifies the BigQuery table holding GCP billing
// export data and the Authorizer used to create clients for it.
type BigQueryConfiguration struct {
	ProjectID  string     `json:"projectID"`
	Dataset    string     `json:"dataset"`
	Table      string     `json:"table"`
	Authorizer Authorizer `json:"authorizer"`
}
+
+func (bqc *BigQueryConfiguration) Validate() error {
+
+	if bqc.Authorizer == nil {
+		return fmt.Errorf("BigQueryConfig: missing configurer")
+	}
+
+	err := bqc.Authorizer.Validate()
+	if err != nil {
+		return fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: %s", err.Error())
+	}
+
+	if bqc.ProjectID == "" {
+		return fmt.Errorf("BigQueryConfig: missing ProjectID")
+	}
+
+	if bqc.Dataset == "" {
+		return fmt.Errorf("BigQueryConfig: missing Dataset")
+	}
+
+	if bqc.Table == "" {
+		return fmt.Errorf("BigQueryConfig: missing Table")
+	}
+
+	return nil
+}
+
+func (bqc *BigQueryConfiguration) Equals(config config.Config) bool {
+	if config == nil {
+		return false
+	}
+	thatConfig, ok := config.(*BigQueryConfiguration)
+	if !ok {
+		return false
+	}
+
+	if bqc.Authorizer != nil {
+		if !bqc.Authorizer.Equals(thatConfig.Authorizer) {
+			return false
+		}
+	} else {
+		if thatConfig.Authorizer != nil {
+			return false
+		}
+	}
+
+	if bqc.ProjectID != thatConfig.ProjectID {
+		return false
+	}
+
+	if bqc.Dataset != thatConfig.Dataset {
+		return false
+	}
+
+	if bqc.Table != thatConfig.Table {
+		return false
+	}
+
+	return true
+}
+
+func (bqc *BigQueryConfiguration) Sanitize() config.Config {
+	return &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    bqc.Dataset,
+		Table:      bqc.Table,
+		Authorizer: bqc.Authorizer.Sanitize().(Authorizer),
+	}
+}
+
// Key returns the unique identifier for this GCP integration: the project ID
// plus the qualified "dataset.table" name.
func (bqc *BigQueryConfiguration) Key() string {
	return fmt.Sprintf("%s/%s", bqc.ProjectID, bqc.GetBillingDataDataset())
}
+
// GetBillingDataDataset returns the qualified "dataset.table" name of the
// billing export table.
func (bqc *BigQueryConfiguration) GetBillingDataDataset() string {
	return fmt.Sprintf("%s.%s", bqc.Dataset, bqc.Table)
}
+
// GetBigQueryClient creates a BigQuery client for the configured project,
// using the client options produced by the Authorizer.
func (bqc *BigQueryConfiguration) GetBigQueryClient(ctx context.Context) (*bigquery.Client, error) {
	clientOpts, err := bqc.Authorizer.CreateGCPClientOptions()
	if err != nil {
		return nil, err
	}
	return bigquery.NewClient(ctx, bqc.ProjectID, clientOpts...)
}
+
+// UnmarshalJSON assumes data is save as an BigQueryConfigurationDTO
+func (bqc *BigQueryConfiguration) UnmarshalJSON(b []byte) error {
+	var f interface{}
+	err := json.Unmarshal(b, &f)
+	if err != nil {
+		return err
+	}
+
+	fmap := f.(map[string]interface{})
+
+	projectID, err := config.GetInterfaceValue[string](fmap, "projectID")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.ProjectID = projectID
+
+	dataset, err := config.GetInterfaceValue[string](fmap, "dataset")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.Dataset = dataset
+
+	table, err := config.GetInterfaceValue[string](fmap, "table")
+	if err != nil {
+		return fmt.Errorf("BigQueryConfiguration: FromInterface: %s", err.Error())
+	}
+	bqc.Table = table
+
+	authAny, ok := fmap["authorizer"]
+	if !ok {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: missing authorizer")
+	}
+	authorizer, err := config.AuthorizerFromInterface(authAny, SelectAuthorizerByType)
+	if err != nil {
+		return fmt.Errorf("StorageConfiguration: UnmarshalJSON: %s", err.Error())
+	}
+	bqc.Authorizer = authorizer
+	return nil
+}
+
+func ConvertBigQueryConfigToConfig(bqc BigQueryConfig) config.KeyedConfig {
+	if bqc.IsEmpty() {
+		return nil
+	}
+
+	BillingDataDataset := strings.Split(bqc.BillingDataDataset, ".")
+	dataset := BillingDataDataset[0]
+	var table string
+	if len(BillingDataDataset) > 1 {
+		table = BillingDataDataset[1]
+	}
+
+	bigQueryConfiguration := &BigQueryConfiguration{
+		ProjectID:  bqc.ProjectID,
+		Dataset:    dataset,
+		Table:      table,
+		Authorizer: &WorkloadIdentity{}, // Default to WorkloadIdentity
+	}
+
+	if len(bqc.Key) != 0 {
+		bigQueryConfiguration.Authorizer = &ServiceAccountKey{
+			Key: bqc.Key,
+		}
+	}
+
+	return bigQueryConfiguration
+}

+ 388 - 0
pkg/cloud/gcp/bigqueryconfiguration_test.go

@@ -0,0 +1,388 @@
+package gcp
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util/json"
+)
+
+func TestBigQueryConfiguration_Validate(t *testing.T) {
+	testCases := map[string]struct {
+		config   BigQueryConfiguration
+		expected error
+	}{
+		"valid config GCP Key": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: nil,
+		},
+		"valid config WorkloadIdentity": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: nil,
+		},
+		"access Key invalid": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: nil,
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: issue with GCP Authorizer: ServiceAccountKey: missing Key"),
+		},
+		"missing configurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing configurer"),
+		},
+		"missing projectID": {
+			config: BigQueryConfiguration{
+				ProjectID: "",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing ProjectID"),
+		},
+		"missing dataset": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Dataset"),
+		},
+		"missing table": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: fmt.Errorf("BigQueryConfig: missing Table"),
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.config.Validate()
+			actualString := "nil"
+			if actual != nil {
+				actualString = actual.Error()
+			}
+			expectedString := "nil"
+			if testCase.expected != nil {
+				expectedString = testCase.expected.Error()
+			}
+			if actualString != expectedString {
+				t.Errorf("errors do not match: Actual: '%s', Expected: '%s'", actualString, expectedString)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_Equals(t *testing.T) {
+	testCases := map[string]struct {
+		left     BigQueryConfiguration
+		right    config.Config
+		expected bool
+	}{
+		"matching config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: true,
+		},
+		"different configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing both configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: true,
+		},
+		"missing left configurer": {
+			left: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+			expected: false,
+		},
+		"missing right configurer": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+			expected: false,
+		},
+		"different projectID": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID2",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different dataset": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset2",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different table": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table2",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			expected: false,
+		},
+		"different config": {
+			left: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+			right: &ServiceAccountKey{
+
+				Key: map[string]string{
+					"Key":  "Key",
+					"key1": "key2",
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			actual := testCase.left.Equals(testCase.right)
+			if actual != testCase.expected {
+				t.Errorf("incorrect result: Actual: '%t', Expected: '%t'", actual, testCase.expected)
+			}
+		})
+	}
+}
+
+func TestBigQueryConfiguration_JSON(t *testing.T) {
+	testCases := map[string]struct {
+		config BigQueryConfiguration
+	}{
+		"Empty Config": {
+			config: BigQueryConfiguration{},
+		},
+		"Nil Authorizer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: nil,
+			},
+		},
+		"ServiceAccountKeyConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID: "projectID",
+				Dataset:   "dataset",
+				Table:     "table",
+				Authorizer: &ServiceAccountKey{
+					Key: map[string]string{
+						"Key":  "Key",
+						"key1": "key2",
+					},
+				},
+			},
+		},
+		"WorkLoadIdentityConfigurer": {
+			config: BigQueryConfiguration{
+				ProjectID:  "projectID",
+				Dataset:    "dataset",
+				Table:      "table",
+				Authorizer: &WorkloadIdentity{},
+			},
+		},
+	}
+
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+
+			// test JSON Marshalling
+			configJSON, err := json.Marshal(testCase.config)
+			if err != nil {
+				t.Errorf("failed to marshal configuration: %s", err.Error())
+			}
+			log.Info(string(configJSON))
+			unmarshalledConfig := &BigQueryConfiguration{}
+			err = json.Unmarshal(configJSON, unmarshalledConfig)
+			if err != nil {
+				t.Errorf("failed to unmarshal configuration: %s", err.Error())
+			}
+			if !testCase.config.Equals(unmarshalledConfig) {
+				t.Error("config does not equal unmarshalled config")
+			}
+		})
+	}
+}

+ 110 - 0
pkg/cloud/gcp/bigqueryquerier.go

@@ -0,0 +1,110 @@
+package gcp
+
+import (
+	"context"
+	"regexp"
+	"strings"
+
+	"cloud.google.com/go/bigquery"
+	cloudconfig "github.com/opencost/opencost/pkg/cloud/config"
+	"github.com/opencost/opencost/pkg/kubecost"
+)
+
+type BigQueryQuerier struct {
+	BigQueryConfiguration
+}
+
+func (bqq *BigQueryQuerier) Equals(config cloudconfig.Config) bool {
+	thatConfig, ok := config.(*BigQueryQuerier)
+	if !ok {
+		return false
+	}
+
+	return bqq.BigQueryConfiguration.Equals(&thatConfig.BigQueryConfiguration)
+}
+
+func (bqq *BigQueryQuerier) QueryBigQuery(ctx context.Context, queryStr string) (*bigquery.RowIterator, error) {
+	client, err := bqq.GetBigQueryClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	query := client.Query(queryStr)
+	return query.Read(ctx)
+}
+
+func GCPSelectCategory(service, description string) string {
+	s := strings.ToLower(service)
+	d := strings.ToLower(description)
+
+	// Network descriptions
+	if strings.Contains(d, "download") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "network") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "ingress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "egress") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "static ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "external ip") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "load balanced") {
+		return kubecost.NetworkCategory
+	}
+	if strings.Contains(d, "licensing fee") {
+		return kubecost.OtherCategory
+	}
+
+	// Storage Descriptions
+	if strings.Contains(d, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd capacity") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd iops") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(d, "pd snapshot") {
+		return kubecost.StorageCategory
+	}
+
+	// Service Defaults
+	if strings.Contains(s, "storage") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "compute") {
+		return kubecost.ComputeCategory
+	}
+	if strings.Contains(s, "sql") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "bigquery") {
+		return kubecost.StorageCategory
+	}
+	if strings.Contains(s, "kubernetes") {
+		return kubecost.ManagementCategory
+	} else if strings.Contains(s, "pub/sub") {
+		return kubecost.NetworkCategory
+	}
+
+	return kubecost.OtherCategory
+}
+
+var parseProviderIDRx = regexp.MustCompile("^.+\\/(.+)?") // Capture "gke-cluster-3-default-pool-xxxx-yy" from "projects/###/instances/gke-cluster-3-default-pool-xxxx-yy"
+
+func GCPParseProviderID(id string) string {
+	match := parseProviderIDRx.FindStringSubmatch(id)
+	if len(match) == 0 {
+		return id
+	}
+	return match[len(match)-1]
+}

+ 21 - 21
pkg/cloud/gcpprovider.go → pkg/cloud/gcp/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package gcp
 
 import (
 	"context"
@@ -14,6 +14,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
 	"github.com/opencost/opencost/pkg/kubecost"
@@ -46,7 +47,6 @@ const (
 
 	GKEPreemptibleLabel = "cloud.google.com/gke-preemptible"
 	GKESpotLabel        = "cloud.google.com/gke-spot"
-	
 )
 
 // List obtained by installing the `gcloud` CLI tool,
@@ -100,16 +100,15 @@ type GCP struct {
 	BillingDataDataset      string
 	DownloadPricingDataLock sync.RWMutex
 	ReservedInstances       []*GCPReservedInstance
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 	ServiceKeyProvided      bool
 	ValidPricingKeys        map[string]bool
-	metadataClient          *metadata.Client
+	MetadataClient          *metadata.Client
 	clusterManagementPrice  float64
-	clusterRegion           string
-	clusterAccountID        string
-	clusterProjectID        string
+	ClusterRegion           string
+	ClusterAccountID        string
+	ClusterProjectID        string
 	clusterProvisioner      string
-	*CustomProvider
 }
 
 type gcpAllocation struct {
@@ -177,6 +176,7 @@ func (gcp *GCP) GetConfig() (*models.CustomPricing, error) {
 }
 
 // BigQueryConfig contain the required config and credentials to access OOC resources for GCP
+// Deprecated: as of v1.104, use BigQueryConfiguration instead.
 type BigQueryConfig struct {
 	ProjectID          string            `json:"projectID"`
 	BillingDataDataset string            `json:"billingDataDataset"`
@@ -267,8 +267,8 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 				}
 				gcp.ServiceKeyProvided = true
 			}
-		} else if updateType == AthenaInfoUpdateType {
-			a := AwsAthenaInfo{}
+		} else if updateType == aws.AthenaInfoUpdateType {
+			a := aws.AwsAthenaInfo{}
 			err := json.NewDecoder(r).Decode(&a)
 			if err != nil {
 				return err
@@ -316,7 +316,7 @@ func (gcp *GCP) UpdateConfig(r io.Reader, updateType string) (*models.CustomPric
 func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 	remoteEnabled := env.IsRemoteEnabled()
 
-	attribute, err := gcp.metadataClient.InstanceAttributeValue("cluster-name")
+	attribute, err := gcp.MetadataClient.InstanceAttributeValue("cluster-name")
 	if err != nil {
 		log.Infof("Error loading metadata cluster-name: %s", err.Error())
 	}
@@ -337,9 +337,9 @@ func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 	m := make(map[string]string)
 	m["name"] = attribute
 	m["provider"] = kubecost.GCPProvider
-	m["region"] = gcp.clusterRegion
-	m["account"] = gcp.clusterAccountID
-	m["project"] = gcp.clusterProjectID
+	m["region"] = gcp.ClusterRegion
+	m["account"] = gcp.ClusterAccountID
+	m["project"] = gcp.ClusterProjectID
 	m["provisioner"] = gcp.clusterProvisioner
 	m["id"] = env.GetClusterID()
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
@@ -351,7 +351,7 @@ func (gcp *GCP) ClusterManagementPricing() (string, float64, error) {
 }
 
 func (gcp *GCP) getAllAddresses() (*compute.AddressAggregatedList, error) {
-	projID, err := gcp.metadataClient.ProjectID()
+	projID, err := gcp.MetadataClient.ProjectID()
 	if err != nil {
 		return nil, err
 	}
@@ -391,7 +391,7 @@ func (gcp *GCP) isAddressOrphaned(address *compute.Address) bool {
 }
 
 func (gcp *GCP) getAllDisks() (*compute.DiskAggregatedList, error) {
-	projID, err := gcp.metadataClient.ProjectID()
+	projID, err := gcp.MetadataClient.ProjectID()
 	if err != nil {
 		return nil, err
 	}
@@ -799,7 +799,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]models.Key, pvKeys m
 				}
 
 				for _, candidateKey := range candidateKeys {
-					instanceType = strings.Split(candidateKey, ",")[1] // we may have overriden this while generating candidate keys
+					instanceType = strings.Split(candidateKey, ",")[1] // we may have overridden this while generating candidate keys
 					region := strings.Split(candidateKey, ",")[0]
 					candidateKeyGPU := candidateKey + ",gpu"
 					gcp.ValidPricingKeys[candidateKey] = true
@@ -1209,12 +1209,12 @@ func newReservedCounter(instance *GCPReservedInstance) *GCPReservedCounter {
 
 // Two available Reservation plans for GCP, 1-year and 3-year
 var gcpReservedInstancePlans map[string]*GCPReservedInstancePlan = map[string]*GCPReservedInstancePlan{
-	GCPReservedInstancePlanOneYear: &GCPReservedInstancePlan{
+	GCPReservedInstancePlanOneYear: {
 		Name:    GCPReservedInstancePlanOneYear,
 		CPUCost: 0.019915,
 		RAMCost: 0.002669,
 	},
-	GCPReservedInstancePlanThreeYear: &GCPReservedInstancePlan{
+	GCPReservedInstancePlanThreeYear: {
 		Name:    GCPReservedInstancePlanThreeYear,
 		CPUCost: 0.014225,
 		RAMCost: 0.001907,
@@ -1601,7 +1601,7 @@ func sustainedUseDiscount(class string, defaultDiscount float64, isPreemptible b
 	return discount
 }
 
-func parseGCPProjectID(id string) string {
+func ParseGCPProjectID(id string) string {
 	// gce://guestbook-12345/...
 	//  => guestbook-12345
 	match := gceRegex.FindStringSubmatch(id)
@@ -1618,7 +1618,7 @@ func getUsageType(labels map[string]string) string {
 	} else if t, ok := labels[GKESpotLabel]; ok && t == "true" {
 		// https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms
 		return "preemptible"
-	} else if t, ok := labels[KarpenterCapacityTypeLabel]; ok && t == KarpenterCapacitySpotTypeValue {
+	} else if t, ok := labels[models.KarpenterCapacityTypeLabel]; ok && t == models.KarpenterCapacitySpotTypeValue {
 		return "preemptible"
 	}
 	return "ondemand"

+ 7 - 7
pkg/cloud/gcpprovider_test.go → pkg/cloud/gcp/provider_test.go

@@ -1,4 +1,4 @@
-package cloud
+package gcp
 
 import (
 	"bytes"
@@ -68,7 +68,7 @@ func TestParseGCPProjectID(t *testing.T) {
 	}
 
 	for _, test := range cases {
-		result := parseGCPProjectID(test.input)
+		result := ParseGCPProjectID(test.input)
 		if result != test.expected {
 			t.Errorf("Input: %s, Expected: %s, Actual: %s", test.input, test.expected, result)
 		}
@@ -94,7 +94,7 @@ func TestGetUsageType(t *testing.T) {
 		},
 		{
 			input: map[string]string{
-				KarpenterCapacityTypeLabel: KarpenterCapacitySpotTypeValue,
+				models.KarpenterCapacityTypeLabel: models.KarpenterCapacitySpotTypeValue,
 			},
 			expected: "preemptible",
 		},
@@ -308,7 +308,7 @@ func TestParsePage(t *testing.T) {
 	}
 
 	expectedActualPrices := map[string]*GCPPricing{
-		"us-central1,a2highgpu,ondemand,gpu": &GCPPricing{
+		"us-central1,a2highgpu,ondemand,gpu": {
 			Name:        "services/6F81-5844-456A/skus/039F-D0DA-4055",
 			SKUID:       "039F-D0DA-4055",
 			Description: "Nvidia Tesla A100 GPU running in Americas",
@@ -320,7 +320,7 @@ func TestParsePage(t *testing.T) {
 			},
 			ServiceRegions: []string{"us-central1", "us-east1", "us-west1"},
 			PricingInfo: []*PricingInfo{
-				&PricingInfo{
+				{
 					Summary: "",
 					PricingExpression: &PricingExpression{
 						UsageUnit:                "h",
@@ -329,7 +329,7 @@ func TestParsePage(t *testing.T) {
 						BaseUnitConversionFactor: 0,
 						DisplayQuantity:          1,
 						TieredRates: []*TieredRates{
-							&TieredRates{
+							{
 								StartUsageAmount: 0,
 								UnitPrice: &UnitPriceInfo{
 									CurrencyCode: "USD",
@@ -353,7 +353,7 @@ func TestParsePage(t *testing.T) {
 				GPUCost:          "2.933908",
 			},
 		},
-		"us-central1,a2highgpu,ondemand": &GCPPricing{
+		"us-central1,a2highgpu,ondemand": {
 			Node: &models.Node{
 				VCPUCost:         "0.031611",
 				RAMCost:          "0.004237",

+ 5 - 3
pkg/cloud/models/models.go

@@ -20,9 +20,11 @@ var (
 )
 
 const (
-	AuthSecretPath          = "/var/secrets/service-key.json"
-	StorageConfigSecretPath = "/var/azure-storage-config/azure-storage-config.json"
-	DefaultShareTenancyCost = "true"
+	AuthSecretPath                 = "/var/secrets/service-key.json"
+	StorageConfigSecretPath        = "/var/azure-storage-config/azure-storage-config.json"
+	DefaultShareTenancyCost        = "true"
+	KarpenterCapacityTypeLabel     = "karpenter.sh/capacity-type"
+	KarpenterCapacitySpotTypeValue = "spot"
 )
 
 // ReservedInstanceData keeps record of resources on a node should be

+ 10 - 5
pkg/cloud/csvprovider.go → pkg/cloud/provider/csvprovider.go

@@ -1,10 +1,11 @@
-package cloud
+package provider
 
 import (
 	"encoding/csv"
 	"fmt"
 	"io"
 	"os"
+	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -25,6 +26,10 @@ import (
 
 const refreshMinutes = 60
 
+var (
+	provIdRx = regexp.MustCompile("aws:///([^/]+)/([^/]+)")
+)
+
 type CSVProvider struct {
 	*CustomProvider
 	CSVLocation             string
@@ -303,10 +308,10 @@ func NodeValueFromMapField(m string, n *v1.Node, useRegion bool) string {
 		if mf[1] == "name" {
 			return toReturn + n.Name
 		} else if mf[1] == "labels" {
-			lkey := strings.Join(mf[2:len(mf)], "")
+			lkey := strings.Join(mf[2:], "")
 			return toReturn + n.Labels[lkey]
 		} else if mf[1] == "annotations" {
-			akey := strings.Join(mf[2:len(mf)], "")
+			akey := strings.Join(mf[2:], "")
 			return toReturn + n.Annotations[akey]
 		} else {
 			log.Errorf("Unsupported InstanceIDField %s in CSV For Node", m)
@@ -324,10 +329,10 @@ func PVValueFromMapField(m string, n *v1.PersistentVolume) string {
 		if mf[1] == "name" {
 			return n.Name
 		} else if mf[1] == "labels" {
-			lkey := strings.Join(mf[2:len(mf)], "")
+			lkey := strings.Join(mf[2:], "")
 			return n.Labels[lkey]
 		} else if mf[1] == "annotations" {
-			akey := strings.Join(mf[2:len(mf)], "")
+			akey := strings.Join(mf[2:], "")
 			return n.Annotations[akey]
 		} else {
 			log.Errorf("Unsupported InstanceIDField %s in CSV For PV", m)

+ 63 - 8
pkg/cloud/customprovider.go → pkg/cloud/provider/customprovider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -14,6 +14,7 @@ import (
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
+	"github.com/opencost/opencost/pkg/util"
 	"github.com/opencost/opencost/pkg/util/json"
 
 	v1 "k8s.io/api/core/v1"
@@ -32,10 +33,35 @@ type CustomProvider struct {
 	SpotLabelValue          string
 	GPULabel                string
 	GPULabelValue           string
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
+}
+
+var volTypes = map[string]string{
+	"EBS:VolumeUsage.gp2":    "gp2",
+	"EBS:VolumeUsage.gp3":    "gp3",
+	"EBS:VolumeUsage":        "standard",
+	"EBS:VolumeUsage.sc1":    "sc1",
+	"EBS:VolumeP-IOPS.piops": "io1",
+	"EBS:VolumeUsage.st1":    "st1",
+	"EBS:VolumeUsage.piops":  "io1",
+	"gp2":                    "EBS:VolumeUsage.gp2",
+	"gp3":                    "EBS:VolumeUsage.gp3",
+	"standard":               "EBS:VolumeUsage",
+	"sc1":                    "EBS:VolumeUsage.sc1",
+	"io1":                    "EBS:VolumeUsage.piops",
+	"st1":                    "EBS:VolumeUsage.st1",
+}
+
+type customPVKey struct {
+	Labels                 map[string]string
+	StorageClassParameters map[string]string
+	StorageClassName       string
+	Name                   string
+	DefaultRegion          string
+	ProviderID             string
 }
 
 // PricingSourceSummary returns the pricing source summary for the provider.
@@ -121,8 +147,8 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 		m["name"] = conf.ClusterName
 	}
 	m["provider"] = kubecost.CustomProvider
-	m["region"] = cp.clusterRegion
-	m["account"] = cp.clusterAccountID
+	m["region"] = cp.ClusterRegion
+	m["account"] = cp.ClusterAccountID
 	m["id"] = env.GetClusterID()
 	return m, nil
 }
@@ -153,7 +179,7 @@ func (cp *CustomProvider) NodePricing(key models.Key) (*models.Node, error) {
 	k := key.Features()
 	var gpuCount string
 	if _, ok := cp.Pricing[k]; !ok {
-		// Default is saying that there is no pricing info for the cluster and we should fall back to the defualt values.
+		// Default is saying that there is no pricing info for the cluster and we should fall back to the default values.
 		// An interesting case is if the default values weren't loaded.
 		k = "default"
 	}
@@ -302,7 +328,7 @@ func (cp *CustomProvider) LoadBalancerPricing() (*models.LoadBalancer, error) {
 }
 
 func (*CustomProvider) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string, defaultRegion string) models.PVKey {
-	return &awsPVKey{
+	return &customPVKey{
 		Labels:                 pv.Labels,
 		StorageClassName:       pv.Spec.StorageClassName,
 		StorageClassParameters: parameters,
@@ -310,6 +336,35 @@ func (*CustomProvider) GetPVKey(pv *v1.PersistentVolume, parameters map[string]s
 	}
 }
 
+func (key *customPVKey) ID() string {
+	return key.ProviderID
+}
+
+func (key *customPVKey) GetStorageClass() string {
+	return key.StorageClassName
+}
+
+// Features returns a comma separated string of features for a given PV
+// (@pokom): This was imported from aws which caused a cyclical dependency. This _should_ be refactored to be specific to a custom pvkey
+func (key *customPVKey) Features() string {
+	storageClass := key.StorageClassParameters["type"]
+	if storageClass == "standard" {
+		storageClass = "gp2"
+	}
+	// Storage class names are generally EBS volume types (gp2)
+	// Keys in Pricing are based on UsageTypes (EBS:VolumeType.gp2)
+	// Converts between the 2
+	region, ok := util.GetRegion(key.Labels)
+	if !ok {
+		region = key.DefaultRegion
+	}
+	class, ok := volTypes[storageClass]
+	if !ok {
+		log.Debugf("No voltype mapping for %s's storageClass: %s", key.Name, storageClass)
+	}
+	return region + "," + class
+}
+
 func (k *customProviderKey) GPUCount() int {
 	return 0
 }

+ 26 - 25
pkg/cloud/provider.go → pkg/cloud/provider/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package provider
 
 import (
 	"errors"
@@ -8,8 +8,12 @@ import (
 	"strings"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/alibaba"
+	"github.com/opencost/opencost/pkg/cloud/aws"
 	"github.com/opencost/opencost/pkg/cloud/azure"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/scaleway"
 	"github.com/opencost/opencost/pkg/kubecost"
 
 	"github.com/opencost/opencost/pkg/util"
@@ -26,9 +30,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 )
 
-const KarpenterCapacityTypeLabel = "karpenter.sh/capacity-type"
-const KarpenterCapacitySpotTypeValue = "spot"
-
 // ClusterName returns the name defined in cluster info, defaulting to the
 // CLUSTER_ID environment variable
 func ClusterName(p models.Provider) string {
@@ -168,8 +169,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 			CSVLocation: env.GetCSVPath(),
 			CustomProvider: &CustomProvider{
 				Clientset:        cache,
-				clusterRegion:    cp.region,
-				clusterAccountID: cp.accountID,
+				ClusterRegion:    cp.region,
+				ClusterAccountID: cp.accountID,
 				Config:           NewProviderConfig(config, cp.configFileName),
 			},
 		}, nil
@@ -178,14 +179,14 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		if apiKey == "" {
 			return nil, errors.New("Supply a GCP Key to start getting data")
 		}
-		return &GCP{
+		return &gcp.GCP{
 			Clientset:        cache,
 			APIKey:           apiKey,
 			Config:           NewProviderConfig(config, cp.configFileName),
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
-			clusterProjectID: cp.projectID,
-			metadataClient: metadata.NewClient(
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
+			ClusterProjectID: cp.projectID,
+			MetadataClient: metadata.NewClient(
 				&http.Client{
 					Transport: httputil.NewUserAgentTransport("kubecost", &http.Transport{
 						Dial: (&net.Dialer{
@@ -198,12 +199,12 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		}, nil
 	case kubecost.AWSProvider:
 		log.Info("Found ProviderID starting with \"aws\", using AWS Provider")
-		return &AWS{
+		return &aws.AWS{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
-			clusterRegion:        cp.region,
-			clusterAccountID:     cp.accountID,
-			serviceAccountChecks: models.NewServiceAccountChecks(),
+			ClusterRegion:        cp.region,
+			ClusterAccountID:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
 		}, nil
 	case kubecost.AzureProvider:
 		log.Info("Found ProviderID starting with \"azure\", using Azure Provider")
@@ -216,19 +217,19 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		}, nil
 	case kubecost.AlibabaProvider:
 		log.Info("Found ProviderID starting with \"alibaba\", using Alibaba Cloud Provider")
-		return &Alibaba{
+		return &alibaba.Alibaba{
 			Clientset:            cache,
 			Config:               NewProviderConfig(config, cp.configFileName),
-			clusterRegion:        cp.region,
-			clusterAccountId:     cp.accountID,
-			serviceAccountChecks: models.NewServiceAccountChecks(),
+			ClusterRegion:        cp.region,
+			ClusterAccountId:     cp.accountID,
+			ServiceAccountChecks: models.NewServiceAccountChecks(),
 		}, nil
 	case kubecost.ScalewayProvider:
 		log.Info("Found ProviderID starting with \"scaleway\", using Scaleway Provider")
-		return &Scaleway{
+		return &scaleway.Scaleway{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 
@@ -236,8 +237,8 @@ func NewProvider(cache clustercache.ClusterCache, apiKey string, config *config.
 		log.Info("Unsupported provider, falling back to default")
 		return &CustomProvider{
 			Clientset:        cache,
-			clusterRegion:    cp.region,
-			clusterAccountID: cp.accountID,
+			ClusterRegion:    cp.region,
+			ClusterAccountID: cp.accountID,
 			Config:           NewProviderConfig(config, cp.configFileName),
 		}, nil
 	}
@@ -265,7 +266,7 @@ func getClusterProperties(node *v1.Node) clusterProperties {
 	if metadata.OnGCE() || strings.HasPrefix(providerID, "gce") {
 		cp.provider = kubecost.GCPProvider
 		cp.configFileName = "gcp.json"
-		cp.projectID = parseGCPProjectID(providerID)
+		cp.projectID = gcp.ParseGCPProjectID(providerID)
 	} else if strings.HasPrefix(providerID, "aws") {
 		cp.provider = kubecost.AWSProvider
 		cp.configFileName = "aws.json"

+ 2 - 3
pkg/cloud/providerconfig.go → pkg/cloud/provider/providerconfig.go

@@ -1,8 +1,7 @@
-package cloud
+package provider
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	gopath "path"
 	"strconv"
@@ -277,7 +276,7 @@ func ReturnPricingFromConfigs(filename string) (*models.CustomPricing, error) {
 	if _, err := os.Stat(providerConfigFile); err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to find file %s with err: %v", providerConfigFile, err)
 	}
-	configFile, err := ioutil.ReadFile(providerConfigFile)
+	configFile, err := os.ReadFile(providerConfigFile)
 	if err != nil {
 		return &models.CustomPricing{}, fmt.Errorf("ReturnPricingFromConfigs: unable to open file %s with err: %v", providerConfigFile, err)
 	}

+ 7 - 7
pkg/cloud/scalewayprovider.go → pkg/cloud/scaleway/provider.go

@@ -1,4 +1,4 @@
-package cloud
+package scaleway
 
 import (
 	"errors"
@@ -36,10 +36,10 @@ type ScalewayPricing struct {
 
 type Scaleway struct {
 	Clientset               clustercache.ClusterCache
-	Config                  *ProviderConfig
+	Config                  models.ProviderConfig
 	Pricing                 map[string]*ScalewayPricing
-	clusterRegion           string
-	clusterAccountID        string
+	ClusterRegion           string
+	ClusterAccountID        string
 	DownloadPricingDataLock sync.RWMutex
 }
 
@@ -288,8 +288,8 @@ func (scw *Scaleway) ClusterInfo() (map[string]string, error) {
 		m["name"] = c.ClusterName
 	}
 	m["provider"] = kubecost.ScalewayProvider
-	m["region"] = scw.clusterRegion
-	m["account"] = scw.clusterAccountID
+	m["region"] = scw.ClusterRegion
+	m["account"] = scw.ClusterAccountID
 	m["remoteReadEnabled"] = strconv.FormatBool(remoteEnabled)
 	m["id"] = env.GetClusterID()
 	return m, nil
@@ -370,7 +370,7 @@ func (scw *Scaleway) GetManagementPlatform() (string, error) {
 
 func (c *Scaleway) PricingSourceStatus() map[string]*models.PricingSource {
 	return map[string]*models.PricingSource{
-		InstanceAPIPricing: &models.PricingSource{
+		InstanceAPIPricing: {
 			Name:      InstanceAPIPricing,
 			Enabled:   true,
 			Available: true,

+ 3 - 3
pkg/cmd/agent/agent.go

@@ -7,7 +7,7 @@ import (
 	"path"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -157,13 +157,13 @@ func Execute(opts *AgentOpts) error {
 	})
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(clusterCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(clusterCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	watchConfigFunc := configWatchers.ToWatchFunc()
 	watchedConfigs := configWatchers.GetWatchedConfigs()
 

+ 7 - 7
pkg/costmodel/aggregation.go

@@ -11,10 +11,10 @@ import (
 	"time"
 
 	"github.com/julienschmidt/httprouter"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/patrickmn/go-cache"
 	prometheusClient "github.com/prometheus/client_golang/api"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/errors"
@@ -452,7 +452,7 @@ func AggregateCostData(costData map[string]*CostData, field string, subfields []
 		agg.PVAllocationHourlyAverage = totalVectors(agg.PVAllocationVectors) / agg.TotalHours(resolutionHours)
 
 		// TODO niko/etl does this check out for GPU data? Do we need to rewrite GPU queries to be
-		// culumative?
+		// cumulative?
 		agg.CPUAllocationTotal = totalVectors(agg.CPUAllocationVectors)
 		agg.GPUAllocationTotal = totalVectors(agg.GPUAllocationVectors)
 		agg.PVAllocationTotal = totalVectors(agg.PVAllocationVectors)
@@ -761,7 +761,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 	if err != nil {
 		log.Errorf("failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cp) && err == nil {
+	if provider.CustomPricesEnabled(cp) && err == nil {
 		var cpuCostStr string
 		var ramCostStr string
 		var gpuCostStr string
@@ -839,7 +839,7 @@ func getPriceVectors(cp models.Provider, costDatum *CostData, rate string, disco
 			cost, _ := strconv.ParseFloat(pvcData.Volume.Cost, 64)
 
 			// override with custom pricing if enabled
-			if cloud.CustomPricesEnabled(cp) {
+			if provider.CustomPricesEnabled(cp) {
 				cost = pvCost
 			}
 
@@ -1768,10 +1768,10 @@ func (a *Accesses) warmAggregateCostModelCache() {
 		aggOpts.NoExpireCache = false
 		aggOpts.ShareSplit = SplitTypeWeighted
 		aggOpts.RemoteEnabled = env.IsRemoteEnabled()
-		aggOpts.AllocateIdle = cloud.AllocateIdleByDefault(a.CloudProvider)
+		aggOpts.AllocateIdle = provider.AllocateIdleByDefault(a.CloudProvider)
 
-		sharedNamespaces := cloud.SharedNamespaces(a.CloudProvider)
-		sharedLabelNames, sharedLabelValues := cloud.SharedLabels(a.CloudProvider)
+		sharedNamespaces := provider.SharedNamespaces(a.CloudProvider)
+		sharedLabelNames, sharedLabelValues := provider.SharedLabels(a.CloudProvider)
 
 		if len(sharedNamespaces) > 0 || len(sharedLabelNames) > 0 {
 			aggOpts.SharedResources = NewSharedResourceInfo(true, sharedNamespaces, sharedLabelNames, sharedLabelValues)

+ 5 - 5
pkg/costmodel/allocation_helpers.go

@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
 	"github.com/opencost/opencost/pkg/log"
@@ -1432,7 +1432,7 @@ func applyNodeCostPerCPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerCPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1470,7 +1470,7 @@ func applyNodeCostPerRAMGiBHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerRA
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1508,7 +1508,7 @@ func applyNodeCostPerGPUHr(nodeMap map[nodeKey]*nodePricing, resNodeCostPerGPUHr
 			nodeMap[key] = &nodePricing{
 				Name:       node,
 				NodeType:   instanceType,
-				ProviderID: cloud.ParseID(providerID),
+				ProviderID: provider.ParseID(providerID),
 			}
 		}
 
@@ -1654,7 +1654,7 @@ func (cm *CostModel) getNodePricing(nodeMap map[nodeKey]*nodePricing, nodeKey no
 	if err != nil {
 		log.Warnf("CostModel: failed to load custom pricing: %s", err)
 	}
-	if cloud.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
+	if provider.CustomPricesEnabled(cm.Provider) && customPricingConfig != nil {
 		return cm.getCustomNodePricing(node.Preemptible, node.ProviderID)
 	}
 

+ 4 - 4
pkg/costmodel/cluster.go

@@ -5,10 +5,10 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	prometheus "github.com/prometheus/client_golang/api"
 	"golang.org/x/exp/slices"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/kubecost"
@@ -779,7 +779,7 @@ func ClusterLoadBalancers(client prometheus.Client, start, end time.Time) (map[L
 				Cluster:    cluster,
 				Namespace:  namespace,
 				Name:       fmt.Sprintf("%s/%s", namespace, name), // TODO:ETL this is kept for backwards-compatibility, but not good
-				ProviderID: cloud.ParseLBID(providerID),
+				ProviderID: provider.ParseLBID(providerID),
 			}
 		}
 
@@ -1360,7 +1360,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Bytes = bytes
 	}
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterDisks: failed to load custom pricing: %s", err)
@@ -1405,7 +1405,7 @@ func pvCosts(diskMap map[DiskIdentifier]*Disk, resolution time.Duration, resActi
 		diskMap[key].Cost = cost * (diskMap[key].Bytes / 1024 / 1024 / 1024) * (diskMap[key].Minutes / 60)
 		providerID, _ := result.GetString("provider_id") // just put the providerID set up here, it's the simplest query.
 		if providerID != "" {
-			diskMap[key].ProviderID = cloud.ParsePVID(providerID)
+			diskMap[key].ProviderID = provider.ParsePVID(providerID)
 		}
 	}
 

+ 10 - 10
pkg/costmodel/cluster_helpers.go

@@ -4,8 +4,8 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 
 	"github.com/opencost/opencost/pkg/env"
 	"github.com/opencost/opencost/pkg/log"
@@ -41,7 +41,7 @@ func buildCPUCostMap(
 	cpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -65,7 +65,7 @@ func buildCPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -115,7 +115,7 @@ func buildRAMCostMap(
 	ramCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -139,7 +139,7 @@ func buildRAMCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -190,7 +190,7 @@ func buildGPUCostMap(
 	gpuCostMap := make(map[NodeIdentifier]float64)
 	clusterAndNameToType := make(map[nodeIdentifierNoProviderID]string)
 
-	customPricingEnabled := cloud.CustomPricesEnabled(cp)
+	customPricingEnabled := provider.CustomPricesEnabled(cp)
 	customPricingConfig, err := cp.GetConfig()
 	if err != nil {
 		log.Warnf("ClusterNodes: failed to load custom pricing: %s", err)
@@ -214,7 +214,7 @@ func buildGPUCostMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		keyNon := nodeIdentifierNoProviderID{
 			Cluster: cluster,
@@ -282,7 +282,7 @@ func buildGPUCountMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 		gpuCountMap[key] = gpuCount
 	}
@@ -511,7 +511,7 @@ func buildActiveDataMap(resActiveMins []*prom.QueryResult, resolution time.Durat
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       name,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		if len(result.Values) == 0 {
@@ -560,7 +560,7 @@ func buildPreemptibleMap(
 		key := NodeIdentifier{
 			Cluster:    cluster,
 			Name:       nodeName,
-			ProviderID: cloud.ParseID(providerID),
+			ProviderID: provider.ParseID(providerID),
 		}
 
 		// TODO(michaelmdresser): check this condition at merge time?

+ 118 - 118
pkg/costmodel/cluster_helpers_test.go

@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/prom"
 	"github.com/opencost/opencost/pkg/util"
@@ -29,14 +29,14 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "map2 empty",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
@@ -46,13 +46,13 @@ func TestMergeTypeMaps(t *testing.T) {
 			name: "map1 empty",
 			map1: map[nodeIdentifierNoProviderID]string{},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
@@ -61,31 +61,31 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "no overlap",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node4",
 				}: "type4",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node4",
 				}: "type4",
@@ -94,27 +94,27 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "with overlap",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type4",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
@@ -158,24 +158,24 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "just cpu cost",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -189,22 +189,22 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "just cpu cost with empty provider ID",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 0.048,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					NodeType:     "type1",
@@ -217,29 +217,29 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "cpu cost with overlapping node names",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.048,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.087,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1_A",
@@ -248,11 +248,11 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1_B",
@@ -266,207 +266,207 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "all fields + overlapping node names",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.048,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.087,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 0.033,
 			},
 			ramCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.09,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.3,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 0.024,
 			},
 			gpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.8,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 1.4,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 3.1,
 			},
 			gpuCountMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 1.0,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 1.0,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 2.0,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 2.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 5.0,
 			},
 			ramBytesMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 2048.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 6303.0,
 			},
 			ramUserPctMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 30.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 42.6,
 			},
 			ramSystemPctMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 15.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 20.1,
 			},
 			cpuBreakdownMap: map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: &ClusterCostsBreakdown{
+				}: {
 					System: 20.2,
 					User:   68.0,
 				},
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
-				}: &ClusterCostsBreakdown{
+				}: {
 					System: 28.9,
 					User:   34.0,
 				},
 			},
 			activeDataMap: map[NodeIdentifier]activeData{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 20, 39, 0, time.UTC),
 					minutes: 5*60 + 35 + (11.0 / 60.0),
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 21, 39, 0, time.UTC),
 					minutes: 5*60 + 36 + (11.0 / 60.0),
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 10, 39, 0, time.UTC),
 					minutes: 5*60 + 25 + (11.0 / 60.0),
 				},
 			},
 			preemptibleMap: map[NodeIdentifier]bool{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: true,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: false,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: false,
 			},
 			labelsMap: map[nodeIdentifierNoProviderID]map[string]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: map[string]string{
+				}: {
 					"labelname1_A": "labelvalue1_A",
 					"labelname1_B": "labelvalue1_B",
 				},
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
-				}: map[string]string{
+				}: {
 					"labelname2_A": "labelvalue2_A",
 					"labelname2_B": "labelvalue2_B",
 				},
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
@@ -494,11 +494,11 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname1_B": "labelvalue1_B",
 					},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
@@ -526,11 +526,11 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname1_B": "labelvalue1_B",
 					},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
@@ -563,30 +563,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-micro cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-micro", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -601,30 +601,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-small cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-small", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -639,30 +639,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-medium cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-medium", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -720,7 +720,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     0,
 						},
@@ -728,14 +728,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 0,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -753,7 +753,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -761,14 +761,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 0,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -786,7 +786,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -795,7 +795,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 			},
 			countMap: map[NodeIdentifier]float64{},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -808,7 +808,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 				{},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -827,7 +827,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -835,14 +835,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 2,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -853,8 +853,8 @@ func TestBuildGPUCostMap(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), "fakeFile"),
 			}
 			testPreemptible := make(map[NodeIdentifier]bool)
 			result, _ := buildGPUCostMap(testCase.promResult, testCase.countMap, testProvider, testPreemptible)
@@ -876,7 +876,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":   "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     0.5,
 				},
@@ -892,7 +892,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
@@ -908,7 +908,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1073741824.0,
 				},
@@ -924,11 +924,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -944,11 +944,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -964,11 +964,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -985,7 +985,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
@@ -994,7 +994,7 @@ func TestAssetCustompricing(t *testing.T) {
 	}
 
 	gpuCountMap := map[NodeIdentifier]float64{
-		NodeIdentifier{
+		{
 			Cluster:    "cluster1",
 			Name:       "node1",
 			ProviderID: "provider1",
@@ -1042,8 +1042,8 @@ func TestAssetCustompricing(t *testing.T) {
 
 	for _, testCase := range cases {
 		t.Run(testCase.name, func(t *testing.T) {
-			testProvider := &cloud.CustomProvider{
-				Config: cloud.NewProviderConfig(config.NewConfigFileManager(nil), ""),
+			testProvider := &provider.CustomProvider{
+				Config: provider.NewProviderConfig(config.NewConfigFileManager(nil), ""),
 			}
 			testProvider.UpdateConfigFromConfigMap(testCase.customPricingMap)
 

+ 1 - 1
pkg/costmodel/csv_export_test.go

@@ -40,7 +40,7 @@ func Test_UpdateCSV(t *testing.T) {
 							NetworkTransferBytes:   10,
 							NetworkReceiveBytes:    11,
 							PVs: map[kubecost.PVKey]*kubecost.PVAllocation{
-								kubecost.PVKey{
+								{
 									Cluster: "test-cluster",
 									Name:    "test-pv",
 								}: {

+ 1 - 1
pkg/costmodel/intervals.go

@@ -45,7 +45,7 @@ func NewIntervalPoint(time time.Time, pointType string, key podKey) IntervalPoin
 	}
 }
 
-// CoefficientComponent is a representitive struct holding two fields which describe an interval
+// CoefficientComponent is a representative struct holding two fields which describe an interval
 // as part of a single number cost coefficient calculation:
 // 1. Proportion: The division of cost based on how many pods were running between those points
 // 2. Time: The ratio of the time between those points to the total time that pod was running

+ 44 - 44
pkg/costmodel/intervals_test.go

@@ -18,28 +18,28 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "four pods w/ various overlaps",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:45 am to 9 am
-				podKey{
+				{
 					Pod: "Pod3",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8 am to 8:15 am
-				podKey{
+				{
 					Pod: "Pod4",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
@@ -61,14 +61,14 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "two pods no overlap",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 8:30 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
@@ -86,14 +86,14 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "two pods total overlap",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
@@ -111,7 +111,7 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "one pod",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
@@ -185,21 +185,21 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1, 0.25},
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod1Key: {
+					{0.5, 0.25},
+					{1, 0.25},
+					{0.5, 0.25},
+					{1.0 / 3.0, 0.25},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod2Key: {
+					{0.5, 0.25},
+					{1.0 / 3.0, 0.25},
 				},
-				pod3Key: []CoefficientComponent{
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod3Key: {
+					{1.0 / 3.0, 0.25},
 				},
-				pod4Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
+				pod4Key: {
+					{0.5, 0.25},
 				},
 			},
 		},
@@ -213,11 +213,11 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod1Key: {
+					{1.0, 0.5},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod2Key: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -231,14 +231,14 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.5},
+				pod1Key: {
+					{0.5, 0.5},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.5},
+				pod2Key: {
+					{0.5, 0.5},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				ummountedPodKey: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -250,8 +250,8 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 1.0},
+				pod1Key: {
+					{1.0, 1.0},
 				},
 			},
 		},
@@ -265,14 +265,14 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
+				pod1Key: {
+					{1.0, 0.25},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
+				pod2Key: {
+					{1.0, 0.25},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				ummountedPodKey: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -284,12 +284,12 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod1Key: {
+					{1.0, 0.5},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
-					CoefficientComponent{1.0, 0.25},
+				ummountedPodKey: {
+					{1.0, 0.25},
+					{1.0, 0.25},
 				},
 			},
 		},

+ 9 - 7
pkg/costmodel/router.go

@@ -16,6 +16,9 @@ import (
 	"time"
 
 	"github.com/microcosm-cc/bluemonday"
+	"github.com/opencost/opencost/pkg/cloud/aws"
+	"github.com/opencost/opencost/pkg/cloud/gcp"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/kubeconfig"
 	"github.com/opencost/opencost/pkg/metrics"
@@ -30,9 +33,8 @@ import (
 
 	"github.com/julienschmidt/httprouter"
 
-	sentry "github.com/getsentry/sentry-go"
+	"github.com/getsentry/sentry-go"
 
-	"github.com/opencost/opencost/pkg/cloud"
 	"github.com/opencost/opencost/pkg/cloud/azure"
 	"github.com/opencost/opencost/pkg/cloud/models"
 	"github.com/opencost/opencost/pkg/cloud/utils"
@@ -585,7 +587,7 @@ func (a *Accesses) GetConfigs(w http.ResponseWriter, r *http.Request, ps httprou
 func (a *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.SpotInfoUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, aws.SpotInfoUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -601,7 +603,7 @@ func (a *Accesses) UpdateSpotInfoConfigs(w http.ResponseWriter, r *http.Request,
 func (a *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.AthenaInfoUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, aws.AthenaInfoUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -613,7 +615,7 @@ func (a *Accesses) UpdateAthenaInfoConfigs(w http.ResponseWriter, r *http.Reques
 func (a *Accesses) UpdateBigQueryInfoConfigs(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Access-Control-Allow-Origin", "*")
-	data, err := a.CloudProvider.UpdateConfig(r.Body, cloud.BigqueryUpdateType)
+	data, err := a.CloudProvider.UpdateConfig(r.Body, gcp.BigqueryUpdateType)
 	if err != nil {
 		w.Write(WrapData(data, err))
 		return
@@ -1590,13 +1592,13 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 	k8sCache.Run()
 
 	cloudProviderKey := env.GetCloudProviderAPIKey()
-	cloudProvider, err := cloud.NewProvider(k8sCache, cloudProviderKey, confManager)
+	cloudProvider, err := provider.NewProvider(k8sCache, cloudProviderKey, confManager)
 	if err != nil {
 		panic(err.Error())
 	}
 
 	// Append the pricing config watcher
-	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
+	configWatchers.AddWatcher(provider.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 
 	watchConfigFunc := configWatchers.ToWatchFunc()

+ 1 - 1
pkg/costmodel/sql.go

@@ -260,7 +260,7 @@ func CostDataRangeFromSQL(field string, value string, window string, start strin
 	rawResult := make([][]byte, len(cols))
 	result := make([]string, len(cols))
 	dest := make([]interface{}, len(cols)) // A temporary interface{} slice
-	for i, _ := range rawResult {
+	for i := range rawResult {
 		dest[i] = &rawResult[i] // Put pointers to each string in the interface slice
 	}
 	nsToLabels := make(map[string]map[string]string)

+ 5 - 5
pkg/env/costmodelenv.go

@@ -165,7 +165,7 @@ func GetPrometheusRetryOnRateLimitDefaultWait() time.Duration {
 // data arriving in the target prom db. For example, if supplying a thanos or cortex querier for the prometheus server, using
 // a 3h offset will ensure that current time = current time - 3h.
 //
-// This offset is NOT the same as the GetThanosOffset() option, as that is only applied to queries made specifically targetting
+// This offset is NOT the same as the GetThanosOffset() option, as that is only applied to queries made specifically targeting
 // thanos. This offset is applied globally.
 func GetPrometheusQueryOffset() time.Duration {
 	offset := Get(PrometheusQueryOffsetEnvVar, "")
@@ -441,12 +441,12 @@ func GetDBBearerToken() string {
 	return Get(DBBearerToken, "")
 }
 
-// GetMultiClusterBasicAuthUsername returns the environemnt variable value for MultiClusterBasicAuthUsername
+// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
 func GetMultiClusterBasicAuthUsername() string {
 	return Get(MultiClusterBasicAuthUsername, "")
 }
 
-// GetMultiClusterBasicAuthPassword returns the environemnt variable value for MultiClusterBasicAuthPassword
+// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
 func GetMultiClusterBasicAuthPassword() string {
 	return Get(MultiClusterBasicAuthPassword, "")
 }
@@ -460,7 +460,7 @@ func GetKubeConfigPath() string {
 	return Get(KubeConfigPathEnvVar, "")
 }
 
-// GetUTCOffset returns the environemnt variable value for UTCOffset
+// GetUTCOffset returns the environment variable value for UTCOffset
 func GetUTCOffset() string {
 	return Get(UTCOffsetEnvVar, "")
 }
@@ -526,7 +526,7 @@ func LegacyExternalCostsAPIDisabled() bool {
 	return GetBool(LegacyExternalAPIDisabledVar, false)
 }
 
-// GetPromClusterLabel returns the environemnt variable value for PromClusterIDLabel
+// GetPromClusterLabel returns the environment variable value for PromClusterIDLabel
 func GetPromClusterLabel() string {
 	return Get(PromClusterIDLabelEnvVar, "cluster_id")
 }

+ 1 - 1
pkg/env/env.go

@@ -128,7 +128,7 @@ func GetDuration(key string, defaultValue time.Duration) time.Duration {
 	return envMapper.GetDuration(key, defaultValue)
 }
 
-// GetList parses a []string from the enviroment variable key parameter.  If the environment
+// GetList parses a []string from the environment variable key parameter.  If the environment
 // // variable is empty or fails to parse, nil is returned.
 func GetList(key, delimiter string) []string {
 	return envMapper.GetList(key, delimiter)

+ 2 - 2
pkg/kubecost/allocation.go

@@ -1434,7 +1434,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 
 	// groupingIdleFiltrationCoeffs is used to track per-resource idle
 	// coefficients on a cluster-by-cluster or node-by-node basis depending
-	// on the IdleByNode option. It is, essentailly, an aggregation of
+	// on the IdleByNode option. It is, essentially, an aggregation of
 	// idleFiltrationCoefficients after they have been
 	// filtered above (in step 3)
 	var groupingIdleFiltrationCoeffs map[string]map[string]float64
@@ -1486,7 +1486,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 			for _, sharedAlloc := range shareSet.Allocations {
 				if _, ok := shareCoefficients[alloc.Name]; !ok {
 					if !alloc.IsIdle() && !alloc.IsUnmounted() {
-						log.Warnf("AllocationSet.AggregateBy: error getting share coefficienct for '%s'", alloc.Name)
+						log.Warnf("AllocationSet.AggregateBy: error getting share coefficient for '%s'", alloc.Name)
 					}
 					continue
 				}

+ 1 - 1
pkg/kubecost/allocationfilter.go

@@ -84,7 +84,7 @@ const (
 // expect to receive these benefits. Passing a custom implementation to a
 // handler may in errors.
 type AllocationFilter interface {
-	// Matches is the canonical in-Go function for determing if an Allocation
+	// Matches is the canonical in-Go function for determining if an Allocation
 	// matches a filter.
 	Matches(a *Allocation) bool
 

+ 1 - 1
pkg/kubecost/asset.go

@@ -69,7 +69,7 @@ type Asset interface {
 // the Properties to use to aggregate, and the mapping from Allocation property
 // to Asset label. For example, consider this asset:
 //
-// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TOOD.
+// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TODO.
 //
 //	  Cloud {
 //		   TotalCost: 10.00,

+ 1 - 1
pkg/kubecost/asset_json.go

@@ -564,7 +564,7 @@ func (n *Node) InterfaceToNode(itf interface{}) error {
 	if NodeType, err := getTypedVal(fmap["nodeType"]); err == nil {
 		n.NodeType = NodeType.(string)
 	}
-	
+
 	if CPUCoreHours, err := getTypedVal(fmap["cpuCoreHours"]); err == nil {
 		n.CPUCoreHours = CPUCoreHours.(float64)
 	}

+ 4 - 4
pkg/kubecost/status.go

@@ -37,10 +37,10 @@ type FileStatus struct {
 
 // CloudStatus describes CloudStore metadata
 type CloudStatus struct {
-	CloudConnectionStatus string                `json:"cloudConnectionStatus"`
-	ProviderType          string                `json:"providerType"`
-	CloudUsage            *CloudAssetStatus     `json:"cloudUsage,omitempty"`
-	Reconciliation        *ReconciliationStatus `json:"reconciliation,omitempty"`
+	ConnectionStatus string                `json:"cloudConnectionStatus"`
+	ProviderType     string                `json:"providerType"`
+	CloudUsage       *CloudAssetStatus     `json:"cloudUsage,omitempty"`
+	Reconciliation   *ReconciliationStatus `json:"reconciliation,omitempty"`
 }
 
 // CloudAssetStatus describes CloudAsset metadata of a CloudStore

+ 2 - 2
pkg/kubecost/summaryallocation.go

@@ -465,7 +465,7 @@ func (sas *SummaryAllocationSet) Clone() *SummaryAllocationSet {
 }
 
 // Add sums two SummaryAllocationSets, which Adds all SummaryAllocations in the
-// given SummaryAllocationSet to thier counterparts in the receiving set. Add
+// given SummaryAllocationSet to their counterparts in the receiving set. Add
 // also expands the Window to include both constituent Windows, in the case
 // that Add is being used from accumulating (as opposed to aggregating). For
 // performance reasons, the function may return either a new set, or an
@@ -985,7 +985,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 
 		// Do not include the unmounted costs when determining sharing
-		// coefficients becuase they do not receive shared costs.
+		// coefficients because they do not receive shared costs.
 		sharingCoeffDenominator -= totalUnmountedCost
 
 		if sharingCoeffDenominator <= 0.0 {

+ 1 - 1
pkg/kubecost/summaryallocation_test.go

@@ -421,7 +421,7 @@ func TestSummaryAllocationSet_RAMEfficiency(t *testing.T) {
 			expectedEfficiency: 0.65,
 		},
 		{
-			name:               "Check RAMEfficiency in presense of an idle allocation",
+			name:               "Check RAMEfficiency in presence of an idle allocation",
 			testsas:            sas6,
 			expectedEfficiency: 0.25,
 		},

+ 1 - 1
pkg/log/profiler.go

@@ -45,7 +45,7 @@ func (p *Profiler) LogAll() {
 		return
 	}
 
-	// Print profiles, largest to smallest. (Inefficienct, but shouldn't matter.)
+	// Print profiles, largest to smallest. (Inefficient, but shouldn't matter.)
 	print := map[string]time.Duration{}
 	for name, value := range p.profiles {
 		print[name] = value

+ 5 - 3
pkg/metrics/pvmetrics.go

@@ -174,9 +174,11 @@ func (kpcrr KubePVStatusPhaseMetric) Write(m *dto.Metric) error {
 	return nil
 }
 
-//--------------------------------------------------------------------------
-//  KubecostPVInfoMetric
-//--------------------------------------------------------------------------
+// --------------------------------------------------------------------------
+//
+//	KubecostPVInfoMetric
+//
+// --------------------------------------------------------------------------
 // KubecostPVInfoMetric is a prometheus.Metric
 type KubecostPVInfoMetric struct {
 	fqName       string

+ 2 - 2
pkg/prom/query.go

@@ -35,7 +35,7 @@ type Context struct {
 	errorCollector *QueryErrorCollector
 }
 
-// NewContext creates a new Promethues querying context from the given client
+// NewContext creates a new Prometheus querying context from the given client
 func NewContext(client prometheus.Client) *Context {
 	var ec QueryErrorCollector
 
@@ -46,7 +46,7 @@ func NewContext(client prometheus.Client) *Context {
 	}
 }
 
-// NewNamedContext creates a new named Promethues querying context from the given client
+// NewNamedContext creates a new named Prometheus querying context from the given client
 func NewNamedContext(client prometheus.Client, name string) *Context {
 	ctx := NewContext(client)
 	ctx.name = name

+ 0 - 1
pkg/prom/ratelimitedclient_test.go

@@ -293,7 +293,6 @@ func TestRateLimitedResponses(t *testing.T) {
 
 }
 
-//
 func AssertDurationEqual(t *testing.T, expected, actual time.Duration) {
 	if actual != expected {
 		t.Fatalf("Expected: %dms, Got: %dms", expected.Milliseconds(), actual.Milliseconds())

+ 1 - 1
pkg/prom/result.go

@@ -17,7 +17,7 @@ var (
 )
 
 func DataFieldFormatErr(query string) error {
-	return fmt.Errorf("Data field improperly formatted in prometheus repsonse fetching query '%s'", query)
+	return fmt.Errorf("Data field improperly formatted in prometheus response fetching query '%s'", query)
 }
 
 func DataPointFormatErr(query string) error {

+ 1 - 1
pkg/util/buffer.go

@@ -392,7 +392,7 @@ func bytesToString(b []byte) string {
 	// this, we are pinning the byte slice's underlying array in memory, preventing it from
 	// being garbage collected while the string is still in use. If we are using the Bank()
 	// functionality to cache new strings, we risk keeping the pinned array alive. To avoid this,
-	// we will use the BankFunc() call which uses the casted string to check for existance of a
+	// we will use the BankFunc() call which uses the casted string to check for existence of a
 	// cached string. If it exists, then we drop the pinned reference immediately and use the
 	// cached string. If it does _not_ exist, then we use the passed func() string to allocate a new
 	// string and cache it. This will prevent us from allocating throw-away strings just to

+ 4 - 3
pkg/util/fileutil/fileutil.go

@@ -3,9 +3,10 @@ package fileutil
 import "os"
 
 // File exists has three different return cases that should be handled:
-//   1. File exists and is not a directory (true, nil)
-//   2. File does not exist (false, nil)
-//   3. File may or may not exist. Error occurred during stat (false, error)
+//  1. File exists and is not a directory (true, nil)
+//  2. File does not exist (false, nil)
+//  3. File may or may not exist. Error occurred during stat (false, error)
+//
 // The third case represents the scenario where the stat returns an error,
 // but the error isn't relevant to the path. This can happen when the current
 // user doesn't have permission to access the file.

+ 2 - 2
pkg/util/mapper/mapper.go

@@ -93,7 +93,7 @@ type PrimitiveMapReader interface {
 	// is empty or fails to parse, the defaultValue parameter is returned.
 	GetBool(key string, defaultValue bool) bool
 
-	// GetDuration parses a time.Duration from the map key paramter. If the
+	// GetDuration parses a time.Duration from the map key parameter. If the
 	// value is empty to fails to parse, the defaultValue is returned.
 	GetDuration(key string, defaultValue time.Duration) time.Duration
 
@@ -161,7 +161,7 @@ type PrimitiveMap interface {
 //  Go Map Implementation
 //--------------------------------------------------------------------------
 
-// GoMap is an implementatino of mapper.Map for map[string]string
+// GoMap is an implementation of mapper.Map for map[string]string
 type GoMap struct {
 	m map[string]string
 }

+ 1 - 1
pkg/util/timeutil/timeutil.go

@@ -229,7 +229,7 @@ func CleanDurationString(duration string) string {
 // ParseTimeRange returns a start and end time, respectively, which are converted from
 // a duration and offset, defined as strings with Prometheus-style syntax.
 func ParseTimeRange(duration, offset time.Duration) (time.Time, time.Time) {
-	// endTime defaults to the current time, unless an offset is explicity declared,
+	// endTime defaults to the current time, unless an offset is explicitly declared,
 	// in which case it shifts endTime back by given duration
 	endTime := time.Now()
 	if offset > 0 {

+ 38 - 38
test/cloud_test.go

@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/opencost/opencost/pkg/cloud"
+	"github.com/opencost/opencost/pkg/cloud/provider"
 	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/opencost/opencost/pkg/config"
 	"github.com/opencost/opencost/pkg/costmodel"
@@ -34,7 +34,7 @@ func TestRegionValueFromMapField(t *testing.T) {
 	n.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
 	n.Labels = make(map[string]string)
 	n.Labels[v1.LabelZoneRegion] = wantRegion
-	got := cloud.NodeValueFromMapField(providerIDMap, n, true)
+	got := provider.NodeValueFromMapField(providerIDMap, n, true)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -44,7 +44,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant := "i-05445591e0d182d42"
 	n := &v1.Node{}
 	n.Spec.ProviderID = "aws:///us-east-1a/i-05445591e0d182d42"
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
@@ -52,7 +52,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant2 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0")
 	n2 := &v1.Node{}
 	n2.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/MC_test_test_eastus/providers/Microsoft.Compute/virtualMachines/aks-agentpool-20139558-0"
-	got2 := cloud.NodeValueFromMapField(providerIDMap, n2, false)
+	got2 := provider.NodeValueFromMapField(providerIDMap, n2, false)
 	if got2 != providerIDWant2 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant2, got2)
 	}
@@ -60,7 +60,7 @@ func TestTransformedValueFromMapField(t *testing.T) {
 	providerIDWant3 := strings.ToLower("/subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0")
 	n3 := &v1.Node{}
 	n3.Spec.ProviderID = "azure:///subscriptions/0bd50fdf-c923-4e1e-850c-196dd3dcc5d3/resourceGroups/mc_testspot_testspot_eastus/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-19213364-vmss/virtualMachines/0"
-	got3 := cloud.NodeValueFromMapField(providerIDMap, n3, false)
+	got3 := provider.NodeValueFromMapField(providerIDMap, n3, false)
 	if got3 != providerIDWant3 {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant3, got3)
 	}
@@ -77,17 +77,17 @@ func TestNodeValueFromMapField(t *testing.T) {
 	n.Labels = make(map[string]string)
 	n.Labels["foo"] = labelFooWant
 
-	got := cloud.NodeValueFromMapField(providerIDMap, n, false)
+	got := provider.NodeValueFromMapField(providerIDMap, n, false)
 	if got != providerIDWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", providerIDMap, providerIDWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(nameMap, n, false)
+	got = provider.NodeValueFromMapField(nameMap, n, false)
 	if got != nameWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", nameMap, nameWant, got)
 	}
 
-	got = cloud.NodeValueFromMapField(labelMapFoo, n, false)
+	got = provider.NodeValueFromMapField(labelMapFoo, n, false)
 	if got != labelFooWant {
 		t.Errorf("Assert on '%s' want '%s' got '%s'", labelMapFoo, labelFooWant, got)
 	}
@@ -104,10 +104,10 @@ func TestPVPriceFromCSV(t *testing.T) {
 	})
 
 	wantPrice := "0.1337"
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_pv.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -152,10 +152,10 @@ func TestNodePriceFromCSVWithGPU(t *testing.T) {
 	n2.Status.Capacity = v1.ResourceList{"nvidia.com/gpu": *resource.NewScaledQuantity(2, 0)}
 	wantPrice2 := "1.733700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -211,10 +211,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 
 	wantPrice := "0.133700"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -241,10 +241,10 @@ func TestNodePriceFromCSV(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node")
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k3 := c.GetKey(n.Labels, n)
@@ -287,10 +287,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 	n3.Labels[v1.LabelZoneRegion] = "fakeregion"
 	wantPrice3 := "0.1339"
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_region.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -337,10 +337,10 @@ func TestNodePriceFromCSVWithRegion(t *testing.T) {
 		t.Errorf("CSV provider should return nil on missing node, instead returned %+v", resN4)
 	}
 
-	c2 := &cloud.CSVProvider{
+	c2 := &provider.CSVProvider{
 		CSVLocation: "../configs/fake.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 	k5 := c.GetKey(n.Labels, n)
@@ -379,10 +379,10 @@ func TestNodePriceFromCSVWithBadConfig(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "invalid.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "invalid.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -413,10 +413,10 @@ func TestSourceMatchesFromCSV(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "/default.json"),
 		},
 	}
 	c.DownloadPricingData()
@@ -492,10 +492,10 @@ func TestNodePriceFromCSVWithCase(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}
 
@@ -526,10 +526,10 @@ func TestNodePriceFromCSVByClass(t *testing.T) {
 		LocalConfigPath: "./",
 	})
 
-	c := &cloud.CSVProvider{
+	c := &provider.CSVProvider{
 		CSVLocation: "../configs/pricing_schema_case.csv",
-		CustomProvider: &cloud.CustomProvider{
-			Config: cloud.NewProviderConfig(confMan, "../configs/default.json"),
+		CustomProvider: &provider.CustomProvider{
+			Config: provider.NewProviderConfig(confMan, "../configs/default.json"),
 		},
 	}