Quellcode durchsuchen

Spelling and formatting cleanup (#1908)

* Run gofmt -s -w . (run formatting and simplify code)

Signed-off-by: Neal Ormsbee <neal.ormsbee@gmail.com>

* Correct a series of misspellings as detected by https://goreportcard.com/report/github.com/opencost/opencost

Signed-off-by: Neal Ormsbee <neal.ormsbee@gmail.com>

* Revert gofmt -s changes to allocation v2 filters file. The spacing is important to the explanation in the comments

Signed-off-by: Neal Ormsbee <neal.ormsbee@gmail.com>

* One more spelling correction

Signed-off-by: Neal Ormsbee <neal.ormsbee@gmail.com>

---------

Signed-off-by: Neal Ormsbee <neal.ormsbee@gmail.com>
Neal Ormsbee vor 3 Jahren
Ursprung
Commit
49d8134cb9

+ 3 - 3
pkg/cloud/aliyunprovider.go

@@ -460,7 +460,7 @@ func (alibaba *Alibaba) DownloadPricingData() error {
 		alibaba.Pricing[lookupKey] = pricingObj
 	}
 
-	// set the first occurance of region from the node
+	// set the first occurrence of region from the node
 	if alibaba.clusterRegion == "" {
 		for _, node := range nodeList {
 			if regionID, ok := node.Labels["topology.kubernetes.io/region"]; ok {
@@ -947,7 +947,7 @@ func (alibabaPVKey *AlibabaPVKey) GetStorageClass() string {
 // When supporting subscription and Premptible resources this HTTP call needs to be modified with PriceUnit information
 // When supporting different new type of instances like Compute Optimized, Memory Optimized etc make sure you add the instance type
 // in unit test and check if it works or not to create the ack request and processDescribePriceAndCreateAlibabaPricing function
-// else more paramters need to be pulled from kubernetes node response or gather infromation from elsewhere and function modified.
+// else more parameters need to be pulled from kubernetes node response or gather information from elsewhere and function modified.
 func createDescribePriceACSRequest(i interface{}) (*requests.CommonRequest, error) {
 	request := requests.NewCommonRequest()
 	request.Method = requests.GET
@@ -1315,7 +1315,7 @@ func generateSlimK8sDiskFromV1PV(pv *v1.PersistentVolume, regionID string) *Slim
 		}
 	}
 
-	// Highly unlikely that label pv.Spec.CSI.VolumeAttributes["type"] doesn't exist but if occured default to cloud (most basic disk type)
+	// Highly unlikely that label pv.Spec.CSI.VolumeAttributes["type"] doesn't exist but if occurred default to cloud (most basic disk type)
 	if diskCategory == "" {
 		diskCategory = ALIBABA_DISK_CLOUD_CATEGORY
 	}

+ 18 - 18
pkg/cloud/aliyunprovider_test.go

@@ -594,10 +594,10 @@ func TestDetermineKeyForPricing(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnString, returnErr := determineKeyForPricing(c.testVar)
 			if c.expectedError == nil && returnErr != nil {
-				t.Fatalf("Case name %s: expected error was nil but recieved error %v", c.name, returnErr)
+				t.Fatalf("Case name %s: expected error was nil but received error %v", c.name, returnErr)
 			}
 			if returnString != c.expectedKey {
-				t.Fatalf("Case name %s: determineKeyForPricing recieved %s but expected %s", c.name, returnString, c.expectedKey)
+				t.Fatalf("Case name %s: determineKeyForPricing received %s but expected %s", c.name, returnString, c.expectedKey)
 			}
 		})
 	}
@@ -635,22 +635,22 @@ func TestGenerateSlimK8sNodeFromV1Node(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnSlimK8sNode := generateSlimK8sNodeFromV1Node(c.testNode)
 			if returnSlimK8sNode.InstanceType != c.expectedSlimNode.InstanceType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceType: %s , recieved InstanceType: %s", c.expectedSlimNode.InstanceType, returnSlimK8sNode.InstanceType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceType: %s , received InstanceType: %s", c.expectedSlimNode.InstanceType, returnSlimK8sNode.InstanceType)
 			}
 			if returnSlimK8sNode.RegionID != c.expectedSlimNode.RegionID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected RegionID: %s , recieved RegionID: %s", c.expectedSlimNode.RegionID, returnSlimK8sNode.RegionID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected RegionID: %s , received RegionID: %s", c.expectedSlimNode.RegionID, returnSlimK8sNode.RegionID)
 			}
 			if returnSlimK8sNode.PriceUnit != c.expectedSlimNode.PriceUnit {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected PriceUnit: %s , recieved PriceUnit: %s", c.expectedSlimNode.PriceUnit, returnSlimK8sNode.PriceUnit)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected PriceUnit: %s , received PriceUnit: %s", c.expectedSlimNode.PriceUnit, returnSlimK8sNode.PriceUnit)
 			}
 			if returnSlimK8sNode.MemorySizeInKiB != c.expectedSlimNode.MemorySizeInKiB {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected MemorySizeInKiB: %s , recieved MemorySizeInKiB: %s", c.expectedSlimNode.MemorySizeInKiB, returnSlimK8sNode.MemorySizeInKiB)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected MemorySizeInKiB: %s , received MemorySizeInKiB: %s", c.expectedSlimNode.MemorySizeInKiB, returnSlimK8sNode.MemorySizeInKiB)
 			}
 			if returnSlimK8sNode.OSType != c.expectedSlimNode.OSType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected OSType: %s , recieved OSType: %s", c.expectedSlimNode.OSType, returnSlimK8sNode.OSType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected OSType: %s , received OSType: %s", c.expectedSlimNode.OSType, returnSlimK8sNode.OSType)
 			}
 			if returnSlimK8sNode.InstanceTypeFamily != c.expectedSlimNode.InstanceTypeFamily {
-				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceTypeFamily: %s , recieved InstanceTypeFamily: %s", c.expectedSlimNode.InstanceTypeFamily, returnSlimK8sNode.InstanceTypeFamily)
+				t.Fatalf("unexpected conversion in function generateSlimK8sNodeFromV1Node expected InstanceTypeFamily: %s , received InstanceTypeFamily: %s", c.expectedSlimNode.InstanceTypeFamily, returnSlimK8sNode.InstanceTypeFamily)
 			}
 		})
 	}
@@ -695,28 +695,28 @@ func TestGenerateSlimK8sDiskFromV1PV(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnSlimK8sDisk := generateSlimK8sDiskFromV1PV(c.testPV, c.inpRegionID)
 			if returnSlimK8sDisk.DiskType != c.expectedSlimDisk.DiskType {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskType: %s , recieved DiskType: %s", c.expectedSlimDisk.DiskType, returnSlimK8sDisk.DiskType)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskType: %s , received DiskType: %s", c.expectedSlimDisk.DiskType, returnSlimK8sDisk.DiskType)
 			}
 			if returnSlimK8sDisk.RegionID != c.expectedSlimDisk.RegionID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected RegionID: %s , recieved RegionID Type: %s", c.expectedSlimDisk.RegionID, returnSlimK8sDisk.RegionID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected RegionID: %s , received RegionID Type: %s", c.expectedSlimDisk.RegionID, returnSlimK8sDisk.RegionID)
 			}
 			if returnSlimK8sDisk.PriceUnit != c.expectedSlimDisk.PriceUnit {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PriceUnit: %s , recieved PriceUnit Type: %s", c.expectedSlimDisk.PriceUnit, returnSlimK8sDisk.PriceUnit)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PriceUnit: %s , received PriceUnit Type: %s", c.expectedSlimDisk.PriceUnit, returnSlimK8sDisk.PriceUnit)
 			}
 			if returnSlimK8sDisk.SizeInGiB != c.expectedSlimDisk.SizeInGiB {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected SizeInGiB: %s , recieved SizeInGiB Type: %s", c.expectedSlimDisk.SizeInGiB, returnSlimK8sDisk.SizeInGiB)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected SizeInGiB: %s , received SizeInGiB Type: %s", c.expectedSlimDisk.SizeInGiB, returnSlimK8sDisk.SizeInGiB)
 			}
 			if returnSlimK8sDisk.DiskCategory != c.expectedSlimDisk.DiskCategory {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskCategory: %s , recieved DiskCategory Type: %s", c.expectedSlimDisk.DiskCategory, returnSlimK8sDisk.DiskCategory)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected DiskCategory: %s , received DiskCategory Type: %s", c.expectedSlimDisk.DiskCategory, returnSlimK8sDisk.DiskCategory)
 			}
 			if returnSlimK8sDisk.PerformanceLevel != c.expectedSlimDisk.PerformanceLevel {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PerformanceLevel: %s , recieved PerformanceLevel Type: %s", c.expectedSlimDisk.PerformanceLevel, returnSlimK8sDisk.PerformanceLevel)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected PerformanceLevel: %s , received PerformanceLevel Type: %s", c.expectedSlimDisk.PerformanceLevel, returnSlimK8sDisk.PerformanceLevel)
 			}
 			if returnSlimK8sDisk.ProviderID != c.expectedSlimDisk.ProviderID {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected ProviderID: %s , recieved ProviderID Type: %s", c.expectedSlimDisk.ProviderID, returnSlimK8sDisk.ProviderID)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected ProviderID: %s , received ProviderID Type: %s", c.expectedSlimDisk.ProviderID, returnSlimK8sDisk.ProviderID)
 			}
 			if returnSlimK8sDisk.StorageClass != c.expectedSlimDisk.StorageClass {
-				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected StorageClass: %s , recieved StorageClass Type: %s", c.expectedSlimDisk.StorageClass, returnSlimK8sDisk.StorageClass)
+				t.Fatalf("unexpected conversion in function generateSlimK8sDiskFromV1PV expected StorageClass: %s , received StorageClass Type: %s", c.expectedSlimDisk.StorageClass, returnSlimK8sDisk.StorageClass)
 			}
 		})
 	}
@@ -753,7 +753,7 @@ func TestGetNumericalValueFromResourceQuantity(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnValue := getNumericalValueFromResourceQuantity(c.inputResourceQuanity)
 			if c.expectedValue != returnValue {
-				t.Fatalf("Case name %s: getNumericalValueFromResourceQuantity recieved %s but expected %s", c.name, returnValue, c.expectedValue)
+				t.Fatalf("Case name %s: getNumericalValueFromResourceQuantity received %s but expected %s", c.name, returnValue, c.expectedValue)
 			}
 		})
 	}
@@ -831,7 +831,7 @@ func TestDeterminePVRegion(t *testing.T) {
 		t.Run(c.name, func(t *testing.T) {
 			returnRegion := determinePVRegion(c.inputPV)
 			if c.expectedRegion != returnRegion {
-				t.Fatalf("Case name %s: determinePVRegion recieved region :%s but expected region: %s", c.name, returnRegion, c.expectedRegion)
+				t.Fatalf("Case name %s: determinePVRegion received region :%s but expected region: %s", c.name, returnRegion, c.expectedRegion)
 			}
 		})
 	}

+ 1 - 2
pkg/cloud/awsprovider.go

@@ -60,7 +60,6 @@ const (
 	AWSHourlyPublicIPCost    = 0.005
 	EKSCapacityTypeLabel     = "eks.amazonaws.com/capacityType"
 	EKSCapacitySpotTypeValue = "SPOT"
-	
 )
 
 var (
@@ -619,7 +618,7 @@ func (k *awsKey) ID() string {
 	return ""
 }
 
-// Features will return a comma seperated list of features for the given node
+// Features will return a comma separated list of features for the given node
 // If the node has a spot label, it will be included in the list
 // Otherwise, the list include instance type, operating system, and the region
 func (k *awsKey) Features() string {

+ 2 - 2
pkg/cloud/awsprovider_test.go

@@ -326,7 +326,7 @@ func Test_populate_pricing(t *testing.T) {
 			Sku:           "M6UGCCQ3CDJQAA37",
 			OfferTermCode: "JRTCKXETXF",
 			PriceDimensions: map[string]*AWSRateCode{
-				"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7": &AWSRateCode{
+				"M6UGCCQ3CDJQAA37.JRTCKXETXF.6YS6EN2CT7": {
 					Unit: "GB-Mo",
 					PricePerUnit: AWSCurrencyCode{
 						USD: "0.0800000000",
@@ -465,7 +465,7 @@ func Test_populate_pricing(t *testing.T) {
 			Sku:           "R83VXG9NAPDASEGN",
 			OfferTermCode: "5Y9WH78GDR",
 			PriceDimensions: map[string]*AWSRateCode{
-				"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6": &AWSRateCode{
+				"R83VXG9NAPDASEGN.5Y9WH78GDR.Q7UJUT2CE6": {
 					Unit: "GB-Mo",
 					PricePerUnit: AWSCurrencyCode{
 						USD: "",

+ 4 - 4
pkg/cloud/csvprovider.go

@@ -303,10 +303,10 @@ func NodeValueFromMapField(m string, n *v1.Node, useRegion bool) string {
 		if mf[1] == "name" {
 			return toReturn + n.Name
 		} else if mf[1] == "labels" {
-			lkey := strings.Join(mf[2:len(mf)], "")
+			lkey := strings.Join(mf[2:], "")
 			return toReturn + n.Labels[lkey]
 		} else if mf[1] == "annotations" {
-			akey := strings.Join(mf[2:len(mf)], "")
+			akey := strings.Join(mf[2:], "")
 			return toReturn + n.Annotations[akey]
 		} else {
 			log.Errorf("Unsupported InstanceIDField %s in CSV For Node", m)
@@ -324,10 +324,10 @@ func PVValueFromMapField(m string, n *v1.PersistentVolume) string {
 		if mf[1] == "name" {
 			return n.Name
 		} else if mf[1] == "labels" {
-			lkey := strings.Join(mf[2:len(mf)], "")
+			lkey := strings.Join(mf[2:], "")
 			return n.Labels[lkey]
 		} else if mf[1] == "annotations" {
-			akey := strings.Join(mf[2:len(mf)], "")
+			akey := strings.Join(mf[2:], "")
 			return n.Annotations[akey]
 		} else {
 			log.Errorf("Unsupported InstanceIDField %s in CSV For PV", m)

+ 1 - 1
pkg/cloud/customprovider.go

@@ -153,7 +153,7 @@ func (cp *CustomProvider) NodePricing(key models.Key) (*models.Node, error) {
 	k := key.Features()
 	var gpuCount string
 	if _, ok := cp.Pricing[k]; !ok {
-		// Default is saying that there is no pricing info for the cluster and we should fall back to the defualt values.
+		// Default is saying that there is no pricing info for the cluster and we should fall back to the default values.
 		// An interesting case is if the default values weren't loaded.
 		k = "default"
 	}

+ 3 - 4
pkg/cloud/gcpprovider.go

@@ -46,7 +46,6 @@ const (
 
 	GKEPreemptibleLabel = "cloud.google.com/gke-preemptible"
 	GKESpotLabel        = "cloud.google.com/gke-spot"
-	
 )
 
 // List obtained by installing the `gcloud` CLI tool,
@@ -799,7 +798,7 @@ func (gcp *GCP) parsePage(r io.Reader, inputKeys map[string]models.Key, pvKeys m
 				}
 
 				for _, candidateKey := range candidateKeys {
-					instanceType = strings.Split(candidateKey, ",")[1] // we may have overriden this while generating candidate keys
+					instanceType = strings.Split(candidateKey, ",")[1] // we may have overridden this while generating candidate keys
 					region := strings.Split(candidateKey, ",")[0]
 					candidateKeyGPU := candidateKey + ",gpu"
 					gcp.ValidPricingKeys[candidateKey] = true
@@ -1209,12 +1208,12 @@ func newReservedCounter(instance *GCPReservedInstance) *GCPReservedCounter {
 
 // Two available Reservation plans for GCP, 1-year and 3-year
 var gcpReservedInstancePlans map[string]*GCPReservedInstancePlan = map[string]*GCPReservedInstancePlan{
-	GCPReservedInstancePlanOneYear: &GCPReservedInstancePlan{
+	GCPReservedInstancePlanOneYear: {
 		Name:    GCPReservedInstancePlanOneYear,
 		CPUCost: 0.019915,
 		RAMCost: 0.002669,
 	},
-	GCPReservedInstancePlanThreeYear: &GCPReservedInstancePlan{
+	GCPReservedInstancePlanThreeYear: {
 		Name:    GCPReservedInstancePlanThreeYear,
 		CPUCost: 0.014225,
 		RAMCost: 0.001907,

+ 4 - 4
pkg/cloud/gcpprovider_test.go

@@ -308,7 +308,7 @@ func TestParsePage(t *testing.T) {
 	}
 
 	expectedActualPrices := map[string]*GCPPricing{
-		"us-central1,a2highgpu,ondemand,gpu": &GCPPricing{
+		"us-central1,a2highgpu,ondemand,gpu": {
 			Name:        "services/6F81-5844-456A/skus/039F-D0DA-4055",
 			SKUID:       "039F-D0DA-4055",
 			Description: "Nvidia Tesla A100 GPU running in Americas",
@@ -320,7 +320,7 @@ func TestParsePage(t *testing.T) {
 			},
 			ServiceRegions: []string{"us-central1", "us-east1", "us-west1"},
 			PricingInfo: []*PricingInfo{
-				&PricingInfo{
+				{
 					Summary: "",
 					PricingExpression: &PricingExpression{
 						UsageUnit:                "h",
@@ -329,7 +329,7 @@ func TestParsePage(t *testing.T) {
 						BaseUnitConversionFactor: 0,
 						DisplayQuantity:          1,
 						TieredRates: []*TieredRates{
-							&TieredRates{
+							{
 								StartUsageAmount: 0,
 								UnitPrice: &UnitPriceInfo{
 									CurrencyCode: "USD",
@@ -353,7 +353,7 @@ func TestParsePage(t *testing.T) {
 				GPUCost:          "2.933908",
 			},
 		},
-		"us-central1,a2highgpu,ondemand": &GCPPricing{
+		"us-central1,a2highgpu,ondemand": {
 			Node: &models.Node{
 				VCPUCost:         "0.031611",
 				RAMCost:          "0.004237",

+ 1 - 1
pkg/cloud/scalewayprovider.go

@@ -370,7 +370,7 @@ func (scw *Scaleway) GetManagementPlatform() (string, error) {
 
 func (c *Scaleway) PricingSourceStatus() map[string]*models.PricingSource {
 	return map[string]*models.PricingSource{
-		InstanceAPIPricing: &models.PricingSource{
+		InstanceAPIPricing: {
 			Name:      InstanceAPIPricing,
 			Enabled:   true,
 			Available: true,

+ 1 - 1
pkg/costmodel/aggregation.go

@@ -452,7 +452,7 @@ func AggregateCostData(costData map[string]*CostData, field string, subfields []
 		agg.PVAllocationHourlyAverage = totalVectors(agg.PVAllocationVectors) / agg.TotalHours(resolutionHours)
 
 		// TODO niko/etl does this check out for GPU data? Do we need to rewrite GPU queries to be
-		// culumative?
+		// cumulative?
 		agg.CPUAllocationTotal = totalVectors(agg.CPUAllocationVectors)
 		agg.GPUAllocationTotal = totalVectors(agg.GPUAllocationVectors)
 		agg.PVAllocationTotal = totalVectors(agg.PVAllocationVectors)

+ 113 - 113
pkg/costmodel/cluster_helpers_test.go

@@ -29,14 +29,14 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "map2 empty",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
@@ -46,13 +46,13 @@ func TestMergeTypeMaps(t *testing.T) {
 			name: "map1 empty",
 			map1: map[nodeIdentifierNoProviderID]string{},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
@@ -61,31 +61,31 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "no overlap",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node4",
 				}: "type4",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node4",
 				}: "type4",
@@ -94,27 +94,27 @@ func TestMergeTypeMaps(t *testing.T) {
 		{
 			name: "with overlap",
 			map1: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			map2: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type4",
 			},
 			expected: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
@@ -158,24 +158,24 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "just cpu cost",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -189,22 +189,22 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "just cpu cost with empty provider ID",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 0.048,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					NodeType:     "type1",
@@ -217,29 +217,29 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "cpu cost with overlapping node names",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.048,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.087,
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1_A",
@@ -248,11 +248,11 @@ func TestBuildNodeMap(t *testing.T) {
 					CPUBreakdown: &ClusterCostsBreakdown{},
 					RAMBreakdown: &ClusterCostsBreakdown{},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1_B",
@@ -266,207 +266,207 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "all fields + overlapping node names",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.048,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.087,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 0.033,
 			},
 			ramCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.09,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 0.3,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 0.024,
 			},
 			gpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 0.8,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 1.4,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 3.1,
 			},
 			gpuCountMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: 1.0,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: 1.0,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: 2.0,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 2.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 5.0,
 			},
 			ramBytesMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 2048.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 6303.0,
 			},
 			ramUserPctMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 30.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 42.6,
 			},
 			ramSystemPctMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 15.0,
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: 20.1,
 			},
 			cpuBreakdownMap: map[nodeIdentifierNoProviderID]*ClusterCostsBreakdown{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: &ClusterCostsBreakdown{
+				}: {
 					System: 20.2,
 					User:   68.0,
 				},
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
-				}: &ClusterCostsBreakdown{
+				}: {
 					System: 28.9,
 					User:   34.0,
 				},
 			},
 			activeDataMap: map[NodeIdentifier]activeData{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 20, 39, 0, time.UTC),
 					minutes: 5*60 + 35 + (11.0 / 60.0),
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 21, 39, 0, time.UTC),
 					minutes: 5*60 + 36 + (11.0 / 60.0),
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
-				}: activeData{
+				}: {
 					start:   time.Date(2020, 6, 16, 3, 45, 28, 0, time.UTC),
 					end:     time.Date(2020, 6, 16, 9, 10, 39, 0, time.UTC),
 					minutes: 5*60 + 25 + (11.0 / 60.0),
 				},
 			},
 			preemptibleMap: map[NodeIdentifier]bool{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
 				}: true,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
 				}: false,
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
 				}: false,
 			},
 			labelsMap: map[nodeIdentifierNoProviderID]map[string]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
-				}: map[string]string{
+				}: {
 					"labelname1_A": "labelvalue1_A",
 					"labelname1_B": "labelvalue1_B",
 				},
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
-				}: map[string]string{
+				}: {
 					"labelname2_A": "labelvalue2_A",
 					"labelname2_B": "labelvalue2_B",
 				},
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "type1",
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node2",
 				}: "type2",
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_A",
@@ -494,11 +494,11 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname1_B": "labelvalue1_B",
 					},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1_B",
@@ -526,11 +526,11 @@ func TestBuildNodeMap(t *testing.T) {
 						"labelname1_B": "labelvalue1_B",
 					},
 				},
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
-				}: &Node{
+				}: {
 					Cluster:    "cluster1",
 					Name:       "node2",
 					ProviderID: "prov_node2_A",
@@ -563,30 +563,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-micro cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-micro", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -601,30 +601,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-small cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-small", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -639,30 +639,30 @@ func TestBuildNodeMap(t *testing.T) {
 		{
 			name: "e2-medium cpu cost adjustment",
 			cpuCostMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
 				}: 0.048,
 			},
 			cpuCoresMap: map[nodeIdentifierNoProviderID]float64{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: 6.0, // GKE lies about number of cores
 			},
 			clusterAndNameToType: map[nodeIdentifierNoProviderID]string{
-				nodeIdentifierNoProviderID{
+				{
 					Cluster: "cluster1",
 					Name:    "node1",
 				}: "e2-medium", // for this node type
 			},
 			expected: map[NodeIdentifier]*Node{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "prov_node1",
-				}: &Node{
+				}: {
 					Cluster:      "cluster1",
 					Name:         "node1",
 					ProviderID:   "prov_node1",
@@ -720,7 +720,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     0,
 						},
@@ -728,14 +728,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 0,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -753,7 +753,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -761,14 +761,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 0,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -786,7 +786,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -795,7 +795,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 			},
 			countMap: map[NodeIdentifier]float64{},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -808,7 +808,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 				{},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -827,7 +827,7 @@ func TestBuildGPUCostMap(t *testing.T) {
 						"provider_id":   "provider1",
 					},
 					Values: []*util.Vector{
-						&util.Vector{
+						{
 							Timestamp: 0,
 							Value:     2,
 						},
@@ -835,14 +835,14 @@ func TestBuildGPUCostMap(t *testing.T) {
 				},
 			},
 			countMap: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
 				}: 2,
 			},
 			expected: map[NodeIdentifier]float64{
-				NodeIdentifier{
+				{
 					Cluster:    "cluster1",
 					Name:       "node1",
 					ProviderID: "provider1",
@@ -876,7 +876,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":   "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     0.5,
 				},
@@ -892,7 +892,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
@@ -908,7 +908,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1073741824.0,
 				},
@@ -924,11 +924,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"provider_id":      "provider1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -944,11 +944,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -964,11 +964,11 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
-				&util.Vector{
+				{
 					Timestamp: 3600.0,
 					Value:     1.0,
 				},
@@ -985,7 +985,7 @@ func TestAssetCustompricing(t *testing.T) {
 				"namespace":             "ns1",
 			},
 			Values: []*util.Vector{
-				&util.Vector{
+				{
 					Timestamp: 0,
 					Value:     1.0,
 				},
@@ -994,7 +994,7 @@ func TestAssetCustompricing(t *testing.T) {
 	}
 
 	gpuCountMap := map[NodeIdentifier]float64{
-		NodeIdentifier{
+		{
 			Cluster:    "cluster1",
 			Name:       "node1",
 			ProviderID: "provider1",

+ 1 - 1
pkg/costmodel/csv_export_test.go

@@ -40,7 +40,7 @@ func Test_UpdateCSV(t *testing.T) {
 							NetworkTransferBytes:   10,
 							NetworkReceiveBytes:    11,
 							PVs: map[kubecost.PVKey]*kubecost.PVAllocation{
-								kubecost.PVKey{
+								{
 									Cluster: "test-cluster",
 									Name:    "test-pv",
 								}: {

+ 1 - 1
pkg/costmodel/intervals.go

@@ -45,7 +45,7 @@ func NewIntervalPoint(time time.Time, pointType string, key podKey) IntervalPoin
 	}
 }
 
-// CoefficientComponent is a representitive struct holding two fields which describe an interval
+// CoefficientComponent is a representative struct holding two fields which describe an interval
 // as part of a single number cost coefficient calculation:
 // 1. Proportion: The division of cost based on how many pods were running between those points
 // 2. Time: The ratio of the time between those points to the total time that pod was running

+ 44 - 44
pkg/costmodel/intervals_test.go

@@ -18,28 +18,28 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "four pods w/ various overlaps",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:45 am to 9 am
-				podKey{
+				{
 					Pod: "Pod3",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8 am to 8:15 am
-				podKey{
+				{
 					Pod: "Pod4",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
@@ -61,14 +61,14 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "two pods no overlap",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 8:30 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
@@ -86,14 +86,14 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "two pods total overlap",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
 					time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC),
 				)),
 				// Pod running from 8:30 am to 9 am
-				podKey{
+				{
 					Pod: "Pod2",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 30, 0, 0, time.UTC),
@@ -111,7 +111,7 @@ func TestGetIntervalPointsFromWindows(t *testing.T) {
 			name: "one pod",
 			pvcIntervalMap: map[podKey]kubecost.Window{
 				// Pod running from 8 am to 9 am
-				podKey{
+				{
 					Pod: "Pod1",
 				}: kubecost.Window(kubecost.NewClosedWindow(
 					time.Date(2021, 2, 19, 8, 0, 0, 0, time.UTC),
@@ -185,21 +185,21 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1, 0.25},
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod1Key: {
+					{0.5, 0.25},
+					{1, 0.25},
+					{0.5, 0.25},
+					{1.0 / 3.0, 0.25},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod2Key: {
+					{0.5, 0.25},
+					{1.0 / 3.0, 0.25},
 				},
-				pod3Key: []CoefficientComponent{
-					CoefficientComponent{1.0 / 3.0, 0.25},
+				pod3Key: {
+					{1.0 / 3.0, 0.25},
 				},
-				pod4Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.25},
+				pod4Key: {
+					{0.5, 0.25},
 				},
 			},
 		},
@@ -213,11 +213,11 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod1Key: {
+					{1.0, 0.5},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod2Key: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -231,14 +231,14 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.5},
+				pod1Key: {
+					{0.5, 0.5},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{0.5, 0.5},
+				pod2Key: {
+					{0.5, 0.5},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				ummountedPodKey: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -250,8 +250,8 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 1.0},
+				pod1Key: {
+					{1.0, 1.0},
 				},
 			},
 		},
@@ -265,14 +265,14 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 9, 0, 0, 0, time.UTC), "end", pod2Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
+				pod1Key: {
+					{1.0, 0.25},
 				},
-				pod2Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
+				pod2Key: {
+					{1.0, 0.25},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				ummountedPodKey: {
+					{1.0, 0.5},
 				},
 			},
 		},
@@ -284,12 +284,12 @@ func TestGetPVCCostCoefficients(t *testing.T) {
 				NewIntervalPoint(time.Date(2021, 2, 19, 8, 45, 0, 0, time.UTC), "end", pod1Key),
 			},
 			expected: map[podKey][]CoefficientComponent{
-				pod1Key: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.5},
+				pod1Key: {
+					{1.0, 0.5},
 				},
-				ummountedPodKey: []CoefficientComponent{
-					CoefficientComponent{1.0, 0.25},
-					CoefficientComponent{1.0, 0.25},
+				ummountedPodKey: {
+					{1.0, 0.25},
+					{1.0, 0.25},
 				},
 			},
 		},

+ 1 - 1
pkg/costmodel/sql.go

@@ -260,7 +260,7 @@ func CostDataRangeFromSQL(field string, value string, window string, start strin
 	rawResult := make([][]byte, len(cols))
 	result := make([]string, len(cols))
 	dest := make([]interface{}, len(cols)) // A temporary interface{} slice
-	for i, _ := range rawResult {
+	for i := range rawResult {
 		dest[i] = &rawResult[i] // Put pointers to each string in the interface slice
 	}
 	nsToLabels := make(map[string]map[string]string)

+ 5 - 5
pkg/env/costmodelenv.go

@@ -165,7 +165,7 @@ func GetPrometheusRetryOnRateLimitDefaultWait() time.Duration {
 // data arriving in the target prom db. For example, if supplying a thanos or cortex querier for the prometheus server, using
 // a 3h offset will ensure that current time = current time - 3h.
 //
-// This offset is NOT the same as the GetThanosOffset() option, as that is only applied to queries made specifically targetting
+// This offset is NOT the same as the GetThanosOffset() option, as that is only applied to queries made specifically targeting
 // thanos. This offset is applied globally.
 func GetPrometheusQueryOffset() time.Duration {
 	offset := Get(PrometheusQueryOffsetEnvVar, "")
@@ -441,12 +441,12 @@ func GetDBBearerToken() string {
 	return Get(DBBearerToken, "")
 }
 
-// GetMultiClusterBasicAuthUsername returns the environemnt variable value for MultiClusterBasicAuthUsername
+// GetMultiClusterBasicAuthUsername returns the environment variable value for MultiClusterBasicAuthUsername
 func GetMultiClusterBasicAuthUsername() string {
 	return Get(MultiClusterBasicAuthUsername, "")
 }
 
-// GetMultiClusterBasicAuthPassword returns the environemnt variable value for MultiClusterBasicAuthPassword
+// GetMultiClusterBasicAuthPassword returns the environment variable value for MultiClusterBasicAuthPassword
 func GetMultiClusterBasicAuthPassword() string {
 	return Get(MultiClusterBasicAuthPassword, "")
 }
@@ -460,7 +460,7 @@ func GetKubeConfigPath() string {
 	return Get(KubeConfigPathEnvVar, "")
 }
 
-// GetUTCOffset returns the environemnt variable value for UTCOffset
+// GetUTCOffset returns the environment variable value for UTCOffset
 func GetUTCOffset() string {
 	return Get(UTCOffsetEnvVar, "")
 }
@@ -526,7 +526,7 @@ func LegacyExternalCostsAPIDisabled() bool {
 	return GetBool(LegacyExternalAPIDisabledVar, false)
 }
 
-// GetPromClusterLabel returns the environemnt variable value for PromClusterIDLabel
+// GetPromClusterLabel returns the environment variable value for PromClusterIDLabel
 func GetPromClusterLabel() string {
 	return Get(PromClusterIDLabelEnvVar, "cluster_id")
 }

+ 1 - 1
pkg/env/env.go

@@ -128,7 +128,7 @@ func GetDuration(key string, defaultValue time.Duration) time.Duration {
 	return envMapper.GetDuration(key, defaultValue)
 }
 
-// GetList parses a []string from the enviroment variable key parameter.  If the environment
+// GetList parses a []string from the environment variable key parameter.  If the environment
 // // variable is empty or fails to parse, nil is returned.
 func GetList(key, delimiter string) []string {
 	return envMapper.GetList(key, delimiter)

+ 2 - 2
pkg/kubecost/allocation.go

@@ -1372,7 +1372,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 
 	// groupingIdleFiltrationCoeffs is used to track per-resource idle
 	// coefficients on a cluster-by-cluster or node-by-node basis depending
-	// on the IdleByNode option. It is, essentailly, an aggregation of
+	// on the IdleByNode option. It is, essentially, an aggregation of
 	// idleFiltrationCoefficients after they have been
 	// filtered above (in step 3)
 	var groupingIdleFiltrationCoeffs map[string]map[string]float64
@@ -1424,7 +1424,7 @@ func (as *AllocationSet) AggregateBy(aggregateBy []string, options *AllocationAg
 			for _, sharedAlloc := range shareSet.Allocations {
 				if _, ok := shareCoefficients[alloc.Name]; !ok {
 					if !alloc.IsIdle() && !alloc.IsUnmounted() {
-						log.Warnf("AllocationSet.AggregateBy: error getting share coefficienct for '%s'", alloc.Name)
+						log.Warnf("AllocationSet.AggregateBy: error getting share coefficient for '%s'", alloc.Name)
 					}
 					continue
 				}

+ 2 - 2
pkg/kubecost/allocation_test.go

@@ -1074,7 +1074,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 			windowEnd:   endYesterday,
 			expMinutes:  1440.0,
 			expectedParcResults: map[string]ProportionalAssetResourceCosts{
-				"namespace1": ProportionalAssetResourceCosts{
+				"namespace1": {
 					"cluster1": ProportionalAssetResourceCost{
 						Cluster:                    "cluster1",
 						Node:                       "",
@@ -1085,7 +1085,7 @@ func TestAllocationSet_AggregateBy(t *testing.T) {
 						NodeResourceCostPercentage: 0.6785714285714285,
 					},
 				},
-				"namespace2": ProportionalAssetResourceCosts{
+				"namespace2": {
 					"cluster1": ProportionalAssetResourceCost{
 						Cluster:                    "cluster1",
 						Node:                       "",

+ 1 - 1
pkg/kubecost/allocationfilter.go

@@ -84,7 +84,7 @@ const (
 // expect to receive these benefits. Passing a custom implementation to a
 // handler may in errors.
 type AllocationFilter interface {
-	// Matches is the canonical in-Go function for determing if an Allocation
+	// Matches is the canonical in-Go function for determining if an Allocation
 	// matches a filter.
 	Matches(a *Allocation) bool
 

+ 1 - 1
pkg/kubecost/asset.go

@@ -69,7 +69,7 @@ type Asset interface {
 // the Properties to use to aggregate, and the mapping from Allocation property
 // to Asset label. For example, consider this asset:
 //
-// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TOOD.
+// CURRENT: Asset ETL stores its data ALREADY MAPPED from label to k8s concept. This isn't ideal-- see the TODO.
 //
 //	  Cloud {
 //		   TotalCost: 10.00,

+ 1 - 1
pkg/kubecost/asset_json.go

@@ -564,7 +564,7 @@ func (n *Node) InterfaceToNode(itf interface{}) error {
 	if NodeType, err := getTypedVal(fmap["nodeType"]); err == nil {
 		n.NodeType = NodeType.(string)
 	}
-	
+
 	if CPUCoreHours, err := getTypedVal(fmap["cpuCoreHours"]); err == nil {
 		n.CPUCoreHours = CPUCoreHours.(float64)
 	}

+ 2 - 2
pkg/kubecost/summaryallocation.go

@@ -465,7 +465,7 @@ func (sas *SummaryAllocationSet) Clone() *SummaryAllocationSet {
 }
 
 // Add sums two SummaryAllocationSets, which Adds all SummaryAllocations in the
-// given SummaryAllocationSet to thier counterparts in the receiving set. Add
+// given SummaryAllocationSet to their counterparts in the receiving set. Add
 // also expands the Window to include both constituent Windows, in the case
 // that Add is being used from accumulating (as opposed to aggregating). For
 // performance reasons, the function may return either a new set, or an
@@ -985,7 +985,7 @@ func (sas *SummaryAllocationSet) AggregateBy(aggregateBy []string, options *Allo
 		}
 
 		// Do not include the unmounted costs when determining sharing
-		// coefficients becuase they do not receive shared costs.
+		// coefficients because they do not receive shared costs.
 		sharingCoeffDenominator -= totalUnmountedCost
 
 		if sharingCoeffDenominator <= 0.0 {

+ 1 - 1
pkg/kubecost/summaryallocation_test.go

@@ -421,7 +421,7 @@ func TestSummaryAllocationSet_RAMEfficiency(t *testing.T) {
 			expectedEfficiency: 0.65,
 		},
 		{
-			name:               "Check RAMEfficiency in presense of an idle allocation",
+			name:               "Check RAMEfficiency in presence of an idle allocation",
 			testsas:            sas6,
 			expectedEfficiency: 0.25,
 		},

+ 1 - 1
pkg/log/profiler.go

@@ -45,7 +45,7 @@ func (p *Profiler) LogAll() {
 		return
 	}
 
-	// Print profiles, largest to smallest. (Inefficienct, but shouldn't matter.)
+	// Print profiles, largest to smallest. (Inefficient, but shouldn't matter.)
 	print := map[string]time.Duration{}
 	for name, value := range p.profiles {
 		print[name] = value

+ 5 - 3
pkg/metrics/pvmetrics.go

@@ -174,9 +174,11 @@ func (kpcrr KubePVStatusPhaseMetric) Write(m *dto.Metric) error {
 	return nil
 }
 
-//--------------------------------------------------------------------------
-//  KubecostPVInfoMetric
-//--------------------------------------------------------------------------
+// --------------------------------------------------------------------------
+//
+//	KubecostPVInfoMetric
+//
+// --------------------------------------------------------------------------
 // KubecostPVInfoMetric is a prometheus.Metric
 type KubecostPVInfoMetric struct {
 	fqName       string

+ 2 - 2
pkg/prom/query.go

@@ -35,7 +35,7 @@ type Context struct {
 	errorCollector *QueryErrorCollector
 }
 
-// NewContext creates a new Promethues querying context from the given client
+// NewContext creates a new Prometheus querying context from the given client
 func NewContext(client prometheus.Client) *Context {
 	var ec QueryErrorCollector
 
@@ -46,7 +46,7 @@ func NewContext(client prometheus.Client) *Context {
 	}
 }
 
-// NewNamedContext creates a new named Promethues querying context from the given client
+// NewNamedContext creates a new named Prometheus querying context from the given client
 func NewNamedContext(client prometheus.Client, name string) *Context {
 	ctx := NewContext(client)
 	ctx.name = name

+ 0 - 1
pkg/prom/ratelimitedclient_test.go

@@ -293,7 +293,6 @@ func TestRateLimitedResponses(t *testing.T) {
 
 }
 
-//
 func AssertDurationEqual(t *testing.T, expected, actual time.Duration) {
 	if actual != expected {
 		t.Fatalf("Expected: %dms, Got: %dms", expected.Milliseconds(), actual.Milliseconds())

+ 1 - 1
pkg/prom/result.go

@@ -17,7 +17,7 @@ var (
 )
 
 func DataFieldFormatErr(query string) error {
-	return fmt.Errorf("Data field improperly formatted in prometheus repsonse fetching query '%s'", query)
+	return fmt.Errorf("Data field improperly formatted in prometheus response fetching query '%s'", query)
 }
 
 func DataPointFormatErr(query string) error {

+ 1 - 1
pkg/util/buffer.go

@@ -392,7 +392,7 @@ func bytesToString(b []byte) string {
 	// this, we are pinning the byte slice's underlying array in memory, preventing it from
 	// being garbage collected while the string is still in use. If we are using the Bank()
 	// functionality to cache new strings, we risk keeping the pinned array alive. To avoid this,
-	// we will use the BankFunc() call which uses the casted string to check for existance of a
+	// we will use the BankFunc() call which uses the casted string to check for existence of a
 	// cached string. If it exists, then we drop the pinned reference immediately and use the
 	// cached string. If it does _not_ exist, then we use the passed func() string to allocate a new
 	// string and cache it. This will prevent us from allocating throw-away strings just to

+ 4 - 3
pkg/util/fileutil/fileutil.go

@@ -3,9 +3,10 @@ package fileutil
 import "os"
 
 // File exists has three different return cases that should be handled:
-//   1. File exists and is not a directory (true, nil)
-//   2. File does not exist (false, nil)
-//   3. File may or may not exist. Error occurred during stat (false, error)
+//  1. File exists and is not a directory (true, nil)
+//  2. File does not exist (false, nil)
+//  3. File may or may not exist. Error occurred during stat (false, error)
+//
 // The third case represents the scenario where the stat returns an error,
 // but the error isn't relevant to the path. This can happen when the current
 // user doesn't have permission to access the file.

+ 2 - 2
pkg/util/mapper/mapper.go

@@ -93,7 +93,7 @@ type PrimitiveMapReader interface {
 	// is empty or fails to parse, the defaultValue parameter is returned.
 	GetBool(key string, defaultValue bool) bool
 
-	// GetDuration parses a time.Duration from the map key paramter. If the
+	// GetDuration parses a time.Duration from the map key parameter. If the
 	// value is empty to fails to parse, the defaultValue is returned.
 	GetDuration(key string, defaultValue time.Duration) time.Duration
 
@@ -161,7 +161,7 @@ type PrimitiveMap interface {
 //  Go Map Implementation
 //--------------------------------------------------------------------------
 
-// GoMap is an implementatino of mapper.Map for map[string]string
+// GoMap is an implementation of mapper.Map for map[string]string
 type GoMap struct {
 	m map[string]string
 }

+ 1 - 1
pkg/util/timeutil/timeutil.go

@@ -229,7 +229,7 @@ func CleanDurationString(duration string) string {
 // ParseTimeRange returns a start and end time, respectively, which are converted from
 // a duration and offset, defined as strings with Prometheus-style syntax.
 func ParseTimeRange(duration, offset time.Duration) (time.Time, time.Time) {
-	// endTime defaults to the current time, unless an offset is explicity declared,
+	// endTime defaults to the current time, unless an offset is explicitly declared,
 	// in which case it shifts endTime back by given duration
 	endTime := time.Now()
 	if offset > 0 {