Просмотр исходного кода

Move project disks and addresses to cost-model; remove old tests

Niko Kovacevic 6 лет назад
Родитель
Commit
5f3ba1cc29

+ 236 - 20
pkg/cloud/awsprovider.go

@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"net/http"
 	"os"
 	"regexp"
@@ -23,6 +24,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/athena"
 	"github.com/aws/aws-sdk-go/service/ec2"
@@ -40,6 +42,34 @@ const supportedSpotFeedVersion = "1"
 const SpotInfoUpdateType = "spotinfo"
 const AthenaInfoUpdateType = "athenainfo"
 
+const defaultConfigPath = "/var/configs/"
+
+var awsRegions = []string{
+	"us-east-2",
+	"us-east-1",
+	"us-west-1",
+	"us-west-2",
+	"ap-east-1",
+	"ap-south-1",
+	"ap-northeast-3",
+	"ap-northeast-2",
+	"ap-southeast-1",
+	"ap-southeast-2",
+	"ap-northeast-1",
+	"ca-central-1",
+	"cn-north-1",
+	"cn-northwest-1",
+	"eu-central-1",
+	"eu-west-1",
+	"eu-west-2",
+	"eu-west-3",
+	"eu-north-1",
+	"me-south-1",
+	"sa-east-1",
+	"us-gov-east-1",
+	"us-gov-west-1",
+}
+
 // AWS represents an Amazon Provider
 type AWS struct {
 	Pricing                 map[string]*AWSProductTerms
@@ -1031,38 +1061,224 @@ func getClusterConfig(ccFile string) (map[string]string, error) {
 	return clusterConf, nil
 }
 
-// GetDisks returns the AWS disks backing PVs. Useful because sometimes k8s will not clean up PVs correctly. Requires a json config in /var/configs with key region.
-func (a *AWS) GetDisks() ([]byte, error) {
-	err := a.configureAWSAuth()
+// SetKeyEnv ensures that the two environment variables necessary to configure
+// a new AWS Session are set.
+func (a *AWS) SetKeyEnv() error {
+	// TODO add this to the helm chart, mirroring the cost-model
+	// configPath := os.Getenv("CONFIG_PATH")
+	configPath := defaultConfigPath
+	path := configPath + "aws.json"
+
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			log.Printf("error: file %s does not exist", path)
+		} else {
+			log.Printf("error: %s", err)
+		}
+		return err
+	}
+
+	jsonFile, err := os.Open(path)
+	defer jsonFile.Close()
+
+	configMap := map[string]string{}
+	configBytes, err := ioutil.ReadAll(jsonFile)
 	if err != nil {
-		return nil, err
+		return err
 	}
+	json.Unmarshal([]byte(configBytes), &configMap)
+
+	keyName := configMap["awsServiceKeyName"]
+	keySecret := configMap["awsServiceKeySecret"]
+
+	// These are required before calling NewEnvCredentials below
+	os.Setenv("AWS_ACCESS_KEY_ID", keyName)
+	os.Setenv("AWS_SECRET_ACCESS_KEY", keySecret)
+
+	return nil
+}
 
-	clusterConfig, err := getClusterConfig("/var/configs/cluster.json")
+func (a *AWS) getAddressesForRegion(region string) (*ec2.DescribeAddressesOutput, error) {
+	sess, err := session.NewSession(&aws.Config{
+		Region:      aws.String(region),
+		Credentials: credentials.NewEnvCredentials(),
+	})
 	if err != nil {
 		return nil, err
 	}
 
-	region := aws.String(clusterConfig["region"])
-	c := &aws.Config{
-		Region: region,
+	ec2Svc := ec2.New(sess)
+	return ec2Svc.DescribeAddresses(&ec2.DescribeAddressesInput{})
+}
+
+func (a *AWS) GetAddresses() ([]byte, error) {
+	if err := a.SetKeyEnv(); err != nil {
+		return nil, err
 	}
-	s := session.Must(session.NewSession(c))
 
-	ec2Svc := ec2.New(s)
-	input := &ec2.DescribeVolumesInput{}
-	volumeResult, err := ec2Svc.DescribeVolumes(input)
+	addressCh := make(chan *ec2.DescribeAddressesOutput, len(awsRegions))
+	errorCh := make(chan error, len(awsRegions))
+
+	var wg sync.WaitGroup
+	wg.Add(len(awsRegions))
+
+	// Get addresses from each AWS region
+	for _, r := range awsRegions {
+		// Fetch IP address response and send results and errors to their
+		// respective channels
+		go func(region string) {
+			defer wg.Done()
+
+			// Query for the address results for this region
+			resp, err := a.getAddressesForRegion(region)
+			if err != nil {
+				if aerr, ok := err.(awserr.Error); ok {
+					switch aerr.Code() {
+					default:
+						errorCh <- aerr
+					}
+					return
+				} else {
+					errorCh <- err
+					return
+				}
+			}
+			addressCh <- resp
+		}(r)
+	}
+
+	// Close the result channels after everything has been sent
+	go func() {
+		wg.Wait()
+		close(errorCh)
+		close(addressCh)
+	}()
+
+	addresses := []*ec2.Address{}
+	for adds := range addressCh {
+		addresses = append(addresses, adds.Addresses...)
+	}
+
+	errors := []error{}
+	for err := range errorCh {
+		log.Printf("error getting addresses: %s", err)
+		errors = append(errors, err)
+	}
+
+	// Return error if no addresses are returned
+	if len(errors) > 0 && len(addresses) == 0 {
+		return nil, fmt.Errorf("%d error(s) retrieving addresses: %v", len(errors), errors)
+	}
+
+	// Format the response this way to match the JSON-encoded formatting of a single response
+	// from DescribeAddresses, so that consumers can always expect AWS address responses to have
+	// an "Addresses" key at the top level.
+	return json.Marshal(map[string][]*ec2.Address{
+		"Addresses": addresses,
+	})
+}
+
+func (a *AWS) getDisksForRegion(region string, maxResults int64, nextToken *string) (*ec2.DescribeVolumesOutput, error) {
+	sess, err := session.NewSession(&aws.Config{
+		Region:      aws.String(region),
+		Credentials: credentials.NewEnvCredentials(),
+	})
 	if err != nil {
-		if aerr, ok := err.(awserr.Error); ok {
-			switch aerr.Code() {
-			default:
-				return nil, aerr
+		return nil, err
+	}
+
+	ec2Svc := ec2.New(sess)
+	return ec2Svc.DescribeVolumes(&ec2.DescribeVolumesInput{
+		MaxResults: &maxResults,
+		NextToken:  nextToken,
+	})
+}
+
+// GetDisks returns the AWS disks backing PVs. Useful because sometimes k8s will not clean up PVs correctly. Requires a json config in /var/configs with key region.
+func (a *AWS) GetDisks() ([]byte, error) {
+	if err := a.SetKeyEnv(); err != nil {
+		return nil, err
+	}
+
+	volumeCh := make(chan *ec2.DescribeVolumesOutput, len(awsRegions))
+	errorCh := make(chan error, len(awsRegions))
+
+	var wg sync.WaitGroup
+	wg.Add(len(awsRegions))
+
+	// Get volumes from each AWS region
+	for _, r := range awsRegions {
+		// Fetch volume response and send results and errors to their
+		// respective channels
+		go func(region string) {
+			defer wg.Done()
+
+			// Query for first page of volume results
+			resp, err := a.getDisksForRegion(region, 1000, nil)
+			if err != nil {
+				if aerr, ok := err.(awserr.Error); ok {
+					switch aerr.Code() {
+					default:
+						errorCh <- aerr
+					}
+					return
+				} else {
+					errorCh <- err
+					return
+				}
 			}
-		} else {
-			return nil, err
-		}
+			volumeCh <- resp
+
+			// A NextToken indicates more pages of results. Keep querying
+			// until all pages are retrieved.
+			for resp.NextToken != nil {
+				resp, err = a.getDisksForRegion(region, 100, resp.NextToken)
+				if err != nil {
+					if aerr, ok := err.(awserr.Error); ok {
+						switch aerr.Code() {
+						default:
+							errorCh <- aerr
+						}
+						return
+					} else {
+						errorCh <- err
+						return
+					}
+				}
+				volumeCh <- resp
+			}
+		}(r)
+	}
+
+	// Close the result channels after everything has been sent
+	go func() {
+		wg.Wait()
+		close(errorCh)
+		close(volumeCh)
+	}()
+
+	volumes := []*ec2.Volume{}
+	for vols := range volumeCh {
+		volumes = append(volumes, vols.Volumes...)
+	}
+
+	errors := []error{}
+	for err := range errorCh {
+		log.Printf("error getting disks: %s", err)
+		errors = append(errors, err)
+	}
+
+	// Return error if no volumes are returned
+	if len(errors) > 0 && len(volumes) == 0 {
+		return nil, fmt.Errorf("%d error(s) retrieving volumes: %v", len(errors), errors)
 	}
-	return json.Marshal(volumeResult)
+
+	// Format the response this way to match the JSON-encoded formatting of a single response
+	// from DescribeVolumes, so that consumers can always expect AWS disk responses to have
+	// a "Volumes" key at the top level.
+	return json.Marshal(map[string][]*ec2.Volume{
+		"Volumes": volumes,
+	})
 }
 
 // ConvertToGlueColumnFormat takes a string and runs through various regex

+ 4 - 0
pkg/cloud/azureprovider.go

@@ -663,6 +663,10 @@ func (key *azurePvKey) Features() string {
 	return key.DefaultRegion + "," + storageClass
 }
 
+func (*Azure) GetAddresses() ([]byte, error) {
+	return nil, nil
+}
+
 func (*Azure) GetDisks() ([]byte, error) {
 	return nil, nil
 }

+ 4 - 0
pkg/cloud/customprovider.go

@@ -108,6 +108,10 @@ func (cp *CustomProvider) ClusterInfo() (map[string]string, error) {
 	return m, nil
 }
 
+func (*CustomProvider) GetAddresses() ([]byte, error) {
+	return nil, nil
+}
+
 func (*CustomProvider) GetDisks() ([]byte, error) {
 	return nil, nil
 }

+ 28 - 0
pkg/cloud/gcpprovider.go

@@ -497,6 +497,34 @@ func (gcp *GCP) ClusterInfo() (map[string]string, error) {
 	return m, nil
 }
 
+func (*GCP) GetAddresses() ([]byte, error) {
+	// metadata API setup
+	metadataClient := metadata.NewClient(&http.Client{Transport: userAgentTransport{
+		userAgent: "kubecost",
+		base:      http.DefaultTransport,
+	}})
+	projID, err := metadataClient.ProjectID()
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := google.DefaultClient(oauth2.NoContext,
+		"https://www.googleapis.com/auth/compute.readonly")
+	if err != nil {
+		return nil, err
+	}
+	svc, err := compute.New(client)
+	if err != nil {
+		return nil, err
+	}
+	res, err := svc.Addresses.AggregatedList(projID).Do()
+
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(res)
+}
+
 // GetDisks returns the GCP disks backing PVs. Useful because sometimes k8s will not clean up PVs correctly. Requires a json config in /var/configs with key region.
 func (*GCP) GetDisks() ([]byte, error) {
 	// metadata API setup

+ 1 - 0
pkg/cloud/provider.go

@@ -162,6 +162,7 @@ type CustomPricing struct {
 // Provider represents a k8s provider.
 type Provider interface {
 	ClusterInfo() (map[string]string, error)
+	GetAddresses() ([]byte, error)
 	GetDisks() ([]byte, error)
 	NodePricing(Key) (*Node, error)
 	PVPricing(PVKey) (*PV, error)

+ 41 - 0
pkg/costmodel/router.go

@@ -616,6 +616,45 @@ func (p *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request,
 	w.Write(WrapData(ValidatePrometheus(p.PrometheusClient, false)))
 }
 
+func (p *Accesses) HandleProjectAddresses(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	body, err := p.Cloud.GetAddresses()
+	if err != nil {
+		if strings.Contains(err.Error(), "not implemented") {
+			w.WriteHeader(http.StatusNotFound)
+		} else {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+
+		w.Write([]byte(err.Error()))
+		return
+	}
+
+	w.Write(body)
+}
+
+func (p *Accesses) HandleProjectDisks(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+
+	body, err := p.Cloud.GetDisks()
+	if err != nil {
+		if strings.Contains(err.Error(), "not implemented") {
+			w.WriteHeader(http.StatusNotFound)
+		} else {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+
+		w.Write([]byte(err.Error()))
+		return
+	}
+
+	w.WriteHeader(http.StatusOK)
+	w.Write(body)
+}
+
 func (a *Accesses) recordPrices() {
 	go func() {
 		containerSeen := make(map[string]bool)
@@ -1126,6 +1165,8 @@ func Initialize(additionalConfigWatchers ...ConfigWatchers) {
 	Router.GET("/clusterCosts", A.ClusterCosts)
 	Router.GET("/validatePrometheus", A.GetPrometheusMetadata)
 	Router.GET("/managementPlatform", A.ManagementPlatform)
+	Router.GET("/projectAddresses", A.HandleProjectAddresses)
+	Router.GET("/projectDisks", A.HandleProjectDisks)
 	Router.GET("/clusterInfo", A.ClusterInfo)
 	Router.GET("/clusters", managerEndpoints.GetAllClusters)
 	Router.PUT("/clusters", managerEndpoints.PutCluster)

+ 0 - 113
test/aggregation_test.go

@@ -1,113 +0,0 @@
-package costmodel_test
-
-import (
-	"log"
-	"testing"
-
-	"gotest.tools/assert"
-
-	"github.com/kubecost/cost-model/pkg/cloud"
-	costModel "github.com/kubecost/cost-model/pkg/costmodel"
-)
-
-func TestAggregation(t *testing.T) {
-	cp := &cloud.CustomProvider{}
-
-	cd1 := &costModel.CostData{
-		Namespace: "test1",
-		NodeName:  "testnode",
-		NodeData: &cloud.Node{
-			VCPUCost: "1.0",
-			RAMCost:  "1.0",
-		},
-		RAMAllocation: []*costModel.Vector{&costModel.Vector{
-			Timestamp: 10,
-			Value:     1073741824,
-		}},
-		CPUAllocation: []*costModel.Vector{&costModel.Vector{
-			Timestamp: 10,
-			Value:     1.0,
-		}},
-		GPUReq: []*costModel.Vector{&costModel.Vector{}},
-		PVCData: []*costModel.PersistentVolumeClaimData{
-			&costModel.PersistentVolumeClaimData{
-				Namespace:  "test1",
-				VolumeName: "foo",
-				Volume: &cloud.PV{
-					Cost: "1.0",
-					Size: "1073741824",
-				},
-				Values: []*costModel.Vector{&costModel.Vector{
-					Timestamp: 10,
-					Value:     1073741824,
-				}},
-			},
-			&costModel.PersistentVolumeClaimData{
-				Namespace:  "test1",
-				VolumeName: "bar",
-				Volume: &cloud.PV{
-					Cost: "1.0",
-					Size: "1073741824",
-				},
-				Values: []*costModel.Vector{&costModel.Vector{
-					Timestamp: 10,
-					Value:     1073741824,
-				}},
-			},
-		},
-	}
-	cd2 := &costModel.CostData{
-		Namespace: "test1",
-		NodeName:  "testnode",
-		NodeData: &cloud.Node{
-			VCPUCost: "1.0",
-			RAMCost:  "1.0",
-		},
-		RAMAllocation: []*costModel.Vector{&costModel.Vector{
-			Timestamp: 10,
-			Value:     1073741824,
-		}},
-		CPUAllocation: []*costModel.Vector{&costModel.Vector{
-			Timestamp: 10,
-			Value:     1.0,
-		}},
-		GPUReq: []*costModel.Vector{&costModel.Vector{}},
-		PVCData: []*costModel.PersistentVolumeClaimData{
-			&costModel.PersistentVolumeClaimData{
-				Namespace:  "test1",
-				VolumeName: "foo",
-				Volume: &cloud.PV{
-					Cost: "1.0",
-					Size: "1073741824",
-				},
-				Values: []*costModel.Vector{&costModel.Vector{
-					Timestamp: 10,
-					Value:     1073741824,
-				}},
-			},
-			&costModel.PersistentVolumeClaimData{
-				Namespace:  "test1",
-				VolumeName: "bar",
-				Volume: &cloud.PV{
-					Cost: "1.0",
-					Size: "1073741824",
-				},
-				Values: []*costModel.Vector{&costModel.Vector{
-					Timestamp: 10,
-					Value:     1073741824,
-				}},
-			},
-		},
-	}
-
-	costData := make(map[string]*costModel.CostData)
-	costData["test1,foo,nginx,testnode"] = cd1
-	costData["test1,bar,nginx,testnode"] = cd2
-
-	field := "namespace"
-	subfields := []string{""}
-
-	agg := costModel.AggregateCostData(costData, field, subfields, cp, nil)
-	log.Printf("agg: %+v", agg["test1"])
-	assert.Equal(t, agg["test1"].TotalCost, 8.0)
-}

+ 0 - 272
test/historical_pod_test.go

@@ -1,272 +0,0 @@
-package costmodel_test
-
-import (
-	"fmt"
-	"log"
-	"net"
-	"net/http"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"k8s.io/klog"
-
-	"gotest.tools/assert"
-
-	"github.com/kubecost/cost-model/pkg/cloud"
-	costModel "github.com/kubecost/cost-model/pkg/costmodel"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-
-	prometheusClient "github.com/prometheus/client_golang/api"
-
-	_ "k8s.io/client-go/plugin/pkg/client/auth"
-)
-
-var PrometheusEndpoint string
-
-const PROMETHEUS_SERVER_ENDPOINT = "PROMETHEUS_SERVER_ENDPOINT"
-
-func homeDir() string {
-	if h := os.Getenv("HOME"); h != "" {
-		return h
-	}
-	return os.Getenv("USERPROFILE") // windows
-}
-
-func getKubernetesClient() (*kubernetes.Clientset, error) {
-	var kubeconfig string
-	config, err := rest.InClusterConfig()
-	if err != nil {
-
-		if home := homeDir(); home != "" {
-			kubeconfig = filepath.Join(home, ".kube", "config")
-		} else {
-			return nil, fmt.Errorf("Unable to find home directory")
-		}
-		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return kubernetes.NewForConfig(config)
-
-}
-func getDynamicKubernetesClient() (dynamic.Interface, error) {
-	config, err := rest.InClusterConfig()
-	if err != nil {
-		var kubeconfig string
-		if home := homeDir(); home != "" {
-			kubeconfig = filepath.Join(home, ".kube", "config")
-		} else {
-			return nil, fmt.Errorf("Unable to find home directory")
-		}
-		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return dynamic.NewForConfig(config)
-}
-func TestPodUpDown(t *testing.T) {
-	client, err := getDynamicKubernetesClient()
-	if err != nil {
-		panic(err)
-	}
-	rclient, err := getKubernetesClient()
-	if err != nil {
-		panic(err)
-	}
-	var LongTimeoutRoundTripper http.RoundTripper = &http.Transport{ // may be necessary for long prometheus queries. TODO: make this configurable
-		Proxy: http.ProxyFromEnvironment,
-		DialContext: (&net.Dialer{
-			Timeout:   120 * time.Second,
-			KeepAlive: 120 * time.Second,
-		}).DialContext,
-		TLSHandshakeTimeout: 10 * time.Second,
-	}
-	a := os.Getenv(PROMETHEUS_SERVER_ENDPOINT)
-	pc := prometheusClient.Config{
-		Address:      a,
-		RoundTripper: LongTimeoutRoundTripper,
-	}
-	promCli, err := prometheusClient.NewClient(pc)
-	if err != nil {
-		panic(err)
-	}
-	cm := costModel.NewCostModel(rclient)
-
-	deployment := &unstructured.Unstructured{
-		Object: map[string]interface{}{
-			"apiVersion": "apps/v1",
-			"kind":       "Deployment",
-			"metadata": map[string]interface{}{
-				"name": "demo-deployment",
-			},
-			"spec": map[string]interface{}{
-				"replicas": 2,
-				"selector": map[string]interface{}{
-					"matchLabels": map[string]interface{}{
-						"app": "demo",
-					},
-				},
-				"template": map[string]interface{}{
-					"metadata": map[string]interface{}{
-						"labels": map[string]interface{}{
-							"app": "demo",
-						},
-					},
-
-					"spec": map[string]interface{}{
-						"containers": []map[string]interface{}{
-							{
-								"name":  "web",
-								"image": "nginx:1.12",
-								"resources": map[string]interface{}{
-									"requests": map[string]interface{}{
-										"memory": "64Mi",
-										"cpu":    "250m",
-									},
-								},
-								"ports": []map[string]interface{}{
-									{
-										"name":          "http",
-										"protocol":      "TCP",
-										"containerPort": 80,
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	deploymentRes := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
-	labels := make(map[string]string)
-	labels["testaggregation"] = "foo"
-	namespace := &v1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   "test2",
-			Labels: labels,
-		},
-	}
-	klog.Infof("Creating namespace test2")
-	rclient.CoreV1().Namespaces().Create(namespace)
-	klog.Infof("Creating deployments in test2")
-	_, err = client.Resource(deploymentRes).Namespace("test2").Create(deployment, metav1.CreateOptions{})
-	if err != nil {
-		panic(err)
-	}
-	klog.Infof("Sleeping 5 minutes to wait for steady state.")
-	time.Sleep(5 * time.Minute)
-
-	qr := `label_replace(label_replace(container_cpu_allocation{container='web',namespace='test2'}, "container_name", "$1", "container","(.+)"), "pod_name", "$1", "pod","(.+)")`
-
-	end := time.Now()
-	start := end.Add(-1 * time.Duration(3*time.Minute))
-	step := time.Duration(time.Minute)
-
-	res, err := costModel.QueryRange(promCli, qr, start, end, step)
-	if err != nil {
-		panic(err)
-	}
-
-	vectors, err := costModel.GetContainerMetricVectors(res, false, 0, "cluster-one")
-	if err != nil {
-		panic(err)
-	}
-	klog.Infof("Found Vectors %+v", vectors)
-	if !(len(vectors) > 0) {
-		panic("Expected vectors to have data")
-	}
-	for _, values := range vectors {
-		assert.Check(t, len(values) > 0)
-		for _, vector := range values {
-			if vector.Value != 0.25 && vector.Value != 0.125 { // It's halved for fractional minute normalization.
-				panic(fmt.Sprintf("Expected %f to equal 0.25", vector.Value))
-			}
-		}
-	}
-
-	deletePolicy := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &deletePolicy,
-	}
-
-	klog.Infof("Deleting deployment in namespace test2")
-	if err := client.Resource(deploymentRes).Namespace("test2").Delete("demo-deployment", deleteOptions); err != nil {
-		panic(err)
-	}
-
-	klog.Infof("Sleeping 5 minutes to wait for steady state.")
-	time.Sleep(5 * time.Minute)
-
-	res, err = costModel.Query(promCli, qr)
-	if err != nil {
-		panic(err)
-	}
-
-	vectors, err = costModel.GetContainerMetricVector(res, false, 0, "cluster-one")
-	if err != nil {
-		panic(err)
-	}
-	if len(vectors) != 0 {
-		panic("Pods are not gone from namespace test2 data")
-	}
-	klog.Infof("Validated that pods are gone from namespace test2 data")
-	provider, err := cloud.NewProvider(rclient, os.Getenv("CLOUD_PROVIDER_API_KEY"))
-	if err != nil {
-		panic(err)
-	}
-	loc, _ := time.LoadLocation("UTC")
-	endTime := time.Now().In(loc)
-	d, _ := time.ParseDuration("10m")
-	startTime := endTime.Add(-1 * d)
-	layout := "2006-01-02T15:04:05.000Z"
-	startStr := startTime.Format(layout)
-	endStr := endTime.Format(layout)
-	log.Printf("Starting at %s \n", startStr)
-	log.Printf("Ending at %s \n", endStr)
-	provider.DownloadPricingData()
-
-	data, err := cm.ComputeCostDataRange(promCli, rclient, provider, startStr, endStr, "1m", "", "", false)
-	if err != nil {
-		panic(err)
-	}
-
-	agg := costModel.AggregateCostData(data, "namespace", []string{""}, provider, nil)
-	_, ok := agg["test"]
-	assert.Assert(t, ok)
-	_, ok = agg["test2"]
-	if !ok {
-		panic("No test2 namespace!")
-	}
-
-	data2, err := cm.ComputeCostData(promCli, rclient, provider, "10m", "", "")
-	if err != nil {
-		panic(err)
-	}
-
-	agg2 := costModel.AggregateCostData(data2, "namespace", []string{""}, provider, nil)
-	_, ok2 := agg2["test"]
-	assert.Assert(t, ok2)
-	_, ok2 = agg2["test2"]
-	if !ok2 {
-		panic("No test2 namespace!")
-	}
-
-	agg3 := costModel.AggregateCostData(data, "label", []string{"testaggregation"}, provider, nil)
-	_, ok3 := agg3["foo"]
-	if !ok3 {
-		panic("No label foo aggregate!")
-	}
-}