Преглед на файлове

Merge pull request #168 from kubecost/AjayTripathy-add-tests

add integration tests
Ajay Tripathy преди 6 години
родител
ревизия
11bbadb2d1
променени са 4 файла, в които са добавени 445 реда и са изтрити 15 реда
  1. 9 0
      CONTRIBUTING.md
  2. 224 0
      test/cluster_test.go
  3. 0 15
      test/costmodel_test.go
  4. 212 0
      test/historical_pod_test.go

+ 9 - 0
CONTRIBUTING.md

@@ -22,6 +22,15 @@ To test, build the cost-model docker container and then push it to a Kubernetes
 
 To confirm that the server is running, you can hit [http://localhost:9003/costDataModel?timeWindow=1d](http://localhost:9003/costDataModel?timeWindow=1d)
 
+## Running the integration tests ##
+To run these tests:
+* Make sure you have a kubeconfig that can point to your cluster, and have permissions to create/modify a namespace called "test"
+* Connect to the Prometheus instance that kubecost emits to, on localhost:9003: 
+```kubectl port-forward --namespace kubecost service/kubecost-prometheus-server 9003:80```
+* Temporary workaround: Copy this project's cloud/default.json file to /models/default.json on the machine your test is running on. TODO: fix this and inject the cloud/default.json path into provider.go.
+* Navigate to cost-model/test
+* Run ```go test -timeout 700s``` from the testing directory. The tests right now take about 10 minutes (600s) to run because they bring up and down pods and wait for Prometheus to scrape data about them.
+
 
 ## Certification of Origin ##
 

+ 224 - 0
test/cluster_test.go

@@ -0,0 +1,224 @@
+package costmodel_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	//	"math"
+	//	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	//	"testing"
+	//	"time"
+
+	//	"gotest.tools/assert"
+
+	prometheusClient "github.com/prometheus/client_golang/api"
+
+	//	v1 "k8s.io/api/core/v1"
+	//	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+
+	"log"
+)
+
+const address = "http://localhost:9003"
+
+const apiPrefix = "/api/v1"
+
+const epQuery = apiPrefix + "/query"
+
+func homeDir() string {
+	if h := os.Getenv("HOME"); h != "" {
+		return h
+	}
+	return os.Getenv("USERPROFILE") // windows
+}
+
+func getKubernetesClient() (*kubernetes.Clientset, error) {
+	var kubeconfig string
+
+	if home := homeDir(); home != "" {
+		kubeconfig = filepath.Join(home, ".kube", "config")
+	} else {
+		return nil, fmt.Errorf("Unable to find home directory")
+	}
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return kubernetes.NewForConfig(config)
+
+}
+
+// The integration test assumes a GKE cluster in us-central1 or an AWS cluster in us-east-2, with the following instance types
+// and storage classes.
+var prices = map[string]float64{
+	"n1standardRAM":   0.004237,
+	"n1standardCPU":   0.031611,
+	"t2.medium":       0.0464,
+	"t2.small":        0.023,
+	"t2.micro":        0.0116,
+	"c4.large":        0.1,
+	"gp2":             0.000137,
+	"ssd":             0.170,
+	"Standard_DS2_v2": 0.252,
+	"g1smallCPU":      0.025643,
+	"g1smallRAM":      0.000034,
+	"n1-highmem-2":    0.1171,
+}
+
+func parseQuery(qr interface{}) (float64, error) {
+	data, ok := qr.(map[string]interface{})["data"]
+	if !ok {
+		return 0, fmt.Errorf("Improperly formatted response from prometheus, response %+v has no data field", data)
+	}
+	r, ok := data.(map[string]interface{})["result"]
+	if !ok {
+		return 0, fmt.Errorf("Improperly formatted data from prometheus, data has no result field")
+	}
+	results, ok := r.([]interface{})
+	if !ok {
+		return 0, fmt.Errorf("Improperly formatted results from prometheus, result field is not a slice")
+	}
+	val, ok := results[0].(map[string]interface{})["value"]
+	if !ok {
+		return 0, fmt.Errorf("Improperly formatted results from prometheus, value is not a field in the vector")
+	}
+	dataPoint, ok := val.([]interface{})
+	if !ok || len(dataPoint) != 2 {
+		return 0, fmt.Errorf("Improperly formatted datapoint from Prometheus")
+	}
+
+	return strconv.ParseFloat(dataPoint[1].(string), 64)
+
+}
+
+func query(cli prometheusClient.Client, query string) (interface{}, error) {
+	u := cli.URL(epQuery, nil)
+	q := u.Query()
+	q.Set("query", query)
+	u.RawQuery = q.Encode()
+
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	_, body, _, err := cli.Do(context.Background(), req)
+	if err != nil {
+		return nil, err
+	}
+	var toReturn interface{}
+	err = json.Unmarshal(body, &toReturn)
+	if err != nil {
+		log.Printf("ERROR" + err.Error())
+	}
+	return toReturn, err
+}
+
+/*
+func TestKubernetesPVCosts(t *testing.T) {
+	cli, err := getKubernetesClient()
+	if err != nil {
+		panic(err)
+	}
+	var LongTimeoutRoundTripper http.RoundTripper = &http.Transport{ // may be necessary for long prometheus queries. TODO: make this configurable
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   120 * time.Second,
+			KeepAlive: 120 * time.Second,
+		}).DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+
+	pc := prometheusClient.Config{
+		Address:      address,
+		RoundTripper: LongTimeoutRoundTripper,
+	}
+	promCli, err := prometheusClient.NewClient(pc)
+	if err != nil {
+		panic(err)
+	}
+
+	pvs, err := cli.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	for _, pv := range pvs.Items {
+		name := pv.Name
+		class := pv.Spec.StorageClassName
+
+		q := fmt.Sprintf(`pv_hourly_cost{persistentvolume="%s"}`, name)
+		qt, err := query(promCli, q)
+		total, err := parseQuery(qt)
+		if err != nil {
+			log.Printf(err.Error())
+		}
+		if price, ok := prices[class]; ok {
+			assert.Equal(t, math.Round(total*1000000)/1000000, price)
+		}
+
+	}
+
+}
+
+func TestKubernetesClusterCosts(t *testing.T) {
+	prices["n1-standard-1"] = math.Round((prices["n1standardCPU"]+3.61219*prices["n1standardRAM"])*10000) / 10000
+	prices["g1-small"] = math.Round(((prices["g1smallCPU"] + 0.216998*prices["g1smallRAM"]) * 10000)) / 10000
+	cli, err := getKubernetesClient()
+	if err != nil {
+		panic(err)
+	}
+	var LongTimeoutRoundTripper http.RoundTripper = &http.Transport{ // may be necessary for long prometheus queries. TODO: make this configurable
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   120 * time.Second,
+			KeepAlive: 120 * time.Second,
+		}).DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+
+	pc := prometheusClient.Config{
+		Address:      address,
+		RoundTripper: LongTimeoutRoundTripper,
+	}
+	promCli, err := prometheusClient.NewClient(pc)
+	if err != nil {
+		panic(err)
+	}
+
+	nodes, err := cli.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	for _, n := range nodes.Items {
+		name := n.GetObjectMeta().GetName()
+		q := fmt.Sprintf(`node_total_hourly_cost{instance="%s"}`, name)
+		labels := n.GetObjectMeta().GetLabels()
+		instanceType := labels[v1.LabelInstanceType]
+
+		qt, err := query(promCli, q)
+		if err != nil {
+			panic(err)
+		}
+		total, err := parseQuery(qt)
+		if err != nil {
+			panic(err)
+		}
+
+		if price, ok := prices[instanceType]; ok {
+			assert.Equal(t, math.Round(total*10000)/10000, price)
+		}
+
+	}
+}
+*/

+ 0 - 15
test/costmodel_test.go

@@ -8,12 +8,10 @@ import (
 	"time"
 
 	"github.com/golang/mock/gomock"
-	//costModel "github.com/kubecost/cost-model/costmodel"
 	"github.com/kubecost/test/mocks"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	//testclient "k8s.io/client-go/kubernetes/fake"
 	fakecontroller "k8s.io/client-go/tools/cache/testing"
 )
 
@@ -27,24 +25,11 @@ func TestCostModel(t *testing.T) {
 	cli.EXPECT().URL(gomock.Any(), gomock.Any()).AnyTimes().Return(u)
 	cli.EXPECT().Do(gomock.Any(), gomock.Any()).AnyTimes()
 
-	//clientset := testclient.NewSimpleClientset()
-	//provider := mocks.NewMockProvider(ctrl)
-
 	fc := fakecontroller.NewFakeControllerSource()
-	//cm := costModel.NewCostModel(fc)
 	fc.Add(&v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
 		},
 	})
 	time.Sleep(100 * time.Millisecond)
-	/*
-		c, err := cm.ComputeCostData(cli, clientset, provider, "1d", "", "")
-		if err != nil {
-			panic(err)
-		}
-		for _, costs := range c {
-			assert.Equal(t, "foo", costs.PodName)
-		}
-	*/
 }

+ 212 - 0
test/historical_pod_test.go

@@ -0,0 +1,212 @@
+package costmodel_test
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"net/http"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"gotest.tools/assert"
+
+	"github.com/kubecost/cost-model/cloud"
+	costModel "github.com/kubecost/cost-model/costmodel"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/tools/clientcmd"
+
+	prometheusClient "github.com/prometheus/client_golang/api"
+
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+)
+
+func getDynamicKubernetesClient() (dynamic.Interface, error) {
+	var kubeconfig string
+
+	if home := homeDir(); home != "" {
+		kubeconfig = filepath.Join(home, ".kube", "config")
+	} else {
+		return nil, fmt.Errorf("Unable to find home directory")
+	}
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return dynamic.NewForConfig(config)
+
+}
+
+func TestPodUpDown(t *testing.T) {
+	client, err := getDynamicKubernetesClient()
+	if err != nil {
+		panic(err)
+	}
+	rclient, err := getKubernetesClient()
+	if err != nil {
+		panic(err)
+	}
+	var LongTimeoutRoundTripper http.RoundTripper = &http.Transport{ // may be necessary for long prometheus queries. TODO: make this configurable
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   120 * time.Second,
+			KeepAlive: 120 * time.Second,
+		}).DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+
+	pc := prometheusClient.Config{
+		Address:      address,
+		RoundTripper: LongTimeoutRoundTripper,
+	}
+	promCli, err := prometheusClient.NewClient(pc)
+	if err != nil {
+		panic(err)
+	}
+	cm := costModel.NewCostModel(rclient)
+
+	deployment := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"apiVersion": "apps/v1",
+			"kind":       "Deployment",
+			"metadata": map[string]interface{}{
+				"name": "demo-deployment",
+			},
+			"spec": map[string]interface{}{
+				"replicas": 2,
+				"selector": map[string]interface{}{
+					"matchLabels": map[string]interface{}{
+						"app": "demo",
+					},
+				},
+				"template": map[string]interface{}{
+					"metadata": map[string]interface{}{
+						"labels": map[string]interface{}{
+							"app": "demo",
+						},
+					},
+
+					"spec": map[string]interface{}{
+						"containers": []map[string]interface{}{
+							{
+								"name":  "web",
+								"image": "nginx:1.12",
+								"resources": map[string]interface{}{
+									"requests": map[string]interface{}{
+										"memory": "64Mi",
+										"cpu":    "250m",
+									},
+								},
+								"ports": []map[string]interface{}{
+									{
+										"name":          "http",
+										"protocol":      "TCP",
+										"containerPort": 80,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	deploymentRes := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
+	labels := make(map[string]string)
+	labels["testaggregation"] = "foo"
+	namespace := &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   "test",
+			Labels: labels,
+		},
+	}
+	rclient.CoreV1().Namespaces().Create(namespace)
+	_, err = client.Resource(deploymentRes).Namespace("test").Create(deployment, metav1.CreateOptions{})
+	if err != nil {
+		panic(err)
+	}
+	time.Sleep(5 * time.Minute)
+
+	qr := `label_replace(label_replace(container_cpu_allocation{container='web',namespace='test'}, "container_name", "$1", "container","(.+)"), "pod_name", "$1", "pod","(.+)")`
+
+	end := time.Now()
+	start := end.Add(-1 * time.Duration(3*time.Minute))
+	step := time.Duration(time.Minute)
+
+	res, err := costModel.QueryRange(promCli, qr, start, end, step)
+	if err != nil {
+		panic(err)
+	}
+
+	vectors, err := costModel.GetContainerMetricVectors(res, false, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	assert.Check(t, len(vectors) > 0)
+	for _, values := range vectors {
+		assert.Check(t, len(values) > 0)
+		for _, vector := range values {
+			assert.Check(t, vector.Value == 0.25 || vector.Value == 0.125) // It's halved for fractional minute normalization.
+		}
+	}
+
+	deletePolicy := metav1.DeletePropagationForeground
+	deleteOptions := &metav1.DeleteOptions{
+		PropagationPolicy: &deletePolicy,
+	}
+	if err := client.Resource(deploymentRes).Namespace("test").Delete("demo-deployment", deleteOptions); err != nil {
+		panic(err)
+	}
+
+	time.Sleep(5 * time.Minute)
+
+	res, err = costModel.Query(promCli, qr)
+	if err != nil {
+		panic(err)
+	}
+
+	vectors, err = costModel.GetContainerMetricVector(res, false, 0)
+	if err != nil {
+		panic(err)
+	}
+	assert.Equal(t, len(vectors), 0)
+	provider := &cloud.CustomProvider{
+		Clientset: rclient,
+	}
+	loc, _ := time.LoadLocation("UTC")
+	endTime := time.Now().In(loc)
+	d, _ := time.ParseDuration("10m")
+	startTime := endTime.Add(-1 * d)
+	layout := "2006-01-02T15:04:05.000Z"
+	startStr := startTime.Format(layout)
+	endStr := endTime.Format(layout)
+	log.Printf("Starting at %s \n", startStr)
+	log.Printf("Ending at %s \n", endStr)
+	provider.DownloadPricingData()
+	data, err := cm.ComputeCostDataRange(promCli, rclient, provider, startStr, endStr, "1m", "")
+	if err != nil {
+		panic(err)
+	}
+	agg := costModel.AggregateCostModel(data, 0.0, "namespace", "")
+	_, ok := agg["test"]
+	assert.Assert(t, ok)
+
+	data2, err := cm.ComputeCostData(promCli, rclient, provider, "10m", "", "")
+	if err != nil {
+		panic(err)
+	}
+	agg2 := costModel.AggregateCostModel(data2, 0.0, "namespace", "")
+	_, ok2 := agg2["test"]
+	assert.Assert(t, ok2)
+
+	agg3 := costModel.AggregateCostModel(data, 0.0, "label", "testaggregation")
+	_, ok3 := agg3["foo"]
+	assert.Assert(t, ok3)
+}