Просмотр исходного кода

fix post-merge conflicts

Signed-off-by: r2k1 <yokree@gmail.com>
r2k1 1 год назад
Родитель
Commit
8d45479d7b

+ 5 - 3
pkg/cloud/aws/provider_test.go

@@ -11,6 +11,7 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/pkg/cloud/models"
+	"github.com/opencost/opencost/pkg/clustercache"
 	v1 "k8s.io/api/core/v1"
 )
 
@@ -120,11 +121,12 @@ func Test_PricingData_Regression(t *testing.T) {
 
 	// Check pricing data produced for each region
 	for _, region := range awsRegions {
-		node := v1.Node{}
-		node.SetLabels(map[string]string{"topology.kubernetes.io/region": region})
 
 		awsTest := AWS{}
-		res, _, err := awsTest.getRegionPricing([]*v1.Node{&node})
+		res, _, err := awsTest.getRegionPricing([]*clustercache.Node{
+			{
+				Labels: map[string]string{"topology.kubernetes.io/region": region},
+			}})
 		if err != nil {
 			t.Errorf("Failed to download pricing data for region %s: %v", region, err)
 		}

+ 16 - 12
pkg/clustercache/clustercache.go

@@ -59,11 +59,11 @@ type Node struct {
 }
 
 type Service struct {
-	Name      string
-	Namespace string
-	Selector  map[string]string
-	Type      v1.ServiceType
-	Status    v1.ServiceStatus
+	Name         string
+	Namespace    string
+	SpecSelector map[string]string
+	Type         v1.ServiceType
+	Status       v1.ServiceStatus
 }
 
 type DaemonSet struct {
@@ -121,7 +121,9 @@ type PersistentVolume struct {
 type ReplicationController struct{}
 
 type PodDisruptionBudget struct{}
-type ReplicaSet struct{}
+type ReplicaSet struct {
+	SpecSelector *metav1.LabelSelector
+}
 
 func transformNamespace(input *v1.Namespace) *Namespace {
 	return &Namespace{
@@ -183,11 +185,11 @@ func transformNode(input *v1.Node) *Node {
 
 func transformService(input *v1.Service) *Service {
 	return &Service{
-		Name:      input.Name,
-		Namespace: input.Namespace,
-		Selector:  input.Spec.Selector,
-		Type:      input.Spec.Type,
-		Status:    input.Status,
+		Name:         input.Name,
+		Namespace:    input.Namespace,
+		SpecSelector: input.Spec.Selector,
+		Type:         input.Spec.Type,
+		Status:       input.Status,
 	}
 }
 
@@ -266,7 +268,9 @@ func transformPodDisruptionBudget(input *policyv1.PodDisruptionBudget) *PodDisru
 }
 
 func transformReplicaSet(input *appsv1.ReplicaSet) *ReplicaSet {
-	return &ReplicaSet{}
+	return &ReplicaSet{
+		SpecSelector: input.Spec.Selector,
+	}
 }
 
 // ClusterCache defines an contract for an object which caches components within a cluster, ensuring

+ 3 - 3
pkg/costmodel/costmodel.go

@@ -1389,8 +1389,8 @@ func getPodServices(cache clustercache.ClusterCache, podList []*clustercache.Pod
 			podServicesMapping[key] = make(map[string][]string)
 		}
 		s := labels.Nothing()
-		if service.Selector != nil && len(service.Selector) > 0 {
-			s = labels.Set(service.Selector).AsSelectorPreValidated()
+		if service.SpecSelector != nil && len(service.SpecSelector) > 0 {
+			s = labels.Set(service.SpecSelector).AsSelectorPreValidated()
 		}
 		for _, pod := range podList {
 			labelSet := labels.Set(pod.Labels)
@@ -2330,7 +2330,7 @@ func getStatefulSetsOfPod(pod clustercache.Pod) []string {
 // getGPUCount reads the node's Status and Labels (via the k8s API) to identify
 // the number of GPUs and vGPUs are equipped on the node. If unable to identify
 // a GPU count, it will return -1.
-func getGPUCount(cache clustercache.ClusterCache, n *v1.Node) (float64, float64, error) {
+func getGPUCount(cache clustercache.ClusterCache, n *clustercache.Node) (float64, float64, error) {
 	g, hasGpu := n.Status.Capacity["nvidia.com/gpu"]
 	_, hasReplicas := n.Labels["nvidia.com/gpu.replicas"]
 

+ 12 - 16
pkg/costmodel/costmodel_test.go

@@ -4,23 +4,23 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/core/pkg/util"
+	"github.com/opencost/opencost/pkg/clustercache"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestGetGPUCount(t *testing.T) {
 	tests := []struct {
 		name          string
-		node          *v1.Node
+		node          *clustercache.Node
 		expectedGPU   float64
 		expectedVGPU  float64
 		expectedError bool
 	}{
 		{
 			name: "Standard NVIDIA GPU",
-			node: &v1.Node{
+			node: &clustercache.Node{
 				Status: v1.NodeStatus{
 					Capacity: v1.ResourceList{
 						"nvidia.com/gpu": resource.MustParse("2"),
@@ -32,12 +32,10 @@ func TestGetGPUCount(t *testing.T) {
 		},
 		{
 			name: "NVIDIA GPU with GFD - renameByDefault=true",
-			node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"nvidia.com/gpu.replicas": "4",
-						"nvidia.com/gpu.count":    "1",
-					},
+			node: &clustercache.Node{
+				Labels: map[string]string{
+					"nvidia.com/gpu.replicas": "4",
+					"nvidia.com/gpu.count":    "1",
 				},
 				Status: v1.NodeStatus{
 					Capacity: v1.ResourceList{
@@ -50,12 +48,10 @@ func TestGetGPUCount(t *testing.T) {
 		},
 		{
 			name: "NVIDIA GPU with GFD - renameByDefault=false",
-			node: &v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"nvidia.com/gpu.replicas": "4",
-						"nvidia.com/gpu.count":    "1",
-					},
+			node: &clustercache.Node{
+				Labels: map[string]string{
+					"nvidia.com/gpu.replicas": "4",
+					"nvidia.com/gpu.count":    "1",
 				},
 				Status: v1.NodeStatus{
 					Capacity: v1.ResourceList{
@@ -68,7 +64,7 @@ func TestGetGPUCount(t *testing.T) {
 		},
 		{
 			name: "No GPU",
-			node: &v1.Node{
+			node: &clustercache.Node{
 				Status: v1.NodeStatus{
 					Capacity: v1.ResourceList{},
 				},

+ 5 - 5
pkg/metrics/podlabelmetrics.go

@@ -39,18 +39,18 @@ func (kpmc KubePodLabelsCollector) Describe(ch chan<- *prometheus.Desc) {
 
 func (kpmc *KubePodLabelsCollector) UpdateControllerSelectorsCache() {
 	for _, r := range kpmc.KubeClusterCache.GetAllReplicaSets() {
-		for k := range r.Spec.Selector.MatchLabels {
+		for k := range r.SpecSelector.MatchLabels {
 			kpmc.labelsWhitelist[k] = true
 		}
-		for _, v := range r.Spec.Selector.MatchExpressions {
+		for _, v := range r.SpecSelector.MatchExpressions {
 			kpmc.labelsWhitelist[v.Key] = true
 		}
 	}
 	for _, ss := range kpmc.KubeClusterCache.GetAllStatefulSets() {
-		for k := range ss.Spec.Selector.MatchLabels {
+		for k := range ss.SpecSelector.MatchLabels {
 			kpmc.labelsWhitelist[k] = true
 		}
-		for _, v := range ss.Spec.Selector.MatchExpressions {
+		for _, v := range ss.SpecSelector.MatchExpressions {
 			kpmc.labelsWhitelist[v.Key] = true
 		}
 	}
@@ -59,7 +59,7 @@ func (kpmc *KubePodLabelsCollector) UpdateControllerSelectorsCache() {
 func (kpmc *KubePodLabelsCollector) UpdateServiceLabels() {
 	for _, service := range kpmc.KubeClusterCache.GetAllServices() {
 		// Just unroll the selector and keep all labels whose keys could match a service selector
-		for k := range service.Spec.Selector {
+		for k := range service.SpecSelector {
 			kpmc.labelsWhitelist[k] = true
 		}
 	}

+ 12 - 18
pkg/metrics/podlabelmetrics_test.go

@@ -4,27 +4,21 @@ import (
 	"testing"
 
 	"github.com/opencost/opencost/pkg/clustercache"
-	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func TestWhitelist(t *testing.T) {
-	sampleServices := []*v1.Service{&v1.Service{
-		Spec: v1.ServiceSpec{
-			Selector: map[string]string{"servicewhitelistlabel": "foo"},
-		},
+	sampleServices := []*clustercache.Service{{
+		SpecSelector: map[string]string{"servicewhitelistlabel": "foo"},
 	}}
 	replicaSetLabelSelector := metav1.LabelSelector{
 		MatchLabels: map[string]string{"replicasetwhitelistlabel1": "bar"},
 	}
-	sampleReplicaSets := []*appsv1.ReplicaSet{{
-		Spec: appsv1.ReplicaSetSpec{
-			Selector: &replicaSetLabelSelector,
-		},
+	sampleReplicaSets := []*clustercache.ReplicaSet{{
+		SpecSelector: &replicaSetLabelSelector,
 	}}
 
-	sampleStatefulSets := []*appsv1.StatefulSet{}
+	sampleStatefulSets := []*clustercache.StatefulSet{}
 
 	kc := NewFakeCache(sampleReplicaSets, sampleStatefulSets, sampleServices)
 	wl := map[string]bool{
@@ -51,24 +45,24 @@ func TestWhitelist(t *testing.T) {
 
 type FakeCache struct {
 	clustercache.ClusterCache
-	replicasets  []*appsv1.ReplicaSet
-	statefulsets []*appsv1.StatefulSet
-	services     []*v1.Service
+	replicasets  []*clustercache.ReplicaSet
+	statefulsets []*clustercache.StatefulSet
+	services     []*clustercache.Service
 }
 
-func (f FakeCache) GetAllReplicaSets() []*appsv1.ReplicaSet {
+func (f FakeCache) GetAllReplicaSets() []*clustercache.ReplicaSet {
 	return f.replicasets
 }
 
-func (f FakeCache) GetAllStatefulSets() []*appsv1.StatefulSet {
+func (f FakeCache) GetAllStatefulSets() []*clustercache.StatefulSet {
 	return f.statefulsets
 }
 
-func (f FakeCache) GetAllServices() []*v1.Service {
+func (f FakeCache) GetAllServices() []*clustercache.Service {
 	return f.services
 }
 
-func NewFakeCache(replicasets []*appsv1.ReplicaSet, statefulsets []*appsv1.StatefulSet, services []*v1.Service) FakeCache {
+func NewFakeCache(replicasets []*clustercache.ReplicaSet, statefulsets []*clustercache.StatefulSet, services []*clustercache.Service) FakeCache {
 	return FakeCache{
 		replicasets:  replicasets,
 		statefulsets: statefulsets,

+ 1 - 1
pkg/metrics/servicemetrics.go

@@ -42,7 +42,7 @@ func (sc KubecostServiceCollector) Collect(ch chan<- prometheus.Metric) {
 		serviceName := svc.Name
 		serviceNS := svc.Namespace
 
-		labels, values := promutil.KubeLabelsToLabels(promutil.SanitizeLabels(svc.Selector))
+		labels, values := promutil.KubeLabelsToLabels(promutil.SanitizeLabels(svc.SpecSelector))
 		if len(labels) > 0 {
 			m := newServiceSelectorLabelsMetric(serviceName, serviceNS, "service_selector_labels", labels, values)
 			ch <- m