Jelajahi Sumber

Logic for exclusion of all non-core cost-model metrics

Kaelan Patel 4 tahun lalu
induk
melakukan
46bf8a9ed0

+ 15 - 112
pkg/costmodel/metrics.go

@@ -1,10 +1,7 @@
 package costmodel
 package costmodel
 
 
 import (
 import (
-	"fmt"
-	"io/ioutil"
 	"math"
 	"math"
-	"os"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
@@ -20,8 +17,6 @@ import (
 	"github.com/kubecost/cost-model/pkg/prom"
 	"github.com/kubecost/cost-model/pkg/prom"
 	"github.com/kubecost/cost-model/pkg/util"
 	"github.com/kubecost/cost-model/pkg/util"
 	"github.com/kubecost/cost-model/pkg/util/atomic"
 	"github.com/kubecost/cost-model/pkg/util/atomic"
-	"github.com/kubecost/cost-model/pkg/util/json"
-	"github.com/kubecost/cost-model/pkg/util/watcher"
 
 
 	promclient "github.com/prometheus/client_golang/api"
 	promclient "github.com/prometheus/client_golang/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus"
@@ -130,20 +125,8 @@ var (
 )
 )
 
 
 // initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
 // initCostModelMetrics uses a sync.Once to ensure that these metrics are only created once
-func initCostModelMetrics(clusterCache clustercache.ClusterCache, provider cloud.Provider, clusterInfo clusters.ClusterInfoProvider) {
+func initCostModelMetrics(clusterCache clustercache.ClusterCache, provider cloud.Provider, clusterInfo clusters.ClusterInfoProvider, metricsConfig *metrics.MetricsConfig) {
 	metricsInit.Do(func() {
 	metricsInit.Do(func() {
-
-		metricsConfig, err := GetMetricsConfig()
-		if err != nil {
-			log.Infof("Failed to get metrics configuration: %s", err)
-		}
-
-		log.Infof("--DISABLED LABELS--")
-		for i := range metricsConfig.DisabledMetrics {
-			log.Infof("DISABLE LABEL: %s", metricsConfig.DisabledMetrics[i])
-		}
-		log.Infof("-------------------")
-
 		cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 		cpuGv = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name: "node_cpu_hourly_cost",
 			Name: "node_cpu_hourly_cost",
 			Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
 			Help: "node_cpu_hourly_cost hourly cost for each cpu on this node",
@@ -237,98 +220,6 @@ func initCostModelMetrics(clusterCache clustercache.ClusterCache, provider cloud
 	})
 	})
 }
 }
 
 
-var metricsConfigLock = new(sync.Mutex)
-
-type MetricsConfig struct {
-	DisabledMetrics []string `json:"disabledMetrics"`
-}
-
-func (mc MetricsConfig) GetDisabledMetricsMap() map[string]struct{} {
-	disabledMetricsMap := make(map[string]struct{})
-
-	for i := range mc.DisabledMetrics {
-		disabledMetricsMap[mc.DisabledMetrics[i]] = struct{}{}
-	}
-
-	return disabledMetricsMap
-}
-
-func GetMetricsConfig() (*MetricsConfig, error) {
-	metricsConfigLock.Lock()
-	defer metricsConfigLock.Unlock()
-	mc := &MetricsConfig{}
-	body, err := ioutil.ReadFile("/var/configs/metrics.json")
-	if os.IsNotExist(err) {
-
-		return mc, nil
-	} else if err != nil {
-		return mc, err
-	}
-
-	err = json.Unmarshal(body, mc)
-	if err != nil {
-		return mc, err
-	}
-
-	return mc, nil
-}
-
-func UpdateMetricsConfig(mc *MetricsConfig) (*MetricsConfig, error) {
-	metricsConfigLock.Lock()
-	defer metricsConfigLock.Unlock()
-
-	mcb, err := json.Marshal(mc)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding metrics config struct: %s", err)
-	}
-
-	err = ioutil.WriteFile("/var/configs/metrics.json", mcb, 0644)
-	if err != nil {
-		return nil, fmt.Errorf("error writing to metrics config file: %s", err)
-	}
-
-	return mc, nil
-}
-
-func UpdateMetricsConfigFromConfigmap(data map[string]string) error {
-
-	mc := &MetricsConfig{}
-	key := "metrics.json"
-
-	cdata, ok := data[key]
-	if !ok {
-		return fmt.Errorf("error finding metrics config data")
-	}
-
-	err := json.Unmarshal([]byte(cdata), &mc)
-	if err != nil {
-		return fmt.Errorf("failed to unmarshal metrics configs: %s", err)
-	}
-
-	_, err = UpdateMetricsConfig(mc)
-	if err != nil {
-		return err
-	}
-
-	return nil
-
-}
-
-func GetMetricsConfigWatcher() *watcher.ConfigMapWatcher {
-	return &watcher.ConfigMapWatcher{
-		ConfigMapName: "metrics-config", // temporary, use env
-		WatchFunc: func(name string, data map[string]string) error {
-			klog.Infof("--CONFIGMAP DATA--")
-			for key, val := range data {
-				klog.Infof("%s : %s", key, val)
-			}
-			klog.Infof("------------------")
-			err := UpdateMetricsConfigFromConfigmap(data)
-			return err
-		},
-	}
-}
-
 //--------------------------------------------------------------------------
 //--------------------------------------------------------------------------
 //  CostModelMetricsEmitter
 //  CostModelMetricsEmitter
 //--------------------------------------------------------------------------
 //--------------------------------------------------------------------------
@@ -365,10 +256,22 @@ type CostModelMetricsEmitter struct {
 
 
 // NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
 // NewCostModelMetricsEmitter creates a new cost-model metrics emitter. Use Start() to begin metric emission.
 func NewCostModelMetricsEmitter(promClient promclient.Client, clusterCache clustercache.ClusterCache, provider cloud.Provider, clusterInfo clusters.ClusterInfoProvider, model *CostModel) *CostModelMetricsEmitter {
 func NewCostModelMetricsEmitter(promClient promclient.Client, clusterCache clustercache.ClusterCache, provider cloud.Provider, clusterInfo clusters.ClusterInfoProvider, model *CostModel) *CostModelMetricsEmitter {
+
+	metricsConfig, err := metrics.GetMetricsConfig()
+	if err != nil {
+		log.Infof("Failed to get metrics configuration: %s", err)
+	}
+
+	log.Infof("--DISABLED LABELS--")
+	for i := range metricsConfig.DisabledMetrics {
+		log.Infof("DISABLE LABEL: %s", metricsConfig.DisabledMetrics[i])
+	}
+	log.Infof("-------------------")
+
 	// init will only actually execute once to register the custom gauges
 	// init will only actually execute once to register the custom gauges
-	initCostModelMetrics(clusterCache, provider, clusterInfo)
+	initCostModelMetrics(clusterCache, provider, clusterInfo, metricsConfig)
 
 
-	metrics.InitKubeMetrics(clusterCache, &metrics.KubeMetricsOpts{
+	metrics.InitKubeMetrics(clusterCache, metricsConfig, &metrics.KubeMetricsOpts{
 		EmitKubecostControllerMetrics: true,
 		EmitKubecostControllerMetrics: true,
 		EmitNamespaceAnnotations:      env.IsEmitNamespaceAnnotationsMetric(),
 		EmitNamespaceAnnotations:      env.IsEmitNamespaceAnnotationsMetric(),
 		EmitPodAnnotations:            env.IsEmitPodAnnotationsMetric(),
 		EmitPodAnnotations:            env.IsEmitPodAnnotationsMetric(),

+ 2 - 2
pkg/costmodel/router.go

@@ -13,6 +13,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/kubecost/cost-model/pkg/config"
 	"github.com/kubecost/cost-model/pkg/config"
+	"github.com/kubecost/cost-model/pkg/metrics"
 	"github.com/kubecost/cost-model/pkg/services"
 	"github.com/kubecost/cost-model/pkg/services"
 	"github.com/kubecost/cost-model/pkg/util/httputil"
 	"github.com/kubecost/cost-model/pkg/util/httputil"
 	"github.com/kubecost/cost-model/pkg/util/timeutil"
 	"github.com/kubecost/cost-model/pkg/util/timeutil"
@@ -1416,8 +1417,7 @@ func Initialize(additionalConfigWatchers ...*watcher.ConfigMapWatcher) *Accesses
 
 
 	// Append the pricing config watcher
 	// Append the pricing config watcher
 	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
 	configWatchers.AddWatcher(cloud.ConfigWatcherFor(cloudProvider))
-
-	configWatchers.AddWatcher(GetMetricsConfigWatcher())
+	configWatchers.AddWatcher(metrics.GetMetricsConfigWatcher())
 
 
 	watchConfigFunc := configWatchers.ToWatchFunc()
 	watchConfigFunc := configWatchers.ToWatchFunc()
 	watchedConfigs := configWatchers.GetWatchedConfigs()
 	watchedConfigs := configWatchers.GetWatchedConfigs()

+ 40 - 21
pkg/metrics/deploymentmetrics.go

@@ -16,26 +16,34 @@ import (
 // specific deployment metrics.
 // specific deployment metrics.
 type KubecostDeploymentCollector struct {
 type KubecostDeploymentCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kdc KubecostDeploymentCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kdc KubecostDeploymentCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("deployment_match_labels", "deployment match labels", []string{}, nil)
+	disabledMetrics := kdc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["deployment_match_labels"]; !ok {
+		ch <- prometheus.NewDesc("deployment_match_labels", "deployment match labels", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kdc KubecostDeploymentCollector) Collect(ch chan<- prometheus.Metric) {
 func (kdc KubecostDeploymentCollector) Collect(ch chan<- prometheus.Metric) {
-	ds := kdc.KubeClusterCache.GetAllDeployments()
-
-	for _, deployment := range ds {
-		deploymentName := deployment.GetName()
-		deploymentNS := deployment.GetNamespace()
-
-		labels, values := prom.KubeLabelsToLabels(deployment.Spec.Selector.MatchLabels)
-		if len(labels) > 0 {
-			m := newDeploymentMatchLabelsMetric(deploymentName, deploymentNS, "deployment_match_labels", labels, values)
-			ch <- m
+	disabledMetrics := kdc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["deployment_match_labels"]; !ok {
+		ds := kdc.KubeClusterCache.GetAllDeployments()
+		for _, deployment := range ds {
+			deploymentName := deployment.GetName()
+			deploymentNS := deployment.GetNamespace()
+
+			labels, values := prom.KubeLabelsToLabels(deployment.Spec.Selector.MatchLabels)
+			if len(labels) > 0 {
+				m := newDeploymentMatchLabelsMetric(deploymentName, deploymentNS, "deployment_match_labels", labels, values)
+				ch <- m
+			}
 		}
 		}
 	}
 	}
 }
 }
@@ -109,19 +117,27 @@ func (dmlm DeploymentMatchLabelsMetric) Write(m *dto.Metric) error {
 // KubeDeploymentCollector is a prometheus collector that generates
 // KubeDeploymentCollector is a prometheus collector that generates
 type KubeDeploymentCollector struct {
 type KubeDeploymentCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kdc KubeDeploymentCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kdc KubeDeploymentCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_deployment_spec_replicas", "Number of desired pods for a deployment.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_deployment_status_replicas_available", "The number of available replicas per deployment.", []string{}, nil)
+	disabledMetrics := kdc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_deployment_spec_replicas"]; !ok {
+		ch <- prometheus.NewDesc("kube_deployment_spec_replicas", "Number of desired pods for a deployment.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_deployment_status_replicas_available"]; !ok {
+		ch <- prometheus.NewDesc("kube_deployment_status_replicas_available", "The number of available replicas per deployment.", []string{}, nil)
+	}
 
 
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kdc KubeDeploymentCollector) Collect(ch chan<- prometheus.Metric) {
 func (kdc KubeDeploymentCollector) Collect(ch chan<- prometheus.Metric) {
 	deployments := kdc.KubeClusterCache.GetAllDeployments()
 	deployments := kdc.KubeClusterCache.GetAllDeployments()
+	disabledMetrics := kdc.metricsConfig.GetDisabledMetricsMap()
 
 
 	for _, deployment := range deployments {
 	for _, deployment := range deployments {
 		deploymentName := deployment.GetName()
 		deploymentName := deployment.GetName()
@@ -135,14 +151,17 @@ func (kdc KubeDeploymentCollector) Collect(ch chan<- prometheus.Metric) {
 			replicas = *deployment.Spec.Replicas
 			replicas = *deployment.Spec.Replicas
 		}
 		}
 
 
-		ch <- newKubeDeploymentReplicasMetric("kube_deployment_spec_replicas", deploymentName, deploymentNS, replicas)
-
-		// Replicas Available
-		ch <- newKubeDeploymentStatusAvailableReplicasMetric(
-			"kube_deployment_status_replicas_available",
-			deploymentName,
-			deploymentNS,
-			deployment.Status.AvailableReplicas)
+		if _, ok := disabledMetrics["kube_deployment_spec_replicas"]; !ok {
+			ch <- newKubeDeploymentReplicasMetric("kube_deployment_spec_replicas", deploymentName, deploymentNS, replicas)
+		}
+		if _, ok := disabledMetrics["kube_deployment_status_replicas_available"]; !ok {
+			// Replicas Available
+			ch <- newKubeDeploymentStatusAvailableReplicasMetric(
+				"kube_deployment_status_replicas_available",
+				deploymentName,
+				deploymentNS,
+				deployment.Status.AvailableReplicas)
+		}
 	}
 	}
 }
 }
 
 

+ 29 - 20
pkg/metrics/jobmetrics.go

@@ -18,36 +18,45 @@ var (
 // KubeJobCollector is a prometheus collector that generates job sourced metrics.
 // KubeJobCollector is a prometheus collector that generates job sourced metrics.
 type KubeJobCollector struct {
 type KubeJobCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kjc KubeJobCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kjc KubeJobCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_job_status_failed", "The number of pods which reached Phase Failed and the reason for failure.", []string{}, nil)
+	disabledMetrics := kjc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_annotations"]; !ok {
+		ch <- prometheus.NewDesc("kube_job_status_failed", "The number of pods which reached Phase Failed and the reason for failure.", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kjc KubeJobCollector) Collect(ch chan<- prometheus.Metric) {
 func (kjc KubeJobCollector) Collect(ch chan<- prometheus.Metric) {
-	jobs := kjc.KubeClusterCache.GetAllJobs()
-	for _, job := range jobs {
-		jobName := job.GetName()
-		jobNS := job.GetNamespace()
-
-		if job.Status.Failed == 0 {
-			ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", "", 0)
-		} else {
-			for _, condition := range job.Status.Conditions {
-				if condition.Type == batchv1.JobFailed {
-					reasonKnown := false
-					for _, reason := range jobFailureReasons {
-						reasonKnown = reasonKnown || failureReason(&condition, reason)
-
-						ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", reason, boolFloat64(failureReason(&condition, reason)))
-					}
+	disabledMetrics := kjc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_annotations"]; !ok {
+		jobs := kjc.KubeClusterCache.GetAllJobs()
+		for _, job := range jobs {
+			jobName := job.GetName()
+			jobNS := job.GetNamespace()
+
+			if job.Status.Failed == 0 {
+				ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", "", 0)
+			} else {
+				for _, condition := range job.Status.Conditions {
+					if condition.Type == batchv1.JobFailed {
+						reasonKnown := false
+						for _, reason := range jobFailureReasons {
+							reasonKnown = reasonKnown || failureReason(&condition, reason)
+
+							ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", reason, boolFloat64(failureReason(&condition, reason)))
+						}
 
 
-					// for unknown reasons
-					if !reasonKnown {
-						ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", "", float64(job.Status.Failed))
+						// for unknown reasons
+						if !reasonKnown {
+							ch <- newKubeJobStatusFailedMetric(jobName, jobNS, "kube_job_status_failed", "", float64(job.Status.Failed))
+						}
 					}
 					}
 				}
 				}
 			}
 			}

+ 16 - 1
pkg/metrics/kubemetrics.go

@@ -43,7 +43,7 @@ func DefaultKubeMetricsOpts() *KubeMetricsOpts {
 }
 }
 
 
 // InitKubeMetrics initializes kubernetes metric emission using the provided options.
 // InitKubeMetrics initializes kubernetes metric emission using the provided options.
-func InitKubeMetrics(clusterCache clustercache.ClusterCache, opts *KubeMetricsOpts) {
+func InitKubeMetrics(clusterCache clustercache.ClusterCache, metricsConfig *MetricsConfig, opts *KubeMetricsOpts) {
 	if opts == nil {
 	if opts == nil {
 		opts = DefaultKubeMetricsOpts()
 		opts = DefaultKubeMetricsOpts()
 	}
 	}
@@ -52,58 +52,73 @@ func InitKubeMetrics(clusterCache clustercache.ClusterCache, opts *KubeMetricsOp
 		if opts.EmitKubecostControllerMetrics {
 		if opts.EmitKubecostControllerMetrics {
 			prometheus.MustRegister(KubecostServiceCollector{
 			prometheus.MustRegister(KubecostServiceCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubecostDeploymentCollector{
 			prometheus.MustRegister(KubecostDeploymentCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubecostStatefulsetCollector{
 			prometheus.MustRegister(KubecostStatefulsetCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 		}
 		}
 
 
 		if opts.EmitPodAnnotations {
 		if opts.EmitPodAnnotations {
 			prometheus.MustRegister(KubecostPodCollector{
 			prometheus.MustRegister(KubecostPodCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 		}
 		}
 
 
 		if opts.EmitNamespaceAnnotations {
 		if opts.EmitNamespaceAnnotations {
 			prometheus.MustRegister(KubecostNamespaceCollector{
 			prometheus.MustRegister(KubecostNamespaceCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 		}
 		}
 
 
 		if opts.EmitKubeStateMetrics {
 		if opts.EmitKubeStateMetrics {
 			prometheus.MustRegister(KubeNodeCollector{
 			prometheus.MustRegister(KubeNodeCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubeNamespaceCollector{
 			prometheus.MustRegister(KubeNamespaceCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubeDeploymentCollector{
 			prometheus.MustRegister(KubeDeploymentCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubePodCollector{
 			prometheus.MustRegister(KubePodCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubePVCollector{
 			prometheus.MustRegister(KubePVCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubePVCCollector{
 			prometheus.MustRegister(KubePVCCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubeJobCollector{
 			prometheus.MustRegister(KubeJobCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 		} else if opts.EmitKubeStateMetricsV1Only {
 		} else if opts.EmitKubeStateMetricsV1Only {
 			prometheus.MustRegister(KubeNodeCollector{
 			prometheus.MustRegister(KubeNodeCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubeNamespaceCollector{
 			prometheus.MustRegister(KubeNamespaceCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 			prometheus.MustRegister(KubePodLabelsCollector{
 			prometheus.MustRegister(KubePodLabelsCollector{
 				KubeClusterCache: clusterCache,
 				KubeClusterCache: clusterCache,
+				metricsConfig:    *metricsConfig,
 			})
 			})
 		}
 		}
 	})
 	})

+ 106 - 0
pkg/metrics/metricsconfig.go

@@ -0,0 +1,106 @@
+package metrics
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/kubecost/cost-model/pkg/log"
+	"github.com/kubecost/cost-model/pkg/util/watcher"
+	"k8s.io/klog"
+)
+
+var metricsConfigLock = new(sync.Mutex)
+
+type MetricsConfig struct {
+	DisabledMetrics []string `json:"disabledMetrics"`
+}
+
+func (mc MetricsConfig) GetDisabledMetricsMap() map[string]struct{} {
+	disabledMetricsMap := make(map[string]struct{})
+
+	for i := range mc.DisabledMetrics {
+		disabledMetricsMap[mc.DisabledMetrics[i]] = struct{}{}
+		log.Infof("Adding disabled metric %s", mc.DisabledMetrics[i])
+	}
+
+	return disabledMetricsMap
+}
+
+func GetMetricsConfig() (*MetricsConfig, error) {
+	metricsConfigLock.Lock()
+	defer metricsConfigLock.Unlock()
+	mc := &MetricsConfig{}
+	body, err := ioutil.ReadFile("/var/configs/metrics.json")
+	if os.IsNotExist(err) {
+
+		return mc, nil
+	} else if err != nil {
+		return mc, err
+	}
+
+	err = json.Unmarshal(body, mc)
+	if err != nil {
+		return mc, err
+	}
+
+	return mc, nil
+}
+
+func UpdateMetricsConfig(mc *MetricsConfig) (*MetricsConfig, error) {
+	metricsConfigLock.Lock()
+	defer metricsConfigLock.Unlock()
+
+	mcb, err := json.Marshal(mc)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding metrics config struct: %s", err)
+	}
+
+	err = ioutil.WriteFile("/var/configs/metrics.json", mcb, 0644)
+	if err != nil {
+		return nil, fmt.Errorf("error writing to metrics config file: %s", err)
+	}
+
+	return mc, nil
+}
+
+func UpdateMetricsConfigFromConfigmap(data map[string]string) error {
+
+	mc := &MetricsConfig{}
+	key := "metrics.json"
+
+	cdata, ok := data[key]
+	if !ok {
+		return fmt.Errorf("error finding metrics config data")
+	}
+
+	err := json.Unmarshal([]byte(cdata), &mc)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal metrics configs: %s", err)
+	}
+
+	_, err = UpdateMetricsConfig(mc)
+	if err != nil {
+		return err
+	}
+
+	return nil
+
+}
+
+func GetMetricsConfigWatcher() *watcher.ConfigMapWatcher {
+	return &watcher.ConfigMapWatcher{
+		ConfigMapName: "metrics-config", // temporary, use env
+		WatchFunc: func(name string, data map[string]string) error {
+			klog.Infof("--CONFIGMAP DATA--")
+			for key, val := range data {
+				klog.Infof("%s : %s", key, val)
+			}
+			klog.Infof("------------------")
+			err := UpdateMetricsConfigFromConfigmap(data)
+			return err
+		},
+	}
+}

+ 36 - 18
pkg/metrics/namespacemetrics.go

@@ -15,24 +15,33 @@ import (
 // KubecostNamespaceCollector is a prometheus collector that generates namespace sourced metrics
 // KubecostNamespaceCollector is a prometheus collector that generates namespace sourced metrics
 type KubecostNamespaceCollector struct {
 type KubecostNamespaceCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (nsac KubecostNamespaceCollector) Describe(ch chan<- *prometheus.Desc) {
 func (nsac KubecostNamespaceCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_namespace_annotations", "namespace annotations", []string{}, nil)
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_namespace_labels"]; !ok {
+		ch <- prometheus.NewDesc("kube_namespace_annotations", "namespace annotations", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (nsac KubecostNamespaceCollector) Collect(ch chan<- prometheus.Metric) {
 func (nsac KubecostNamespaceCollector) Collect(ch chan<- prometheus.Metric) {
-	namespaces := nsac.KubeClusterCache.GetAllNamespaces()
-	for _, namespace := range namespaces {
-		nsName := namespace.GetName()
-
-		labels, values := prom.KubeAnnotationsToLabels(namespace.Annotations)
-		if len(labels) > 0 {
-			m := newNamespaceAnnotationsMetric("kube_namespace_annotations", nsName, labels, values)
-			ch <- m
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_namespace_labels"]; !ok {
+		namespaces := nsac.KubeClusterCache.GetAllNamespaces()
+		for _, namespace := range namespaces {
+			nsName := namespace.GetName()
+
+			labels, values := prom.KubeAnnotationsToLabels(namespace.Annotations)
+			if len(labels) > 0 {
+				m := newNamespaceAnnotationsMetric("kube_namespace_annotations", nsName, labels, values)
+				ch <- m
+			}
 		}
 		}
 	}
 	}
 }
 }
@@ -100,24 +109,33 @@ func (nam NamespaceAnnotationsMetric) Write(m *dto.Metric) error {
 // KubeNamespaceCollector is a prometheus collector that generates namespace sourced metrics
 // KubeNamespaceCollector is a prometheus collector that generates namespace sourced metrics
 type KubeNamespaceCollector struct {
 type KubeNamespaceCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (nsac KubeNamespaceCollector) Describe(ch chan<- *prometheus.Desc) {
 func (nsac KubeNamespaceCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_namespace_labels", "namespace labels", []string{}, nil)
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_namespace_labels"]; !ok {
+		ch <- prometheus.NewDesc("kube_namespace_labels", "namespace labels", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (nsac KubeNamespaceCollector) Collect(ch chan<- prometheus.Metric) {
 func (nsac KubeNamespaceCollector) Collect(ch chan<- prometheus.Metric) {
-	namespaces := nsac.KubeClusterCache.GetAllNamespaces()
-	for _, namespace := range namespaces {
-		nsName := namespace.GetName()
-
-		labels, values := prom.KubeLabelsToLabels(namespace.Labels)
-		if len(labels) > 0 {
-			m := newNamespaceAnnotationsMetric("kube_namespace_labels", nsName, labels, values)
-			ch <- m
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_namespace_labels"]; !ok {
+		namespaces := nsac.KubeClusterCache.GetAllNamespaces()
+		for _, namespace := range namespaces {
+			nsName := namespace.GetName()
+
+			labels, values := prom.KubeLabelsToLabels(namespace.Labels)
+			if len(labels) > 0 {
+				m := newNamespaceAnnotationsMetric("kube_namespace_labels", nsName, labels, values)
+				ch <- m
+			}
 		}
 		}
 	}
 	}
 }
 }

+ 61 - 35
pkg/metrics/nodemetrics.go

@@ -22,24 +22,45 @@ var (
 // KubeNodeCollector is a prometheus collector that generates node sourced metrics.
 // KubeNodeCollector is a prometheus collector that generates node sourced metrics.
 type KubeNodeCollector struct {
 type KubeNodeCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (nsac KubeNodeCollector) Describe(ch chan<- *prometheus.Desc) {
 func (nsac KubeNodeCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_node_status_capacity", "Node resource capacity.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_capacity_memory_bytes", "node capacity memory bytes", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_capacity_cpu_cores", "node capacity cpu cores", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_allocatable", "The allocatable for different resources of a node that are available for scheduling.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_allocatable_cpu_cores", "The allocatable cpu cores.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_allocatable_memory_bytes", "The allocatable memory in bytes.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_labels", "all labels for each node prefixed with label_", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_node_status_condition", "The condition of a cluster node.", []string{}, nil)
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_node_status_capacity"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_capacity", "Node resource capacity.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_capacity_memory_bytes"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_capacity_memory_bytes", "node capacity memory bytes", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_capacity_cpu_cores"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_capacity_cpu_cores", "node capacity cpu cores", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_allocatable"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_allocatable", "The allocatable for different resources of a node that are available for scheduling.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_allocatable_cpu_cores"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_allocatable_cpu_cores", "The allocatable cpu cores.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_allocatable_memory_bytes"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_allocatable_memory_bytes", "The allocatable memory in bytes.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_labels"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_labels", "all labels for each node prefixed with label_", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_node_status_condition"]; !ok {
+		ch <- prometheus.NewDesc("kube_node_status_condition", "The condition of a cluster node.", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (nsac KubeNodeCollector) Collect(ch chan<- prometheus.Metric) {
 func (nsac KubeNodeCollector) Collect(ch chan<- prometheus.Metric) {
 	nodes := nsac.KubeClusterCache.GetAllNodes()
 	nodes := nsac.KubeClusterCache.GetAllNodes()
+	disabledMetrics := nsac.metricsConfig.GetDisabledMetricsMap()
+
 	for _, node := range nodes {
 	for _, node := range nodes {
 		nodeName := node.GetName()
 		nodeName := node.GetName()
 
 
@@ -54,15 +75,21 @@ func (nsac KubeNodeCollector) Collect(ch chan<- prometheus.Metric) {
 			}
 			}
 
 
 			// KSM v1 Emission
 			// KSM v1 Emission
-			if resource == "cpu" {
-				ch <- newKubeNodeStatusCapacityCPUCoresMetric("kube_node_status_capacity_cpu_cores", nodeName, value)
+			if _, ok := disabledMetrics["kube_node_status_capacity_cpu_cores"]; !ok {
+				if resource == "cpu" {
+					ch <- newKubeNodeStatusCapacityCPUCoresMetric("kube_node_status_capacity_cpu_cores", nodeName, value)
 
 
+				}
 			}
 			}
-			if resource == "memory" {
-				ch <- newKubeNodeStatusCapacityMemoryBytesMetric("kube_node_status_capacity_memory_bytes", nodeName, value)
+			if _, ok := disabledMetrics["kube_node_status_capacity_memory_bytes"]; !ok {
+				if resource == "memory" {
+					ch <- newKubeNodeStatusCapacityMemoryBytesMetric("kube_node_status_capacity_memory_bytes", nodeName, value)
+				}
 			}
 			}
 
 
-			ch <- newKubeNodeStatusCapacityMetric("kube_node_status_capacity", nodeName, resource, unit, value)
+			if _, ok := disabledMetrics["kube_node_status_capacity"]; !ok {
+				ch <- newKubeNodeStatusCapacityMetric("kube_node_status_capacity", nodeName, resource, unit, value)
+			}
 		}
 		}
 
 
 		// Node Allocatable Resources
 		// Node Allocatable Resources
@@ -76,31 +103,38 @@ func (nsac KubeNodeCollector) Collect(ch chan<- prometheus.Metric) {
 			}
 			}
 
 
 			// KSM v1 Emission
 			// KSM v1 Emission
-			if resource == "cpu" {
-				ch <- newKubeNodeStatusAllocatableCPUCoresMetric("kube_node_status_allocatable_cpu_cores", nodeName, value)
-
+			if _, ok := disabledMetrics["kube_node_status_allocatable_cpu_cores"]; !ok {
+				if resource == "cpu" {
+					ch <- newKubeNodeStatusAllocatableCPUCoresMetric("kube_node_status_allocatable_cpu_cores", nodeName, value)
+				}
 			}
 			}
-			if resource == "memory" {
-				ch <- newKubeNodeStatusAllocatableMemoryBytesMetric("kube_node_status_allocatable_memory_bytes", nodeName, value)
+			if _, ok := disabledMetrics["kube_node_status_allocatable_memory_bytes"]; !ok {
+				if resource == "memory" {
+					ch <- newKubeNodeStatusAllocatableMemoryBytesMetric("kube_node_status_allocatable_memory_bytes", nodeName, value)
+				}
+			}
+			if _, ok := disabledMetrics["kube_node_status_allocatable"]; !ok {
+				ch <- newKubeNodeStatusAllocatableMetric("kube_node_status_allocatable", nodeName, resource, unit, value)
 			}
 			}
-
-			ch <- newKubeNodeStatusAllocatableMetric("kube_node_status_allocatable", nodeName, resource, unit, value)
 		}
 		}
 
 
 		// node labels
 		// node labels
-		labelNames, labelValues := prom.KubePrependQualifierToLabels(node.GetLabels(), "label_")
-		ch <- newKubeNodeLabelsMetric(nodeName, "kube_node_labels", labelNames, labelValues)
+		if _, ok := disabledMetrics["kube_node_labels"]; !ok {
+			labelNames, labelValues := prom.KubePrependQualifierToLabels(node.GetLabels(), "label_")
+			ch <- newKubeNodeLabelsMetric(nodeName, "kube_node_labels", labelNames, labelValues)
+		}
 
 
 		// kube_node_status_condition
 		// kube_node_status_condition
 		// Collect node conditions and while default to false.
 		// Collect node conditions and while default to false.
-		for _, c := range node.Status.Conditions {
-			conditions := getConditions(c.Status)
+		if _, ok := disabledMetrics["kube_node_status_condition"]; !ok {
+			for _, c := range node.Status.Conditions {
+				conditions := getConditions(c.Status)
 
 
-			for _, cond := range conditions {
-				ch <- newKubeNodeStatusConditionMetric(nodeName, "kube_node_status_condition", string(c.Type), cond.status, cond.value)
+				for _, cond := range conditions {
+					ch <- newKubeNodeStatusConditionMetric(nodeName, "kube_node_status_condition", string(c.Type), cond.status, cond.value)
+				}
 			}
 			}
 		}
 		}
-
 	}
 	}
 }
 }
 
 
@@ -256,14 +290,6 @@ func (nam KubeNodeStatusCapacityCPUCoresMetric) Write(m *dto.Metric) error {
 	return nil
 	return nil
 }
 }
 
 
-//--------------------------------------------------------------------------
-//  KubeNodeLabelsCollector
-//--------------------------------------------------------------------------
-//
-// We use this to emit kube_node_labels with all of a node's labels, regardless
-// of the whitelist setting introduced in KSM v2. See
-// https://github.com/kubernetes/kube-state-metrics/issues/1270#issuecomment-712986441
-
 //--------------------------------------------------------------------------
 //--------------------------------------------------------------------------
 //  KubeNodeLabelsMetric
 //  KubeNodeLabelsMetric
 //--------------------------------------------------------------------------
 //--------------------------------------------------------------------------

+ 19 - 6
pkg/metrics/podlabelmetrics.go

@@ -43,30 +43,43 @@ func (kpmc KubecostPodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 // KubePodLabelsCollector is a prometheus collector that emits pod labels only
 // KubePodLabelsCollector is a prometheus collector that emits pod labels only
 type KubePodLabelsCollector struct {
 type KubePodLabelsCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of pod labels only
 // Describe sends the super-set of all possible descriptors of pod labels only
 // collected by this Collector.
 // collected by this Collector.
 func (kpmc KubePodLabelsCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kpmc KubePodLabelsCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_pod_labels", "All labels for each pod prefixed with label_", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_owner", "Information about the Pod's owner", []string{}, nil)
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_labels"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_labels", "All labels for each pod prefixed with label_", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_owner"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_owner", "Information about the Pod's owner", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpmc KubePodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 func (kpmc KubePodLabelsCollector) Collect(ch chan<- prometheus.Metric) {
 	pods := kpmc.KubeClusterCache.GetAllPods()
 	pods := kpmc.KubeClusterCache.GetAllPods()
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
 	for _, pod := range pods {
 	for _, pod := range pods {
 		podName := pod.GetName()
 		podName := pod.GetName()
 		podNS := pod.GetNamespace()
 		podNS := pod.GetNamespace()
 		podUID := string(pod.GetUID())
 		podUID := string(pod.GetUID())
 
 
 		// Pod Labels
 		// Pod Labels
-		labelNames, labelValues := prom.KubePrependQualifierToLabels(pod.GetLabels(), "label_")
-		ch <- newKubePodLabelsMetric("kube_pod_labels", podNS, podName, podUID, labelNames, labelValues)
+		if _, ok := disabledMetrics["kube_pod_labels"]; !ok {
+			labelNames, labelValues := prom.KubePrependQualifierToLabels(pod.GetLabels(), "label_")
+			ch <- newKubePodLabelsMetric("kube_pod_labels", podNS, podName, podUID, labelNames, labelValues)
+		}
 
 
 		// Owner References
 		// Owner References
-		for _, owner := range pod.OwnerReferences {
-			ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+		if _, ok := disabledMetrics["kube_pod_owner"]; !ok {
+			for _, owner := range pod.OwnerReferences {
+				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+			}
 		}
 		}
 	}
 	}
 }
 }

+ 141 - 87
pkg/metrics/podmetrics.go

@@ -18,25 +18,34 @@ import (
 // KubecostPodCollector is a prometheus collector that emits pod metrics
 // KubecostPodCollector is a prometheus collector that emits pod metrics
 type KubecostPodCollector struct {
 type KubecostPodCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kpmc KubecostPodCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kpmc KubecostPodCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_pod_annotations", "All annotations for each pod prefix with annotation_", []string{}, nil)
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_annotations"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_annotations", "All annotations for each pod prefix with annotation_", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpmc KubecostPodCollector) Collect(ch chan<- prometheus.Metric) {
 func (kpmc KubecostPodCollector) Collect(ch chan<- prometheus.Metric) {
-	pods := kpmc.KubeClusterCache.GetAllPods()
-	for _, pod := range pods {
-		podName := pod.GetName()
-		podNS := pod.GetNamespace()
-
-		// Pod Annotations
-		labels, values := prom.KubeAnnotationsToLabels(pod.Annotations)
-		if len(labels) > 0 {
-			ch <- newPodAnnotationMetric("kube_pod_annotations", podNS, podName, labels, values)
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_annotations"]; !ok {
+		pods := kpmc.KubeClusterCache.GetAllPods()
+		for _, pod := range pods {
+			podName := pod.GetName()
+			podNS := pod.GetNamespace()
+
+			// Pod Annotations
+			labels, values := prom.KubeAnnotationsToLabels(pod.Annotations)
+			if len(labels) > 0 {
+				ch <- newPodAnnotationMetric("kube_pod_annotations", podNS, podName, labels, values)
+			}
 		}
 		}
 	}
 	}
 }
 }
@@ -48,26 +57,51 @@ func (kpmc KubecostPodCollector) Collect(ch chan<- prometheus.Metric) {
 // KubePodMetricCollector is a prometheus collector that emits pod metrics
 // KubePodMetricCollector is a prometheus collector that emits pod metrics
 type KubePodCollector struct {
 type KubePodCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kpmc KubePodCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kpmc KubePodCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_pod_labels", "All labels for each pod prefixed with label_", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_owner", "Information about the Pod's owner", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_status_running", "Describes whether the container is currently in running state", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_status_terminated_reason", "Describes the reason the container is currently in terminated state.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_status_restarts_total", "The number of container restarts per container.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_resource_requests", "The number of requested resource by a container", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_resource_limits", "The number of requested limit resource by a container.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_resource_limits_cpu_cores", "The number of requested limit cpu core resource by a container.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_container_resource_limits_memory_bytes", "The number of requested limit memory resource by a container.", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_pod_status_phase", "The pods current phase.", []string{}, nil)
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_pod_labels"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_labels", "All labels for each pod prefixed with label_", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_owner"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_owner", "Information about the Pod's owner", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_status_running"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_status_running", "Describes whether the container is currently in running state", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_status_terminated_reason"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_status_terminated_reason", "Describes the reason the container is currently in terminated state.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_status_restarts_total"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_status_restarts_total", "The number of container restarts per container.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_resource_requests"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_resource_requests", "The number of requested resource by a container", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_resource_limits"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_resource_limits", "The number of requested limit resource by a container.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_resource_limits_cpu_cores"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_resource_limits_cpu_cores", "The number of requested limit cpu core resource by a container.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_container_resource_limits_memory_bytes"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_container_resource_limits_memory_bytes", "The number of requested limit memory resource by a container.", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_pod_status_phase"]; !ok {
+		ch <- prometheus.NewDesc("kube_pod_status_phase", "The pods current phase.", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpmc KubePodCollector) Collect(ch chan<- prometheus.Metric) {
 func (kpmc KubePodCollector) Collect(ch chan<- prometheus.Metric) {
 	pods := kpmc.KubeClusterCache.GetAllPods()
 	pods := kpmc.KubeClusterCache.GetAllPods()
+	disabledMetrics := kpmc.metricsConfig.GetDisabledMetricsMap()
+
 	for _, pod := range pods {
 	for _, pod := range pods {
 		podName := pod.GetName()
 		podName := pod.GetName()
 		podNS := pod.GetNamespace()
 		podNS := pod.GetNamespace()
@@ -76,71 +110,86 @@ func (kpmc KubePodCollector) Collect(ch chan<- prometheus.Metric) {
 		phase := pod.Status.Phase
 		phase := pod.Status.Phase
 
 
 		// Pod Status Phase
 		// Pod Status Phase
-		if phase != "" {
-			phases := []struct {
-				v bool
-				n string
-			}{
-				{phase == v1.PodPending, string(v1.PodPending)},
-				{phase == v1.PodSucceeded, string(v1.PodSucceeded)},
-				{phase == v1.PodFailed, string(v1.PodFailed)},
-				{phase == v1.PodUnknown, string(v1.PodUnknown)},
-				{phase == v1.PodRunning, string(v1.PodRunning)},
-			}
+		if _, ok := disabledMetrics["kube_pod_status_phase"]; !ok {
+			if phase != "" {
+				phases := []struct {
+					v bool
+					n string
+				}{
+					{phase == v1.PodPending, string(v1.PodPending)},
+					{phase == v1.PodSucceeded, string(v1.PodSucceeded)},
+					{phase == v1.PodFailed, string(v1.PodFailed)},
+					{phase == v1.PodUnknown, string(v1.PodUnknown)},
+					{phase == v1.PodRunning, string(v1.PodRunning)},
+				}
 
 
-			for _, p := range phases {
-				ch <- newKubePodStatusPhaseMetric("kube_pod_status_phase", podNS, podName, podUID, p.n, boolFloat64(p.v))
+				for _, p := range phases {
+					ch <- newKubePodStatusPhaseMetric("kube_pod_status_phase", podNS, podName, podUID, p.n, boolFloat64(p.v))
+				}
 			}
 			}
 		}
 		}
 
 
 		// Pod Labels
 		// Pod Labels
-		labelNames, labelValues := prom.KubePrependQualifierToLabels(pod.GetLabels(), "label_")
-		ch <- newKubePodLabelsMetric("kube_pod_labels", podNS, podName, podUID, labelNames, labelValues)
+		if _, ok := disabledMetrics["kube_pod_labels"]; !ok {
+			labelNames, labelValues := prom.KubePrependQualifierToLabels(pod.GetLabels(), "label_")
+			ch <- newKubePodLabelsMetric("kube_pod_labels", podNS, podName, podUID, labelNames, labelValues)
+		}
 
 
 		// Owner References
 		// Owner References
-		for _, owner := range pod.OwnerReferences {
-			ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+		if _, ok := disabledMetrics["kube_pod_owner"]; !ok {
+			for _, owner := range pod.OwnerReferences {
+				ch <- newKubePodOwnerMetric("kube_pod_owner", podNS, podName, owner.Name, owner.Kind, owner.Controller != nil)
+			}
 		}
 		}
 
 
 		// Container Status
 		// Container Status
 		for _, status := range pod.Status.ContainerStatuses {
 		for _, status := range pod.Status.ContainerStatuses {
-			ch <- newKubePodContainerStatusRestartsTotalMetric("kube_pod_container_status_restarts_total", podNS, podName, podUID, status.Name, float64(status.RestartCount))
+			if _, ok := disabledMetrics["kube_pod_container_status_restarts_total"]; !ok {
+				ch <- newKubePodContainerStatusRestartsTotalMetric("kube_pod_container_status_restarts_total", podNS, podName, podUID, status.Name, float64(status.RestartCount))
+			}
 			if status.State.Running != nil {
 			if status.State.Running != nil {
-				ch <- newKubePodContainerStatusRunningMetric("kube_pod_container_status_running", podNS, podName, podUID, status.Name)
+				if _, ok := disabledMetrics["kube_pod_container_status_running"]; !ok {
+					ch <- newKubePodContainerStatusRunningMetric("kube_pod_container_status_running", podNS, podName, podUID, status.Name)
+				}
 			}
 			}
 
 
 			if status.State.Terminated != nil {
 			if status.State.Terminated != nil {
-				ch <- newKubePodContainerStatusTerminatedReasonMetric(
-					"kube_pod_container_status_terminated_reason",
-					podNS,
-					podName,
-					podUID,
-					status.Name,
-					status.State.Terminated.Reason)
+				if _, ok := disabledMetrics["kube_pod_container_status_terminated_reason"]; !ok {
+					ch <- newKubePodContainerStatusTerminatedReasonMetric(
+						"kube_pod_container_status_terminated_reason",
+						podNS,
+						podName,
+						podUID,
+						status.Name,
+						status.State.Terminated.Reason)
+				}
 			}
 			}
 		}
 		}
 
 
 		for _, container := range pod.Spec.Containers {
 		for _, container := range pod.Spec.Containers {
-			// Requests
-			for resourceName, quantity := range container.Resources.Requests {
-				resource, unit, value := toResourceUnitValue(resourceName, quantity)
 
 
-				// failed to parse the resource type
-				if resource == "" {
-					log.DedupedWarningf(5, "Failed to parse resource units and quantity for resource: %s", resourceName)
-					continue
+			// Requests
+			if _, ok := disabledMetrics["kube_pod_container_resource_requests"]; !ok {
+				for resourceName, quantity := range container.Resources.Requests {
+					resource, unit, value := toResourceUnitValue(resourceName, quantity)
+
+					// failed to parse the resource type
+					if resource == "" {
+						log.DedupedWarningf(5, "Failed to parse resource units and quantity for resource: %s", resourceName)
+						continue
+					}
+
+					ch <- newKubePodContainerResourceRequestsMetric(
+						"kube_pod_container_resource_requests",
+						podNS,
+						podName,
+						podUID,
+						container.Name,
+						node,
+						resource,
+						unit,
+						value)
 				}
 				}
-
-				ch <- newKubePodContainerResourceRequestsMetric(
-					"kube_pod_container_resource_requests",
-					podNS,
-					podName,
-					podUID,
-					container.Name,
-					node,
-					resource,
-					unit,
-					value)
 			}
 			}
 
 
 			// Limits
 			// Limits
@@ -154,37 +203,42 @@ func (kpmc KubePodCollector) Collect(ch chan<- prometheus.Metric) {
 				}
 				}
 
 
 				// KSM v1 Emission
 				// KSM v1 Emission
-				if resource == "cpu" {
-					ch <- newKubePodContainerResourceLimitsCPUCoresMetric(
-						"kube_pod_container_resource_limits_cpu_cores",
-						podNS,
-						podName,
-						podUID,
-						container.Name,
-						node,
-						value)
+				if _, ok := disabledMetrics["kube_pod_container_resource_limits_cpu_cores"]; !ok {
+					if resource == "cpu" {
+						ch <- newKubePodContainerResourceLimitsCPUCoresMetric(
+							"kube_pod_container_resource_limits_cpu_cores",
+							podNS,
+							podName,
+							podUID,
+							container.Name,
+							node,
+							value)
+					}
+				}
+				if _, ok := disabledMetrics["kube_pod_container_resource_limits_memory_bytes"]; !ok {
+					if resource == "memory" {
+						ch <- newKubePodContainerResourceLimitsMemoryBytesMetric(
+							"kube_pod_container_resource_limits_memory_bytes",
+							podNS,
+							podName,
+							podUID,
+							container.Name,
+							node,
+							value)
+					}
 				}
 				}
-				if resource == "memory" {
-					ch <- newKubePodContainerResourceLimitsMemoryBytesMetric(
-						"kube_pod_container_resource_limits_memory_bytes",
+				if _, ok := disabledMetrics["kube_pod_container_resource_limits"]; !ok {
+					ch <- newKubePodContainerResourceLimitsMetric(
+						"kube_pod_container_resource_limits",
 						podNS,
 						podNS,
 						podName,
 						podName,
 						podUID,
 						podUID,
 						container.Name,
 						container.Name,
 						node,
 						node,
+						resource,
+						unit,
 						value)
 						value)
 				}
 				}
-
-				ch <- newKubePodContainerResourceLimitsMetric(
-					"kube_pod_container_resource_limits",
-					podNS,
-					podName,
-					podUID,
-					container.Name,
-					node,
-					resource,
-					unit,
-					value)
 			}
 			}
 		}
 		}
 	}
 	}

+ 17 - 4
pkg/metrics/pvcmetrics.go

@@ -14,25 +14,38 @@ import (
 // KubePVCCollector is a prometheus collector that generates pvc sourced metrics
 // KubePVCCollector is a prometheus collector that generates pvc sourced metrics
 type KubePVCCollector struct {
 type KubePVCCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics collected by this Collector.
 // Describe sends the super-set of all possible descriptors of metrics collected by this Collector.
 func (kpvc KubePVCCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kpvc KubePVCCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_persistentvolumeclaim_resource_requests_storage_bytes", "The pvc storage resource requests in bytes", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_persistentvolumeclaim_info", "The pvc storage resource requests in bytes", []string{}, nil)
+	disabledMetrics := kpvc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_persistentvolumeclaim_resource_requests_storage_bytes"]; !ok {
+		ch <- prometheus.NewDesc("kube_persistentvolumeclaim_resource_requests_storage_bytes", "The pvc storage resource requests in bytes", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_persistentvolumeclaim_info"]; !ok {
+		ch <- prometheus.NewDesc("kube_persistentvolumeclaim_info", "Information about the persistent volume claim", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpvc KubePVCCollector) Collect(ch chan<- prometheus.Metric) {
 func (kpvc KubePVCCollector) Collect(ch chan<- prometheus.Metric) {
 	pvcs := kpvc.KubeClusterCache.GetAllPersistentVolumeClaims()
 	pvcs := kpvc.KubeClusterCache.GetAllPersistentVolumeClaims()
+	disabledMetrics := kpvc.metricsConfig.GetDisabledMetricsMap()
+
 	for _, pvc := range pvcs {
 	for _, pvc := range pvcs {
 		storageClass := getPersistentVolumeClaimClass(pvc)
 		storageClass := getPersistentVolumeClaimClass(pvc)
 		volume := pvc.Spec.VolumeName
 		volume := pvc.Spec.VolumeName
 
 
-		ch <- newKubePVCInfoMetric("kube_persistentvolumeclaim_info", pvc.Name, pvc.Namespace, storageClass, volume)
+		if _, ok := disabledMetrics["kube_persistentvolumeclaim_info"]; !ok {
+			ch <- newKubePVCInfoMetric("kube_persistentvolumeclaim_info", pvc.Name, pvc.Namespace, storageClass, volume)
+		}
 
 
 		if storage, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok {
 		if storage, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok {
-			ch <- newKubePVCResourceRequestsStorageBytesMetric("kube_persistentvolumeclaim_resource_requests_storage_bytes", pvc.Name, pvc.Namespace, float64(storage.Value()))
+			if _, ok := disabledMetrics["kube_persistentvolumeclaim_resource_requests_storage_bytes"]; !ok {
+				ch <- newKubePVCResourceRequestsStorageBytesMetric("kube_persistentvolumeclaim_resource_requests_storage_bytes", pvc.Name, pvc.Namespace, float64(storage.Value()))
+			}
 		}
 		}
 	}
 	}
 }
 }

+ 33 - 20
pkg/metrics/pvmetrics.go

@@ -14,41 +14,54 @@ import (
 // KubePVCollector is a prometheus collector that generates PV metrics
 // KubePVCollector is a prometheus collector that generates PV metrics
 type KubePVCollector struct {
 type KubePVCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (kpvcb KubePVCollector) Describe(ch chan<- *prometheus.Desc) {
 func (kpvcb KubePVCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("kube_persistentvolume_capacity_bytes", "The pv storage capacity in bytes", []string{}, nil)
-	ch <- prometheus.NewDesc("kube_persistentvolume_status_phase", "The phase indicates if a volume is available, bound to a claim, or released by a claim.", []string{}, nil)
+	disabledMetrics := kpvcb.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["kube_persistentvolume_capacity_bytes"]; !ok {
+		ch <- prometheus.NewDesc("kube_persistentvolume_capacity_bytes", "The pv storage capacity in bytes", []string{}, nil)
+	}
+	if _, ok := disabledMetrics["kube_persistentvolume_status_phase"]; !ok {
+		ch <- prometheus.NewDesc("kube_persistentvolume_status_phase", "The phase indicates if a volume is available, bound to a claim, or released by a claim.", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (kpvcb KubePVCollector) Collect(ch chan<- prometheus.Metric) {
 func (kpvcb KubePVCollector) Collect(ch chan<- prometheus.Metric) {
 	pvs := kpvcb.KubeClusterCache.GetAllPersistentVolumes()
 	pvs := kpvcb.KubeClusterCache.GetAllPersistentVolumes()
-	for _, pv := range pvs {
-		phase := pv.Status.Phase
-		if phase != "" {
-			phases := []struct {
-				v bool
-				n string
-			}{
-				{phase == v1.VolumePending, string(v1.VolumePending)},
-				{phase == v1.VolumeAvailable, string(v1.VolumeAvailable)},
-				{phase == v1.VolumeBound, string(v1.VolumeBound)},
-				{phase == v1.VolumeReleased, string(v1.VolumeReleased)},
-				{phase == v1.VolumeFailed, string(v1.VolumeFailed)},
-			}
+	disabledMetrics := kpvcb.metricsConfig.GetDisabledMetricsMap()
 
 
-			for _, p := range phases {
-				ch <- newKubePVStatusPhaseMetric("kube_persistentvolume_status_phase", pv.Name, p.n, boolFloat64(p.v))
+	for _, pv := range pvs {
+		if _, ok := disabledMetrics["kube_persistentvolume_status_phase"]; !ok {
+			phase := pv.Status.Phase
+			if phase != "" {
+				phases := []struct {
+					v bool
+					n string
+				}{
+					{phase == v1.VolumePending, string(v1.VolumePending)},
+					{phase == v1.VolumeAvailable, string(v1.VolumeAvailable)},
+					{phase == v1.VolumeBound, string(v1.VolumeBound)},
+					{phase == v1.VolumeReleased, string(v1.VolumeReleased)},
+					{phase == v1.VolumeFailed, string(v1.VolumeFailed)},
+				}
+
+				for _, p := range phases {
+					ch <- newKubePVStatusPhaseMetric("kube_persistentvolume_status_phase", pv.Name, p.n, boolFloat64(p.v))
+				}
 			}
 			}
 		}
 		}
 
 
-		storage := pv.Spec.Capacity[v1.ResourceStorage]
-		m := newKubePVCapacityBytesMetric("kube_persistentvolume_capacity_bytes", pv.Name, float64(storage.Value()))
+		if _, ok := disabledMetrics["kube_persistentvolume_capacity_bytes"]; !ok {
+			storage := pv.Spec.Capacity[v1.ResourceStorage]
+			m := newKubePVCapacityBytesMetric("kube_persistentvolume_capacity_bytes", pv.Name, float64(storage.Value()))
 
 
-		ch <- m
+			ch <- m
+		}
 	}
 	}
 }
 }
 
 

+ 18 - 9
pkg/metrics/servicemetrics.go

@@ -15,25 +15,34 @@ import (
 // KubecostServiceCollector is a prometheus collector that generates service sourced metrics.
 // KubecostServiceCollector is a prometheus collector that generates service sourced metrics.
 type KubecostServiceCollector struct {
 type KubecostServiceCollector struct {
 	KubeClusterCache clustercache.ClusterCache
 	KubeClusterCache clustercache.ClusterCache
+	metricsConfig    MetricsConfig
 }
 }
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (sc KubecostServiceCollector) Describe(ch chan<- *prometheus.Desc) {
 func (sc KubecostServiceCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("service_selector_labels", "service selector labels", []string{}, nil)
+	disabledMetrics := sc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["service_selector_labels"]; !ok {
+		ch <- prometheus.NewDesc("service_selector_labels", "service selector labels", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (sc KubecostServiceCollector) Collect(ch chan<- prometheus.Metric) {
 func (sc KubecostServiceCollector) Collect(ch chan<- prometheus.Metric) {
-	svcs := sc.KubeClusterCache.GetAllServices()
-	for _, svc := range svcs {
-		serviceName := svc.GetName()
-		serviceNS := svc.GetNamespace()
+	disabledMetrics := sc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["service_selector_labels"]; !ok {
+		svcs := sc.KubeClusterCache.GetAllServices()
+		for _, svc := range svcs {
+			serviceName := svc.GetName()
+			serviceNS := svc.GetNamespace()
 
 
-		labels, values := prom.KubeLabelsToLabels(svc.Spec.Selector)
-		if len(labels) > 0 {
-			m := newServiceSelectorLabelsMetric(serviceName, serviceNS, "service_selector_labels", labels, values)
-			ch <- m
+			labels, values := prom.KubeLabelsToLabels(svc.Spec.Selector)
+			if len(labels) > 0 {
+				m := newServiceSelectorLabelsMetric(serviceName, serviceNS, "service_selector_labels", labels, values)
+				ch <- m
+			}
 		}
 		}
 	}
 	}
 }
 }

+ 18 - 9
pkg/metrics/statefulsetmetrics.go

@@ -15,25 +15,34 @@ import (
// KubecostStatefulsetCollector is a prometheus collector that generates
// statefulset match-label metrics, honoring the disabled-metrics list held
// in its metrics configuration.
type KubecostStatefulsetCollector struct {
	// KubeClusterCache provides cached access to cluster objects
	// (statefulsets) so Collect does not hit the API server directly.
	KubeClusterCache clustercache.ClusterCache
	// metricsConfig carries the metric-emission configuration, including
	// which metrics are disabled.
	metricsConfig    MetricsConfig
}
 
 
 // Describe sends the super-set of all possible descriptors of metrics
 // Describe sends the super-set of all possible descriptors of metrics
 // collected by this Collector.
 // collected by this Collector.
 func (sc KubecostStatefulsetCollector) Describe(ch chan<- *prometheus.Desc) {
 func (sc KubecostStatefulsetCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- prometheus.NewDesc("statefulSet_match_labels", "statfulSet match labels", []string{}, nil)
+	disabledMetrics := sc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["statefulSet_match_labels"]; !ok {
+		ch <- prometheus.NewDesc("statefulSet_match_labels", "statfulSet match labels", []string{}, nil)
+	}
 }
 }
 
 
 // Collect is called by the Prometheus registry when collecting metrics.
 // Collect is called by the Prometheus registry when collecting metrics.
 func (sc KubecostStatefulsetCollector) Collect(ch chan<- prometheus.Metric) {
 func (sc KubecostStatefulsetCollector) Collect(ch chan<- prometheus.Metric) {
-	ds := sc.KubeClusterCache.GetAllStatefulSets()
-	for _, statefulset := range ds {
-		statefulsetName := statefulset.GetName()
-		statefulsetNS := statefulset.GetNamespace()
+	disabledMetrics := sc.metricsConfig.GetDisabledMetricsMap()
+
+	if _, ok := disabledMetrics["statefulSet_match_labels"]; !ok {
+		ds := sc.KubeClusterCache.GetAllStatefulSets()
+		for _, statefulset := range ds {
+			statefulsetName := statefulset.GetName()
+			statefulsetNS := statefulset.GetNamespace()
 
 
-		labels, values := prom.KubeLabelsToLabels(statefulset.Spec.Selector.MatchLabels)
-		if len(labels) > 0 {
-			m := newStatefulsetMatchLabelsMetric(statefulsetName, statefulsetNS, "statefulSet_match_labels", labels, values)
-			ch <- m
+			labels, values := prom.KubeLabelsToLabels(statefulset.Spec.Selector.MatchLabels)
+			if len(labels) > 0 {
+				m := newStatefulsetMatchLabelsMetric(statefulsetName, statefulsetNS, "statefulSet_match_labels", labels, values)
+				ch <- m
+			}
 		}
 		}
 	}
 	}
 }
 }