AjayTripathy committed 6 years ago
commit 3d4b6c95fd
5 files changed, 271 additions and 13 deletions

  1. cloud/awsprovider.go (+3 -2)
  2. cloud/provider.go (+39 -4)
  3. costmodel/containeruptime.go (+179 -0)
  4. costmodel/costmodel.go (+29 -6)
  5. main.go (+21 -1)
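Summary: this commit downgrades a hard error and a noisy log line in the AWS provider to leveled klog messages, adds GPU awareness to the custom provider (new pricing config fields, a GPU-aware node key, and a real PVPricing implementation), introduces a pod informer that tracks container lifecycles, and exposes container uptimes both as a container_uptime_seconds gauge and via a new /podUptimes endpoint.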

cloud/awsprovider.go (+3 -2)

@@ -451,7 +451,7 @@ func (aws *AWS) DownloadPricingData() error {
 	aws.ServiceKeySecret = c.ServiceKeySecret

 	if len(aws.SpotDataBucket) != 0 && len(aws.ProjectID) == 0 {
-		return fmt.Errorf("using SpotDataBucket \"%s\" without ProjectID will not end well", aws.SpotDataBucket)
+		klog.V(1).Infof("using SpotDataBucket \"%s\" without ProjectID will not end well", aws.SpotDataBucket)
 	}
 	nodeList, err := aws.Clientset.CoreV1().Nodes().List(metav1.ListOptions{})
 	if err != nil {
@@ -480,7 +480,7 @@ func (aws *AWS) DownloadPricingData() error {
 	for _, pv := range pvList.Items {
 		params, ok := storageClassMap[pv.Spec.StorageClassName]
 		if !ok {
-			klog.Infof("Unable to find params for storageClassName %s, falling back to default pricing", pv.Name)
+			klog.V(2).Infof("Unable to find params for storageClassName %s, falling back to default pricing", pv.Name)
 			continue
 		}
 		key := aws.GetPVKey(&pv, params)
@@ -662,6 +662,7 @@ func (aws *AWS) createNode(terms *AWSProductTerms, usageType string, k Key) (*No
 			} else {
 				klog.V(2).Infof("Spot data for node %s is missing", k.ID())
 			}
+			klog.V(1).Infof("SPOT COST FOR %s: %s", k.Features, spotcost)
 			return &Node{
 				Cost:         spotcost,
 				VCPU:         terms.VCpu,
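Reviewer note: the awsprovider.go changes replace a hard error and an unconditional log line with leveled klog calls, so both messages now appear only when the binary runs at a sufficient -v verbosity. (The new spot-cost line also passes the method value k.Features rather than calling k.Features(), so it will print a function value, not the feature string.) A minimal sketch of how klog's level gating behaves, with an illustrative -v setting:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // register klog's flags, including -v, on the default FlagSet
	flag.Set("v", "1")  // as if the process were started with -v=1
	flag.Parse()

	klog.V(1).Info("emitted: level 1 is within -v=1")  // like the new SpotDataBucket warning
	klog.V(2).Info("suppressed: level 2 exceeds -v=1") // like the storageClassName fallback message
	klog.Flush()
}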

cloud/provider.go (+39 -4)

@@ -152,9 +152,12 @@ type CustomPricing struct {
 	RAM                 string `json:"RAM"`
 	SpotRAM             string `json:"spotRAM"`
 	GPU                 string `json:"GPU"`
+	SpotGPU             string `json:"spotGPU"`
 	Storage             string `json:"storage"`
 	SpotLabel           string `json:"spotLabel,omitempty"`
 	SpotLabelValue      string `json:"spotLabelValue,omitempty"`
+	GpuLabel            string `json:"gpuLabel,omitempty"`
+	GpuLabelValue       string `json:"gpuLabelValue,omitempty"`
 	ServiceKeyName      string `json:"awsServiceKeyName,omitempty"`
 	ServiceKeySecret    string `json:"awsServiceKeySecret,omitempty"`
 	SpotDataRegion      string `json:"awsSpotDataRegion,omitempty"`
@@ -200,6 +203,7 @@ func SetCustomPricingField(obj *CustomPricing, name string, value string) error
 type NodePrice struct {
 	CPU string
 	RAM string
+	GPU string
 }

 type CustomProvider struct {
@@ -207,6 +211,8 @@ type CustomProvider struct {
 	Pricing                 map[string]*NodePrice
 	SpotLabel               string
 	SpotLabelValue          string
+	GPULabel                string
+	GPULabelValue           string
 	DownloadPricingDataLock sync.RWMutex
 }

@@ -222,7 +228,7 @@ func (*CustomProvider) GetManagementPlatform() (string, error) {
 	return "", nil
 	return "", nil
 }
 }
 
 
-func (*CustomProvider) UpdateConfig(r io.Reader, updateType string) (*CustomPricing, error) {
+func (cprov *CustomProvider) UpdateConfig(r io.Reader, updateType string) (*CustomPricing, error) {
 	c, err := GetDefaultPricingData("default.json")
 	c, err := GetDefaultPricingData("default.json")
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
@@ -254,7 +260,7 @@ func (*CustomProvider) UpdateConfig(r io.Reader, updateType string) (*CustomPric
 	if err != nil {
 		return nil, err
 	}
-
+	defer cprov.DownloadPricingData()
 	return c, nil

 }
@@ -285,12 +291,19 @@ func (c *CustomProvider) NodePricing(key Key) (*Node, error) {
 	defer c.DownloadPricingDataLock.RUnlock()

 	k := key.Features()
+	var gpuCount string
 	if _, ok := c.Pricing[k]; !ok {
 		k = "default"
 	}
+	if key.GPUType() != "" {
+		k += ",gpu"    // TODO: support multiple custom gpu types.
+		gpuCount = "1" // TODO: support more than one gpu.
+	}
 	return &Node{
 		VCPUCost: c.Pricing[k].CPU,
 		RAMCost:  c.Pricing[k].RAM,
+		GPUCost:  c.Pricing[k].GPU,
+		GPU:      gpuCount,
 	}, nil
 }

@@ -306,6 +319,10 @@ func (c *CustomProvider) DownloadPricingData() error {
 	if err != nil {
 		return err
 	}
+	c.SpotLabel = p.SpotLabel
+	c.SpotLabelValue = p.SpotLabelValue
+	c.GPULabel = p.GpuLabel
+	c.GPULabelValue = p.GpuLabelValue
 	c.Pricing["default"] = &NodePrice{
 		CPU: p.CPU,
 		RAM: p.RAM,
@@ -314,16 +331,26 @@ func (c *CustomProvider) DownloadPricingData() error {
 		CPU: p.SpotCPU,
 		RAM: p.SpotRAM,
 	}
+	c.Pricing["default,gpu"] = &NodePrice{
+		CPU: p.CPU,
+		RAM: p.RAM,
+		GPU: p.GPU,
+	}
 	return nil
 }

 type customProviderKey struct {
 	SpotLabel      string
 	SpotLabelValue string
+	GPULabel       string
+	GPULabelValue  string
 	Labels         map[string]string
 }

 func (c *customProviderKey) GPUType() string {
+	if t, ok := c.Labels[c.GPULabel]; ok {
+		return t
+	}
 	return ""
 }

@@ -342,6 +369,8 @@ func (c *CustomProvider) GetKey(labels map[string]string) Key {
 	return &customProviderKey{
 		SpotLabel:      c.SpotLabel,
 		SpotLabelValue: c.SpotLabelValue,
+		GPULabel:       c.GPULabel,
+		GPULabelValue:  c.GPULabelValue,
 		Labels:         labels,
 	}
 }
@@ -357,8 +386,14 @@ func (*CustomProvider) QuerySQL(query string) ([]byte, error) {
 	return nil, nil
 }

-func (*CustomProvider) PVPricing(pvk PVKey) (*PV, error) {
-	return nil, nil
+func (c *CustomProvider) PVPricing(pvk PVKey) (*PV, error) {
+	cpricing, err := GetDefaultPricingData("default")
+	if err != nil {
+		return nil, err
+	}
+	return &PV{
+		Cost: cpricing.Storage,
+	}, nil
 }

 func (*CustomProvider) GetPVKey(pv *v1.PersistentVolume, parameters map[string]string) PVKey {
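Reviewer note: the new CustomPricing fields are populated from the provider's default.json through GetDefaultPricingData, and SpotGPU is declared here but not yet wired into a NodePrice (the "default,gpu" entry uses the on-demand GPU price). A minimal sketch of how the added GPU keys round-trip through the struct; the struct below is abridged to the GPU-related fields and the values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// Abridged copy of cloud.CustomPricing, keeping only the GPU-related fields
// added or read by this commit; the json tags match the diff above.
type CustomPricing struct {
	GPU           string `json:"GPU"`
	SpotGPU       string `json:"spotGPU"`
	GpuLabel      string `json:"gpuLabel,omitempty"`
	GpuLabelValue string `json:"gpuLabelValue,omitempty"`
}

func main() {
	// Illustrative values; real prices come from the provider's default.json.
	raw := []byte(`{
		"GPU": "0.95",
		"spotGPU": "0.31",
		"gpuLabel": "gpu.nvidia.com/class",
		"gpuLabelValue": "default"
	}`)
	var p CustomPricing
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	// customProviderKey.GPUType() returns the node label value stored under
	// p.GpuLabel, which switches NodePricing onto the "default,gpu" entry.
	fmt.Printf("GPU %s/hr (spot %s), matched on %s=%s\n",
		p.GPU, p.SpotGPU, p.GpuLabel, p.GpuLabelValue)
}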

costmodel/containeruptime.go (+179 -0)

@@ -0,0 +1,179 @@
+package costmodel
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/klog"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+)
+
+type Controller struct {
+	indexer  cache.Indexer
+	queue    workqueue.RateLimitingInterface
+	informer cache.Controller
+}
+
+func NewController(queue workqueue.RateLimitingInterface, indexer cache.Indexer, informer cache.Controller) *Controller {
+	return &Controller{
+		informer: informer,
+		indexer:  indexer,
+		queue:    queue,
+	}
+}
+
+func (c *Controller) processNextItem() bool {
+	// Wait until there is a new item in the working queue
+	key, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	// Tell the queue that we are done with processing this key. This unblocks the key for other workers
+	// This allows safe parallel processing because two pods with the same key are never processed in
+	// parallel.
+	defer c.queue.Done(key)
+
+	// Invoke the method containing the business logic
+	err := c.syncToPrometheus(key.(string))
+	// Handle the error if something went wrong during the execution of the business logic
+	c.handleErr(err, key)
+	return true
+}
+
+// syncToPrometheus is the business logic of the controller. In this controller it simply prints
+// information about the pod to stdout. In case an error happened, it has to simply return the error.
+// The retry logic should not be part of the business logic.
+func (c *Controller) syncToPrometheus(key string) error {
+	obj, exists, err := c.indexer.GetByKey(key)
+	if err != nil {
+		klog.Errorf("Fetching object with key %s from store failed with %v", key, err)
+		return err
+	}
+
+	if !exists {
+		// Below we will warm up our cache with a Pod, so that we will see a delete for one pod
+		klog.V(1).Infof("Pod %s does not exist anymore\n", key)
+	} else {
+		// Note that you also have to check the uid if you have a local controlled resource, which
+		// is dependent on the actual instance, to detect that a Pod was recreated with the same name
+		klog.V(1).Infof("Sync/Add/Update for Pod %s\n", obj.(*v1.Pod).GetName())
+	}
+	return nil
+}
+
+func (c *Controller) GetAll() []*v1.Pod {
+	objs := c.indexer.List()
+	var pods []*v1.Pod
+	for _, obj := range objs {
+		pods = append(pods, obj.(*v1.Pod))
+	}
+	return pods
+}
+
+// handleErr checks if an error happened and makes sure we will retry later.
+func (c *Controller) handleErr(err error, key interface{}) {
+	if err == nil {
+		// Forget about the #AddRateLimited history of the key on every successful synchronization.
+		// This ensures that future processing of updates for this key is not delayed because of
+		// an outdated error history.
+		c.queue.Forget(key)
+		return
+	}
+
+	// This controller retries 5 times if something goes wrong. After that, it stops trying.
+	if c.queue.NumRequeues(key) < 5 {
+		klog.Infof("Error syncing pod %v: %v", key, err)
+
+		// Re-enqueue the key rate limited. Based on the rate limiter on the
+		// queue and the re-enqueue history, the key will be processed later again.
+		c.queue.AddRateLimited(key)
+		return
+	}
+
+	c.queue.Forget(key)
+	// Report to an external entity that, even after several retries, we could not successfully process this key
+	runtime.HandleError(err)
+	klog.Infof("Dropping pod %q out of the queue: %v", key, err)
+}
+
+func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
+	defer runtime.HandleCrash()
+
+	// Let the workers stop when we are done
+	defer c.queue.ShutDown()
+	klog.Info("Starting Pod controller")
+
+	go c.informer.Run(stopCh)
+
+	// Wait for all involved caches to be synced, before processing items from the queue is started
+	if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
+		runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
+		return
+	}
+
+	for i := 0; i < threadiness; i++ {
+		go wait.Until(c.runWorker, time.Second, stopCh)
+	}
+
+	<-stopCh
+	klog.Info("Stopping Pod controller")
+}
+
+func (c *Controller) runWorker() {
+	for c.processNextItem() {
+	}
+}
+
+func ContainerUptimeWatcher(clientset kubernetes.Interface) {
+	podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", "", fields.Everything())
+
+	// create the workqueue
+	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+
+	// Bind the workqueue to a cache with the help of an informer. This way we make sure that
+	// whenever the cache is updated, the pod key is added to the workqueue.
+	// Note that when we finally process the item from the workqueue, we might see a newer version
+	// of the Pod than the version which was responsible for triggering the update.
+	indexer, informer := cache.NewIndexerInformer(podListWatcher, &v1.Pod{}, 0, cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			key, err := cache.MetaNamespaceKeyFunc(obj)
+			if err == nil {
+				queue.Add(key)
+			}
+		},
+		UpdateFunc: func(old interface{}, new interface{}) {
+			key, err := cache.MetaNamespaceKeyFunc(new)
+			if err == nil {
+				queue.Add(key)
+			}
+		},
+		DeleteFunc: func(obj interface{}) {
+			// IndexerInformer uses a delta queue, therefore for deletes we have to use this
+			// key function.
+			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+			if err == nil {
+				queue.Add(key)
+			}
+		},
+	}, cache.Indexers{})
+
+	controller := NewController(queue, indexer, informer)
+
+	/*
+		podList, _ := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+		for _, pod := range podList.Items {
+			indexer.Add(&pod)
+		}
+	*/
+	// Now let's start the controller
+	stop := make(chan struct{})
+	//defer close(stop)
+	go controller.Run(1, stop)
+}
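Reviewer note: ContainerUptimeWatcher is fire-and-forget; it wires the informer to the workqueue, starts controller.Run on a goroutine, and returns immediately. This diff does not show a call site, so the sketch below of how a caller might start it is an assumption, including the costmodel import path:

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/kubecost/cost-model/costmodel" // assumed import path for this repo
)

func main() {
	// In-cluster config; an out-of-cluster caller would use clientcmd instead.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Starts the pod informer and one worker goroutine, then returns; the
	// stop channel is never closed, so the watcher runs for the life of
	// the process.
	costmodel.ContainerUptimeWatcher(clientset)
	select {} // block so the background goroutines keep running
}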

costmodel/costmodel.go (+29 -6)

@@ -206,6 +206,27 @@ func getUptimeData(qr interface{}) ([]*Vector, bool, error) {
 	return jobData, kubecostMetrics, nil
 }

+func ComputeUptimes(cli prometheusClient.Client) (map[string]float64, error) {
+	res, err := query(cli, `container_start_time_seconds{container_name != "POD",container_name != ""}`)
+	if err != nil {
+		return nil, err
+	}
+	vectors, err := getContainerMetricVector(res, false, 0)
+	if err != nil {
+		return nil, err
+	}
+	results := make(map[string]float64)
+	for key, vector := range vectors {
+		if err != nil {
+			return nil, err
+		}
+		val := vector[0].Value
+		uptime := time.Now().Sub(time.Unix(int64(val), 0)).Seconds()
+		results[key] = uptime
+	}
+	return results, nil
+}
+
 func ComputeCostData(cli prometheusClient.Client, clientset kubernetes.Interface, cloud costAnalyzerCloud.Provider, window string, offset string, filterNamespace string) (map[string]*CostData, error) {
 	queryRAMRequests := fmt.Sprintf(queryRAMRequestsStr, window, offset, window, offset)
 	queryRAMUsage := fmt.Sprintf(queryRAMUsageStr, window, offset, window, offset)
@@ -454,7 +475,7 @@ func ComputeCostData(cli prometheusClient.Client, clientset kubernetes.Interface
 		} else {
 			// The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
 			klog.V(4).Info("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
-			c, err := newContainerMetricFromKey(key)
+			c, err := NewContainerMetricFromKey(key)
 			if err != nil {
 				return nil, err
 			}
@@ -1152,7 +1173,7 @@ func ComputeCostDataRange(cli prometheusClient.Client, clientset kubernetes.Inte
 		} else {
 			// The container has been deleted. Not all information is sent to prometheus via ksm, so fill out what we can without k8s api
 			klog.V(4).Info("The container " + key + " has been deleted. Calculating allocation but resulting object will be missing data.")
-			c, _ := newContainerMetricFromKey(key)
+			c, _ := NewContainerMetricFromKey(key)
 			RAMReqV, ok := RAMReqMap[key]
 			if !ok {
 				klog.V(4).Info("no RAM requests for " + key)
@@ -1455,7 +1476,8 @@ func getPVInfoVector(qr interface{}) (map[string]*PersistentVolumeClaimData, err
 		}
 		pv, ok := metricMap["volumename"]
 		if !ok {
-			return nil, fmt.Errorf("Volumename field does not exist in data result vector")
+			klog.V(3).Infof("Warning: Unfulfilled claim %s: volumename field does not exist in data result vector", pvclaimStr)
+			pv = ""
 		}
 		pvStr, ok := pv.(string)
 		if !ok {
@@ -1588,7 +1610,7 @@ func (c *ContainerMetric) Key() string {
 	return c.Namespace + "," + c.PodName + "," + c.ContainerName + "," + c.NodeName
 }

-func newContainerMetricFromKey(key string) (*ContainerMetric, error) {
+func NewContainerMetricFromKey(key string) (*ContainerMetric, error) {
 	s := strings.Split(key, ",")
 	if len(s) == 4 {
 		return &ContainerMetric{
@@ -1654,11 +1676,12 @@ func newContainerMetricFromPrometheus(metrics map[string]interface{}) (*Containe
 	}
 	node, ok := metrics["node"]
 	if !ok {
-		return nil, fmt.Errorf("Prometheus vector does not have node name")
+		klog.V(4).Info("Prometheus vector does not have node name")
+		node = ""
 	}
 	nodeName, ok := node.(string)
 	if !ok {
-		return nil, fmt.Errorf("Prometheus vector does not have string nodename")
+		return nil, fmt.Errorf("Prometheus vector does not have string node")
 	}
 	return &ContainerMetric{
 		ContainerName: containerName,
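Reviewer note: ComputeUptimes queries container_start_time_seconds and derives each uptime as wall-clock now minus the container's start timestamp (time.Now().Sub, equivalent to time.Since); the err re-check inside its loop can never fire, since err is not reassigned there. The core arithmetic in isolation, with an illustrative timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	// container_start_time_seconds is a unix timestamp scraped from
	// Prometheus; this value is an illustrative sample.
	var startTimeSeconds float64 = 1560000000

	// Same computation as ComputeUptimes: now minus the container start time.
	uptime := time.Since(time.Unix(int64(startTimeSeconds), 0)).Seconds()
	fmt.Printf("container has been up for %.0f seconds\n", uptime)
}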

main.go (+21 -1)

@@ -52,6 +52,7 @@ type Accesses struct {
 	RAMAllocationRecorder         *prometheus.GaugeVec
 	CPUAllocationRecorder         *prometheus.GaugeVec
 	GPUAllocationRecorder         *prometheus.GaugeVec
+	ContainerUptimeRecorder       *prometheus.GaugeVec
 }

 type DataEnvelope struct {
@@ -306,6 +307,13 @@ func (p *Accesses) GetPrometheusMetadata(w http.ResponseWriter, _ *http.Request,
 	w.Write(wrapData(costModel.ValidatePrometheus(p.PrometheusClient)))
 }

+func (p *Accesses) PodUptimes(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	res, err := costModel.ComputeUptimes(p.PrometheusClient)
+	w.Write(wrapData(res, err))
+}
+
 func (a *Accesses) recordPrices() {
 	go func() {
 		for {
@@ -382,7 +390,11 @@ func (a *Accesses) recordPrices() {
 					c, _ := strconv.ParseFloat(cacPv.Cost, 64)
 					a.PersistentVolumePriceRecorder.WithLabelValues(pv.Name, pv.Name).Set(c)
 				}
-
+				containerUptime, _ := costModel.ComputeUptimes(a.PrometheusClient)
+				for key, uptime := range containerUptime {
+					container, _ := costModel.NewContainerMetricFromKey(key)
+					a.ContainerUptimeRecorder.WithLabelValues(container.Namespace, container.PodName, container.ContainerName).Set(uptime)
+				}
 			}
 			time.Sleep(time.Minute)
 		}
@@ -484,6 +496,11 @@ func main() {
 		Help: "container_gpu_allocation GPU used",
 		Help: "container_gpu_allocation GPU used",
 	}, []string{"namespace", "pod", "container", "instance", "node"})
 	}, []string{"namespace", "pod", "container", "instance", "node"})
 
 
+	ContainerUptimeRecorder := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "container_uptime_seconds",
+		Help: "container_uptime_seconds Seconds a container has been running",
+	}, []string{"namespace", "pod", "container"})
+
 	prometheus.MustRegister(cpuGv)
 	prometheus.MustRegister(ramGv)
 	prometheus.MustRegister(gpuGv)
@@ -491,6 +508,7 @@ func main() {
 	prometheus.MustRegister(pvGv)
 	prometheus.MustRegister(RAMAllocation)
 	prometheus.MustRegister(CPUAllocation)
+	prometheus.MustRegister(ContainerUptimeRecorder)

 	a := Accesses{
 		PrometheusClient:              promCli,
@@ -503,6 +521,7 @@ func main() {
 		RAMAllocationRecorder:         RAMAllocation,
 		CPUAllocationRecorder:         CPUAllocation,
 		GPUAllocationRecorder:         GPUAllocation,
+		ContainerUptimeRecorder:       ContainerUptimeRecorder,
 		PersistentVolumePriceRecorder: pvGv,
 	}

 
@@ -530,6 +549,7 @@ func main() {
 	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
 	router.GET("/validatePrometheus", a.GetPrometheusMetadata)
 	router.GET("/managementPlatform", a.ManagementPlatform)
 	router.GET("/managementPlatform", a.ManagementPlatform)
 	router.GET("/clusterInfo", a.ClusterInfo)
 	router.GET("/clusterInfo", a.ClusterInfo)
+	router.GET("/podUptimes", a.PodUptimes)
 
 
 	rootMux := http.NewServeMux()
 	rootMux := http.NewServeMux()
 	rootMux.Handle("/", router)
 	rootMux.Handle("/", router)