Sfoglia il codice sorgente

Implemented first version of node usage listing

jnfrati 5 anni fa
parent
commit
a9917fff8b

+ 159 - 0
internal/kubernetes/nodes/helpers.go

@@ -0,0 +1,159 @@
+package nodes
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+func getPodsTotalRequestsAndLimits(podList *corev1.PodList) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) {
+	reqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{}
+	for _, pod := range podList.Items {
+		podReqs, podLimits := PodRequestsAndLimits(&pod)
+		for podReqName, podReqValue := range podReqs {
+			if value, ok := reqs[podReqName]; !ok {
+				reqs[podReqName] = podReqValue.DeepCopy()
+			} else {
+				value.Add(podReqValue)
+				reqs[podReqName] = value
+			}
+		}
+		for podLimitName, podLimitValue := range podLimits {
+			if value, ok := limits[podLimitName]; !ok {
+				limits[podLimitName] = podLimitValue.DeepCopy()
+			} else {
+				value.Add(podLimitValue)
+				limits[podLimitName] = value
+			}
+		}
+	}
+	return
+}
+
+func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) {
+	reqs, limits = corev1.ResourceList{}, corev1.ResourceList{}
+	for _, container := range pod.Spec.Containers {
+		addResourceList(reqs, container.Resources.Requests)
+		addResourceList(limits, container.Resources.Limits)
+	}
+	// init containers define the minimum of any resource
+	for _, container := range pod.Spec.InitContainers {
+		maxResourceList(reqs, container.Resources.Requests)
+		maxResourceList(limits, container.Resources.Limits)
+	}
+
+	// Add overhead for running a pod to the sum of requests and to non-zero limits:
+	if pod.Spec.Overhead != nil {
+		addResourceList(reqs, pod.Spec.Overhead)
+
+		for name, quantity := range pod.Spec.Overhead {
+			if value, ok := limits[name]; ok && !value.IsZero() {
+				value.Add(quantity)
+				limits[name] = value
+			}
+		}
+	}
+	return
+}
+
+// addResourceList adds the resources in newList to list
+func addResourceList(list, new corev1.ResourceList) {
+	for name, quantity := range new {
+		if value, ok := list[name]; !ok {
+			list[name] = quantity.DeepCopy()
+		} else {
+			value.Add(quantity)
+			list[name] = value
+		}
+	}
+}
+
+// maxResourceList sets list to the greater of list/newList for every resource
+// either list
+func maxResourceList(list, new corev1.ResourceList) {
+	for name, quantity := range new {
+		if value, ok := list[name]; !ok {
+			list[name] = quantity.DeepCopy()
+			continue
+		} else {
+			if quantity.Cmp(value) > 0 {
+				list[name] = quantity.DeepCopy()
+			}
+		}
+	}
+}
+
+// func IsHugePageResourceName(name corev1.ResourceName) bool {
+// 	return strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix)
+// }
+
+// var standardContainerResources = sets.NewString(
+// 	string(corev1.ResourceCPU),
+// 	string(corev1.ResourceMemory),
+// 	string(corev1.ResourceEphemeralStorage),
+// )
+
+// func IsStandardContainerResourceName(str string) bool {
+// 	return standardContainerResources.Has(str) || IsHugePageResourceName(corev1.ResourceName(str))
+// }
+
+func DescribeNodeResource(nodeNonTerminatedPodsList *corev1.PodList, node *corev1.Node) *NodeUsage {
+	allocatable := node.Status.Capacity
+	if len(node.Status.Allocatable) > 0 {
+		allocatable = node.Status.Allocatable
+	}
+
+	reqs, limits := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList)
+	cpuReqs, cpuLimits, memoryReqs, memoryLimits, ephemeralstorageReqs, ephemeralstorageLimits :=
+		reqs[corev1.ResourceCPU], limits[corev1.ResourceCPU], reqs[corev1.ResourceMemory], limits[corev1.ResourceMemory], reqs[corev1.ResourceEphemeralStorage], limits[corev1.ResourceEphemeralStorage]
+	fractionCpuReqs := float64(0)
+	fractionCpuLimits := float64(0)
+	if allocatable.Cpu().MilliValue() != 0 {
+		fractionCpuReqs = float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
+		fractionCpuLimits = float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100
+	}
+	fractionMemoryReqs := float64(0)
+	fractionMemoryLimits := float64(0)
+	if allocatable.Memory().Value() != 0 {
+		fractionMemoryReqs = float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100
+		fractionMemoryLimits = float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100
+	}
+	fractionEphemeralStorageReqs := float64(0)
+	fractionEphemeralStorageLimits := float64(0)
+	if allocatable.StorageEphemeral().Value() != 0 {
+		fractionEphemeralStorageReqs = float64(ephemeralstorageReqs.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
+		fractionEphemeralStorageLimits = float64(ephemeralstorageLimits.Value()) / float64(allocatable.StorageEphemeral().Value()) * 100
+	}
+
+	// extResources := make([]string, 0, len(allocatable))
+	// hugePageResources := make([]string, 0, len(allocatable))
+	// for resource := range allocatable {
+	// 	if IsHugePageResourceName(resource) {
+	// 		hugePageResources = append(hugePageResources, string(resource))
+	// 	} else if !IsStandardContainerResourceName(string(resource)) && resource != corev1.ResourcePods {
+	// 		extResources = append(extResources, string(resource))
+	// 	}
+	// }
+
+	// sort.Strings(extResources)
+	// sort.Strings(hugePageResources)
+
+	// for _, resource := range hugePageResources {
+	// 	hugePageSizeRequests, hugePageSizeLimits, hugePageSizeAllocable := reqs[corev1.ResourceName(resource)], limits[corev1.ResourceName(resource)], allocatable[corev1.ResourceName(resource)]
+	// 	fractionHugePageSizeRequests := float64(0)
+	// 	fractionHugePageSizeLimits := float64(0)
+	// 	if hugePageSizeAllocable.Value() != 0 {
+	// 		fractionHugePageSizeRequests = float64(hugePageSizeRequests.Value()) / float64(hugePageSizeAllocable.Value()) * 100
+	// 		fractionHugePageSizeLimits = float64(hugePageSizeLimits.Value()) / float64(hugePageSizeAllocable.Value()) * 100
+	// 	}
+
+	// }
+
+	return &NodeUsage{
+		fractionCpuReqs,
+		fractionCpuLimits,
+		fractionMemoryReqs,
+		fractionMemoryLimits,
+		fractionEphemeralStorageReqs,
+		fractionEphemeralStorageLimits,
+	}
+}

+ 67 - 0
internal/kubernetes/nodes/nodes.go

@@ -0,0 +1,67 @@
+package nodes
+
+import (
+	"context"
+	"fmt"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
// NodeUsage holds, for a single node, the percentage of the node's allocatable
// resources consumed by pod requests and limits, as computed by
// DescribeNodeResource (values are used/allocatable*100 and can exceed 100
// when limits are overcommitted). Fields are unexported; use Externalize to
// obtain the JSON-serializable form.
type NodeUsage struct {
	fractionCpuReqs                float64 // CPU requests as % of allocatable CPU
	fractionCpuLimits              float64 // CPU limits as % of allocatable CPU
	fractionMemoryReqs             float64 // memory requests as % of allocatable memory
	fractionMemoryLimits           float64 // memory limits as % of allocatable memory
	fractionEphemeralStorageReqs   float64 // ephemeral-storage requests as % of allocatable
	fractionEphemeralStorageLimits float64 // ephemeral-storage limits as % of allocatable
}
+
// NodeWithUsageData is the exported, JSON-serializable view of a node's
// resource usage: the node name plus each usage fraction (a percentage of the
// node's allocatable resources) carried over from NodeUsage.
type NodeWithUsageData struct {
	Name                           string  `json:"name"`
	FractionCpuReqs                float64 `json:"cpu_reqs"`
	FractionCpuLimits              float64 `json:"cpu_limits"`
	FractionMemoryReqs             float64 `json:"memory_reqs"`
	FractionMemoryLimits           float64 `json:"memory_limits"`
	FractionEphemeralStorageReqs   float64 `json:"ephemeral_storage_reqs"`
	FractionEphemeralStorageLimits float64 `json:"ephemeral_storage_limits"`
}
+
+func (nu *NodeUsage) Externalize(node *v1.Node) *NodeWithUsageData {
+	return &NodeWithUsageData{
+		Name:                           node.Name,
+		FractionCpuReqs:                nu.fractionCpuReqs,
+		FractionCpuLimits:              nu.fractionCpuLimits,
+		FractionMemoryReqs:             nu.fractionMemoryReqs,
+		FractionMemoryLimits:           nu.fractionMemoryLimits,
+		FractionEphemeralStorageReqs:   nu.fractionEphemeralStorageReqs,
+		FractionEphemeralStorageLimits: nu.fractionEphemeralStorageLimits,
+	}
+}
+
+func GetNodesUsage(clientset kubernetes.Interface) []*NodeWithUsageData {
+	nodeList, _ := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+
+	extNodeList := make([]*NodeWithUsageData, len(nodeList.Items))
+
+	for i, node := range nodeList.Items {
+		podList := getPodsForNode(clientset, node.Name)
+		nodeUsage := DescribeNodeResource(podList, &node)
+
+		extNodeList[i] = nodeUsage.Externalize(&node)
+	}
+
+	return extNodeList
+}
+
+func getPodsForNode(clientset kubernetes.Interface, nodeName string) *v1.PodList {
+	fmt.Printf("%s", nodeName)
+
+	podList, _ := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
+		FieldSelector: "spec.nodeName=" + nodeName + ",status.phase=Running",
+	})
+
+	return podList
+}

+ 43 - 0
server/api/k8s_handler.go

@@ -5,12 +5,14 @@ import (
 	"fmt"
 	"net/http"
 	"net/url"
+	"strconv"
 
 	"github.com/go-chi/chi"
 	"github.com/gorilla/schema"
 	"github.com/gorilla/websocket"
 	"github.com/porter-dev/porter/internal/forms"
 	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/kubernetes/nodes"
 	"github.com/porter-dev/porter/internal/kubernetes/prometheus"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/clientcmd"
@@ -1222,3 +1224,44 @@ func (app *App) HandleGetTemporaryKubeconfig(w http.ResponseWriter, r *http.Requ
 		return
 	}
 }
+
+func (app *App) HandleListNodes(w http.ResponseWriter, r *http.Request) {
+	id, err := strconv.ParseUint(chi.URLParam(r, "cluster_id"), 0, 64)
+
+	if err != nil || id == 0 {
+		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
+		return
+	}
+
+	cluster, err := app.Repo.Cluster.ReadCluster(uint(id))
+
+	if err != nil {
+		app.handleErrorRead(err, ErrProjectDataRead, w)
+		return
+	}
+
+	form := &forms.K8sForm{
+		OutOfClusterConfig: &kubernetes.OutOfClusterConfig{
+			Repo:              app.Repo,
+			DigitalOceanOAuth: app.DOConf,
+			Cluster:           cluster,
+		},
+	}
+
+	var agent *kubernetes.Agent
+
+	if app.ServerConf.IsTesting {
+		agent = app.TestAgents.K8sAgent
+	} else {
+		agent, _ = kubernetes.GetAgentOutOfClusterConfig(form.OutOfClusterConfig)
+	}
+
+	nodeWithUsageList := nodes.GetNodesUsage(agent.Clientset)
+
+	w.WriteHeader(http.StatusOK)
+
+	if err := json.NewEncoder(w).Encode(nodeWithUsageList); err != nil {
+		app.handleErrorFormDecoding(err, ErrProjectDecode, w)
+		return
+	}
+}

+ 14 - 0
server/router/router.go

@@ -599,6 +599,20 @@ func New(a *api.App) *chi.Mux {
 				),
 			)
 
+			r.Method(
+				"GET",
+				"/projects/{project_id}/clusters/{cluster_id}/nodes",
+				auth.DoesUserHaveProjectAccess(
+					auth.DoesUserHaveClusterAccess(
+						requestlog.NewHandler(a.HandleListNodes, l),
+						mw.URLParam,
+						mw.URLParam,
+					),
+					mw.URLParam,
+					mw.ReadAccess,
+				),
+			)
+
 			r.Method(
 				"POST",
 				"/projects/{project_id}/clusters/{cluster_id}",