Pārlūkot izejas kodu

Merge branch 'belanger/agent-v3-integration' into dev

Alexander Belanger 3 gadi atpakaļ
vecāks
revīzija
339b32e258

+ 70 - 6
api/server/handlers/cluster/detect_agent_installed.go

@@ -2,16 +2,22 @@ package cluster
 
 import (
 	"errors"
+	"fmt"
 	"net/http"
+	"strings"
 
+	"github.com/Masterminds/semver/v3"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/helm/loader"
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/models"
+	v1 "k8s.io/api/apps/v1"
+	"sigs.k8s.io/yaml"
 )
 
 type DetectAgentInstalledHandler struct {
@@ -50,15 +56,73 @@ func (c *DetectAgentInstalledHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 	}
 
 	// detect the version of the agent which is installed
-	res := &types.GetAgentResponse{}
+	res := &types.GetAgentResponse{
+		Version: getAgentVersionFromDeployment(depl),
+	}
 
-	versionAnn, ok := depl.ObjectMeta.Annotations["porter.run/agent-major-version"]
+	res.LatestVersion, err = getLatestAgentVersion(c.Config().ServerConf.DefaultAddonHelmRepoURL)
 
-	if !ok {
-		res.Version = "v1"
-	} else {
-		res.Version = versionAnn
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	versionSem, err := semver.NewConstraint(fmt.Sprintf("> %s", strings.TrimPrefix(res.Version, "v")))
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	latestVersionSem, err := semver.NewVersion(strings.TrimPrefix(res.LatestVersion, "v"))
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	if versionSem.Check(latestVersionSem) {
+		res.ShouldUpgrade = true
 	}
 
 	c.WriteResult(w, r, res)
 }
+
+// getAgentVersionFromDeployment reads the porter-agent version from the
+// agent Deployment's annotations. It prefers the "porter.run/agent-version"
+// annotation, falls back to the older "porter.run/agent-major-version"
+// annotation, and defaults to "v1" when neither is set (or both are empty).
+func getAgentVersionFromDeployment(depl *v1.Deployment) string {
+	versionAnn, ok := depl.ObjectMeta.Annotations["porter.run/agent-version"]
+
+	if !ok {
+		// fallback to porter agent v2 annotation
+		versionAnn = depl.ObjectMeta.Annotations["porter.run/agent-major-version"]
+	}
+
+	if versionAnn != "" {
+		return versionAnn
+	}
+
+	// no version annotation found: assume the oldest agent version
+	return "v1"
+}
+
+// getLatestAgentVersion loads the latest published porter-agent chart from
+// the given helm repository and returns the agent version declared in the
+// chart's deployment.yaml template annotations.
+//
+// It returns an error if the chart cannot be loaded, if deployment.yaml
+// cannot be unmarshalled, or if no deployment.yaml template exists.
+func getLatestAgentVersion(helmRepoURL string) (string, error) {
+	// empty version string requests the latest chart version
+	chart, err := loader.LoadChartPublic(helmRepoURL, "porter-agent", "")
+
+	if err != nil {
+		return "", fmt.Errorf("could not load latest porter-agent chart: %w", err)
+	}
+
+	for _, t := range chart.Templates {
+		if t.Name == "deployment.yaml" {
+			depl := &v1.Deployment{}
+
+			// NOTE(review): assumes deployment.yaml is plain YAML with no
+			// Go-template directives that would break unmarshalling — confirm
+			err := yaml.Unmarshal(t.Data, depl)
+
+			if err != nil {
+				return "", fmt.Errorf("could not unmarshal deployment.yaml: %w", err)
+			}
+
+			return getAgentVersionFromDeployment(depl), nil
+		}
+	}
+
+	return "", fmt.Errorf("could not find deployment.yaml in porter-agent chart")
+}

+ 64 - 0
api/server/handlers/cluster/get_logs_pod_values.go

@@ -0,0 +1,64 @@
+package cluster
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	porter_agent "github.com/porter-dev/porter/internal/kubernetes/porter_agent/v2"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// GetLogPodValuesHandler serves requests for the pod label values known to
+// the in-cluster porter-agent, proxied through the agent's service.
+type GetLogPodValuesHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewGetLogPodValuesHandler constructs a GetLogPodValuesHandler using the
+// shared server config, request decoder/validator, and result writer, with an
+// out-of-cluster kubernetes agent getter for reaching the target cluster.
+func NewGetLogPodValuesHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetLogPodValuesHandler {
+	return &GetLogPodValuesHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// ServeHTTP decodes a GetPodValuesRequest from the query string, connects to
+// the cluster in scope, locates the porter-agent service, and proxies the
+// pod-values query to the agent, writing the agent's response to the client.
+func (c *GetLogPodValuesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// cluster is guaranteed by the ClusterScope middleware on this route
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	request := &types.GetPodValuesRequest{}
+
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		// DecodeAndValidate has already written the error response
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, "")
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	// get agent service
+	agentSvc, err := porter_agent.GetAgentService(agent.Clientset)
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	// proxy the request through the agent service and relay the result
+	podVals, err := porter_agent.GetPodValues(agent.Clientset, agentSvc, request)
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	c.WriteResult(w, r, podVals)
+}

+ 13 - 23
api/server/handlers/cluster/install_agent.go

@@ -108,13 +108,19 @@ func (c *InstallAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			"clusterID": fmt.Sprintf("%d", cluster.ID),
 			"projectID": fmt.Sprintf("%d", proj.ID),
 		},
-	}
-
-	if exists, err := checkIfMonitoringNodeExists(k8sAgent); err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	} else if exists {
-		porterAgentValues["monitoringWorkload"] = true
+		"loki": map[string]interface{}{
+			"nodeSelector": map[string]interface{}{
+				"porter.run/workload-kind": "monitoring",
+			},
+			"tolerations": []map[string]interface{}{
+				{
+					"key":      "porter.run/workload-kind",
+					"operator": "Equal",
+					"value":    "monitoring",
+					"effect":   "NoSchedule",
+				},
+			},
+		},
 	}
 
 	conf := &helm.InstallChartConfig{
@@ -180,19 +186,3 @@ func checkAndDeleteOlderAgent(k8sAgent *kubernetes.Agent) error {
 
 	return nil
 }
-
-func checkIfMonitoringNodeExists(k8sAgent *kubernetes.Agent) (bool, error) {
-	nodeList, err := k8sAgent.Clientset.CoreV1().Nodes().List(context.Background(), v1.ListOptions{
-		LabelSelector: monitoringNodeLabel,
-	})
-
-	if err != nil {
-		return false, fmt.Errorf("error listing nodes: %w", err)
-	}
-
-	if len(nodeList.Items) > 0 {
-		return true, nil
-	}
-
-	return false, nil
-}

+ 29 - 0
api/server/router/cluster.go

@@ -1271,6 +1271,35 @@ func getClusterRoutes(
 		Router:   r,
 	})
 
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/logs/pod_values -> cluster.NewGetLogPodValuesHandler
+	getLogPodValuesEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/logs/pod_values", relPath),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	getLogPodValuesHandler := cluster.NewGetLogPodValuesHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getLogPodValuesEndpoint,
+		Handler:  getLogPodValuesHandler,
+		Router:   r,
+	})
+
 	// GET /api/projects/{project_id}/clusters/{cluster_id}/events -> cluster.NewGetEventsHandler
 	getEventsEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{

+ 3 - 1
api/types/agent.go

@@ -1,5 +1,7 @@
 package types
 
 type GetAgentResponse struct {
-	Version string `json:"version"`
+	// Version is the agent version currently installed in the cluster.
+	Version       string `json:"version"`
+	// LatestVersion is the newest agent version published in the helm repo.
+	LatestVersion string `json:"latest_version"`
+	// ShouldUpgrade is true when LatestVersion is greater than Version.
+	ShouldUpgrade bool   `json:"should_upgrade"`
 }

+ 6 - 0
api/types/incident.go

@@ -101,6 +101,12 @@ type GetLogRequest struct {
 	Namespace   string     `schema:"namespace" form:"required"`
 }
 
+// GetPodValuesRequest is the query-string payload for the pod-values
+// endpoint. All fields are optional.
+type GetPodValuesRequest struct {
+	// StartRange and EndRange bound the time window of the query, if set.
+	StartRange  *time.Time `schema:"start_range"`
+	EndRange    *time.Time `schema:"end_range"`
+	// MatchPrefix filters returned pod values by name prefix.
+	MatchPrefix string     `schema:"match_prefix"`
+}
+
 type LogLine struct {
 	Timestamp *time.Time `json:"timestamp"`
 	Line      string     `json:"line"`

+ 52 - 0
internal/kubernetes/porter_agent/v2/agent_server.go

@@ -196,6 +196,58 @@ func GetHistoricalLogs(
 	return logsResp, nil
 }
 
+// GetPodValues queries the porter-agent's /logs/pod_values endpoint through
+// the Kubernetes service proxy and returns the list of pod values it reports.
+//
+// StartRange/EndRange are serialized to their text (RFC 3339) form and only
+// sent when non-nil; MatchPrefix is always sent (possibly empty).
+func GetPodValues(
+	clientset kubernetes.Interface,
+	service *v1.Service,
+	req *types.GetPodValuesRequest,
+) ([]string, error) {
+	vals := make(map[string]string)
+
+	if req.StartRange != nil {
+		startVal, err := req.StartRange.MarshalText()
+
+		if err != nil {
+			return nil, err
+		}
+
+		vals["start_range"] = string(startVal)
+	}
+
+	if req.EndRange != nil {
+		endVal, err := req.EndRange.MarshalText()
+
+		if err != nil {
+			return nil, err
+		}
+
+		vals["end_range"] = string(endVal)
+	}
+
+	vals["match_prefix"] = req.MatchPrefix
+
+	// NOTE(review): assumes the agent service exposes at least one port and
+	// serves plain HTTP on the first one — Ports[0] panics otherwise; confirm
+	resp := clientset.CoreV1().Services(service.Namespace).ProxyGet(
+		"http",
+		service.Name,
+		fmt.Sprintf("%d", service.Spec.Ports[0].Port),
+		"/logs/pod_values",
+		vals,
+	)
+
+	rawQuery, err := resp.DoRaw(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	valsResp := make([]string, 0)
+
+	err = json.Unmarshal(rawQuery, &valsResp)
+	if err != nil {
+		return nil, err
+	}
+
+	return valsResp, nil
+}
+
 func GetHistoricalEvents(
 	clientset kubernetes.Interface,
 	service *v1.Service,