Justin Rhee 3 tahun lalu
induk
melakukan
4e8638882e
49 mengubah file dengan 1071 tambahan dan 129 penghapusan
  1. 55 0
      api/client/api.go
  2. 63 0
      api/client/v1_stack.go
  3. 21 0
      api/server/handlers/infra/forms.go
  4. 17 0
      api/server/handlers/release/upgrade.go
  5. 21 0
      api/server/handlers/stack/add_application.go
  6. 12 0
      api/server/handlers/stack/add_env_group.go
  7. 25 0
      api/server/handlers/stack/create.go
  8. 11 0
      api/server/handlers/stack/helpers.go
  9. 6 12
      api/server/shared/requestutils/validator.go
  10. 5 5
      api/types/namespace.go
  11. 1 1
      api/types/release.go
  12. 4 4
      api/types/stacks.go
  13. 15 13
      cli/cmd/apply.go
  14. 13 0
      cli/cmd/docker/builder.go
  15. 217 0
      cli/cmd/stack.go
  16. 2 2
      dashboard/src/main/home/cluster-dashboard/chart/ChartList.tsx
  17. 5 0
      dashboard/src/main/home/cluster-dashboard/dashboard/Dashboard.tsx
  18. 18 2
      dashboard/src/main/home/cluster-dashboard/expanded-chart/jobs/JobResource.tsx
  19. 1 1
      dashboard/src/main/home/dashboard/ClusterList.tsx
  20. 1 1
      dashboard/src/main/home/launch/launch-flow/LaunchFlow.tsx
  21. 2 2
      dashboard/src/main/home/modals/EditInviteOrCollaboratorModal.tsx
  22. 9 5
      dashboard/src/main/home/project-settings/InviteList.tsx
  23. 5 5
      dashboard/src/main/home/project-settings/ProjectSettings.tsx
  24. 173 0
      dashboard/src/main/home/sidebar/ClusterSection.tsx
  25. 3 0
      dashboard/src/main/home/sidebar/Clusters.tsx
  26. 4 0
      dashboard/src/main/home/sidebar/ProjectSection.tsx
  27. 9 0
      dashboard/src/main/home/sidebar/Sidebar.tsx
  28. 4 6
      dashboard/src/main/home/sidebar/SidebarLink.tsx
  29. 9 0
      internal/helm/agent.go
  30. 3 0
      internal/helm/config.go
  31. 3 0
      internal/kubernetes/config.go
  32. 3 0
      internal/models/monitor.go
  33. 33 1
      internal/opa/config.yaml
  34. 10 8
      internal/opa/loader.go
  35. 29 10
      internal/opa/opa.go
  36. 26 0
      internal/opa/policies/certificates/expired.rego
  37. 36 0
      internal/opa/policies/node/healthy.rego
  38. 25 0
      internal/opa/policies/node/k8s_version.rego
  39. 23 0
      internal/opa/policies/node/porter_run_labels.rego
  40. 41 0
      internal/opa/policies/node/porter_run_taints.rego
  41. 4 4
      internal/opa/policies/web/web_version.rego
  42. 15 29
      internal/repository/gorm/cluster.go
  43. 26 0
      internal/repository/gorm/monitor.go
  44. 3 0
      internal/repository/monitor.go
  45. 8 0
      internal/repository/test/monitor.go
  46. 4 0
      internal/validator/validator.go
  47. 3 1
      workers/jobs/helm_revisions_count_tracker.go
  48. 43 17
      workers/jobs/recommender.go
  49. 2 0
      workers/main.go

+ 55 - 0
api/client/api.go

@@ -164,6 +164,61 @@ func (c *Client) postRequest(relPath string, data interface{}, response interfac
 	return err
 }
 
+type patchRequestOpts struct {
+	retryCount uint
+}
+
+func (c *Client) patchRequest(relPath string, data interface{}, response interface{}, opts ...patchRequestOpts) error {
+	var retryCount uint = 1
+
+	if len(opts) > 0 {
+		for _, opt := range opts {
+			retryCount = opt.retryCount
+		}
+	}
+
+	var httpErr *types.ExternalError
+	var err error
+
+	for i := 0; i < int(retryCount); i++ {
+		strData, err := json.Marshal(data)
+
+		if err != nil {
+			return nil
+		}
+
+		req, err := http.NewRequest(
+			"PATCH",
+			fmt.Sprintf("%s%s", c.BaseURL, relPath),
+			strings.NewReader(string(strData)),
+		)
+
+		if err != nil {
+			return err
+		}
+
+		httpErr, err = c.sendRequest(req, response, true)
+
+		if httpErr == nil && err == nil {
+			return nil
+		}
+
+		if i != int(retryCount)-1 {
+			if httpErr != nil {
+				fmt.Printf("Error: %s (status code %d), retrying request...\n", httpErr.Error, httpErr.Code)
+			} else {
+				fmt.Printf("Error: %v, retrying request...\n", err)
+			}
+		}
+	}
+
+	if httpErr != nil {
+		return fmt.Errorf("%v", httpErr.Error)
+	}
+
+	return err
+}
+
 func (c *Client) deleteRequest(relPath string, data interface{}, response interface{}) error {
 	strData, err := json.Marshal(data)
 

+ 63 - 0
api/client/v1_stack.go

@@ -0,0 +1,63 @@
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/porter-dev/porter/api/types"
+)
+
+// ListStacks retrieves the list of stacks
+func (c *Client) ListStacks(
+	ctx context.Context,
+	projectID, clusterID uint,
+	namespace string,
+) (*types.StackListResponse, error) {
+	resp := &types.StackListResponse{}
+
+	err := c.getRequest(
+		fmt.Sprintf(
+			"/v1/projects/%d/clusters/%d/namespaces/%s/stacks",
+			projectID, clusterID, namespace,
+		),
+		nil,
+		resp,
+	)
+
+	return resp, err
+}
+
+func (c *Client) AddEnvGroupToStack(
+	ctx context.Context,
+	projectID, clusterID uint,
+	namespace, stackID string,
+	req *types.CreateStackEnvGroupRequest,
+) error {
+	err := c.patchRequest(
+		fmt.Sprintf(
+			"/v1/projects/%d/clusters/%d/namespaces/%s/stacks/%s/add_env_group",
+			projectID, clusterID, namespace, stackID,
+		),
+		req,
+		nil,
+	)
+
+	return err
+}
+
+func (c *Client) RemoveEnvGroupFromStack(
+	ctx context.Context,
+	projectID, clusterID uint,
+	namespace, stackID, envGroupName string,
+) error {
+	err := c.deleteRequest(
+		fmt.Sprintf(
+			"/v1/projects/%d/clusters/%d/namespaces/%s/stacks/%s/remove_env_group/%s",
+			projectID, clusterID, namespace, stackID, envGroupName,
+		),
+		nil,
+		nil,
+	)
+
+	return err
+}

+ 21 - 0
api/server/handlers/infra/forms.go

@@ -639,6 +639,27 @@ tabs:
       label: Add an additional prometheus node group to ensure monitoring stability.
       settings:
         default: false
+  - name: prometheus_machine_settings
+    show_if: additional_prometheus_node_group
+    contents:
+    - type: select
+      label: ⚙️ AWS Prometheus Machine Type
+      variable: additional_prometheus_machine_type
+      settings:
+        default: t2.medium
+        options:
+        - label: t2.medium
+          value: t2.medium
+        - label: t2.large
+          value: t2.large
+        - label: t2.xlarge
+          value: t2.xlarge
+        - label: t3.medium
+          value: t3.medium
+        - label: t3.large
+          value: t3.large
+        - label: t3.xlarge
+          value: t3.xlarge
 `
 
 const gcrForm = `name: GCR

+ 17 - 0
api/server/handlers/release/upgrade.go

@@ -141,6 +141,23 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		}
 	}
 
+	// check if release is part of a stack
+	stacks, err := c.Repo().Stack().ListStacks(cluster.ProjectID, cluster.ID, helmRelease.Namespace)
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	for _, stk := range stacks {
+		for _, res := range stk.Revisions[0].Resources {
+			if res.Name == helmRelease.Name {
+				conf.Stack = stk
+				break
+			}
+		}
+	}
+
 	newHelmRelease, upgradeErr := helmAgent.UpgradeRelease(conf, request.Values, c.Config().DOConf)
 
 	if upgradeErr == nil && newHelmRelease != nil {

+ 21 - 0
api/server/handlers/stack/add_application.go

@@ -82,6 +82,18 @@ func (p *StackAddApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 
 	appResources = append(appResources, newResources...)
 
+	nameValidator := make(map[string]bool)
+
+	for _, res := range appResources {
+		if _, ok := nameValidator[res.Name]; ok {
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("duplicate app resource name: %s", res.Name),
+				http.StatusBadRequest))
+			return
+		}
+
+		nameValidator[res.Name] = true
+	}
+
 	envGroups, err := stacks.CloneEnvGroups(latestRevision.EnvGroups)
 
 	if err != nil {
@@ -105,6 +117,14 @@ func (p *StackAddApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
+	// re-read the stack to get the most upto date information
+	stack, err = p.Repo().Stack().ReadStackByID(proj.ID, stack.ID)
+
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
 	registries, err := p.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
 
 	if err != nil {
@@ -132,6 +152,7 @@ func (p *StackAddApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 			registries: registries,
 			helmAgent:  helmAgent,
 			request:    req,
+			stack:      stack,
 		})
 
 		if err != nil {

+ 12 - 0
api/server/handlers/stack/add_env_group.go

@@ -88,6 +88,18 @@ func (p *StackAddEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 
 	envGroups = append(envGroups, newEnvGroups...)
 
+	nameValidator := make(map[string]bool)
+
+	for _, eg := range envGroups {
+		if _, ok := nameValidator[eg.Name]; ok {
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("duplicate env group name: %s", eg.Name),
+				http.StatusBadRequest))
+			return
+		}
+
+		nameValidator[eg.Name] = true
+	}
+
 	newRevision := &models.StackRevision{
 		StackID:        stack.ID,
 		RevisionNumber: latestRevision.RevisionNumber + 1,

+ 25 - 0
api/server/handlers/stack/create.go

@@ -67,6 +67,18 @@ func (p *StackCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	nameValidator := make(map[string]bool)
+
+	for _, res := range resources {
+		if _, ok := nameValidator[res.Name]; ok {
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("duplicate app resource name: %s", res.Name),
+				http.StatusBadRequest))
+			return
+		}
+
+		nameValidator[res.Name] = true
+	}
+
 	envGroups, err := getEnvGroupModels(req.EnvGroups, proj.ID, cluster.ID, namespace)
 
 	if err != nil {
@@ -74,6 +86,18 @@ func (p *StackCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	nameValidator = make(map[string]bool)
+
+	for _, eg := range envGroups {
+		if _, ok := nameValidator[eg.Name]; ok {
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("duplicate env group name: %s", eg.Name),
+				http.StatusBadRequest))
+			return
+		}
+
+		nameValidator[eg.Name] = true
+	}
+
 	// write stack to the database with creating status
 	stack := &models.Stack{
 		ProjectID: proj.ID,
@@ -174,6 +198,7 @@ func (p *StackCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 				registries: registries,
 				helmAgent:  helmAgent,
 				request:    appResource,
+				stack:      stack,
 			})
 
 			if err != nil {

+ 11 - 0
api/server/handlers/stack/helpers.go

@@ -17,6 +17,7 @@ type applyAppResourceOpts struct {
 	helmAgent  *helm.Agent
 	request    *types.CreateStackAppResourceRequest
 	registries []*models.Registry
+	stack      *models.Stack
 }
 
 func applyAppResource(opts *applyAppResourceOpts) (*release.Release, error) {
@@ -40,6 +41,16 @@ func applyAppResource(opts *applyAppResourceOpts) (*release.Release, error) {
 		Registries: opts.registries,
 	}
 
+	if conf.Values == nil {
+		conf.Values = make(map[string]interface{})
+	}
+
+	conf.Values["stack"] = map[string]interface{}{
+		"enabled":  true,
+		"name":     opts.stack.Name,
+		"revision": opts.stack.Revisions[0].RevisionNumber,
+	}
+
 	return opts.helmAgent.InstallChart(conf, opts.config.DOConf)
 }
 

+ 6 - 12
api/server/shared/requestutils/validator.go

@@ -5,9 +5,9 @@ import (
 	"net/http"
 	"strings"
 
+	v10Validator "github.com/go-playground/validator/v10"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
-
-	"github.com/go-playground/validator/v10"
+	"github.com/porter-dev/porter/internal/validator"
 )
 
 // Validator will validate the fields for a request object to ensure that
@@ -22,19 +22,13 @@ type Validator interface {
 // DefaultValidator uses the go-playground v10 validator for verifying that
 // request objects are well-formed
 type DefaultValidator struct {
-	v10 *validator.Validate
+	v10 *v10Validator.Validate
 }
 
 // NewDefaultValidator returns a Validator constructed from the go-playground v10
 // validator
 func NewDefaultValidator() Validator {
-	v10 := validator.New()
-
-	// set tag name to "form" since the request structs are used on both
-	// the client and server side
-	v10.SetTagName("form")
-
-	return &DefaultValidator{v10}
+	return &DefaultValidator{validator.New()}
 }
 
 // Validate uses the go-playground v10 validator and checks struct fields against
@@ -47,7 +41,7 @@ func (v *DefaultValidator) Validate(s interface{}) apierrors.RequestError {
 	}
 
 	// translate all validator errors
-	errs, ok := err.(validator.ValidationErrors)
+	errs, ok := err.(v10Validator.ValidationErrors)
 
 	if !ok {
 		return apierrors.NewErrInternal(fmt.Errorf("could not cast err to validator.ValidationErrors"))
@@ -93,7 +87,7 @@ type ValidationErrObject struct {
 
 // NewValidationErrObject simply returns a ValidationErrObject from a go-playground v10
 // validator `FieldError`
-func NewValidationErrObject(fieldErr validator.FieldError) *ValidationErrObject {
+func NewValidationErrObject(fieldErr v10Validator.FieldError) *ValidationErrObject {
 	return &ValidationErrObject{
 		Field:       fieldErr.Field(),
 		Condition:   fieldErr.ActualTag(),

+ 5 - 5
api/types/namespace.go

@@ -135,8 +135,8 @@ type GetEnvGroupRequest struct {
 
 type CloneEnvGroupRequest struct {
 	Namespace string `json:"namespace" form:"required"`
-	Name      string `json:"name" form:"required"`
-	CloneName string `json:"clone_name"`
+	Name      string `json:"name" form:"required,dns1123"`
+	CloneName string `json:"clone_name,dns1123"`
 	Version   uint   `json:"version"`
 }
 
@@ -149,7 +149,7 @@ type DeleteEnvGroupRequest struct {
 }
 
 type AddEnvGroupApplicationRequest struct {
-	Name            string `json:"name" form:"required"`
+	Name            string `json:"name" form:"required,dns1123"`
 	ApplicationName string `json:"app_name" form:"required"`
 }
 
@@ -161,7 +161,7 @@ type ListEnvGroupsResponse []*EnvGroupMeta
 type CreateEnvGroupRequest struct {
 	// the name of the env group to create or update
 	// example: prod-env-group
-	Name string `json:"name" form:"required"`
+	Name string `json:"name" form:"required,dns1123"`
 
 	// the variables to include in the env group
 	Variables map[string]string `json:"variables" form:"required"`
@@ -231,7 +231,7 @@ type GetEnvGroupResponse struct {
 //
 // swagger:model
 type V1EnvGroupReleaseRequest struct {
-	ReleaseName string `json:"release_name" form:"required"`
+	ReleaseName string `json:"release_name" form:"required,dns1123"`
 }
 
 // V1EnvGroupResponse defines an env group

+ 1 - 1
api/types/release.go

@@ -68,7 +68,7 @@ type CreateReleaseBaseRequest struct {
 
 	// The name of this release
 	// required: true
-	Name string `json:"name" form:"required"`
+	Name string `json:"name" form:"required,dns1123"`
 }
 
 // swagger:model

+ 4 - 4
api/types/stacks.go

@@ -56,7 +56,7 @@ type CreateStackAppResourceRequest struct {
 
 	// The name of the resource.
 	// required: true
-	Name string `json:"name" form:"required"`
+	Name string `json:"name" form:"required,dns1123"`
 
 	// The name of the source config (must exist inside `source_configs`).
 	// required: true
@@ -235,15 +235,15 @@ type StackSourceConfig struct {
 type CreateStackEnvGroupRequest struct {
 	// The name of the env group
 	// required: true
-	Name string `json:"name" form:"required"`
+	Name string `json:"name" form:"required,dns1123"`
 
 	// The non-secret variables to set in the env group
 	// required: true
-	Variables map[string]string `json:"variables,required" form:"required"`
+	Variables map[string]string `json:"variables" form:"required"`
 
 	// The secret variables to set in the env group
 	// required: true
-	SecretVariables map[string]string `json:"secret_variables,required" form:"required"`
+	SecretVariables map[string]string `json:"secret_variables" form:"required"`
 
 	// The list of applications that this env group should be synced to. These applications **must** be present
 	// in the stack - if an env group is created from a stack, syncing to applications which are not in the stack

+ 15 - 13
cli/cmd/apply.go

@@ -323,6 +323,8 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 		return nil, fmt.Errorf("nil resource")
 	}
 
+	resourceName := resource.Name
+
 	appConfig, err := d.getApplicationConfig(resource)
 
 	if err != nil {
@@ -333,13 +335,13 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 
 	if method != "pack" && method != "docker" && method != "registry" {
 		return nil, fmt.Errorf("for resource %s, config.build.method should either be \"docker\", \"pack\" or \"registry\"",
-			resource.Name)
+			resourceName)
 	}
 
 	fullPath, err := filepath.Abs(appConfig.Build.Context)
 
 	if err != nil {
-		return nil, fmt.Errorf("for resource %s, error getting absolute path for config.build.context: %w", resource.Name,
+		return nil, fmt.Errorf("for resource %s, error getting absolute path for config.build.context: %w", resourceName,
 			err)
 	}
 
@@ -347,17 +349,17 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 
 	if tag == "" {
 		color.New(color.FgYellow).Printf("for resource %s, since PORTER_TAG is not set, the Docker image tag will default to"+
-			" the git repo SHA", resource.Name)
+			" the git repo SHA", resourceName)
 
 		commit, err := git.LastCommit()
 
 		if err != nil {
-			return nil, fmt.Errorf("for resource %s, error getting last git commit: %w", resource.Name, err)
+			return nil, fmt.Errorf("for resource %s, error getting last git commit: %w", resourceName, err)
 		}
 
 		tag = commit.Sha[:7]
 
-		color.New(color.FgYellow).Printf("for resource %s, using tag %s\n", resource.Name, tag)
+		color.New(color.FgYellow).Printf("for resource %s, using tag %s\n", resourceName, tag)
 	}
 
 	// if the method is registry and a tag is defined, we use the provided tag
@@ -398,16 +400,16 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 		resource, err = d.createApplication(resource, client, sharedOpts, appConfig)
 
 		if err != nil {
-			return nil, fmt.Errorf("error creating app from resource %s: %w", resource.Name, err)
+			return nil, fmt.Errorf("error creating app from resource %s: %w", resourceName, err)
 		}
 	} else if !appConfig.OnlyCreate {
 		resource, err = d.updateApplication(resource, client, sharedOpts, appConfig)
 
 		if err != nil {
-			return nil, fmt.Errorf("error updating application from resource %s: %w", resource.Name, err)
+			return nil, fmt.Errorf("error updating application from resource %s: %w", resourceName, err)
 		}
 	} else {
-		color.New(color.FgYellow).Printf("Skipping creation for resource %s as onlyCreate is set to true\n", resource.Name)
+		color.New(color.FgYellow).Printf("Skipping creation for resource %s as onlyCreate is set to true\n", resourceName)
 	}
 
 	if err = d.assignOutput(resource, client); err != nil {
@@ -415,13 +417,13 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 	}
 
 	if d.source.Name == "job" && appConfig.WaitForJob && (shouldCreate || !appConfig.OnlyCreate) {
-		color.New(color.FgYellow).Printf("Waiting for job '%s' to finish\n", resource.Name)
+		color.New(color.FgYellow).Printf("Waiting for job '%s' to finish\n", resourceName)
 
 		err = wait.WaitForJob(client, &wait.WaitOpts{
 			ProjectID: d.target.Project,
 			ClusterID: d.target.Cluster,
 			Namespace: d.target.Namespace,
-			Name:      resource.Name,
+			Name:      resourceName,
 		})
 
 		if err != nil && appConfig.OnlyCreate {
@@ -430,15 +432,15 @@ func (d *DeployDriver) applyApplication(resource *models.Resource, client *api.C
 				d.target.Project,
 				d.target.Cluster,
 				d.target.Namespace,
-				resource.Name,
+				resourceName,
 			)
 
 			if deleteJobErr != nil {
 				return nil, fmt.Errorf("error deleting job %s with waitForJob and onlyCreate set to true: %w",
-					resource.Name, deleteJobErr)
+					resourceName, deleteJobErr)
 			}
 		} else if err != nil {
-			return nil, fmt.Errorf("error waiting for job %s: %w", resource.Name, err)
+			return nil, fmt.Errorf("error waiting for job %s: %w", resourceName, err)
 		}
 	}
 

+ 13 - 0
cli/cmd/docker/builder.go

@@ -12,6 +12,7 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/fileutils"
 	"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
 	"github.com/moby/moby/pkg/jsonmessage"
 	"github.com/moby/moby/pkg/stringid"
@@ -47,6 +48,8 @@ func (a *Agent) BuildLocal(opts *BuildOpts) (err error) {
 		}
 	}
 
+	excludes = trimBuildFilesFromExcludes(excludes, dockerfilePath)
+
 	tar, err := archive.TarWithOptions(opts.BuildContext, &archive.TarOptions{
 		ExcludePatterns: excludes,
 	})
@@ -107,6 +110,16 @@ func (a *Agent) BuildLocal(opts *BuildOpts) (err error) {
 	return jsonmessage.DisplayJSONMessagesStream(out.Body, os.Stderr, termFd, isTerm, nil)
 }
 
+func trimBuildFilesFromExcludes(excludes []string, dockerfile string) []string {
+	if keep, _ := fileutils.Matches(".dockerignore", excludes); keep {
+		excludes = append(excludes, "!.dockerignore")
+	}
+	if keep, _ := fileutils.Matches(dockerfile, excludes); keep {
+		excludes = append(excludes, "!"+dockerfile)
+	}
+	return excludes
+}
+
 // AddDockerfileToBuildContext from a ReadCloser, returns a new archive and
 // the relative path to the dockerfile in the context.
 func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCloser) (io.ReadCloser, string, error) {

+ 217 - 0
cli/cmd/stack.go

@@ -0,0 +1,217 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/fatih/color"
+	api "github.com/porter-dev/porter/api/client"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/spf13/cobra"
+)
+
+var linkedApps []string
+
+// stackCmd represents the "porter stack" base command when called
+// without any subcommands
+var stackCmd = &cobra.Command{
+	Use:     "stack",
+	Aliases: []string{"stacks"},
+	Short:   "Commands that control Porter Stacks",
+}
+
+var stackEnvGroupCmd = &cobra.Command{
+	Use:     "env-group",
+	Aliases: []string{"eg", "envgroup", "env-groups", "envgroups"},
+	Short:   "Commands to add or remove an env group in a stack",
+	Run: func(cmd *cobra.Command, args []string) {
+		color.New(color.FgRed).Println("need to specify an operation to continue")
+	},
+}
+
+var stackEnvGroupAddCmd = &cobra.Command{
+	Use:   "add [name]",
+	Args:  cobra.ExactArgs(1),
+	Short: "Add an env group to a stack",
+	Run: func(cmd *cobra.Command, args []string) {
+		err := checkLoginAndRun(args, stackAddEnvGroup)
+
+		if err != nil {
+			os.Exit(1)
+		}
+	},
+}
+
+var stackEnvGroupRemoveCmd = &cobra.Command{
+	Use:   "remove [name]",
+	Args:  cobra.ExactArgs(1),
+	Short: "Remove an existing env group from a stack",
+	Run: func(cmd *cobra.Command, args []string) {
+		err := checkLoginAndRun(args, stackRemoveEnvGroup)
+
+		if err != nil {
+			os.Exit(1)
+		}
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(stackCmd)
+
+	stackCmd.AddCommand(stackEnvGroupCmd)
+
+	stackCmd.PersistentFlags().StringVar(
+		&name,
+		"name",
+		"",
+		"the name of the stack",
+	)
+
+	stackCmd.PersistentFlags().StringVar(
+		&namespace,
+		"namespace",
+		"default",
+		"the namespace of the stack",
+	)
+
+	stackEnvGroupAddCmd.PersistentFlags().StringArrayVarP(
+		&normalEnvGroupVars,
+		"normal",
+		"n",
+		[]string{},
+		"list of variables to set, in the form VAR=VALUE",
+	)
+
+	stackEnvGroupAddCmd.PersistentFlags().StringArrayVarP(
+		&secretEnvGroupVars,
+		"secret",
+		"s",
+		[]string{},
+		"list of secret variables to set, in the form VAR=VALUE",
+	)
+
+	stackEnvGroupAddCmd.PersistentFlags().StringArrayVar(
+		&linkedApps,
+		"linked-apps",
+		[]string{},
+		"list of stack apps to link this env group with",
+	)
+
+	stackEnvGroupCmd.AddCommand(stackEnvGroupAddCmd)
+	stackEnvGroupCmd.AddCommand(stackEnvGroupRemoveCmd)
+}
+
+func stackAddEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+	envGroupName := args[0]
+
+	if len(envGroupName) == 0 {
+		return fmt.Errorf("empty env group name")
+	} else if len(name) == 0 {
+		return fmt.Errorf("empty stack name")
+	} else if len(normalEnvGroupVars) == 0 && len(secretEnvGroupVars) == 0 {
+		return fmt.Errorf("one or more variables are required to create the env group")
+	}
+
+	listStacks, err := client.ListStacks(context.Background(), cliConf.Project, cliConf.Cluster, namespace)
+
+	if err != nil {
+		return err
+	}
+
+	stacks := *listStacks
+
+	var stackID string
+
+	for _, stk := range stacks {
+		if stk.Name == name {
+			stackID = stk.ID
+		}
+	}
+
+	if len(stackID) == 0 {
+		return fmt.Errorf("stack not found")
+	}
+
+	normalVariables := make(map[string]string)
+	secretVariables := make(map[string]string)
+
+	for _, v := range normalEnvGroupVars {
+		key, val, err := validateVarValue(v)
+
+		if err != nil {
+			return err
+		}
+
+		normalVariables[key] = val
+	}
+
+	for _, v := range secretEnvGroupVars {
+		key, val, err := validateVarValue(v)
+
+		if err != nil {
+			return err
+		}
+
+		secretVariables[key] = val
+	}
+
+	err = client.AddEnvGroupToStack(
+		context.Background(), cliConf.Project, cliConf.Cluster, namespace, stackID,
+		&types.CreateStackEnvGroupRequest{
+			Name:               envGroupName,
+			Variables:          normalVariables,
+			SecretVariables:    secretVariables,
+			LinkedApplications: linkedApps,
+		},
+	)
+
+	if err != nil {
+		return err
+	}
+
+	color.New(color.FgGreen).Println("successfully added env group")
+
+	return nil
+}
+
+func stackRemoveEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+	envGroupName := args[0]
+
+	if len(envGroupName) == 0 {
+		return fmt.Errorf("empty env group name")
+	} else if len(name) == 0 {
+		return fmt.Errorf("empty stack name")
+	}
+
+	listStacks, err := client.ListStacks(context.Background(), cliConf.Project, cliConf.Cluster, namespace)
+
+	if err != nil {
+		return err
+	}
+
+	stacks := *listStacks
+
+	var stackID string
+
+	for _, stk := range stacks {
+		if stk.Name == name {
+			stackID = stk.ID
+		}
+	}
+
+	if len(stackID) == 0 {
+		return fmt.Errorf("stack not found")
+	}
+
+	err = client.RemoveEnvGroupFromStack(context.Background(), cliConf.Project, cliConf.Cluster, namespace, stackID,
+		envGroupName)
+
+	if err != nil {
+		return err
+	}
+
+	color.New(color.FgGreen).Println("successfully removed env group")
+
+	return nil
+}

+ 2 - 2
dashboard/src/main/home/cluster-dashboard/chart/ChartList.tsx

@@ -297,7 +297,7 @@ const ChartList: React.FunctionComponent<Props> = ({
       controllers.map((controller) => closeWebsocket(controller));
       closeWebsocket(jobWebsocketID);
     };
-  }, []);
+  }, [context.currentCluster]);
 
   useEffect(() => {
     const websocketID = "helm_releases";
@@ -307,7 +307,7 @@ const ChartList: React.FunctionComponent<Props> = ({
     return () => {
       closeWebsocket(websocketID);
     };
-  }, [namespace]);
+  }, [namespace, context.currentCluster]);
 
   useEffect(() => {
     let isSubscribed = true;

+ 5 - 0
dashboard/src/main/home/cluster-dashboard/dashboard/Dashboard.tsx

@@ -71,6 +71,11 @@ export const Dashboard: React.FunctionComponent = () => {
     }
   }, [location]);
 
+  // Need to reset tab to reset views that don't auto-update on cluster switch (esp namespaces + settings)
+  useEffect(() => {
+    setCurrentTab("nodes");
+  }, [context.currentCluster]);
+
   return (
     <>
       <TitleSection>

+ 18 - 2
dashboard/src/main/home/cluster-dashboard/expanded-chart/jobs/JobResource.tsx

@@ -109,9 +109,25 @@ export default class JobResource extends Component<PropsType, StateType> {
       }
     });
 
+    if (!completeCondition) {
+      // otherwise look for a failed reason
+      this.props.job.status?.conditions?.forEach(
+        (condition: any, i: number) => {
+          if (condition.type == "Failed") {
+            completeCondition = condition;
+          }
+        }
+      );
+    }
+
+    // if still no complete condition, return unknown
+    if (!completeCondition) {
+      return "Succeeded";
+    }
+
     return (
-      completeCondition.reason ||
-      `Completed at ${readableDate(completeCondition.lastTransitionTime)}`
+      completeCondition?.reason ||
+      `Completed at ${readableDate(completeCondition?.lastTransitionTime)}`
     );
   };
 

+ 1 - 1
dashboard/src/main/home/dashboard/ClusterList.tsx

@@ -162,7 +162,7 @@ class Templates extends Component<PropsType, StateType> {
           <TemplateBlock
             onClick={() => {
               this.context.setCurrentCluster(cluster);
-              pushFiltered(this.props, "/cluster-dashboard", ["project_id"], {
+              pushFiltered(this.props, "/applications", ["project_id"], {
                 cluster: cluster.name,
               });
             }}

+ 1 - 1
dashboard/src/main/home/launch/launch-flow/LaunchFlow.tsx

@@ -364,7 +364,7 @@ const LaunchFlow: React.FC<PropsType> = (props) => {
   const renderCurrentPage = () => {
     let { form, currentTab } = props;
 
-    if (currentPage === "source" && currentTab === "porter") {
+    if (currentPage === "source" && form?.hasSource) {
       return (
         <SourcePage
           sourceType={sourceType}

+ 2 - 2
dashboard/src/main/home/modals/EditInviteOrCollaboratorModal.tsx

@@ -89,7 +89,7 @@ const EditCollaboratorModal = () => {
   return (
     <>
       <ModalTitle>
-        Update {isInvite ? "Invite for" : "Collaborator"} {user?.email}
+        Update {isInvite ? "invite for" : "collaborator"} {user?.email}
       </ModalTitle>
       <Subtitle>Specify a different role for this user.</Subtitle>
       <RoleSelectorWrapper>
@@ -101,7 +101,7 @@ const EditCollaboratorModal = () => {
       </RoleSelectorWrapper>
 
       <SaveButton
-        text={`Update ${isInvite ? "Invite" : "Collaborator"}`}
+        text={`Update ${isInvite ? "invite" : "collaborator"}`}
         color="#616FEEcc"
         onClick={() => handleUpdate()}
         status={status}

+ 9 - 5
dashboard/src/main/home/project-settings/InviteList.tsx

@@ -119,7 +119,11 @@ const InvitePage: React.FunctionComponent<Props> = ({}) => {
 
   const createInvite = () => {
     api
-      .createInvite("<token>", { email, kind: role }, { id: currentProject.id })
+      .createInvite(
+        "<token>",
+        { email: email.toLowerCase(), kind: role },
+        { id: currentProject.id }
+      )
       .then(() => {
         getData();
         setEmail("");
@@ -395,7 +399,7 @@ const InvitePage: React.FunctionComponent<Props> = ({}) => {
   return (
     <>
       <>
-        <Heading isAtTop={true}>Share Project</Heading>
+        <Heading isAtTop={true}>Share project</Heading>
         <Helper>Generate a project invite for another user.</Helper>
         <InputRowWrapper>
           <InputRow
@@ -403,7 +407,7 @@ const InvitePage: React.FunctionComponent<Props> = ({}) => {
             type="text"
             setValue={(newEmail: string) => setEmail(newEmail)}
             width="100%"
-            placeholder="ex: mrp@getporter.dev"
+            placeholder="ex: mrp@porter.run"
           />
         </InputRowWrapper>
         <Helper>Specify a role for this user.</Helper>
@@ -416,7 +420,7 @@ const InvitePage: React.FunctionComponent<Props> = ({}) => {
         </RoleSelectorWrapper>
         <ButtonWrapper>
           <InviteButton disabled={!hasSeats} onClick={() => validateEmail()}>
-            Create Invite
+            Create invite
           </InviteButton>
           {isInvalidEmail && (
             <Invalid>Invalid email address. Please try again.</Invalid>
@@ -429,7 +433,7 @@ const InvitePage: React.FunctionComponent<Props> = ({}) => {
         </ButtonWrapper>
       </>
 
-      <Heading>Invites & Collaborators</Heading>
+      <Heading>Invites & collaborators</Heading>
       <Helper>Manage pending invites and view collaborators.</Helper>
       {isLoading && <Loading height={"30%"} />}
       {data?.length && !isLoading ? (

+ 5 - 5
dashboard/src/main/home/project-settings/ProjectSettings.tsx

@@ -64,7 +64,7 @@ class ProjectSettings extends Component<PropsType, StateType> {
     let { currentProject } = this.context;
     this.setState({ projectName: currentProject.name });
     const tabOptions = [];
-    tabOptions.push({ value: "manage-access", label: "Manage Access" });
+    tabOptions.push({ value: "manage-access", label: "Manage access" });
     tabOptions.push({
       value: "billing",
       label: "Billing",
@@ -87,7 +87,7 @@ class ProjectSettings extends Component<PropsType, StateType> {
 
       tabOptions.push({
         value: "additional-settings",
-        label: "Additional Settings",
+        label: "Additional settings",
       });
     }
 
@@ -132,7 +132,7 @@ class ProjectSettings extends Component<PropsType, StateType> {
     } else {
       return (
         <>
-          <Heading isAtTop={true}>Delete Project</Heading>
+          <Heading isAtTop={true}>Delete project</Heading>
           <Helper>
             Permanently delete this project. This will destroy all clusters tied
             to this project that have been provisioned by Porter. Note that this
@@ -164,7 +164,7 @@ class ProjectSettings extends Component<PropsType, StateType> {
               });
             }}
           >
-            Delete Project
+            Delete project
           </DeleteButton>
         </>
       );
@@ -174,7 +174,7 @@ class ProjectSettings extends Component<PropsType, StateType> {
   render() {
     return (
       <StyledProjectSettings>
-        <TitleSection>Project Settings</TitleSection>
+        <TitleSection>Project settings</TitleSection>
         <TabRegion
           currentTab={this.state.currentTab}
           setCurrentTab={(x: string) => this.setState({ currentTab: x })}

+ 173 - 0
dashboard/src/main/home/sidebar/ClusterSection.tsx

@@ -44,9 +44,16 @@ export const ClusterSection: React.FC<Props> = ({
           <SideLine />
           <NavButton
             path="/applications"
-            active={
-              currentCluster.id === clusterId &&
-              window.location.pathname === "/applications"
+            targetClusterName={cluster?.name}
+            active={
+              currentCluster.id === clusterId &&
+              window.location.pathname.startsWith("/applications")
             }
           >
             <Img src={monoweb} />
@@ -54,9 +61,16 @@ export const ClusterSection: React.FC<Props> = ({
           </NavButton>
           <NavButton
             path="/jobs"
-            active={
-              currentCluster.id === clusterId &&
-              window.location.pathname === "/jobs"
+            targetClusterName={cluster?.name}
+            active={
+              currentCluster.id === clusterId &&
+              window.location.pathname.startsWith("/jobs")
             }
           >
             <Img src={monojob} />
@@ -64,9 +78,16 @@ export const ClusterSection: React.FC<Props> = ({
           </NavButton>
           <NavButton
             path="/env-groups"
-            active={
-              currentCluster.id === clusterId &&
-              window.location.pathname === "/env-groups"
+            targetClusterName={cluster?.name}
+            active={
+              currentCluster.id === clusterId &&
+              window.location.pathname.startsWith("/env-groups")
             }
           >
             <Img src={sliders} />
@@ -77,9 +98,16 @@ export const ClusterSection: React.FC<Props> = ({
             currentProject.enable_rds_databases && (
               <NavButton
                 path="/databases"
-                active={
-                  currentCluster.id === clusterId &&
-                  window.location.pathname === "/databases"
+                targetClusterName={cluster?.name}
+                active={
+                  currentCluster.id === clusterId &&
+                  window.location.pathname.startsWith("/databases")
                 }
               >
                 <Icon className="material-icons-outlined">storage</Icon>
@@ -89,20 +117,56 @@ export const ClusterSection: React.FC<Props> = ({
           {currentProject?.stacks_enabled ? (
             <NavButton
               path="/stacks"
-              active={
-                currentCluster.id === clusterId &&
-                window.location.pathname === "/stacks"
+              targetClusterName={cluster?.name}
+              active={
+                currentCluster.id === clusterId &&
+                window.location.pathname.startsWith("/stacks")
               }
             >
               <Icon className="material-icons-outlined">lan</Icon>
               Stacks
             </NavButton>
           ) : null}
-          <NavButton
-            path={"/cluster-dashboard"}
-            active={
-              currentCluster.id === clusterId &&
-              window.location.pathname === "/cluster-dashboard"
+          {currentProject?.preview_envs_enabled && (
+            <NavButton
+              path="/preview-environments"
+              targetClusterName={cluster?.name}
+              active={
+                currentCluster.id === clusterId &&
+                window.location.pathname.startsWith("/preview-environments")
+              }
+            >
+              <InlineSVGWrapper
+                id="Flat"
+                fill="#FFFFFF"
+                xmlns="http://www.w3.org/2000/svg"
+                viewBox="0 0 256 256"
+              >
+                <path d="M103.99951,68a36,36,0,1,0-44,35.0929v49.8142a36,36,0,1,0,16,0V103.0929A36.05516,36.05516,0,0,0,103.99951,68Zm-56,0a20,20,0,1,1,20,20A20.0226,20.0226,0,0,1,47.99951,68Zm40,120a20,20,0,1,1-20-20A20.0226,20.0226,0,0,1,87.99951,188ZM196.002,152.907l-.00146-33.02563a55.63508,55.63508,0,0,0-16.40137-39.59619L155.31348,56h20.686a8,8,0,0,0,0-16h-40c-.02978,0-.05859.00415-.08838.00446-.2334.00256-.46631.01245-.69824.03527-.12891.01258-.25391.03632-.38086.05494-.13135.01928-.26318.03424-.39355.06-.14014.02778-.27686.06611-.41455.10114-.11475.02924-.23047.05426-.34424.08862-.13428.04059-.26367.0907-.395.13806-.11524.04151-.231.07929-.34473.12629-.12109.05011-.23681.10876-.35449.16455-.11914.05621-.23926.10907-.356.17144-.11133.0597-.21728.12757-.32519.1922-.11621.06928-.23389.13483-.34668.21051-.11719.07831-.227.16553-.33985.24976-.09668.07227-.1958.1394-.28955.21655-.18652.1529-.36426.31531-.53564.48413-.01612.01593-.03418.02918-.05029.04529-.02051.02051-.0376.04321-.05762.06391-.16358.16711-.32178.33941-.47022.52032-.083.10059-.15527.20648-.23193.31006-.07861.10571-.16064.20862-.23438.3183-.08056.12072-.15087.24591-.2246.36993-.05958.1-.12208.19757-.17725.30036-.06787.12591-.125.25531-.18506.384-.05078.1084-.10547.21466-.15137.32568-.05127.12463-.09326.25189-.13867.37848-.04248.11987-.08887.238-.126.36047-.03857.12775-.06738.25757-.09912.38678-.03125.124-.06591.24622-.0913.37244-.02979.15088-.04786.30328-.06934.45544-.01465.10645-.03516.21094-.0459.31867q-.03955.39752-.04.79706V88a8,8,0,0,0,16,0V67.31378l24.28516,24.28485a39.73874,39.73874,0,0,1,11.71582,28.28321l.00146,33.02533a36.00007,36.00007,0,1,0,16-.00019ZM188.00244,208a20,20,0,1,1,20-20A20.0226,20.0226,0,0,1,188.00244,208Z" />
+              </InlineSVGWrapper>
+              Preview envs
+            </NavButton>
+          )}
+          <NavButton
+            path={"/cluster-dashboard"}
+            targetClusterName={cluster?.name}
+            active={
+              currentCluster.id === clusterId &&
+              window.location.pathname.startsWith("/cluster-dashboard")
             }
           >
             <Icon className="material-icons">device_hub</Icon>
@@ -112,6 +176,7 @@ export const ClusterSection: React.FC<Props> = ({
       );
     }
   };
 
-  return (
-    <>
@@ -193,6 +258,114 @@ const Spacer = styled.div`
-  flex: 1;
-`;
-
+
+  return (
+    <>
+      <ClusterSelector
+        onClick={() => setIsExpanded(!isExpanded)}
+        active={
+          !isExpanded &&
+          cluster.id === currentCluster.id && (
+            window.location.pathname.startsWith("/cluster-dashboard") ||
+            window.location.pathname.startsWith("/preview-environments") ||
+            window.location.pathname.startsWith("/stacks") ||
+            window.location.pathname.startsWith("/databases") ||
+            window.location.pathname.startsWith("/env-groups") ||
+            window.location.pathname.startsWith("/jobs") ||
+            window.location.pathname.startsWith("/applications")
+          )
+        }
+      >
+        <LinkWrapper>
+          <ClusterIcon>
+            <svg
+              width="19"
+              height="19"
+              viewBox="0 0 19 19"
+              fill="none"
+              xmlns="http://www.w3.org/2000/svg"
+            >
+              <path
+                d="M15.207 12.4403C16.8094 12.4403 18.1092 11.1414 18.1092 9.53907C18.1092 7.93673 16.8094 6.63782 15.207 6.63782"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+              <path
+                d="M3.90217 12.4403C2.29983 12.4403 1 11.1414 1 9.53907C1 7.93673 2.29983 6.63782 3.90217 6.63782"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+              <path
+                fill-rule="evenodd"
+                clip-rule="evenodd"
+                d="M9.54993 13.4133C7.4086 13.4133 5.69168 11.6964 5.69168 9.55417C5.69168 7.41284 7.4086 5.69592 9.54993 5.69592C11.6913 5.69592 13.4082 7.41284 13.4082 9.55417C13.4082 11.6964 11.6913 13.4133 9.54993 13.4133Z"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+              <path
+                d="M6.66895 15.207C6.66895 16.8094 7.96787 18.1092 9.5702 18.1092C11.1725 18.1092 12.4715 16.8094 12.4715 15.207"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+              <path
+                d="M6.66895 3.90217C6.66895 2.29983 7.96787 1 9.5702 1C11.1725 1 12.4715 2.29983 12.4715 3.90217"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+              <path
+                fill-rule="evenodd"
+                clip-rule="evenodd"
+                d="M5.69591 9.54996C5.69591 7.40863 7.41283 5.69171 9.55508 5.69171C11.6964 5.69171 13.4133 7.40863 13.4133 9.54996C13.4133 11.6913 11.6964 13.4082 9.55508 13.4082C7.41283 13.4082 5.69591 11.6913 5.69591 9.54996Z"
+                stroke="white"
+                stroke-width="1.5"
+                stroke-linecap="round"
+                stroke-linejoin="round"
+              />
+            </svg>
+          </ClusterIcon>
+          <Tooltip title={cluster?.name}>
+            <ClusterName>{cluster?.name}</ClusterName>
+          </Tooltip>
+          <I isExpanded={isExpanded} className="material-icons">
+            arrow_drop_down
+          </I>
+          <Spacer />
+        </LinkWrapper>
+      </ClusterSelector>
+      <div onClick={() => setCurrentCluster(cluster)}>
+        {renderClusterContent(cluster)}
+      </div>
+    </>
+  );
+};
+
+const InlineSVGWrapper = styled.svg`
+  width: 32px;
+  height: 32px;
+  padding: 8px;
+  padding-left: 0;
+
+  > path {
+    fill: #ffffff;
+  }
+`;
+
+const Spacer = styled.div`
+  flex: 1;
+`;
+
 const Settings = styled.p`
   color: #ffffff44;
   width: 16px;

+ 3 - 0
dashboard/src/main/home/sidebar/Clusters.tsx

@@ -205,6 +205,7 @@ const InitializeButton = styled.div`
     background: #ffffff22;
   }
 `;
 
 const BgAccent = styled.img`
   height: 30px;
@@ -219,3 +220,5 @@ const BgAccent = styled.img`
   border: none;
   outline: none;
 `;

+ 4 - 0
dashboard/src/main/home/sidebar/ProjectSection.tsx

@@ -200,7 +200,11 @@ const Dropdown = styled.div`
   right: 13px;
   top: calc(100% + 5px);
   background: #26282f;
-  width: 199px;
+  width: 210px;
   max-height: 500px;
   border-radius: 3px;
   z-index: 999;

+ 9 - 0
dashboard/src/main/home/sidebar/Sidebar.tsx

@@ -156,6 +156,7 @@ class Sidebar extends Component<PropsType, StateType> {
               />
             </>
           )}
-          {currentProject?.preview_envs_enabled && (
-            <NavButton path="/preview-environments">
-              <InlineSVGWrapper
@@ -169,6 +170,8 @@ class Sidebar extends Component<PropsType, StateType> {
-              Preview envs
-            </NavButton>
-          )}
         </ScrollWrapper>
       );
     }
@@ -212,6 +215,7 @@ Sidebar.contextType = Context;
 
 export default withRouter(withAuth(Sidebar));
 
-const InlineSVGWrapper = styled.svg`
-  width: 32px;
-  height: 32px;
@@ -225,6 +229,11 @@ const InlineSVGWrapper = styled.svg`
 
-const ScrollWrapper = styled.div`
-  overflow-y: auto;
+const ScrollWrapper = styled.div`
+  overflow-y: auto;
+  padding-bottom: 25px;
   max-height: calc(100vh - 95px);
 `;
 

+ 4 - 6
dashboard/src/main/home/sidebar/SidebarLink.tsx

@@ -3,11 +3,9 @@ import { NavLink, NavLinkProps, useParams } from "react-router-dom";
 import { Context } from "shared/Context";
 import { useRouting } from "shared/routing";
 
-const SidebarLink: React.FC<{ path: string } & Omit<NavLinkProps, "to">> = ({
-  children,
-  path,
-  ...props
-}) => {
+const SidebarLink: React.FC<
+  { path: string; targetClusterName?: string } & Omit<NavLinkProps, "to">
+> = ({ children, path, ...props }) => {
   const params = useParams<{ namespace: string }>();
   const { getQueryParam } = useRouting();
   const { currentCluster, currentProject } = useContext(Context);
@@ -20,7 +18,7 @@ const SidebarLink: React.FC<{ path: string } & Omit<NavLinkProps, "to">> = ({
     let pathNamespace = params.namespace;
     const search = new URLSearchParams();
     if (currentCluster?.name) {
-      search.append("cluster", currentCluster.name);
+      search.append("cluster", props.targetClusterName || currentCluster.name);
     }
 
     if (currentProject?.id) {

+ 9 - 0
internal/helm/agent.go

@@ -166,6 +166,7 @@ type UpgradeReleaseConfig struct {
 	Cluster    *models.Cluster
 	Repo       repository.Repository
 	Registries []*models.Registry
+	Stack      *models.Stack
 
 	// Optional, if chart should be overriden
 	Chart *chart.Chart
@@ -222,6 +223,14 @@ func (a *Agent) UpgradeReleaseByValues(
 		return nil, err
 	}
 
+	// guard len(Revisions) to avoid an index-out-of-range panic when the
+	// stack has no recorded revisions yet
+	if conf.Stack != nil && len(conf.Stack.Revisions) > 0 {
+		conf.Values["stack"] = map[string]interface{}{
+			"enabled":  true,
+			"name":     conf.Stack.Name,
+			"revision": conf.Stack.Revisions[0].RevisionNumber,
+		}
+	}
+
 	res, err := cmd.Run(conf.Name, ch, conf.Values)
 
 	if err != nil {

+ 3 - 0
internal/helm/config.go

@@ -3,6 +3,7 @@ package helm
 import (
 	"errors"
 	"io/ioutil"
+	"time"
 
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/models"
@@ -26,6 +27,7 @@ type Form struct {
 	Storage                   string `json:"storage" form:"oneof=secret configmap memory" default:"secret"`
 	Namespace                 string `json:"namespace"`
 	AllowInClusterConnections bool
+	Timeout                   time.Duration // optional
 }
 
 // GetAgentOutOfClusterConfig creates a new Agent from outside the cluster using
@@ -38,6 +40,7 @@ func GetAgentOutOfClusterConfig(form *Form, l *logger.Logger) (*Agent, error) {
 		Repo:                      form.Repo,
 		DigitalOceanOAuth:         form.DigitalOceanOAuth,
 		AllowInClusterConnections: form.AllowInClusterConnections,
+		Timeout:                   form.Timeout,
 	}
 
 	k8sAgent, err := kubernetes.GetAgentOutOfClusterConfig(conf)

+ 3 - 0
internal/kubernetes/config.go

@@ -114,6 +114,7 @@ type OutOfClusterConfig struct {
 	Repo                      repository.Repository
 	DefaultNamespace          string // optional
 	AllowInClusterConnections bool
+	Timeout                   time.Duration // optional
 
 	// Only required if using DigitalOcean OAuth as an auth mechanism
 	DigitalOceanOAuth *oauth2.Config
@@ -135,6 +136,8 @@ func (conf *OutOfClusterConfig) ToRESTConfig() (*rest.Config, error) {
 		return nil, err
 	}
 
+	restConf.Timeout = conf.Timeout
+
 	rest.SetKubernetesDefaults(restConf)
 	return restConf, nil
 }

+ 3 - 0
internal/models/monitor.go

@@ -21,6 +21,9 @@ type MonitorTestResult struct {
 	LastRunResult     string
 	LastRunResultEnum uint
 
+	LastRecommenderRunID string
+	Archived             bool
+
 	Title   string
 	Message string
 

+ 33 - 1
internal/opa/config.yaml

@@ -56,6 +56,7 @@ prometheus:
     name: "prometheus.version"
 nginx_pod:
   kind: "pod"
+  overrideSeverity: "critical"
   match:
     namespace: ingress-nginx
     labels:
@@ -116,4 +117,35 @@ certificates:
     resource: certificates
   policies:
   - path: "./policies/certificates/expiry_two_weeks.rego"
-    name: "certificates.expiry_two_weeks"
+    name: "certificates.expiry_two_weeks"
+  - path: "./policies/certificates/expired.rego"
+    name: "certificates.expired"
+node:
+  kind: "crd_list"
+  match:
+    group: core
+    version: v1
+    resource: nodes
+  policies:
+  - path: "./policies/node/k8s_version.rego"
+    name: "node.k8s_version"
+  - path: "./policies/node/porter_run_taints.rego"
+    name: "node.porter_run_taints"
+  - path: "./policies/node/porter_run_labels.rego"
+    name: "node.porter_run_labels"
+  - path: "./policies/node/healthy.rego"
+    name: "node.healthy"
+descheduler:
+  kind: "helm_release"
+  match:
+    name: descheduler
+    namespace: kube-system
+  mustExist: true
+  policies: []
+vpa:
+  kind: "helm_release"
+  match:
+    name: vpa
+    namespace: kube-system
+  mustExist: true
+  policies: []

+ 10 - 8
internal/opa/loader.go

@@ -13,10 +13,11 @@ import (
 type ConfigFile map[string]ConfigFilePolicyCollection
 
 type ConfigFilePolicyCollection struct {
-	Kind      string             `yaml:"kind"`
-	Match     MatchParameters    `yaml:"match"`
-	MustExist bool               `yaml:"mustExist"`
-	Policies  []ConfigFilePolicy `yaml:"policies"`
+	Kind             string             `json:"kind"`
+	Match            MatchParameters    `json:"match"`
+	MustExist        bool               `json:"mustExist"`
+	OverrideSeverity string             `json:"overrideSeverity"`
+	Policies         []ConfigFilePolicy `json:"policies"`
 }
 
 type ConfigFilePolicy struct {
@@ -67,10 +68,11 @@ func LoadPolicies(configFilePathDir string) (*KubernetesPolicies, error) {
 		}
 
 		policies[name] = KubernetesOPAQueryCollection{
-			Kind:      KubernetesBuiltInKind(cfPolicyCollection.Kind),
-			Queries:   queries,
-			Match:     cfPolicyCollection.Match,
-			MustExist: cfPolicyCollection.MustExist,
+			Kind:             KubernetesBuiltInKind(cfPolicyCollection.Kind),
+			Queries:          queries,
+			Match:            cfPolicyCollection.Match,
+			OverrideSeverity: cfPolicyCollection.OverrideSeverity,
+			MustExist:        cfPolicyCollection.MustExist,
 		}
 	}
 

+ 29 - 10
internal/opa/opa.go

@@ -39,10 +39,11 @@ const (
 )
 
 type KubernetesOPAQueryCollection struct {
-	Kind      KubernetesBuiltInKind
-	Match     MatchParameters
-	MustExist bool
-	Queries   []rego.PreparedEvalQuery
+	Kind             KubernetesBuiltInKind
+	Match            MatchParameters
+	MustExist        bool
+	OverrideSeverity string
+	Queries          []rego.PreparedEvalQuery
 }
 
 type MatchParameters struct {
@@ -158,7 +159,7 @@ func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection
 						ObjectID:       fmt.Sprintf("helm_release/%s/%s/%s", collection.Match.Namespace, collection.Match.Name, "exists"),
 						CategoryName:   name,
 						PolicyVersion:  "v0.0.1",
-						PolicySeverity: "high",
+						PolicySeverity: getSeverity("high", collection),
 						PolicyTitle:    fmt.Sprintf("The helm release %s must exist", collection.Match.Name),
 						PolicyMessage:  "The helm release was not found on the cluster",
 					},
@@ -172,7 +173,7 @@ func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection
 				ObjectID:       fmt.Sprintf("helm_release/%s/%s/%s", collection.Match.Namespace, collection.Match.Name, "exists"),
 				CategoryName:   name,
 				PolicyVersion:  "v0.0.1",
-				PolicySeverity: "high",
+				PolicySeverity: getSeverity("high", collection),
 				PolicyTitle:    fmt.Sprintf("The helm release %s must exist", collection.Match.Name),
 				PolicyMessage:  "The helm release was found",
 			})
@@ -210,8 +211,10 @@ func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection
 			results, err := query.Eval(
 				context.Background(),
 				rego.EvalInput(map[string]interface{}{
-					"version": helmRelease.Chart.Metadata.Version,
-					"values":  helmRelease.Config,
+					"version":   helmRelease.Chart.Metadata.Version,
+					"values":    helmRelease.Config,
+					"name":      helmRelease.Name,
+					"namespace": helmRelease.Namespace,
 				}),
 			)
 
@@ -232,6 +235,7 @@ func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection
 					rawQueryRes,
 					fmt.Sprintf("helm_release/%s/%s/%s", helmRelease.Namespace, helmRelease.Name, rawQueryRes.PolicyID),
 					name,
+					collection,
 				))
 			}
 		}
@@ -240,6 +244,14 @@ func (runner *KubernetesOPARunner) runHelmReleaseQueries(name string, collection
 	return res, nil
 }
 
+func getSeverity(defaultSeverity string, collection KubernetesOPAQueryCollection) string {
+	if collection.OverrideSeverity != "" {
+		return collection.OverrideSeverity
+	}
+
+	return defaultSeverity
+}
+
 func (runner *KubernetesOPARunner) runPodQueries(name string, collection KubernetesOPAQueryCollection) ([]*OPARecommenderQueryResult, error) {
 	res := make([]*OPARecommenderQueryResult, 0)
 
@@ -287,6 +299,7 @@ func (runner *KubernetesOPARunner) runPodQueries(name string, collection Kuberne
 					rawQueryRes,
 					fmt.Sprintf("pod/%s/%s", pod.Namespace, pod.Name),
 					name,
+					collection,
 				))
 			}
 		}
@@ -304,6 +317,11 @@ func (runner *KubernetesOPARunner) runCRDListQueries(name string, collection Kub
 		Resource: collection.Match.Resource,
 	}
 
+	// just case on the "core" group and unset it
+	if collection.Match.Group == "core" {
+		objRes.Group = ""
+	}
+
 	crdList, err := runner.dynamicClient.Resource(objRes).Namespace(collection.Match.Namespace).List(context.Background(), v1.ListOptions{})
 
 	if err != nil {
@@ -334,6 +352,7 @@ func (runner *KubernetesOPARunner) runCRDListQueries(name string, collection Kub
 					rawQueryRes,
 					fmt.Sprintf("%s/%s/%s/%s", collection.Match.Group, collection.Match.Version, collection.Match.Resource, rawQueryRes.PolicyID),
 					name,
+					collection,
 				))
 			}
 		}
@@ -342,7 +361,7 @@ func (runner *KubernetesOPARunner) runCRDListQueries(name string, collection Kub
 	return res, nil
 }
 
-func rawQueryResToRecommenderQueryResult(rawQueryRes *rawQueryResult, objectID, categoryName string) *OPARecommenderQueryResult {
+func rawQueryResToRecommenderQueryResult(rawQueryRes *rawQueryResult, objectID, categoryName string, collection KubernetesOPAQueryCollection) *OPARecommenderQueryResult {
 	queryRes := &OPARecommenderQueryResult{
 		ObjectID:     objectID,
 		CategoryName: categoryName,
@@ -357,7 +376,7 @@ func rawQueryResToRecommenderQueryResult(rawQueryRes *rawQueryResult, objectID,
 
 	queryRes.PolicyMessage = message
 	queryRes.Allow = rawQueryRes.Allow
-	queryRes.PolicySeverity = rawQueryRes.PolicySeverity
+	queryRes.PolicySeverity = getSeverity(rawQueryRes.PolicySeverity, collection)
 	queryRes.PolicyTitle = rawQueryRes.PolicyTitle
 	queryRes.PolicyVersion = rawQueryRes.PolicyVersion
 

+ 26 - 0
internal/opa/policies/certificates/expired.rego

@@ -0,0 +1,26 @@
+package certificates.expired
+
+import future.keywords
+
+POLICY_ID := sprintf("certificates_expired_%s_%s", [input.metadata.namespace, input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "critical"
+
+POLICY_TITLE := sprintf("Certificate %s/%s should not be expired", [input.metadata.namespace, input.metadata.name])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: certificate %s/%s is not expired", [input.metadata.namespace, input.metadata.name])
+
+allow if {
+	not rfc3339_expired(input.status.notAfter)
+}
+
+FAILURE_MESSAGE contains msg if {
+	rfc3339_expired(input.status.notAfter)
+	msg := sprintf("Certificate expired at %s", [input.status.notAfter])
+}
+
+rfc3339_expired(a) if {
+	time.parse_rfc3339_ns(a) < time.now_ns()
+}

+ 36 - 0
internal/opa/policies/node/healthy.rego

@@ -0,0 +1,36 @@
+package node.healthy
+
+import future.keywords
+
+POLICY_ID := sprintf("healthy_%s", [input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "critical"
+
+POLICY_TITLE := sprintf("The node %s should be healthy", [input.metadata.name])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: this node is healthy or is younger than 10 minutes", [])
+
+# check if one of the node's conditions states that the kubelet is ready
+allow if {
+	some condition in input.status.conditions
+	condition.reason == "KubeletReady"
+	condition.status == "True"
+}
+
+# if the node was started in the last 10 minutes, we do not track it - it may 
+# be unhealthy while initializing the CNI
+allow if {
+	rfc3339_is_younger_than_10_minutes(input.metadata.creationTimestamp)
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: the node %s is not healthy", [input.metadata.name])
+}
+
+rfc3339_is_younger_than_10_minutes(a) if {
+	# add 10 minutes (in nanoseconds) to the creation timestamp and see if it's greater than current time 
+	time.parse_rfc3339_ns(a) + ((((10 * 60) * 1000) * 1000) * 1000) > time.now_ns()
+}

+ 25 - 0
internal/opa/policies/node/k8s_version.rego

@@ -0,0 +1,25 @@
+package node.k8s_version
+
+import future.keywords
+
+POLICY_ID := sprintf("k8s_version_%s", [input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+latest_stable_version := "1.21.0"
+
+POLICY_TITLE := sprintf("The Kubernetes version for node %s should be at least v%s", [input.metadata.name, latest_stable_version])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: Kubernetes version is up-to-date", [])
+
+trimmedVersion := trim_left(input.status.nodeInfo.kubeletVersion, "v")
+
+# semver.compare returns -1 if latest_stable_version < trimmedVersion
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: latest stable version is %s, but node %s is on %s", [latest_stable_version, input.metadata.name, trimmedVersion])
+}

+ 23 - 0
internal/opa/policies/node/porter_run_labels.rego

@@ -0,0 +1,23 @@
+package node.porter_run_labels
+
+import future.keywords
+
+POLICY_ID := sprintf("porter_run_labels_%s", [input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("The node %s should have the label porter.run/workload-kind", [input.metadata.name])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: this node has the label porter.run/workload-kind", [])
+
+# determine if the label porter.run/workload-kind exists
+allow if {
+	input.metadata.labels["porter.run/workload-kind"]
+}
+
+FAILURE_MESSAGE contains msg if {
+	not allow
+	msg := sprintf("Failed: the node %s does not have the label porter.run/workload-kind", [input.metadata.name])
+}

+ 41 - 0
internal/opa/policies/node/porter_run_taints.rego

@@ -0,0 +1,41 @@
+package node.porter_run_taints
+
+import future.keywords
+
+POLICY_ID := sprintf("porter_run_taints_%s", [input.metadata.name])
+
+POLICY_VERSION := "v0.0.1"
+
+POLICY_SEVERITY := "high"
+
+POLICY_TITLE := sprintf("The only taints on node %s should be porter.run/workload-kind=system", [input.metadata.name])
+
+POLICY_SUCCESS_MESSAGE := sprintf("Success: this node either has no taints, or has a taint with key porter.run/workload-kind", [])
+
+# if there are no taints, allow the condition
+allow if {
+	not input.spec.taints[0]
+}
+
+# if there is a taint with the key porter.run/workload-kind, allow the condition
+allow if {
+	input.spec.taints[0].key == "porter.run/workload-kind"
+	input.spec.taints[0].effect == "NoSchedule"
+}
+
+FAILURE_MESSAGE contains msg1 if {
+	not allow
+	msg1 := sprintf("Failed: the only permitted taints must contain the key porter.run/workload-kind", [])
+}
+
+FAILURE_MESSAGE contains msg2 if {
+	not allow
+	not input.spec.taints[0].key == "porter.run/workload-kind"
+	msg2 := sprintf("Taint has key %s", [input.spec.taints[0].key])
+}
+
+FAILURE_MESSAGE contains msg3 if {
+	not allow
+	not input.spec.taints[0].effect == "NoSchedule"
+	msg3 := sprintf("Taint has effect %s", [input.spec.taints[0].effect])
+}

+ 4 - 4
internal/opa/policies/web/web_version.rego

@@ -6,13 +6,13 @@ POLICY_ID := "web_version"
 
 POLICY_VERSION := "v0.0.1"
 
-POLICY_SEVERITY := "high"
+POLICY_SEVERITY := "low"
 
 latest_stable_version := "0.50.0"
 
-POLICY_TITLE := sprintf("The web version should be at least v%s", [latest_stable_version])
+POLICY_TITLE := sprintf("The web version for application %s/%s should be at least v%s", [input.namespace, input.name, latest_stable_version])
 
-POLICY_SUCCESS_MESSAGE := sprintf("Success: web version is up-to-date", [])
+POLICY_SUCCESS_MESSAGE := sprintf("Success: web version for %s/%s is up-to-date", [input.namespace, input.name])
 
 trimmedVersion := trim_left(input.version, "v")
 
-allow if semver.compare(latest_stable_version, trimmedVersion) == -1
+# semver.compare returns -1 if latest_stable_version < trimmedVersion; allow
+# versions at or above the latest stable release (== -1 wrongly failed exact matches)
+allow if semver.compare(latest_stable_version, trimmedVersion) <= 0
 
 FAILURE_MESSAGE contains msg if {
 	not allow
-	msg := sprintf("Failed: latest stable version is %s, but you are on %s", [latest_stable_version, trimmedVersion])
+	msg := sprintf("Failed: latest stable version is %s, but %s/%s is on %s", [latest_stable_version, input.namespace, input.name, trimmedVersion])
 }

+ 15 - 29
internal/repository/gorm/cluster.go

@@ -1,8 +1,6 @@
 package gorm
 
 import (
-	"context"
-
 	"github.com/porter-dev/porter/internal/encryption"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/repository"
@@ -120,8 +118,6 @@ func (repo *ClusterRepository) UpdateClusterCandidateCreatedClusterID(
 func (repo *ClusterRepository) CreateCluster(
 	cluster *models.Cluster,
 ) (*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	err := repo.EncryptClusterData(cluster, repo.key)
 
 	if err != nil {
@@ -130,11 +126,11 @@ func (repo *ClusterRepository) CreateCluster(
 
 	project := &models.Project{}
 
-	if err := ctxDB.Where("id = ?", cluster.ProjectID).First(&project).Error; err != nil {
+	if err := repo.db.Where("id = ?", cluster.ProjectID).First(&project).Error; err != nil {
 		return nil, err
 	}
 
-	assoc := ctxDB.Model(&project).Association("Clusters")
+	assoc := repo.db.Model(&project).Association("Clusters")
 
 	if assoc.Error != nil {
 		return nil, assoc.Error
@@ -147,13 +143,13 @@ func (repo *ClusterRepository) CreateCluster(
 	// create a token cache by default
 	cluster.TokenCache.ClusterID = cluster.ID
 
-	if err := ctxDB.Create(&cluster.TokenCache).Error; err != nil {
+	if err := repo.db.Create(&cluster.TokenCache).Error; err != nil {
 		return nil, err
 	}
 
 	cluster.TokenCacheID = cluster.TokenCache.ID
 
-	if err := ctxDB.Save(cluster).Error; err != nil {
+	if err := repo.db.Save(cluster).Error; err != nil {
 		return nil, err
 	}
 
@@ -170,19 +166,17 @@ func (repo *ClusterRepository) CreateCluster(
 func (repo *ClusterRepository) ReadCluster(
 	projectID, clusterID uint,
 ) (*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	cluster := &models.Cluster{}
 
 	// preload Clusters association
-	if err := ctxDB.Where("project_id = ? AND id = ?", projectID, clusterID).First(&cluster).Error; err != nil {
+	if err := repo.db.Where("project_id = ? AND id = ?", projectID, clusterID).First(&cluster).Error; err != nil {
 		return nil, err
 	}
 
 	cache := ints.ClusterTokenCache{}
 
 	if cluster.TokenCacheID != 0 {
-		if err := ctxDB.Where("id = ?", cluster.TokenCacheID).First(&cache).Error; err != nil {
+		if err := repo.db.Where("id = ?", cluster.TokenCacheID).First(&cache).Error; err != nil {
 			return nil, err
 		}
 	}
@@ -202,19 +196,17 @@ func (repo *ClusterRepository) ReadCluster(
 func (repo *ClusterRepository) ReadClusterByInfraID(
 	projectID, infraID uint,
 ) (*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	cluster := &models.Cluster{}
 
 	// preload Clusters association
-	if err := ctxDB.Where("project_id = ? AND infra_id = ?", projectID, infraID).First(&cluster).Error; err != nil {
+	if err := repo.db.Where("project_id = ? AND infra_id = ?", projectID, infraID).First(&cluster).Error; err != nil {
 		return nil, err
 	}
 
 	cache := ints.ClusterTokenCache{}
 
 	if cluster.TokenCacheID != 0 {
-		if err := ctxDB.Where("id = ?", cluster.TokenCacheID).First(&cache).Error; err != nil {
+		if err := repo.db.Where("id = ?", cluster.TokenCacheID).First(&cache).Error; err != nil {
 			return nil, err
 		}
 	}
@@ -235,11 +227,9 @@ func (repo *ClusterRepository) ReadClusterByInfraID(
 func (repo *ClusterRepository) ListClustersByProjectID(
 	projectID uint,
 ) ([]*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	clusters := []*models.Cluster{}
 
-	if err := ctxDB.Where("project_id = ?", projectID).Find(&clusters).Error; err != nil {
+	if err := repo.db.Where("project_id = ?", projectID).Find(&clusters).Error; err != nil {
 		return nil, err
 	}
 
@@ -254,15 +244,13 @@ func (repo *ClusterRepository) ListClustersByProjectID(
 func (repo *ClusterRepository) UpdateCluster(
 	cluster *models.Cluster,
 ) (*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	err := repo.EncryptClusterData(cluster, repo.key)
 
 	if err != nil {
 		return nil, err
 	}
 
-	if err := ctxDB.Save(cluster).Error; err != nil {
+	if err := repo.db.Save(cluster).Error; err != nil {
 		return nil, err
 	}
 
@@ -279,8 +267,6 @@ func (repo *ClusterRepository) UpdateCluster(
 func (repo *ClusterRepository) UpdateClusterTokenCache(
 	tokenCache *ints.ClusterTokenCache,
 ) (*models.Cluster, error) {
-	ctxDB := repo.db.WithContext(context.Background())
-
 	if tok := tokenCache.Token; len(tok) > 0 {
 		cipherData, err := encryption.Encrypt(tok, repo.key)
 
@@ -293,23 +279,23 @@ func (repo *ClusterRepository) UpdateClusterTokenCache(
 
 	cluster := &models.Cluster{}
 
-	if err := ctxDB.Where("id = ?", tokenCache.ClusterID).First(&cluster).Error; err != nil {
+	if err := repo.db.Where("id = ?", tokenCache.ClusterID).First(&cluster).Error; err != nil {
 		return nil, err
 	}
 
 	if cluster.TokenCacheID == 0 {
 		tokenCache.ClusterID = cluster.ID
-		if err := ctxDB.Create(tokenCache).Error; err != nil {
+		if err := repo.db.Create(tokenCache).Error; err != nil {
 			return nil, err
 		}
 		cluster.TokenCacheID = tokenCache.ID
-		if err := ctxDB.Save(cluster).Error; err != nil {
+		if err := repo.db.Save(cluster).Error; err != nil {
 			return nil, err
 		}
 	} else {
 		prev := &ints.ClusterTokenCache{}
 
-		if err := ctxDB.Where("id = ?", cluster.TokenCacheID).First(prev).Error; err != nil {
+		if err := repo.db.Where("id = ?", cluster.TokenCacheID).First(prev).Error; err != nil {
 			return nil, err
 		}
 
@@ -317,7 +303,7 @@ func (repo *ClusterRepository) UpdateClusterTokenCache(
 		prev.Expiry = tokenCache.Expiry
 		prev.ClusterID = cluster.ID
 
-		if err := ctxDB.Save(prev).Error; err != nil {
+		if err := repo.db.Save(prev).Error; err != nil {
 			return nil, err
 		}
 	}

+ 26 - 0
internal/repository/gorm/monitor.go

@@ -42,3 +42,29 @@ func (m *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.Mo
 
 	return monitor, nil
 }
+
+func (m *MonitorTestResultRepository) ArchiveMonitorTestResults(projectID, clusterID uint, recommenderID string) error {
+	query := m.db.Unscoped().Model(&models.MonitorTestResult{}).Where("project_id = ? AND cluster_id = ? AND last_recommender_run_id != ?", projectID, clusterID, recommenderID)
+
+	return query.Update("archived", true).Error
+}
+
+func (m *MonitorTestResultRepository) DeleteOldMonitorTestResults(projectID, clusterID uint, recommenderID string) error {
+	monitors := make([]*models.MonitorTestResult, 0)
+
+	query := m.db.Unscoped().Where("project_id = ? AND cluster_id = ? AND last_recommender_run_id != ?", projectID, clusterID, recommenderID)
+
+	// we need to switch on the database type to delete records older than 24 hours
+	switch m.db.Dialector.Name() {
+	case "sqlite":
+		query = query.Where(
+			"last_tested < DATETIME('now', '-1 day')",
+		)
+	case "postgres":
+		query = query.Where(
+			"last_tested < NOW() - INTERVAL '1 day'",
+		)
+	}
+
+	return query.Delete(monitors).Error
+}

+ 3 - 0
internal/repository/monitor.go

@@ -6,4 +6,7 @@ type MonitorTestResultRepository interface {
 	CreateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
 	ReadMonitorTestResult(projectID, clusterID uint, operationID string) (*models.MonitorTestResult, error)
 	UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error)
+
+	ArchiveMonitorTestResults(projectID, clusterID uint, recommenderID string) error
+	DeleteOldMonitorTestResults(projectID, clusterID uint, recommenderID string) error
 }

+ 8 - 0
internal/repository/test/monitor.go

@@ -22,3 +22,11 @@ func (n *MonitorTestResultRepository) ReadMonitorTestResult(projectID, clusterID
 func (n *MonitorTestResultRepository) UpdateMonitorTestResult(monitor *models.MonitorTestResult) (*models.MonitorTestResult, error) {
 	panic("not implemented") // TODO: Implement
 }
+
+func (n *MonitorTestResultRepository) ArchiveMonitorTestResults(projectID, clusterID uint, recommenderID string) error {
+	panic("not implemented") // TODO: Implement
+}
+
+func (n *MonitorTestResultRepository) DeleteOldMonitorTestResults(projectID, clusterID uint, recommenderID string) error {
+	panic("not implemented") // TODO: Implement
+}

+ 4 - 0
internal/validator/validator.go

@@ -2,6 +2,7 @@ package validator
 
 import (
 	"github.com/go-playground/validator/v10"
+	"k8s.io/apimachinery/pkg/util/validation"
 )
 
 // New creates a new instance of validator and sets the tag name
@@ -9,5 +10,8 @@ import (
 func New() *validator.Validate {
 	validate := validator.New()
 	validate.SetTagName("form")
+	validate.RegisterValidation("dns1123", func(fl validator.FieldLevel) bool {
+		return len(validation.IsDNS1123Label(fl.Field().String())) == 0
+	})
 	return validate
 }

+ 3 - 1
workers/jobs/helm_revisions_count_tracker.go

@@ -46,7 +46,7 @@ import (
 	"helm.sh/helm/v3/pkg/releaseutil"
 )
 
-var stepSize int = 100
+var stepSize int = 20
 
 type helmRevisionsCountTracker struct {
 	enqueueTime        time.Time
@@ -175,6 +175,7 @@ func (t *helmRevisionsCountTracker) Run() error {
 					Repo:                      t.repo,
 					DigitalOceanOAuth:         t.doConf,
 					AllowInClusterConnections: false,
+					Timeout:                   5 * time.Second,
 				})
 
 				if err != nil {
@@ -198,6 +199,7 @@ func (t *helmRevisionsCountTracker) Run() error {
 						Repo:                      t.repo,
 						DigitalOceanOAuth:         t.doConf,
 						AllowInClusterConnections: false,
+						Timeout:                   5 * time.Second,
 					}, logger.New(true, os.Stdout), 3, time.Second)
 
 					if err != nil {

+ 43 - 17
workers/jobs/recommender.go

@@ -22,6 +22,7 @@ import (
 	"github.com/porter-dev/porter/api/types"
 
 	"github.com/porter-dev/porter/ee/integrations/vault"
+	"github.com/porter-dev/porter/internal/encryption"
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/oauth"
@@ -41,6 +42,7 @@ type recommender struct {
 	clusterAndProjectIDs []clusterAndProjectID
 	categories           []string
 	policies             *opa.KubernetesPolicies
+	runRecommenderID     string
 }
 
 // RecommenderOpts holds the options required to run this job
@@ -122,8 +124,14 @@ func NewRecommender(
 		return nil, err
 	}
 
+	recommenderID, err := encryption.GenerateRandomBytes(32)
+
+	if err != nil {
+		return nil, err
+	}
+
 	return &recommender{
-		enqueueTime, db, repo, doConf, clusterIDs, parsedInput.Categories, opaPolicies,
+		enqueueTime, db, repo, doConf, clusterIDs, parsedInput.Categories, opaPolicies, string(recommenderID),
 	}, nil
 }
 
@@ -231,24 +239,38 @@ func (n *recommender) Run() error {
 
 			if err != nil {
 				if errors.Is(err, gorm.ErrRecordNotFound) {
-					monitor, err = n.repo.MonitorTestResult().CreateMonitorTestResult(n.getMonitorTestResultFromQueryResult(cluster, queryRes))
+					monitor, err = n.repo.MonitorTestResult().CreateMonitorTestResult(n.getMonitorTestResultFromQueryResult(cluster, queryRes, n.runRecommenderID))
 				} else {
 					continue
 				}
 			} else {
-				monitor, err = n.repo.MonitorTestResult().UpdateMonitorTestResult(mergeMonitorTestResultFromQueryResult(monitor, queryRes))
+				monitor, err = n.repo.MonitorTestResult().UpdateMonitorTestResult(mergeMonitorTestResultFromQueryResult(monitor, queryRes, n.runRecommenderID))
 			}
 
 			if err != nil {
 				continue
 			}
 		}
+
+		err = n.repo.MonitorTestResult().ArchiveMonitorTestResults(ids.projectID, ids.clusterID, n.runRecommenderID)
+
+		if err != nil {
+			log.Printf("error archiving test results for cluster ID %d: %v", ids.clusterID, err)
+			continue
+		}
+
+		err = n.repo.MonitorTestResult().DeleteOldMonitorTestResults(ids.projectID, ids.clusterID, n.runRecommenderID)
+
+		if err != nil {
+			log.Printf("error deleting old test results for cluster ID %d: %v", ids.clusterID, err)
+			continue
+		}
 	}
 
 	return nil
 }
 
-func (n *recommender) getMonitorTestResultFromQueryResult(cluster *models.Cluster, queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+func (n *recommender) getMonitorTestResultFromQueryResult(cluster *models.Cluster, queryRes *opa.OPARecommenderQueryResult, recommenderID string) *models.MonitorTestResult {
 	runResult := types.MonitorTestStatusSuccess
 
 	if !queryRes.Allow {
@@ -258,22 +280,24 @@ func (n *recommender) getMonitorTestResultFromQueryResult(cluster *models.Cluste
 	currTime := time.Now()
 
 	return &models.MonitorTestResult{
-		ProjectID:         cluster.ProjectID,
-		ClusterID:         cluster.ID,
-		Category:          queryRes.CategoryName,
-		ObjectID:          queryRes.ObjectID,
-		LastStatusChange:  &currTime,
-		LastTested:        &currTime,
-		LastRunResult:     string(runResult),
-		LastRunResultEnum: models.GetLastRunResultEnum(string(runResult)),
-		Title:             queryRes.PolicyTitle,
-		Message:           queryRes.PolicyMessage,
-		Severity:          queryRes.PolicySeverity,
-		SeverityEnum:      models.GetSeverityEnum(queryRes.PolicySeverity),
+		ProjectID:            cluster.ProjectID,
+		ClusterID:            cluster.ID,
+		Category:             queryRes.CategoryName,
+		ObjectID:             queryRes.ObjectID,
+		LastStatusChange:     &currTime,
+		LastTested:           &currTime,
+		LastRunResult:        string(runResult),
+		LastRunResultEnum:    models.GetLastRunResultEnum(string(runResult)),
+		LastRecommenderRunID: recommenderID,
+		Title:                queryRes.PolicyTitle,
+		Message:              queryRes.PolicyMessage,
+		Severity:             queryRes.PolicySeverity,
+		SeverityEnum:         models.GetSeverityEnum(queryRes.PolicySeverity),
+		Archived:             false,
 	}
 }
 
-func mergeMonitorTestResultFromQueryResult(monitor *models.MonitorTestResult, queryRes *opa.OPARecommenderQueryResult) *models.MonitorTestResult {
+func mergeMonitorTestResultFromQueryResult(monitor *models.MonitorTestResult, queryRes *opa.OPARecommenderQueryResult, recommenderID string) *models.MonitorTestResult {
 	runResult := types.MonitorTestStatusSuccess
 
 	if !queryRes.Allow {
@@ -293,6 +317,8 @@ func mergeMonitorTestResultFromQueryResult(monitor *models.MonitorTestResult, qu
 	monitor.Severity = queryRes.PolicySeverity
 	monitor.SeverityEnum = models.GetSeverityEnum(queryRes.PolicySeverity)
 	monitor.LastRunResultEnum = models.GetLastRunResultEnum(string(runResult))
+	monitor.LastRecommenderRunID = recommenderID
+	monitor.Archived = false
 
 	return monitor
 }

+ 2 - 0
workers/main.go

@@ -66,6 +66,8 @@ func main() {
 	log.Printf("setting max worker count to: %d\n", envDecoder.MaxWorkers)
 	log.Printf("setting max job queue count to: %d\n", envDecoder.MaxQueue)
 
+	log.Printf("legacy project ids are: %v", envDecoder.LegacyProjectIDs)
+
 	db, err := adapter.New(&envDecoder.DBConf)
 
 	if err != nil {