瀏覽代碼

envVars merge master

Soham Dessai 2 年之前
父節點
當前提交
28b65209f6
共有 100 個文件被更改，包括 3200 次插入和 808 次刪除
  1. 67 0
      .github/golangci-lint.yaml
  2. 7 7
      .github/workflows/install_script.yml
  3. 93 0
      .github/workflows/internal_tooling_stack_porter-ui.yml
  4. 1 1
      .github/workflows/old_build-dev-cli.yaml
  5. 0 0
      .github/workflows/old_dev.yaml
  6. 1 1
      .github/workflows/old_production.yaml
  7. 0 0
      .github/workflows/old_staging.yaml
  8. 0 25
      .github/workflows/porter_tf_provisioner.yml
  9. 3 1
      .github/workflows/pr_push_checks.yaml
  10. 0 0
      .github/workflows/preview_env.yml
  11. 15 10
      .github/workflows/production.yml
  12. 25 0
      .github/workflows/tf_provisioner.yml
  13. 2 0
      .gitignore
  14. 21 0
      api/client/stack.go
  15. 41 12
      api/server/authz/cluster.go
  16. 41 20
      api/server/authz/git_installation.go
  17. 2 2
      api/server/authz/release.go
  18. 1 1
      api/server/handlers/api_contract/list.go
  19. 14 12
      api/server/handlers/cluster/get_kubeconfig.go
  20. 28 22
      api/server/handlers/cluster/install_agent.go
  21. 6 5
      api/server/handlers/cluster/upgrade_agent.go
  22. 38 14
      api/server/handlers/environment/create_deployment_by_cluster.go
  23. 46 9
      api/server/handlers/environment/enable_pull_request.go
  24. 16 3
      api/server/handlers/environment/get_environment.go
  25. 33 2
      api/server/handlers/environment/update_environment_settings.go
  26. 22 10
      api/server/handlers/gitinstallation/get_contents.go
  27. 37 8
      api/server/handlers/gitinstallation/get_porter_yaml.go
  28. 14 0
      api/server/handlers/gitinstallation/list_branches.go
  29. 87 0
      api/server/handlers/gitinstallation/workflow_log_runid.go
  30. 79 0
      api/server/handlers/gitinstallation/workflow_logs.go
  31. 10 0
      api/server/handlers/infra/forms.go
  32. 3 2
      api/server/handlers/namespace/create_env_group.go
  33. 31 16
      api/server/handlers/namespace/list_releases.go
  34. 71 0
      api/server/handlers/porter_app/analytics.go
  35. 484 0
      api/server/handlers/porter_app/create.go
  36. 196 0
      api/server/handlers/porter_app/create_and_update_events.go
  37. 12 10
      api/server/handlers/porter_app/create_secret_and_open_pr.go
  38. 1 1
      api/server/handlers/porter_app/delete.go
  39. 1 1
      api/server/handlers/porter_app/get.go
  40. 153 0
      api/server/handlers/porter_app/get_logs_within_time_range.go
  41. 1 1
      api/server/handlers/porter_app/list.go
  42. 233 0
      api/server/handlers/porter_app/list_events.go
  43. 85 36
      api/server/handlers/porter_app/parse.go
  44. 94 0
      api/server/handlers/porter_app/rollback.go
  45. 1 0
      api/server/handlers/project/create.go
  46. 1 0
      api/server/handlers/project/create_test.go
  47. 5 4
      api/server/handlers/project/get_usage.go
  48. 18 0
      api/server/handlers/project_integration/create_azure.go
  49. 47 0
      api/server/handlers/project_integration/delete_gitlab.go
  50. 89 0
      api/server/handlers/project_integration/get_gitlab_porter_yaml.go
  51. 4 18
      api/server/handlers/project_integration/get_gitlab_repo_buildpack.go
  52. 3 17
      api/server/handlers/project_integration/get_gitlab_repo_contents.go
  53. 3 16
      api/server/handlers/project_integration/get_gitlab_repo_procfile.go
  54. 30 2
      api/server/handlers/project_integration/list_gitlab.go
  55. 14 5
      api/server/handlers/project_integration/list_gitlab_repo_branches.go
  56. 13 4
      api/server/handlers/project_integration/list_gitlab_repos.go
  57. 34 23
      api/server/handlers/registry/get_token.go
  58. 13 2
      api/server/handlers/registry/list_repositories.go
  59. 123 58
      api/server/handlers/release/create.go
  60. 10 8
      api/server/handlers/release/create_addon.go
  61. 1 1
      api/server/handlers/release/create_webhook.go
  62. 5 12
      api/server/handlers/release/delete.go
  63. 12 37
      api/server/handlers/release/get_controllers.go
  64. 3 2
      api/server/handlers/release/get_history.go
  65. 4 3
      api/server/handlers/release/update_image_batch.go
  66. 4 2
      api/server/handlers/release/update_rollback.go
  67. 6 4
      api/server/handlers/release/upgrade.go
  68. 45 34
      api/server/handlers/release/upgrade_webhook.go
  69. 2 2
      api/server/handlers/stack/add_application.go
  70. 2 2
      api/server/handlers/stack/create.go
  71. 1 1
      api/server/handlers/stack/delete.go
  72. 9 7
      api/server/handlers/stack/helpers.go
  73. 1 1
      api/server/handlers/stack/remove_application.go
  74. 1 1
      api/server/handlers/stack/rollback.go
  75. 1 1
      api/server/handlers/stack/update_source_put.go
  76. 0 249
      api/server/handlers/stacks/create_porter_app.go
  77. 2 1
      api/server/handlers/template/get.go
  78. 2 1
      api/server/handlers/template/get_upgrade_notes.go
  79. 17 1
      api/server/handlers/user/update_onboarding_step.go
  80. 3 2
      api/server/handlers/v1/env_group/create.go
  81. 5 3
      api/server/handlers/v1/release/upgrade.go
  82. 2 1
      api/server/handlers/v1/template/get.go
  83. 2 1
      api/server/handlers/v1/template/get_upgrade_notes.go
  84. 1 1
      api/server/router/base.go
  85. 1 1
      api/server/router/cluster.go
  86. 1 1
      api/server/router/cluster_integration.go
  87. 68 1
      api/server/router/git_installation.go
  88. 1 1
      api/server/router/helm_repo.go
  89. 1 1
      api/server/router/infra.go
  90. 1 1
      api/server/router/invite.go
  91. 21 0
      api/server/router/middleware/hydrate_trace.go
  92. 30 21
      api/server/router/middleware/usage.go
  93. 1 1
      api/server/router/namespace.go
  94. 1 1
      api/server/router/oauth_callback.go
  95. 346 0
      api/server/router/porter_app.go
  96. 1 1
      api/server/router/project.go
  97. 70 16
      api/server/router/project_integration.go
  98. 1 1
      api/server/router/project_oauth.go
  99. 1 1
      api/server/router/registry.go
  100. 1 1
      api/server/router/release.go

+ 67 - 0
.github/golangci-lint.yaml

@@ -0,0 +1,67 @@
+---
+run:
+  timeout: 5m
+  issues-exit-code: 1
+  build-tags:
+    - codeanalysis
+
+# enable exported entity commenting lint rule
+issues:
+  exclude:
+    - EXC0012
+  exclude-use-default: false
+linters-settings:
+  revive:
+    rules:
+      - name: exported
+        severity: error
+  # gocyclo:
+  #   min-complexity: 15
+  gomoddirectives:
+    replace-local: false
+  gosec:
+    excludes:
+    - G307
+
+linters:
+  # disable all default-enabled linters so nothing is mysterious
+  disable-all: true
+  # all enabled linters found at https://golangci-lint.run/usage/linters/
+  enable:
+    - errcheck
+    - gosimple
+    - govet
+    - ineffassign
+    - staticcheck
+    - typecheck
+    - unused
+    - whitespace
+    - unparam
+    - unconvert
+    - goconst
+    - misspell
+    - revive
+    - gofumpt
+    - gocyclo
+    - gomoddirectives
+    - gosec
+
+output:
+  # colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions
+  # default is "colored-line-number"
+  format: colored-line-number
+
+  # print lines of code with issue, default is true
+  print-issued-lines: true
+
+  # print linter name in the end of issue text, default is true
+  print-linter-name: true
+
+  # make issues output unique by line, default is true
+  uniq-by-line: true
+
+  # add a prefix to the output file references; default is no prefix
+  path-prefix: ""
+
+  # sorts results by: filepath, line and column
+  sort-results: false

+ 7 - 7
.github/workflows/porter_install_script.yml → .github/workflows/install_script.yml

@@ -1,8 +1,8 @@
 "on":
   push:
-    branches:
-      - master
-name: Deploy to Porter
+    tags:
+      - production
+name: Deploy Install Script to Production
 jobs:
   porter-deploy:
     runs-on: ubuntu-latest
@@ -17,9 +17,9 @@ jobs:
         uses: porter-dev/porter-update-action@v0.1.0
         with:
           app: install-script
-          cluster: "8"
-          host: https://dashboard.internal-tools.getporter.dev
+          cluster: "9"
+          host: https://dashboard.internal-tools.porter.run
           namespace: default
-          project: "1"
+          project: "5"
           tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_TOKEN_1 }}
+          token: ${{ secrets.PORTER_TOKEN_5 }}

+ 93 - 0
.github/workflows/internal_tooling_stack_porter-ui.yml

@@ -0,0 +1,93 @@
+"on":
+  push:
+    branches:
+      - master
+name: Deploy Porter to Internal Tooling
+jobs:
+  build-go:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Setup Go Cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: porter-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: porter-go-
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version-file: go.mod
+          cache: false
+      - name: Download Go Modules
+        run: go mod download
+      - name: Build Server Binary
+        run: go build -ldflags="-w -s -X 'main.Version=production'" -tags ee -o ./bin/app ./cmd/app
+      - name: Build Migration Binary
+        run: go build -ldflags '-w -s' -tags ee -o ./bin/migrate ./cmd/migrate
+      - name: Compress binaries
+        run: |
+          upx bin/* --best --lzma
+      - name: Store Binaries
+        uses: actions/upload-artifact@v3
+        with:
+          name: go-binaries
+          path: bin/
+          retention-days: 1
+  build-npm:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Setup Node
+        uses: actions/setup-node@v3
+        with:
+          node-version: 16
+      - name: Install NPM Dependencies
+        run: |
+          cd dashboard
+          npm i --legacy-peer-deps
+      - name: Run NPM Build
+        run: |
+          cd dashboard
+          npm run build
+      - name: Store NPM Static Files
+        uses: actions/upload-artifact@v3
+        with:
+          name: npm-static-files
+          path: dashboard/build/
+          retention-days: 1
+  porter-deploy:
+    runs-on: ubuntu-latest
+    needs: [build-go, build-npm]
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Get Go Binaries
+        uses: actions/download-artifact@v3
+        with:
+          name: go-binaries
+          path: bin/
+      - name: Get NPM static files
+        uses: actions/download-artifact@v3
+        with:
+          name: npm-static-files
+          path: build/
+      - name: Set Github tag
+        id: vars
+        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+      - name: Deploy stack
+        timeout-minutes: 30
+        uses: porter-dev/porter-cli-action@v0.1.0
+        with:
+          command: apply -f nonexistant-porter.yaml
+        env:
+          PORTER_CLUSTER: "11"
+          PORTER_HOST: https://dashboard.internal-tools.porter.run
+          PORTER_PROJECT: "8"
+          PORTER_STACK_NAME: porter-ui
+          PORTER_TAG: ${{ steps.vars.outputs.sha_short }}
+          PORTER_TOKEN: ${{ secrets.PORTER_STACK_8_11 }}

+ 1 - 1
.github/workflows/build-dev-cli.yaml → .github/workflows/old_build-dev-cli.yaml

@@ -1,4 +1,4 @@
-name: Deploy to dev
+name: Build Dev CLI
 on:
   push:
     branches:

+ 0 - 0
.github/workflows/dev.yaml → .github/workflows/old_dev.yaml


+ 1 - 1
.github/workflows/production.yaml → .github/workflows/old_production.yaml

@@ -1,4 +1,4 @@
-name: Deploy to production
+name: Deploy to old production
 on:
   push:
     tags:

+ 0 - 0
.github/workflows/staging.yaml → .github/workflows/old_staging.yaml


+ 0 - 25
.github/workflows/porter_tf_provisioner.yml

@@ -1,25 +0,0 @@
-"on":
-  push:
-    branches:
-    - master
-name: Deploy to Porter
-jobs:
-  porter-deploy:
-    runs-on: ubuntu-latest
-    steps:
-    - name: Checkout code
-      uses: actions/checkout@v3
-    - name: Set Github tag
-      id: vars
-      run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
-    - name: Update Porter App
-      timeout-minutes: 20
-      uses: porter-dev/porter-update-action@v0.1.0
-      with:
-        app: tf-provisioner
-        cluster: "8"
-        host: https://dashboard.internal-tools.getporter.dev
-        namespace: default
-        project: "1"
-        tag: ${{ steps.vars.outputs.sha_short }}
-        token: ${{ secrets.PORTER_TOKEN_1 }}

+ 3 - 1
.github/workflows/test-backend.yml → .github/workflows/pr_push_checks.yaml

@@ -1,4 +1,4 @@
-name: Backend CI
+name: PR Checks
 on:
   - pull_request
 jobs:
@@ -24,5 +24,7 @@ jobs:
         with:
           go-version-file: go.mod
           cache: false
+      - name: Run Go vet
+        run: go vet ./${{ matrix.folder }}/...
       - name: Run Go tests
         run: go test ./${{ matrix.folder }}/...

+ 0 - 0
.github/workflows/porter_preview_env.yml → .github/workflows/preview_env.yml


+ 15 - 10
.github/workflows/porter_production.yml → .github/workflows/production.yml

@@ -1,8 +1,8 @@
 "on":
   push:
-    branches:
-      - master
-name: Deploy to Porter
+    tags:
+      - production
+name: Deploy Porter to Production
 jobs:
   build-go:
     runs-on: ubuntu-latest
@@ -25,14 +25,18 @@ jobs:
       - name: Download Go Modules
         run: go mod download
       - name: Build Server Binary
-        run: go build -ldflags="-w -s -X 'main.Version=production'" -o ./bin/app ./cmd/app
+        run: go build -ldflags="-w -s -X 'main.Version=production'" -tags ee -o ./bin/app ./cmd/app
       - name: Build Migration Binary
-        run: go build -ldflags '-w -s' -o ./bin/migrate ./cmd/migrate
+        run: go build -ldflags '-w -s' -tags ee -o ./bin/migrate ./cmd/migrate
+      - name: Compress binaries
+        run: |
+          upx bin/* --best --lzma
       - name: Store Binaries
         uses: actions/upload-artifact@v3
         with:
           name: go-binaries
           path: bin/
+          retention-days: 1
   build-npm:
     runs-on: ubuntu-latest
     steps:
@@ -55,6 +59,7 @@ jobs:
         with:
           name: npm-static-files
           path: dashboard/build/
+          retention-days: 1
   porter-deploy:
     runs-on: ubuntu-latest
     needs: [build-go, build-npm]
@@ -78,10 +83,10 @@ jobs:
         timeout-minutes: 20
         uses: porter-dev/porter-update-action@v0.1.0
         with:
-          app: production
-          cluster: "8"
-          host: https://dashboard.internal-tools.getporter.dev
+          app: porter-ui
+          cluster: "9"
+          host: https://dashboard.internal-tools.porter.run
           namespace: default
-          project: "1"
+          project: "5"
           tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_TOKEN_1 }}
+          token: ${{ secrets.PORTER_TOKEN_5 }}

+ 25 - 0
.github/workflows/tf_provisioner.yml

@@ -0,0 +1,25 @@
+"on":
+  push:
+    tags:
+      - production
+name: Deploy TF Provisioner to Production
+jobs:
+  porter-deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Set Github tag
+        id: vars
+        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+      - name: Update Porter App
+        timeout-minutes: 20
+        uses: porter-dev/porter-update-action@v0.1.0
+        with:
+          app: tf-provisioner
+          cluster: "9"
+          host: https://dashboard.internal-tools.porter.run
+          namespace: default
+          project: "5"
+          tag: ${{ steps.vars.outputs.sha_short }}
+          token: ${{ secrets.PORTER_TOKEN_5 }}

+ 2 - 0
.gitignore

@@ -74,3 +74,5 @@ terraform.rc
 .vscode
 
 tmp
+
+tsconfig.json

+ 21 - 0
api/client/stack.go

@@ -45,3 +45,24 @@ func (c *Client) CreatePorterApp(
 
 	return resp, err
 }
+
+// CreateOrUpdatePorterAppEvent will create a porter app event if one does not exist, or else it will update the existing one if an ID is passed in the object
+func (c *Client) CreateOrUpdatePorterAppEvent(
+	ctx context.Context,
+	projectID, clusterID uint,
+	name string,
+	req *types.CreateOrUpdatePorterAppEventRequest,
+) (types.PorterAppEvent, error) {
+	resp := &types.PorterAppEvent{}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/stacks/%s/events",
+			projectID, clusterID, name,
+		),
+		req,
+		resp,
+	)
+
+	return *resp, err
+}

+ 41 - 12
api/server/authz/cluster.go

@@ -6,6 +6,8 @@ import (
 	"net/http"
 	"strings"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
@@ -16,10 +18,12 @@ import (
 	"k8s.io/client-go/dynamic"
 )
 
+type ContextKey string
+
 const (
-	KubernetesAgentCtxKey         string = "k8s-agent"
-	KubernetesDynamicClientCtxKey string = "k8s-dyn-client"
-	HelmAgentCtxKey               string = "helm-agent"
+	KubernetesAgentCtxKey         ContextKey = "k8s-agent"
+	KubernetesDynamicClientCtxKey ContextKey = "k8s-dyn-client"
+	HelmAgentCtxKey               ContextKey = "helm-agent"
 )
 
 type ClusterScopedFactory struct {
@@ -74,7 +78,7 @@ type KubernetesAgentGetter interface {
 	GetOutOfClusterConfig(cluster *models.Cluster) *kubernetes.OutOfClusterConfig
 	GetDynamicClient(r *http.Request, cluster *models.Cluster) (dynamic.Interface, error)
 	GetAgent(r *http.Request, cluster *models.Cluster, namespace string) (*kubernetes.Agent, error)
-	GetHelmAgent(r *http.Request, cluster *models.Cluster, namespace string) (*helm.Agent, error)
+	GetHelmAgent(ctx context.Context, r *http.Request, cluster *models.Cluster, namespace string) (*helm.Agent, error)
 }
 
 type OutOfClusterAgentGetter struct {
@@ -96,9 +100,18 @@ func (d *OutOfClusterAgentGetter) GetOutOfClusterConfig(cluster *models.Cluster)
 }
 
 func (d *OutOfClusterAgentGetter) GetAgent(r *http.Request, cluster *models.Cluster, namespace string) (*kubernetes.Agent, error) {
+	ctx, span := telemetry.NewSpan(r.Context(), "get-k8s-agent")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+		telemetry.AttributeKV{Key: "project-id", Value: cluster.ProjectID},
+		telemetry.AttributeKV{Key: "namespace", Value: namespace},
+	)
+
 	// look for the agent in context if cluster isnt a capi cluster
 	if cluster.ProvisionedBy != "CAPI" {
-		ctxAgentVal := r.Context().Value(KubernetesAgentCtxKey)
+		ctxAgentVal := ctx.Value(KubernetesAgentCtxKey)
 
 		if ctxAgentVal != nil {
 			if agent, ok := ctxAgentVal.(*kubernetes.Agent); ok {
@@ -115,24 +128,38 @@ func (d *OutOfClusterAgentGetter) GetAgent(r *http.Request, cluster *models.Clus
 	} else {
 		ooc.DefaultNamespace = namespace
 	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "default-namespace", Value: namespace},
+	)
 
-	agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
+	agent, err := kubernetes.GetAgentOutOfClusterConfig(ctx, ooc)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get agent: %s", err.Error())
 	}
 
-	newCtx := context.WithValue(r.Context(), KubernetesAgentCtxKey, agent)
+	newCtx := context.WithValue(ctx, KubernetesAgentCtxKey, agent)
 
-	r = r.WithContext(newCtx)
+	r = r.Clone(newCtx)
 
 	return agent, nil
 }
 
-func (d *OutOfClusterAgentGetter) GetHelmAgent(r *http.Request, cluster *models.Cluster, namespace string) (*helm.Agent, error) {
+func (d *OutOfClusterAgentGetter) GetHelmAgent(ctx context.Context, r *http.Request, cluster *models.Cluster, namespace string) (*helm.Agent, error) {
+	ctx, span := telemetry.NewSpan(ctx, "get-helm-agent")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+		telemetry.AttributeKV{Key: "project-id", Value: cluster.ProjectID},
+	)
+
+	r = r.Clone(ctx)
+
 	// look for the agent in context
-	ctxAgentVal := r.Context().Value(HelmAgentCtxKey)
+	ctxAgentVal := ctx.Value(HelmAgentCtxKey)
 
 	if ctxAgentVal != nil {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "agent-from-context", Value: true})
 		if agent, ok := ctxAgentVal.(*helm.Agent); ok {
 			return agent, nil
 		}
@@ -141,16 +168,18 @@ func (d *OutOfClusterAgentGetter) GetHelmAgent(r *http.Request, cluster *models.
 	// if helm agent not found in context, construct it from k8s agent
 	k8sAgent, err := d.GetAgent(r, cluster, namespace)
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error getting k8s agent")
 	}
 
 	if namespace == "" {
 		namespace = getNamespaceFromRequest(r)
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+
 	helmAgent, err := helm.GetAgentFromK8sAgent("secret", namespace, d.config.Logger, k8sAgent)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get Helm agent: %s", err.Error())
+		return nil, telemetry.Error(ctx, span, err, "failed to get Helm agent")
 	}
 
 	newCtx := context.WithValue(r.Context(), HelmAgentCtxKey, helmAgent)

+ 41 - 20
api/server/authz/git_installation.go

@@ -16,6 +16,7 @@ import (
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/models/integrations"
 	"github.com/porter-dev/porter/internal/oauth"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type GitInstallationScopedFactory struct {
@@ -38,29 +39,36 @@ type GitInstallationScopedMiddleware struct {
 }
 
 func (p *GitInstallationScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "middleware-git-installation")
+	defer span.End()
+
 	// read the user to perform authorization
-	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	user, _ := ctx.Value(types.UserScope).(*models.User)
 
 	// get the registry id from the URL param context
-	reqScopes, _ := r.Context().Value(types.RequestScopeCtxKey).(map[types.PermissionScope]*types.RequestAction)
+	reqScopes, _ := ctx.Value(types.RequestScopeCtxKey).(map[types.PermissionScope]*types.RequestAction)
 	gitInstallationID := reqScopes[types.GitInstallationScope].Resource.UInt
 
-	gitInstallation, err := p.config.Repo.GithubAppInstallation().ReadGithubAppInstallationByInstallationID(gitInstallationID)
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "git-installation-id", Value: gitInstallationID})
 
+	gitInstallation, err := p.config.Repo.GithubAppInstallation().ReadGithubAppInstallationByInstallationID(gitInstallationID)
 	if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrForbidden(err), true)
+		err = telemetry.Error(ctx, span, err, "git installation not found")
+		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound), true)
 		return
 	} else if err != nil {
-		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrInternal(err), true)
+		err = telemetry.Error(ctx, span, err, "git installation not found")
+		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound), true)
 		return
 	}
 
-	if err := p.doesUserHaveGitInstallationAccess(user.GithubAppIntegrationID, gitInstallationID); err != nil {
-		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrInternal(err), true)
+	if err := p.doesUserHaveGitInstallationAccess(ctx, user.GithubAppIntegrationID, gitInstallationID); err != nil {
+		err = telemetry.Error(ctx, span, err, "user does not have access to git installation")
+		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden), true)
 		return
 	}
 
-	ctx := NewGitInstallationContext(r.Context(), gitInstallation)
+	ctx = NewGitInstallationContext(ctx, gitInstallation)
 	r = r.Clone(ctx)
 	p.next.ServeHTTP(w, r)
 }
@@ -73,23 +81,26 @@ func NewGitInstallationContext(ctx context.Context, ga *integrations.GithubAppIn
 // by ensuring the installation id exists for one org or account they have access to
 // note that this makes a github API request, but the endpoint is fast so this doesn't add
 // much overhead
-func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(githubIntegrationID, gitInstallationID uint) error {
+func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(ctx context.Context, githubIntegrationID, gitInstallationID uint) error {
+	ctx, span := telemetry.NewSpan(ctx, "check-user-has-git-installation-access")
+	defer span.End()
+
 	oauthInt, err := p.config.Repo.GithubAppOAuthIntegration().ReadGithubAppOauthIntegration(githubIntegrationID)
 	if err != nil {
-		return err
+		return telemetry.Error(ctx, span, err, "unable to read github app oauth integration")
 	}
 
 	if p.config.GithubAppConf == nil {
-		return fmt.Errorf("config has invalid GithubAppConf")
+		return telemetry.Error(ctx, span, nil, "config has invalid GithubAppConf")
 	}
 
 	if _, _, err = oauth.GetAccessToken(oauthInt.SharedOAuthModel,
 		&p.config.GithubAppConf.Config,
 		oauth.MakeUpdateGithubAppOauthIntegrationFunction(oauthInt, p.config.Repo)); err != nil {
-		return err
+		return telemetry.Error(ctx, span, err, "unable to get access token")
 	}
 
-	client := github.NewClient(p.config.GithubConf.Client(oauth2.NoContext, &oauth2.Token{
+	client := github.NewClient(p.config.GithubConf.Client(ctx, &oauth2.Token{
 		AccessToken:  string(oauthInt.AccessToken),
 		RefreshToken: string(oauthInt.RefreshToken),
 		TokenType:    "Bearer",
@@ -97,9 +108,9 @@ func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(gith
 
 	accountIDs := make([]int64, 0)
 
-	AuthUser, _, err := client.Users.Get(context.Background(), "")
+	AuthUser, _, err := client.Users.Get(ctx, "")
 	if err != nil {
-		return err
+		return telemetry.Error(ctx, span, err, "unable to get authenticated user")
 	}
 
 	accountIDs = append(accountIDs, *AuthUser.ID)
@@ -110,9 +121,9 @@ func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(gith
 	}
 
 	for {
-		orgs, pages, err := client.Organizations.List(context.Background(), "", opts)
+		orgs, pages, err := client.Organizations.List(ctx, "", opts)
 		if err != nil {
-			return err
+			return telemetry.Error(ctx, span, err, "unable to list organizations")
 		}
 
 		for _, org := range orgs {
@@ -124,7 +135,19 @@ func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(gith
 		}
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "account-ids", Value: fmt.Sprintf("%v", accountIDs)})
+
 	installations, err := p.config.Repo.GithubAppInstallation().ReadGithubAppInstallationByAccountIDs(accountIDs)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "unable to read github app installations")
+	}
+
+	installationIds := make([]int64, 0)
+	for _, installation := range installations {
+		installationIds = append(installationIds, installation.InstallationID)
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "installation-ids-for-account-ids", Value: fmt.Sprintf("%v", installationIds)})
 
 	for _, installation := range installations {
 		if uint(installation.InstallationID) == gitInstallationID {
@@ -132,7 +155,5 @@ func (p *GitInstallationScopedMiddleware) doesUserHaveGitInstallationAccess(gith
 		}
 	}
 
-	return apierrors.NewErrForbidden(
-		fmt.Errorf("user does not have access to github app installation %d", gitInstallationID),
-	)
+	return telemetry.Error(ctx, span, nil, "user does not have access to github app installation")
 }

+ 2 - 2
api/server/authz/release.go

@@ -37,7 +37,7 @@ type ReleaseScopedMiddleware struct {
 func (p *ReleaseScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
-	helmAgent, err := p.agentGetter.GetHelmAgent(r, cluster, "")
+	helmAgent, err := p.agentGetter.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrInternal(err), true)
 		return
@@ -50,7 +50,7 @@ func (p *ReleaseScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Reque
 	// get the version for the application
 	version, _ := requestutils.GetURLParamUint(r, types.URLParamReleaseVersion)
 
-	release, err := helmAgent.GetRelease(name, int(version), false)
+	release, err := helmAgent.GetRelease(context.Background(), name, int(version), false)
 	if err != nil {
 		// ugly casing since at the time of this commit Helm doesn't have an errors package.
 		// so we rely on the Helm error containing "not found"

+ 1 - 1
api/server/handlers/api_contract/list.go

@@ -5,7 +5,7 @@ import (
 	"net/http"
 	"strconv"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"

+ 14 - 12
api/server/handlers/cluster/get_kubeconfig.go

@@ -1,10 +1,7 @@
 package cluster
 
 import (
-	"context"
 	"encoding/base64"
-	"errors"
-	"fmt"
 	"net/http"
 
 	"github.com/bufbuild/connect-go"
@@ -16,6 +13,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
@@ -35,36 +33,40 @@ func NewGetTemporaryKubeconfigHandler(
 }
 
 func (c *GetTemporaryKubeconfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-temporary-kubeconfig")
+	defer span.End()
+
 	if c.Config().ServerConf.DisableTemporaryKubeconfig {
-		c.HandleAPIError(w, r, apierrors.NewErrNotFound(
-			errors.New("temporary kubeconfig generation is disabled on this instance"),
-		))
+		e := telemetry.Error(ctx, span, nil, "temporary kubeconfig generation is disabled on this instance")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusNotFound))
 		return
 	}
-	ctx := r.Context()
 
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	outOfClusterConfig := c.GetOutOfClusterConfig(cluster)
 
 	if cluster.ProvisionedBy == "CAPI" {
-		kubeconfigResp, err := c.Config().ClusterControlPlaneClient.KubeConfigForCluster(context.Background(), connect.NewRequest(
+		kubeconfigResp, err := c.Config().ClusterControlPlaneClient.KubeConfigForCluster(ctx, connect.NewRequest(
 			&porterv1.KubeConfigForClusterRequest{
 				ProjectId: int64(cluster.ProjectID),
 				ClusterId: int64(cluster.ID),
 			},
 		))
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting temporary capi config: %w", err)))
+			e := telemetry.Error(ctx, span, err, "error getting temporary capi config")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 			return
 		}
 		if kubeconfigResp.Msg == nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error reading temporary capi config: %w", err)))
+			e := telemetry.Error(ctx, span, err, "error reading temporary capi config")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 			return
 		}
 		b64, err := base64.StdEncoding.DecodeString(kubeconfigResp.Msg.KubeConfig)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("unable to decode base64 kubeconfig: %w", err)))
+			e := telemetry.Error(ctx, span, err, "unable to decode base64 kubeconfig")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 			return
 		}
 		res := &types.GetTemporaryKubeconfigResponse{
@@ -74,7 +76,7 @@ func (c *GetTemporaryKubeconfigHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	kubeconfig, err := outOfClusterConfig.CreateRawConfigFromCluster()
+	kubeconfig, err := outOfClusterConfig.CreateRawConfigFromCluster(ctx)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 28 - 22
api/server/handlers/cluster/install_agent.go

@@ -17,6 +17,7 @@ import (
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/kubernetes/nodes"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -42,53 +43,61 @@ func NewInstallAgentHandler(
 }
 
 func (c *InstallAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	user, _ := r.Context().Value(types.UserScope).(*models.User)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-install-agent-handler")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	user, _ := ctx.Value(types.UserScope).(*models.User)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	k8sAgent, err := c.GetAgent(r, cluster, "porter-agent-system")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed to get kubernetes agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "porter-agent-system")
+	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, "porter-agent-system")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed to get helm agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	err = checkAndDeleteOlderAgent(k8sAgent, helmAgent)
-
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to delete older agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	chart, err := loader.LoadChartPublic(c.Config().ServerConf.DefaultAddonHelmRepoURL, "porter-agent", "")
+	chart, err := loader.LoadChartPublic(ctx, c.Config().ServerConf.DefaultAddonHelmRepoURL, "porter-agent", "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed load public porter-agent chart")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	// create namespace if not exists
 	_, err = helmAgent.K8sAgent.CreateNamespace("porter-agent-system", nil)
-
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed to get create porter-agent-system namespace")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	// add api token to values
 	jwt, err := token.GetTokenForAPI(user.ID, proj.ID)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed to get porter-agent api token")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	encoded, err := jwt.EncodeToken(c.Config().TokenConf)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "failed to encode porter-agent api token")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -139,13 +148,10 @@ func (c *InstallAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		Values:    porterAgentValues,
 	}
 
-	_, err = helmAgent.InstallChart(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
-
+	_, err = helmAgent.InstallChart(context.Background(), conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			fmt.Errorf("error installing porter-agent: %w", err), http.StatusBadRequest,
-		))
-
+		err = telemetry.Error(ctx, span, err, "error installing porter-agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -172,13 +178,13 @@ func checkAndDeleteOlderAgent(k8sAgent *kubernetes.Agent, helmAgent *helm.Agent)
 	}
 
 	// detect if the `porter-agent` release is installed
-	helmRelease, err := helmAgent.GetRelease("porter-agent", 0, false)
+	helmRelease, err := helmAgent.GetRelease(context.Background(), "porter-agent", 0, false)
 
 	if err != nil || helmRelease == nil {
 		return nil
 	}
 
-	_, err = helmAgent.UninstallChart("porter-agent")
+	_, err = helmAgent.UninstallChart(context.Background(), "porter-agent")
 
 	if err != nil {
 		return err

+ 6 - 5
api/server/handlers/cluster/upgrade_agent.go

@@ -1,6 +1,7 @@
 package cluster
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 
@@ -33,19 +34,19 @@ func NewUpgradeAgentHandler(
 
 func (c *UpgradeAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-	helmAgent, err := c.GetHelmAgent(r, cluster, "porter-agent-system")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "porter-agent-system")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
-	currRelease, err := helmAgent.GetRelease("porter-agent", 0, false)
+	currRelease, err := helmAgent.GetRelease(context.Background(), "porter-agent", 0, false)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
-	chart, err := loader.LoadChartPublic(c.Config().ServerConf.DefaultAddonHelmRepoURL, "porter-agent", "")
+	chart, err := loader.LoadChartPublic(context.Background(), c.Config().ServerConf.DefaultAddonHelmRepoURL, "porter-agent", "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -56,14 +57,14 @@ func (c *UpgradeAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	// TODO: update values
 	// newValues["redis"] =
 
-	_, err = helmAgent.UpgradeReleaseByValues(&helm.UpgradeReleaseConfig{
+	_, err = helmAgent.UpgradeReleaseByValues(context.Background(), &helm.UpgradeReleaseConfig{
 		Chart:      chart,
 		Name:       "porter-agent",
 		Values:     newValues,
 		Cluster:    cluster,
 		Repo:       c.Repo(),
 		Registries: []*models.Registry{},
-	}, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
+	}, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection, false)
 
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(

+ 38 - 14
api/server/handlers/environment/create_deployment_by_cluster.go

@@ -1,11 +1,12 @@
 package environment
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -33,21 +34,43 @@ func NewCreateDeploymentByClusterHandler(
 }
 
 func (c *CreateDeploymentByClusterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-deployment-by-cluster")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
 
 	request := &types.CreateDeploymentRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "could not decode and validate request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "repo-owner", Value: request.RepoOwner},
+		telemetry.AttributeKV{Key: "repo-name", Value: request.RepoName},
+		telemetry.AttributeKV{Key: "namespace", Value: request.Namespace},
+		telemetry.AttributeKV{Key: "pull-request-id", Value: request.PullRequestID},
+		telemetry.AttributeKV{Key: "pr-name", Value: request.GitHubMetadata.PRName},
+		telemetry.AttributeKV{Key: "commit-sha", Value: request.GitHubMetadata.CommitSHA},
+		telemetry.AttributeKV{Key: "pr-branch-from", Value: request.GitHubMetadata.PRBranchFrom},
+		telemetry.AttributeKV{Key: "pr-branch-into", Value: request.GitHubMetadata.PRBranchInto},
+	)
+
 	// read the environment to get the environment id
 	env, err := c.Repo().Environment().ReadEnvironmentByOwnerRepoName(
 		project.ID, cluster.ID, request.RepoOwner, request.RepoName,
 	)
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error reading environment by owner repo name")
+
 		if errors.Is(err, gorm.ErrRecordNotFound) {
 			c.HandleAPIError(w, r, apierrors.NewErrNotFound(
 				fmt.Errorf("error creating deployment: %w", errEnvironmentNotFound)),
@@ -61,21 +84,24 @@ func (c *CreateDeploymentByClusterHandler) ServeHTTP(w http.ResponseWriter, r *h
 
 	// create deployment on GitHub API
 	client, err := getGithubClientFromEnvironment(c.Config(), env)
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting github client from environment")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
 	// add a check for Github PR status
 	prClosed, err := isGithubPRClosed(client, request.RepoOwner, request.RepoName, int(request.PullRequestID))
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error checking if github pr is closed")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusConflict))
 		return
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "pr-closed", Value: prClosed})
+
 	if prClosed {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "pr is closed"})
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 			fmt.Errorf("attempting to create deployment for a closed github PR"), http.StatusConflict,
 		))
@@ -83,8 +109,8 @@ func (c *CreateDeploymentByClusterHandler) ServeHTTP(w http.ResponseWriter, r *h
 	}
 
 	ghDeployment, err := createGithubDeployment(client, env, request.PRBranchFrom, request.ActionID)
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating github deployment object")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusConflict))
 		return
 	}
@@ -103,20 +129,18 @@ func (c *CreateDeploymentByClusterHandler) ServeHTTP(w http.ResponseWriter, r *h
 		PRBranchFrom:   request.GitHubMetadata.PRBranchFrom,
 		PRBranchInto:   request.GitHubMetadata.PRBranchInto,
 	})
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating github deployment object")
 		// try to delete the GitHub deployment
-		_, err = client.Repositories.DeleteDeployment(
-			context.Background(),
+		_, deleteErr := client.Repositories.DeleteDeployment(
+			ctx,
 			env.GitRepoOwner,
 			env.GitRepoName,
 			ghDeployment.GetID(),
 		)
 
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("%v: %w", errGithubAPI, err),
-				http.StatusConflict))
-			return
+		if deleteErr != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "delete-err", Value: deleteErr.Error()})
 		}
 
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error creating deployment: %w", err)))

+ 46 - 9
api/server/handlers/environment/enable_pull_request.go

@@ -14,6 +14,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"gorm.io/gorm"
 )
 
@@ -34,17 +35,39 @@ func NewEnablePullRequestHandler(
 }
 
 func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-enable-pull-request")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
 
 	request := &types.PullRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		_ = telemetry.Error(ctx, span, nil, "could not decode and validate request")
 		return
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "title", Value: request.Title},
+		telemetry.AttributeKV{Key: "number", Value: request.Number},
+		telemetry.AttributeKV{Key: "repo-ower", Value: request.RepoOwner},
+		telemetry.AttributeKV{Key: "repo-name", Value: request.RepoName},
+		telemetry.AttributeKV{Key: "branch-from", Value: request.BranchFrom},
+		telemetry.AttributeKV{Key: "branch-into", Value: request.BranchInto},
+		telemetry.AttributeKV{Key: "created-at", Value: request.CreatedAt},
+		telemetry.AttributeKV{Key: "updated-at", Value: request.UpdatedAt},
+	)
+
 	env, err := c.Repo().Environment().ReadEnvironmentByOwnerRepoName(project.ID, cluster.ID, request.RepoOwner, request.RepoName)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error reading environment by owner repo name")
+
 		if errors.Is(err, gorm.ErrRecordNotFound) {
 			c.HandleAPIError(w, r, apierrors.NewErrNotFound(fmt.Errorf("environment not found in cluster and project")))
 			return
@@ -67,6 +90,8 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		}
 
 		if !found {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "cannot find branch-into in git repo branches"})
+
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("base branch '%s' is not enabled for this preview environment, please enable it "+
 					"in the settings page to continue", request.BranchInto), http.StatusBadRequest,
@@ -84,6 +109,8 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		}
 
 		if found {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "cannot find branch-from in git deploy branches"})
+
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("head branch '%s' is enabled for branch deploys for this preview environment, "+
 					"please disable it in the settings page to continue", request.BranchInto), http.StatusBadRequest,
@@ -94,26 +121,32 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 
 	client, err := getGithubClientFromEnvironment(c.Config(), env)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting github client from environment")
+
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
 	// add an extra check that the installation has permission to read this pull request
-	pr, _, err := client.PullRequests.Get(r.Context(), env.GitRepoOwner, env.GitRepoName, int(request.Number))
+	pr, _, err := client.PullRequests.Get(ctx, env.GitRepoOwner, env.GitRepoName, int(request.Number))
 	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "error getting pull request")
+
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("%v: %w", errGithubAPI, err),
 			http.StatusConflict))
 		return
 	}
 
 	if pr.GetState() == "closed" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "pr is closed"})
+
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("cannot enable deployment for closed PR"),
 			http.StatusConflict))
 		return
 	}
 
 	ghResp, err := client.Actions.CreateWorkflowDispatchEventByFileName(
-		r.Context(), env.GitRepoOwner, env.GitRepoName, fmt.Sprintf("porter_%s_env.yml", env.Name),
+		ctx, env.GitRepoOwner, env.GitRepoName, fmt.Sprintf("porter_%s_env.yml", env.Name),
 		github.CreateWorkflowDispatchEventRequest{
 			Ref: request.BranchFrom,
 			Inputs: map[string]interface{}{
@@ -124,9 +157,16 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 			},
 		},
 	)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating workflow dispatch event")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
 
 	if ghResp != nil {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "github-status-code", Value: ghResp.StatusCode})
 		if ghResp.StatusCode == 404 {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "bad github status code"})
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf(
 					"please make sure the preview environment workflow files are present in PR branch %s and are up to"+
@@ -135,6 +175,7 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 			)
 			return
 		} else if ghResp.StatusCode == 422 {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "unsuccessful-exit-reason", Value: "bad github status code"})
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf(
 					"please make sure the workflow files in PR branch %s are up to date with the default branch",
@@ -145,11 +186,6 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		}
 	}
 
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
 	// create the deployment
 	depl, err := c.Repo().Environment().CreateDeployment(&models.Deployment{
 		EnvironmentID: env.ID,
@@ -163,6 +199,7 @@ func (c *EnablePullRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		PRBranchInto:  request.BranchInto,
 	})
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating deployment in repo")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}

+ 16 - 3
api/server/handlers/environment/get_environment.go

@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -29,18 +31,29 @@ func NewGetEnvironmentHandler(
 }
 
 func (c *GetEnvironmentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-environment")
+	defer span.End()
 
-	envID, reqErr := requestutils.GetURLParamUint(r, "environment_id")
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
 
+	envID, reqErr := requestutils.GetURLParamUint(r, "environment_id")
 	if reqErr != nil {
+		_ = telemetry.Error(ctx, span, reqErr, "could not get environment id from url")
 		c.HandleAPIError(w, r, reqErr)
 		return
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "environment-id", Value: envID})
+
 	env, err := c.Repo().Environment().ReadEnvironmentByID(project.ID, cluster.ID, envID)
 	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "could not read environment by id")
 		if errors.Is(err, gorm.ErrRecordNotFound) {
 			c.HandleAPIError(w, r, apierrors.NewErrNotFound(fmt.Errorf("no such environment with ID: %d", envID)))
 			return

+ 33 - 2
api/server/handlers/environment/update_environment_settings.go

@@ -8,6 +8,8 @@ import (
 	"reflect"
 	"strings"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -36,24 +38,44 @@ func NewUpdateEnvironmentSettingsHandler(
 }
 
 func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-environment-settings")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
 
 	envID, reqErr := requestutils.GetURLParamUint(r, "environment_id")
 
 	if reqErr != nil {
+		_ = telemetry.Error(ctx, span, reqErr, "could not get environment id from url")
 		c.HandleAPIError(w, r, reqErr)
 		return
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "environment-id", Value: envID})
+
 	request := &types.UpdateEnvironmentSettingsRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		_ = telemetry.Error(ctx, span, nil, "could not decode and validate request")
 		return
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "mode", Value: request.Mode},
+		telemetry.AttributeKV{Key: "git-repo-branches", Value: request.GitRepoBranches},
+		telemetry.AttributeKV{Key: "git-deploy-branches", Value: request.GitDeployBranches},
+	)
+
 	env, err := c.Repo().Environment().ReadEnvironmentByID(project.ID, cluster.ID, envID)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "could not read environment by id")
+
 		if errors.Is(err, gorm.ErrRecordNotFound) {
 			c.HandleAPIError(w, r, apierrors.NewErrNotFound(fmt.Errorf("no such environment with ID: %d", envID)))
 			return
@@ -91,10 +113,13 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 
 	changed = !reflect.DeepEqual(env.ToEnvironmentType().GitDeployBranches, newBranches)
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "changed", Value: changed})
+
 	if changed {
 		// let us check if the webhook has access to the "push" event
 		client, err := getGithubClientFromEnvironment(c.Config(), env)
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "could not get github client")
 			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
@@ -103,6 +128,7 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 			context.Background(), env.GitRepoOwner, env.GitRepoName, env.GithubWebhookID,
 		)
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "could not get hook")
 			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
@@ -116,6 +142,8 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 			}
 		}
 
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "found", Value: found})
+
 		if !found {
 			hook.Events = append(hook.Events, "push")
 
@@ -123,6 +151,7 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 				context.Background(), env.GitRepoOwner, env.GitRepoName, env.GithubWebhookID, hook,
 			)
 			if err != nil {
+				err = telemetry.Error(ctx, span, err, "could not edit hook")
 				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 				return
 			}
@@ -140,6 +169,7 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 					errString += ": " + e.Error()
 				}
 
+				_ = telemetry.Error(ctx, span, errors.New(errString), "could not auto deploy branch")
 				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 					fmt.Errorf("error auto deploying preview branches: %s", errString), http.StatusConflict),
 				)
@@ -178,6 +208,7 @@ func (c *UpdateEnvironmentSettingsHandler) ServeHTTP(w http.ResponseWriter, r *h
 		env, err = c.Repo().Environment().UpdateEnvironment(env)
 
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "could not update environment")
 			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}

+ 22 - 10
api/server/handlers/gitinstallation/get_contents.go

@@ -11,6 +11,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type GithubGetContentsHandler struct {
@@ -29,33 +30,44 @@ func NewGithubGetContentsHandler(
 }
 
 func (c *GithubGetContentsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	request := &types.GetContentsRequest{}
-
-	ok := c.DecodeAndValidate(w, r, request)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-github-get-contents")
+	defer span.End()
 
-	if !ok {
+	request := &types.GetContentsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
-
 	if !ok {
+		err := telemetry.Error(ctx, span, nil, "owner and name params not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	branch, ok := commonutils.GetBranchParam(c, w, r)
-
 	if !ok {
+		err := telemetry.Error(ctx, span, nil, "branch param not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	client, err := GetGithubAppClientFromRequest(c.Config(), r)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "could not get github app client from request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "repo-owner", Value: owner},
+		telemetry.AttributeKV{Key: "repo-name", Value: name},
+		telemetry.AttributeKV{Key: "repo-branch", Value: branch},
+	)
+
 	repoContentOptions := github.RepositoryContentGetOptions{}
 	repoContentOptions.Ref = branch
 	_, directoryContents, resp, err := client.Repositories.GetContents(
@@ -66,8 +78,8 @@ func (c *GithubGetContentsHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		&repoContentOptions,
 	)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			err, resp.StatusCode))
+		err = telemetry.Error(ctx, span, err, "could not get contents from github")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, resp.StatusCode))
 		return
 	}
 

+ 37 - 8
api/server/handlers/gitinstallation/get_porter_yaml.go

@@ -8,11 +8,14 @@ import (
 	"github.com/google/go-github/v41/github"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/handlers/porter_app"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/telemetry"
+	"gopkg.in/yaml.v2"
 )
 
 type GithubGetPorterYamlHandler struct {
@@ -31,29 +34,36 @@ func NewGithubGetPorterYamlHandler(
 }
 
 func (c *GithubGetPorterYamlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-porter-yaml")
+	defer span.End()
 	request := &types.GetPorterYamlRequest{}
-
 	ok := c.DecodeAndValidate(w, r, request)
-
 	if !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request body")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
-	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "path", Value: request.Path})
 
+	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
 	if !ok {
+		err := telemetry.Error(ctx, span, nil, "unable to get owner and name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	branch, ok := commonutils.GetBranchParam(c, w, r)
-
 	if !ok {
+		err := telemetry.Error(ctx, span, nil, "unable to get branch")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	client, err := GetGithubAppClientFromRequest(c.Config(), r)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get github app client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -67,16 +77,35 @@ func (c *GithubGetPorterYamlHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		},
 	)
 	if err != nil {
-		http.NotFound(w, r)
+		err = telemetry.Error(ctx, span, err, "unable to get contents")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	fileData, err := resp.GetContent()
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get file data")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
-	data := b64.StdEncoding.EncodeToString([]byte(fileData))
 
+	parsed := &porter_app.PorterStackYAML{}
+	err = yaml.Unmarshal([]byte(fileData), parsed)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "invalid porter yaml format")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	// backwards compatibility so that old porter yamls are no longer valid
+	if parsed.Version != nil {
+		version := *parsed.Version
+		if version != "v1stack" {
+			err = telemetry.Error(ctx, span, nil, "porter YAML version is not supported")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+	}
+
+	data := b64.StdEncoding.EncodeToString([]byte(fileData))
 	c.WriteResult(w, r, data)
 }

+ 14 - 0
api/server/handlers/gitinstallation/list_branches.go

@@ -5,6 +5,8 @@ import (
 	"net/http"
 	"sync"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/google/go-github/v41/github"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -30,14 +32,24 @@ func NewGithubListBranchesHandler(
 }
 
 func (c *GithubListBranchesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-github-branches")
+	defer span.End()
+
 	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
 
 	if !ok {
+		_ = telemetry.Error(ctx, span, nil, "could not get owner and name from request")
 		return
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "owner", Value: owner},
+		telemetry.AttributeKV{Key: "name", Value: name},
+	)
+
 	client, err := GetGithubAppClientFromRequest(c.Config(), r)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "could not get github app client")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
@@ -49,6 +61,7 @@ func (c *GithubListBranchesHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 		},
 	})
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "could not list branches")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
@@ -104,6 +117,7 @@ func (c *GithubListBranchesHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 	wg.Wait()
 
 	if workerErr != nil {
+		err = telemetry.Error(ctx, span, workerErr, "worker error listing github branches")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}

+ 87 - 0
api/server/handlers/gitinstallation/workflow_log_runid.go

@@ -0,0 +1,87 @@
+package gitinstallation
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/commonutils"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
// GetSpecificWorkflowLogsHandler returns the logs URL for a single GitHub
// Actions workflow run, looked up by the run ID supplied in the request.
type GetSpecificWorkflowLogsHandler struct {
	handlers.PorterHandlerReadWriter
}

// NewGetSpecificWorkflowLogsHandler returns a GetSpecificWorkflowLogsHandler
// backed by the default porter read/write handler.
func NewGetSpecificWorkflowLogsHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetSpecificWorkflowLogsHandler {
	return &GetSpecificWorkflowLogsHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
	}
}
+
+func (c *GetSpecificWorkflowLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
+	if !ok {
+		return
+	}
+
+	releaseName := r.URL.Query().Get("release_name")
+	filename := r.URL.Query().Get("filename")
+	runNumberStr := r.URL.Query().Get("run_id")
+	fmt.Println(runNumberStr)
+
+	if filename == "" && releaseName == "" {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("filename and release name are both empty")))
+		return
+	}
+
+	if filename == "" {
+		if c.Config().ServerConf.InstanceName != "" {
+			filename = fmt.Sprintf("porter_%s_%s.yml", strings.Replace(
+				strings.ToLower(releaseName), "-", "_", -1),
+				strings.ToLower(c.Config().ServerConf.InstanceName),
+			)
+		} else {
+			filename = fmt.Sprintf("porter_%s.yml", strings.Replace(
+				strings.ToLower(releaseName), "-", "_", -1),
+			)
+		}
+	}
+
+	client, err := GetGithubAppClientFromRequest(c.Config(), r)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	// parse runNumber from string to int64
+	runNumber, err := strconv.ParseInt(runNumberStr, 10, 64)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	specificWorkflowRun, _, err := client.Actions.GetWorkflowRunByID(r.Context(), owner, name, runNumber)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	logsURL, _, err := client.Actions.GetWorkflowRunLogs(r.Context(), owner, name, specificWorkflowRun.GetID(), false)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	fmt.Printf("Fetched specific workflow logs URL: %v\n", logsURL.String())
+
+	c.WriteResult(w, r, logsURL.String())
+}

+ 79 - 0
api/server/handlers/gitinstallation/workflow_logs.go

@@ -0,0 +1,79 @@
+package gitinstallation
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/commonutils"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
// GetWorkflowLogsHandler returns the logs URL for the most recent GitHub
// Actions run of a porter-managed workflow file.
type GetWorkflowLogsHandler struct {
	handlers.PorterHandlerReadWriter
}

// NewGetWorkflowLogsHandler returns a GetWorkflowLogsHandler backed by the
// default porter read/write handler.
func NewGetWorkflowLogsHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetWorkflowLogsHandler {
	return &GetWorkflowLogsHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
	}
}
+
+func (c *GetWorkflowLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
+
+	if !ok {
+		return
+	}
+
+	releaseName := r.URL.Query().Get("release_name")
+	filename := r.URL.Query().Get("filename")
+
+	if filename == "" && releaseName == "" {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("filename and release name are both empty")))
+		return
+	}
+
+	if filename == "" {
+		if c.Config().ServerConf.InstanceName != "" {
+			filename = fmt.Sprintf("porter_%s_%s.yml", strings.Replace(
+				strings.ToLower(releaseName), "-", "_", -1),
+				strings.ToLower(c.Config().ServerConf.InstanceName),
+			)
+		} else {
+			filename = fmt.Sprintf("porter_%s.yml", strings.Replace(
+				strings.ToLower(releaseName), "-", "_", -1),
+			)
+		}
+	}
+
+	client, err := GetGithubAppClientFromRequest(c.Config(), r)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	latestWorkflowRun, err := commonutils.GetLatestWorkflowRun(client, owner, name, filename, "")
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	logsURL, _, err := client.Actions.GetWorkflowRunLogs(r.Context(), owner, name, latestWorkflowRun.GetID(), false)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	fmt.Printf("Fetched workflow logs URL: %v\n", logsURL.String())
+
+	c.WriteResult(w, r, logsURL.String())
+
+}

+ 10 - 0
api/server/handlers/infra/forms.go

@@ -459,6 +459,14 @@ tabs:
           value: c5.xlarge
         - label: c5.2xlarge
           value: c5.2xlarge
+        - label: c6a.large
+          value: c6a.large
+        - label: c6a.xlarge
+          value: c6a.xlarge
+        - label: c6a.2xlarge
+          value: c6a.2xlarge
+        - label: c6a.4xlarge
+          value: c6a.4xlarge
         - label: c6i.large
           value: c6i.large
         - label: c6i.xlarge
@@ -485,6 +493,8 @@ tabs:
           value: m6a.xlarge
         - label: m6a.2xlarge
           value: m6a.2xlarge
+        - label: m6a.4xlarge
+          value: m6a.4xlarge
         - label: m6i.large
           value: m6i.large
         - label: m6i.xlarge

+ 3 - 2
api/server/handlers/namespace/create_env_group.go

@@ -1,6 +1,7 @@
 package namespace
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"strings"
@@ -67,7 +68,7 @@ func (c *CreateEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, namespace)
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, namespace)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -185,7 +186,7 @@ func rolloutApplications(
 				Values:     newConfig,
 			}
 
-			_, err = helmAgent.UpgradeReleaseByValues(conf, config.DOConf, config.ServerConf.DisablePullSecretsInjection)
+			_, err = helmAgent.UpgradeReleaseByValues(context.Background(), conf, config.DOConf, config.ServerConf.DisablePullSecretsInjection, false)
 
 			if err != nil {
 				mu.Lock()

+ 31 - 16
api/server/handlers/namespace/list_releases.go

@@ -10,6 +10,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type ListReleasesHandler struct {
@@ -29,6 +30,9 @@ func NewListReleasesHandler(
 }
 
 func (c *ListReleasesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-releases")
+	defer span.End()
+
 	request := &types.ListReleasesRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
@@ -39,37 +43,48 @@ func (c *ListReleasesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		request.ReleaseListFilter = &types.ReleaseListFilter{}
 	}
 
-	namespace := r.Context().Value(types.NamespaceScope).(string)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	namespace := ctx.Value(types.NamespaceScope).(string)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "namespace", Value: namespace},
+		telemetry.AttributeKV{Key: "cluster_id", Value: cluster.ID},
+	)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := telemetry.Error(ctx, span, err, "failed to get helm agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 		return
 	}
 
-	releases, err := helmAgent.ListReleases(namespace, request.ReleaseListFilter)
+	releases, err := helmAgent.ListReleases(ctx, namespace, request.ReleaseListFilter)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := telemetry.Error(ctx, span, err, "failed to list releases")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 		return
 	}
 
 	var res types.ListReleasesResponse
 
 	for _, helmRel := range releases {
+		release := types.Release{
+			Release: helmRel,
+		}
+		telemetry.WithAttributes(span,
+			telemetry.AttributeKV{Key: "release_name", Value: helmRel.Name},
+			telemetry.AttributeKV{Key: "release_namespace", Value: helmRel.Namespace},
+		)
+
 		rel, err := c.Repo().Release().ReadRelease(cluster.ID, helmRel.Name, helmRel.Namespace)
+		if err != nil {
+			telemetry.Error(ctx, span, err, "failed to read release. Not a fatal error")
+		}
 
-		if err == nil {
-			res = append(res, &types.Release{
-				Release:       helmRel,
-				PorterRelease: rel.ToReleaseType(),
-			})
-		} else {
-			res = append(res, &types.Release{
-				Release:       helmRel,
-				PorterRelease: &types.PorterRelease{},
-			})
+		if rel != nil {
+			release.PorterRelease = rel.ToReleaseType()
 		}
+
 	}
 
 	c.WriteResult(w, r, res)

+ 71 - 0
api/server/handlers/porter_app/analytics.go

@@ -0,0 +1,71 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/analytics"
+	"github.com/porter-dev/porter/internal/models"
+)
+
// PorterAppAnalyticsHandler records stack-launch analytics events emitted by
// the dashboard during the porter app creation flow.
type PorterAppAnalyticsHandler struct {
	handlers.PorterHandlerReadWriter
}

// NewPorterAppAnalyticsHandler returns a PorterAppAnalyticsHandler backed by
// the default porter read/write handler.
func NewPorterAppAnalyticsHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *PorterAppAnalyticsHandler {
	return &PorterAppAnalyticsHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
	}
}
+
+func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	user, _ := ctx.Value(types.UserScope).(*models.User)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	request := &types.PorterAppAnalyticsRequest{}
+	if ok := v.DecodeAndValidate(w, r, request); !ok {
+		return
+	}
+
+	if request.Step == "stack-launch-start" {
+		v.Config().AnalyticsClient.Track(analytics.StackLaunchStartTrack(&analytics.StackLaunchStartOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+		}))
+	}
+
+	if request.Step == "stack-launch-complete" {
+		v.Config().AnalyticsClient.Track(analytics.StackLaunchCompleteTrack(&analytics.StackLaunchCompleteOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			StackName:              request.StackName,
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+		}))
+	}
+
+	if request.Step == "stack-launch-success" {
+		v.Config().AnalyticsClient.Track(analytics.StackLaunchSuccessTrack(&analytics.StackLaunchSuccessOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			StackName:              request.StackName,
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+		}))
+	}
+
+	v.WriteResult(w, r, user.ToUserType())
+}

+ 484 - 0
api/server/handlers/porter_app/create.go

@@ -0,0 +1,484 @@
+package porter_app
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/helm"
+	"github.com/porter-dev/porter/internal/helm/loader"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/stefanmcshane/helm/pkg/chart"
+)
+
// CreatePorterAppHandler installs or upgrades a porter application from a
// base64-encoded porter.yaml, and keeps the corresponding PorterApp DB record
// in sync.
type CreatePorterAppHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}

// NewCreatePorterAppHandler returns a CreatePorterAppHandler backed by the
// default porter handler and an out-of-cluster kubernetes agent getter.
func NewCreatePorterAppHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *CreatePorterAppHandler {
	return &CreatePorterAppHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}
+
// ServeHTTP installs a new porter application or upgrades an existing one.
//
// Flow: decode the request and stack name, build helm/k8s agents for the
// app's "porter-stack-<name>" namespace, then branch on whether a helm
// release for the stack already exists (shouldCreate). The create branch
// installs the app chart (plus an optional pre-deploy "release job" chart),
// writes the PorterApp DB record, and emits a DEPLOY event; the upgrade
// branch upgrades the charts, patches the DB record with any changed build
// settings, and emits a DEPLOY event for the next revision.
func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)

	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-porter-app")
	defer span.End()

	request := &types.CreatePorterAppRequest{}
	if ok := c.DecodeAndValidate(w, r, request); !ok {
		err := telemetry.Error(ctx, span, nil, "error decoding request")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	stackName, reqErr := requestutils.GetURLParamString(r, types.URLParamStackName)
	if reqErr != nil {
		err := telemetry.Error(ctx, span, reqErr, "error getting stack name from url")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	// every porter app lives in its own namespace named after the stack
	namespace := fmt.Sprintf("porter-stack-%s", stackName)
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "application-name", Value: stackName})

	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, namespace)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error getting helm agent")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	k8sAgent, err := c.GetAgent(r, cluster, namespace)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error getting k8s agent")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// a GetRelease error is interpreted as "release does not exist yet";
	// the error itself is intentionally not surfaced
	helmRelease, err := helmAgent.GetRelease(ctx, stackName, 0, false)
	shouldCreate := err != nil

	porterYamlBase64 := request.PorterYAMLBase64
	porterYaml, err := base64.StdEncoding.DecodeString(porterYamlBase64)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error decoding porter yaml")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	imageInfo := request.ImageInfo
	registries, err := c.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error listing registries")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// releaseValues/releaseDependencies seed the parse step: nil means "build
	// from scratch", otherwise the existing release's config is carried over
	var releaseValues map[string]interface{}
	var releaseDependencies []*chart.Dependency
	if shouldCreate || request.OverrideRelease {
		releaseValues = nil
		releaseDependencies = nil

		// this is required because when the front-end sends an update request with overrideRelease=true, it is unable to
		// get the image info from the release. unless it is explicitly provided in the request, we avoid overwriting it
		// by attempting to get the image info from the release
		if helmRelease != nil && (imageInfo.Repository == "" || imageInfo.Tag == "") {
			imageInfo = attemptToGetImageInfoFromRelease(helmRelease.Config)
		}
	} else {
		releaseValues = helmRelease.Config
		releaseDependencies = helmRelease.Chart.Metadata.Dependencies
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "image-repo", Value: imageInfo.Repository}, telemetry.AttributeKV{Key: "image-tag", Value: imageInfo.Tag})

	if request.Builder == "" {
		// attempt to get builder from db; a read error just leaves it empty
		app, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
		if err == nil {
			request.Builder = app.Builder
		}
	}
	// heroku/paketo builders need the CNB launcher injected into the chart
	injectLauncher := strings.Contains(request.Builder, "heroku") ||
		strings.Contains(request.Builder, "paketo")
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "builder", Value: request.Builder})

	// parse turns porter.yaml + image info into the app chart, its values,
	// and (optionally) values for the pre-deploy release job
	chart, values, releaseJobValues, err := parse(
		porterYaml,
		imageInfo,
		c.Config(),
		cluster.ProjectID,
		releaseValues,
		releaseDependencies,
		SubdomainCreateOpts{
			k8sAgent:       k8sAgent,
			dnsRepo:        c.Repo().DNSRecord(),
			powerDnsClient: c.Config().PowerDNSClient,
			appRootDomain:  c.Config().ServerConf.AppRootDomain,
			stackName:      stackName,
		},
		injectLauncher,
	)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error parsing porter yaml into chart and values")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	if shouldCreate {
		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "installing-application", Value: true})

		// create the namespace if it does not exist already
		_, err = k8sAgent.CreateNamespace(namespace, nil)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error creating namespace")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		// create the release job chart if it does not exist (only done by front-end currently, where we set overrideRelease=true)
		if request.OverrideRelease && releaseJobValues != nil {
			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "installing-pre-deploy-job", Value: true})
			conf, err := createReleaseJobChart(
				ctx,
				stackName,
				releaseJobValues,
				c.Config().ServerConf.DefaultApplicationHelmRepoURL,
				registries,
				cluster,
				c.Repo(),
			)
			if err != nil {
				err = telemetry.Error(ctx, span, err, "error making config for pre-deploy job chart")
				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
				return
			}
			_, err = helmAgent.InstallChart(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
			if err != nil {
				// best-effort rollback: uninstall the half-installed job chart
				err = telemetry.Error(ctx, span, err, "error installing pre-deploy job chart")
				telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "install-pre-deploy-job-error", Value: err})
				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
				_, uninstallChartErr := helmAgent.UninstallChart(ctx, fmt.Sprintf("%s-r", stackName))
				if uninstallChartErr != nil {
					uninstallChartErr = telemetry.Error(ctx, span, err, "error uninstalling pre-deploy job chart after failed install")
					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(uninstallChartErr, http.StatusInternalServerError))
				}
				return
			}
		}

		conf := &helm.InstallChartConfig{
			Chart:      chart,
			Name:       stackName,
			Namespace:  namespace,
			Values:     values,
			Cluster:    cluster,
			Repo:       c.Repo(),
			Registries: registries,
		}

		// create the app chart
		_, err = helmAgent.InstallChart(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
		if err != nil {
			// best-effort rollback of the app chart on a failed install
			err = telemetry.Error(ctx, span, err, "error installing app chart")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))

			_, err = helmAgent.UninstallChart(ctx, stackName)
			if err != nil {
				err = telemetry.Error(ctx, span, err, "error uninstalling app chart after failed install")
				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			}

			return
		}

		// NOTE(review): if ReadPorterAppByName reports "not found" via a
		// non-nil error, a brand-new app would always hit this 500 branch
		// after a successful install — confirm the repository's not-found
		// contract (empty struct vs. error)
		existing, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error reading app from DB")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		} else if existing.Name != "" {
			err = telemetry.Error(ctx, span, err, "app with name already exists in project")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
			return
		}

		app := &models.PorterApp{
			Name:      stackName,
			ClusterID: cluster.ID,
			ProjectID: project.ID,
			RepoName:  request.RepoName,
			GitRepoID: request.GitRepoID,
			GitBranch: request.GitBranch,

			BuildContext:   request.BuildContext,
			Builder:        request.Builder,
			Buildpacks:     request.Buildpacks,
			Dockerfile:     request.Dockerfile,
			ImageRepoURI:   request.ImageRepoURI,
			PullRequestURL: request.PullRequestURL,
			PorterYamlPath: request.PorterYamlPath,
		}

		// create the db entry
		porterApp, err := c.Repo().PorterApp().UpdatePorterApp(app)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error writing app to DB")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		// first revision of a new app
		_, err = createPorterAppEvent(ctx, "SUCCESS", porterApp.ID, 1, imageInfo.Tag, c.Repo().PorterAppEvent())
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error creating porter app event")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		c.WriteResult(w, r, porterApp.ToPorterAppType())
	} else {
		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "upgrading-application", Value: true})

		// create/update the release job chart
		if request.OverrideRelease {
			if releaseJobValues == nil {
				// nil job values on an override means the user removed the
				// pre-deploy job: uninstall it if it is currently installed
				releaseJobName := fmt.Sprintf("%s-r", stackName)
				_, err := helmAgent.GetRelease(ctx, releaseJobName, 0, false)
				if err == nil {
					// handle exception where the user has chosen to delete the release job
					telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deleting-pre-deploy-job", Value: true})
					_, err = helmAgent.UninstallChart(ctx, releaseJobName)
					if err != nil {
						err = telemetry.Error(ctx, span, err, "error uninstalling pre-deploy job chart")
						c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
						return
					}
				}
			} else {
				// install the job chart if missing, otherwise upgrade it
				releaseJobName := fmt.Sprintf("%s-r", stackName)
				helmRelease, err := helmAgent.GetRelease(ctx, releaseJobName, 0, false)
				if err != nil {
					telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "creating-pre-deploy-job", Value: true})
					conf, err := createReleaseJobChart(
						ctx,
						stackName,
						releaseJobValues,
						c.Config().ServerConf.DefaultApplicationHelmRepoURL,
						registries,
						cluster,
						c.Repo(),
					)
					if err != nil {
						err = telemetry.Error(ctx, span, err, "error making config for pre-deploy job chart")
						c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
						return
					}

					_, err = helmAgent.InstallChart(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
					if err != nil {
						// best-effort rollback of the job chart on failure
						err = telemetry.Error(ctx, span, err, "error installing pre-deploy job chart")
						telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "install-pre-deploy-job-error", Value: err})
						c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
						_, uninstallChartErr := helmAgent.UninstallChart(ctx, fmt.Sprintf("%s-r", stackName))
						if uninstallChartErr != nil {
							uninstallChartErr = telemetry.Error(ctx, span, err, "error uninstalling pre-deploy job chart after failed install")
							c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(uninstallChartErr, http.StatusInternalServerError))
						}
						return
					}
				} else {
					telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "updating-pre-deploy-job", Value: true})
					chart, err := loader.LoadChartPublic(ctx, c.Config().Metadata.DefaultAppHelmRepoURL, "job", "")
					if err != nil {
						err = telemetry.Error(ctx, span, err, "error loading latest job chart")
						c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
						return
					}

					conf := &helm.UpgradeReleaseConfig{
						Name:       helmRelease.Name,
						Cluster:    cluster,
						Repo:       c.Repo(),
						Registries: registries,
						Values:     releaseJobValues,
						Chart:      chart,
					}
					_, err = helmAgent.UpgradeReleaseByValues(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection, false)
					if err != nil {
						err = telemetry.Error(ctx, span, err, "error upgrading pre-deploy job chart")
						c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
						return
					}
				}
			}
		}

		// update the app chart
		conf := &helm.InstallChartConfig{
			Chart:      chart,
			Name:       stackName,
			Namespace:  namespace,
			Values:     values,
			Cluster:    cluster,
			Repo:       c.Repo(),
			Registries: registries,
		}

		// update the chart
		_, err = helmAgent.UpgradeInstallChart(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error upgrading application")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
			return
		}

		// update the DB entry
		app, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error reading app from DB")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		// only overwrite fields the request actually supplied; the literal
		// string "null" is the front-end's sentinel for "clear this field"
		if request.RepoName != "" {
			app.RepoName = request.RepoName
		}
		if request.GitBranch != "" {
			app.GitBranch = request.GitBranch
		}
		if request.BuildContext != "" {
			app.BuildContext = request.BuildContext
		}
		if request.Builder != "" {
			if request.Builder == "null" {
				app.Builder = ""
			} else {
				app.Builder = request.Builder
			}
		}
		if request.Buildpacks != "" {
			if request.Buildpacks == "null" {
				app.Buildpacks = ""
			} else {
				app.Buildpacks = request.Buildpacks
			}
		}
		if request.Dockerfile != "" {
			if request.Dockerfile == "null" {
				app.Dockerfile = ""
			} else {
				app.Dockerfile = request.Dockerfile
			}
		}
		if request.ImageRepoURI != "" {
			app.ImageRepoURI = request.ImageRepoURI
		}
		if request.PullRequestURL != "" {
			app.PullRequestURL = request.PullRequestURL
		}

		telemetry.WithAttributes(
			span,
			telemetry.AttributeKV{Key: "updated-repo-name", Value: app.RepoName},
			telemetry.AttributeKV{Key: "updated-git-branch", Value: app.GitBranch},
			telemetry.AttributeKV{Key: "updated-build-context", Value: app.BuildContext},
			telemetry.AttributeKV{Key: "updated-builder", Value: app.Builder},
			telemetry.AttributeKV{Key: "updated-buildpacks", Value: app.Buildpacks},
			telemetry.AttributeKV{Key: "updated-dockerfile", Value: app.Dockerfile},
			telemetry.AttributeKV{Key: "updated-image-repo-uri", Value: app.ImageRepoURI},
			telemetry.AttributeKV{Key: "updated-pull-request-url", Value: app.PullRequestURL},
		)

		updatedPorterApp, err := c.Repo().PorterApp().UpdatePorterApp(app)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error writing updated app to DB")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		// next revision number is the current helm release version + 1
		_, err = createPorterAppEvent(ctx, "SUCCESS", updatedPorterApp.ID, helmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error creating porter app event")
			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}

		c.WriteResult(w, r, updatedPorterApp.ToPorterAppType())
	}
}
+
+// createPorterAppEvent creates an event for use in the activity feed
+func createPorterAppEvent(ctx context.Context, status string, appID uint, revision int, tag string, repo repository.PorterAppEventRepository) (*models.PorterAppEvent, error) {
+	event := models.PorterAppEvent{
+		ID:                 uuid.New(),
+		Status:             status,
+		Type:               "DEPLOY",
+		TypeExternalSource: "KUBERNETES",
+		PorterAppID:        appID,
+		Metadata: map[string]any{
+			"revision":  revision,
+			"image_tag": tag,
+		},
+	}
+
+	err := repo.CreateEvent(ctx, &event)
+	if err != nil {
+		return nil, err
+	}
+
+	if event.ID == uuid.Nil {
+		return nil, err
+	}
+
+	return &event, nil
+}
+
+func createReleaseJobChart(
+	ctx context.Context,
+	stackName string,
+	values map[string]interface{},
+	repoUrl string,
+	registries []*models.Registry,
+	cluster *models.Cluster,
+	repo repository.Repository,
+) (*helm.InstallChartConfig, error) {
+	chart, err := loader.LoadChartPublic(ctx, repoUrl, "job", "")
+	if err != nil {
+		return nil, err
+	}
+
+	releaseName := fmt.Sprintf("%s-r", stackName)
+	namespace := fmt.Sprintf("porter-stack-%s", stackName)
+
+	return &helm.InstallChartConfig{
+		Chart:      chart,
+		Name:       releaseName,
+		Namespace:  namespace,
+		Values:     values,
+		Cluster:    cluster,
+		Repo:       repo,
+		Registries: registries,
+	}, nil
+}

+ 196 - 0
api/server/handlers/porter_app/create_and_update_events.go

@@ -0,0 +1,196 @@
+package porter_app
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+type CreateUpdatePorterAppEventHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+func NewCreateUpdatePorterAppEventHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateUpdatePorterAppEventHandler {
+	return &CreateUpdatePorterAppEventHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (p *CreateUpdatePorterAppEventHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-post-porter-app-event")
+	defer span.End()
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: int(cluster.ID)},
+		telemetry.AttributeKV{Key: "project-id", Value: int(cluster.ProjectID)},
+	)
+
+	request := &types.CreateOrUpdatePorterAppEventRequest{}
+	if ok := p.DecodeAndValidate(w, r, request); !ok {
+		e := telemetry.Error(ctx, span, nil, "error decoding request")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	stackName, reqErr := requestutils.GetURLParamString(r, types.URLParamStackName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing stack name from url")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "porter-app-name", Value: stackName},
+		telemetry.AttributeKV{Key: "porter-app-event-type-id", Value: request.Type},
+		telemetry.AttributeKV{Key: "porter-app-event-status", Value: request.Status},
+		telemetry.AttributeKV{Key: "porter-app-event-external-source", Value: request.TypeExternalSource},
+		telemetry.AttributeKV{Key: "porter-app-event-id", Value: request.ID},
+	)
+
+	if request.ID == "" {
+		event, err := p.createNewAppEvent(ctx, *cluster, stackName, request.Status, string(request.Type), request.TypeExternalSource, request.Metadata)
+		if err != nil {
+			e := telemetry.Error(ctx, span, err, "error creating new app event")
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+			return
+		}
+		p.WriteResult(w, r, event)
+		return
+	}
+
+	event, err := p.updateExistingAppEvent(ctx, *cluster, stackName, *request)
+	if err != nil {
+		e := telemetry.Error(ctx, span, err, "error creating new app event")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+	p.WriteResult(w, r, event)
+}
+
+// createNewAppEvent will create a new app event for the given porter app name. If the app event is an agent event, then it will be created only if there is no existing event which has the agent ID. In the case that an existing event is found, that will be returned instead
+func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Context, cluster models.Cluster, porterAppName string, status string, eventType string, externalSource string, requestMetadata map[string]any) (types.PorterAppEvent, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-porter-app-event")
+	defer span.End()
+
+	app, err := p.Repo().PorterApp().ReadPorterAppByName(cluster.ID, porterAppName)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error retrieving porter app by name for cluster")
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "porter-app-id", Value: app.ID},
+		telemetry.AttributeKV{Key: "porter-app-name", Value: porterAppName},
+		telemetry.AttributeKV{Key: "cluster-id", Value: int(cluster.ID)},
+		telemetry.AttributeKV{Key: "project-id", Value: int(cluster.ProjectID)},
+	)
+
+	if eventType == string(types.PorterAppEventType_AppEvent) {
+		// Agent has no way to know what the porter app event id is, so if we must dedup here
+		// TODO: create a filter to filter by only agent events. Not an issue now as app events are deduped per hour on the agent side
+		if agentEventID, ok := requestMetadata["agent_event_id"]; ok {
+			existingEvents, _, err := p.Repo().PorterAppEvent().ListEventsByPorterAppID(ctx, app.ID)
+			if err != nil {
+				return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error listing porter app events for event type")
+			}
+
+			for _, existingEvent := range existingEvents {
+				if existingEvent.Type == eventType {
+					existingAgentEventID, ok := existingEvent.Metadata["agent_event_id"]
+					if !ok {
+						continue
+					}
+					if existingAgentEventID == 0 {
+						continue
+					}
+					if existingAgentEventID == agentEventID {
+						return existingEvent.ToPorterAppEvent(), nil
+					}
+				}
+			}
+		}
+	}
+
+	event := models.PorterAppEvent{
+		ID:                 uuid.New(),
+		Status:             status,
+		Type:               eventType,
+		TypeExternalSource: externalSource,
+		PorterAppID:        app.ID,
+		Metadata:           make(map[string]any),
+	}
+
+	for k, v := range requestMetadata {
+		event.Metadata[k] = v
+	}
+
+	err = p.Repo().PorterAppEvent().CreateEvent(ctx, &event)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error creating porter app event")
+	}
+
+	if event.ID == uuid.Nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, nil, "porter app event not found")
+	}
+
+	return event.ToPorterAppEvent(), nil
+}
+
+func (p *CreateUpdatePorterAppEventHandler) updateExistingAppEvent(ctx context.Context, cluster models.Cluster, porterAppName string, submittedEvent types.CreateOrUpdatePorterAppEventRequest) (types.PorterAppEvent, error) {
+	ctx, span := telemetry.NewSpan(ctx, "update-porter-app-event")
+	defer span.End()
+
+	app, err := p.Repo().PorterApp().ReadPorterAppByName(cluster.ID, porterAppName)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error retrieving porter app by name for cluster")
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "porter-app-id", Value: app.ID},
+		telemetry.AttributeKV{Key: "porter-app-name", Value: porterAppName},
+		telemetry.AttributeKV{Key: "cluster-id", Value: int(cluster.ID)},
+		telemetry.AttributeKV{Key: "project-id", Value: int(cluster.ProjectID)},
+	)
+
+	if submittedEvent.ID == "" {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, nil, "porter app event id is required")
+	}
+	submittedEventID, err := uuid.Parse(submittedEvent.ID)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error parsing porter app event id as uuid")
+	}
+
+	existingAppEvent, err := p.Repo().PorterAppEvent().ReadEvent(ctx, submittedEventID)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error retrieving porter app event by id")
+	}
+
+	if submittedEvent.Status != "" {
+		existingAppEvent.Status = submittedEvent.Status
+	}
+
+	if submittedEvent.Metadata != nil {
+		for k, v := range submittedEvent.Metadata {
+			existingAppEvent.Metadata[k] = v
+		}
+	}
+
+	err = p.Repo().PorterAppEvent().UpdateEvent(ctx, &existingAppEvent)
+	if err != nil {
+		return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error updating porter app event")
+	}
+
+	return existingAppEvent.ToPorterAppEvent(), nil
+}

+ 12 - 10
api/server/handlers/stacks/create_secret_and_open_pr.go → api/server/handlers/porter_app/create_secret_and_open_pr.go

@@ -1,4 +1,4 @@
-package stacks
+package porter_app
 
 import (
 	"errors"
@@ -83,15 +83,17 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	var pr *github.PullRequest
 	if request.OpenPr {
 		pr, err = actions.OpenGithubPR(&actions.GithubPROpts{
-			Client:        client,
-			GitRepoOwner:  request.GithubRepoOwner,
-			GitRepoName:   request.GithubRepoName,
-			StackName:     stackName,
-			ProjectID:     project.ID,
-			ClusterID:     cluster.ID,
-			ServerURL:     c.Config().ServerConf.ServerURL,
-			DefaultBranch: request.Branch,
-			SecretName:    secretName,
+			Client:         client,
+			GitRepoOwner:   request.GithubRepoOwner,
+			GitRepoName:    request.GithubRepoName,
+			StackName:      stackName,
+			ProjectID:      project.ID,
+			ClusterID:      cluster.ID,
+			ServerURL:      c.Config().ServerConf.ServerURL,
+			DefaultBranch:  request.Branch,
+			SecretName:     secretName,
+			PorterYamlPath: request.PorterYamlPath,
+			Body:           "Hello 👋 from Porter! Please merge this PR to finish setting up your application.",
 		})
 	}
 

+ 1 - 1
api/server/handlers/stacks/delete_porter_app.go → api/server/handlers/porter_app/delete.go

@@ -1,4 +1,4 @@
-package stacks
+package porter_app
 
 import (
 	"net/http"

+ 1 - 1
api/server/handlers/stacks/get_porter_app.go → api/server/handlers/porter_app/get.go

@@ -1,4 +1,4 @@
-package stacks
+package porter_app
 
 import (
 	"net/http"

+ 153 - 0
api/server/handlers/porter_app/get_logs_within_time_range.go

@@ -0,0 +1,153 @@
+package porter_app
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	porter_agent "github.com/porter-dev/porter/internal/kubernetes/porter_agent/v2"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+	v1 "k8s.io/api/core/v1"
+)
+
+type GetLogsWithinTimeRangeHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+func NewGetLogsWithinTimeRangeHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetLogsWithinTimeRangeHandler {
+	return &GetLogsWithinTimeRangeHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+func (c *GetLogsWithinTimeRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-logs-within-time-range")
+	defer span.End()
+	r = r.Clone(ctx)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &types.GetChartLogsWithinTimeRangeRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		return
+	}
+
+	if request.StartRange.IsZero() || request.EndRange.IsZero() {
+		err := telemetry.Error(ctx, span, nil, "must provide start and end range")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get agent"), http.StatusInternalServerError))
+		return
+	}
+
+	// get agent service
+	agentSvc, err := porter_agent.GetAgentService(agent.Clientset)
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get agent service")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get agent service"), http.StatusInternalServerError))
+		return
+	}
+
+	podValuesRequest := &types.GetPodValuesRequest{
+		StartRange:  &request.StartRange,
+		EndRange:    &request.EndRange,
+		Namespace:   request.Namespace,
+		MatchPrefix: request.ChartName,
+		Revision:    request.Revision,
+	}
+
+	var podSelector string
+	if request.ChartName == "" {
+		if request.PodSelector == "" {
+			err = telemetry.Error(ctx, span, nil, "must provide either chart name or pod selector")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+		podSelector = request.PodSelector
+	} else {
+		// get the pod values which will be used to get the correct pod selector
+		podVals, err := porter_agent.GetPodValues(agent.Clientset, agentSvc, podValuesRequest)
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "unable to get pod values")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		if len(podVals) == 0 {
+			err = telemetry.Error(ctx, span, nil, "no pods found within timerange")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+			return
+		}
+		if len(podVals) == 1 {
+			podSelector = podVals[0]
+		} else {
+			// TODO: why are pods being returned from get pod values whose timestamps don't overlap with the search range??
+			// hacky workaround for the above bug, only for jobs - get the pods, and then filter them by timestamp
+			var latestPod *v1.Pod
+			for _, v := range podVals {
+				name := strings.Split(v, "-hook")[0] + "-hook"
+				pods, err := agent.GetJobPods(request.Namespace, name)
+				if err != nil {
+					_ = telemetry.Error(ctx, span, err, "unable to get pods for job")
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get pods for job"), http.StatusInternalServerError))
+					return
+				}
+				for _, pod := range pods {
+					if pod.GetCreationTimestamp().Time.After(request.StartRange) && pod.GetCreationTimestamp().Time.Before(request.EndRange) {
+						if latestPod == nil || pod.GetCreationTimestamp().Time.After(latestPod.GetCreationTimestamp().Time) {
+							latestPod = &pod
+						}
+					}
+				}
+			}
+			if latestPod == nil {
+				err = telemetry.Error(ctx, span, nil, "no pods found within timerange")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+				return
+			}
+			podSelector = latestPod.Name
+		}
+	}
+
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "pod-selector", Value: podSelector},
+		telemetry.AttributeKV{Key: "start-range", Value: request.StartRange.String()},
+		telemetry.AttributeKV{Key: "end-range", Value: request.EndRange.String()},
+	)
+
+	logRequest := &types.GetLogRequest{
+		Limit:       request.Limit,
+		StartRange:  &request.StartRange,
+		EndRange:    &request.EndRange,
+		Revision:    request.Revision,
+		PodSelector: podSelector,
+		Namespace:   request.Namespace,
+	}
+
+	logs, err := porter_agent.GetHistoricalLogs(agent.Clientset, agentSvc, logRequest)
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get logs")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get logs for pod selector %s", podSelector), http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, logs)
+}

+ 1 - 1
api/server/handlers/stacks/list_porter_app.go → api/server/handlers/porter_app/list.go

@@ -1,4 +1,4 @@
-package stacks
+package porter_app
 
 import (
 	"net/http"

+ 233 - 0
api/server/handlers/porter_app/list_events.go

@@ -0,0 +1,233 @@
+package porter_app
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strconv"
+
+	"github.com/bradleyfalzon/ghinstallation/v2"
+	"github.com/google/go-github/v41/github"
+	"github.com/google/uuid"
+	"github.com/gorilla/schema"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository/gorm/helpers"
+	"github.com/porter-dev/porter/internal/telemetry"
+	"gorm.io/gorm"
+)
+
+type PorterAppEventListHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+func NewPorterAppEventListHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *PorterAppEventListHandler {
+	return &PorterAppEventListHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+func (p *PorterAppEventListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-porter-app-events")
+	defer span.End()
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: int(cluster.ID)},
+		telemetry.AttributeKV{Key: "project-id", Value: int(cluster.ProjectID)},
+	)
+
+	stackName, reqErr := requestutils.GetURLParamString(r, types.URLParamStackName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, nil, "error parsing stack name from url")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	pr := types.PaginationRequest{}
+	d := schema.NewDecoder()
+	err := d.Decode(&pr, r.URL.Query())
+	if err != nil {
+		e := telemetry.Error(ctx, span, nil, "error decoding request")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	app, err := p.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	porterAppEvents, paginatedResult, err := p.Repo().PorterAppEvent().ListEventsByPorterAppID(ctx, app.ID, helpers.WithPageSize(20), helpers.WithPage(int(pr.Page)))
+	if err != nil {
+		if !errors.Is(err, gorm.ErrRecordNotFound) {
+			e := telemetry.Error(ctx, span, nil, "error listing porter app events by porter app id")
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+			return
+		}
+	}
+
+	for idx, appEvent := range porterAppEvents {
+		if appEvent.Status == "PROGRESSING" {
+			pae, err := p.updateExistingAppEvent(ctx, *cluster, stackName, *appEvent)
+			if err != nil {
+				e := telemetry.Error(ctx, span, nil, "unable to update existing porter app event")
+				p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+				return
+			}
+			porterAppEvents[idx] = &pae
+		}
+	}
+
+	res := struct {
+		Events []types.PorterAppEvent `json:"events"`
+		types.PaginationResponse
+	}{
+		PaginationResponse: types.PaginationResponse(paginatedResult),
+	}
+	res.Events = make([]types.PorterAppEvent, 0)
+
+	for _, porterApp := range porterAppEvents {
+		if porterApp == nil {
+			continue
+		}
+		pa := porterApp.ToPorterAppEvent()
+		res.Events = append(res.Events, pa)
+	}
+	p.WriteResult(w, r, res)
+}
+
+func (p *PorterAppEventListHandler) updateExistingAppEvent(ctx context.Context, cluster models.Cluster, stackName string, appEvent models.PorterAppEvent) (models.PorterAppEvent, error) {
+	ctx, span := telemetry.NewSpan(ctx, "update-porter-app-event")
+	defer span.End()
+
+	if appEvent.ID == uuid.Nil {
+		return models.PorterAppEvent{}, telemetry.Error(ctx, span, nil, "porter app event id is nil when updating")
+	}
+
+	event, err := p.Repo().PorterAppEvent().ReadEvent(ctx, appEvent.ID)
+	if err != nil {
+		return models.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error retrieving porter app by name for cluster")
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "porter-app-id", Value: event.PorterAppID},
+		telemetry.AttributeKV{Key: "porter-app-event-id", Value: event.ID.String()},
+		telemetry.AttributeKV{Key: "porter-app-event-status", Value: event.Status},
+		telemetry.AttributeKV{Key: "cluster-id", Value: int(cluster.ID)},
+		telemetry.AttributeKV{Key: "project-id", Value: int(cluster.ProjectID)},
+	)
+
+	if appEvent.Type == string(types.PorterAppEventType_Build) && appEvent.TypeExternalSource == "GITHUB" {
+		err = p.updateBuildEvent_Github(ctx, &event)
+		if err != nil {
+			return models.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error updating porter app event for github build")
+		}
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-event-updated-status", Value: event.Status})
+
+	err = p.Repo().PorterAppEvent().UpdateEvent(ctx, &event)
+	if err != nil {
+		return models.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error creating porter app event")
+	}
+
+	if event.ID == uuid.Nil {
+		return models.PorterAppEvent{}, telemetry.Error(ctx, span, nil, "porter app event not found")
+	}
+
+	return event, nil
+}
+
+func (p *PorterAppEventListHandler) updateBuildEvent_Github(ctx context.Context, event *models.PorterAppEvent) error {
+	ctx, span := telemetry.NewSpan(ctx, "update-porter-app-build-event")
+	defer span.End()
+
+	repoOrg, ok := event.Metadata["org"].(string)
+	if !ok {
+		return telemetry.Error(ctx, span, nil, "error retrieving repo org from metadata")
+	}
+
+	repoName, ok := event.Metadata["repo"].(string)
+	if !ok {
+		return telemetry.Error(ctx, span, nil, "error retrieving repo name from metadata")
+	}
+
+	actionRunIDIface, ok := event.Metadata["action_run_id"]
+	if !ok {
+		return telemetry.Error(ctx, span, nil, "error retrieving action run id from metadata")
+	}
+	actionRunID, ok := actionRunIDIface.(float64)
+	if !ok {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "action-run-id-type", Value: reflect.TypeOf(actionRunIDIface).String()})
+		return telemetry.Error(ctx, span, nil, "error converting action run id to int")
+	}
+
+	accountIDIface, ok := event.Metadata["github_account_id"]
+	if !ok {
+		return telemetry.Error(ctx, span, nil, "error retrieving github account id from metadata")
+	}
+	githubAccountID, ok := accountIDIface.(float64)
+	if !ok {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "github-account-id-type", Value: reflect.TypeOf(accountIDIface).String()})
+		return telemetry.Error(ctx, span, nil, "error converting github account id to int")
+	}
+
+	// read the environment to get the environment id
+	env, err := p.Repo().GithubAppInstallation().ReadGithubAppInstallationByAccountID(int64(githubAccountID))
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error reading github environment by owner repo name")
+	}
+
+	ghClient, err := getGithubClientFromEnvironment(p.Config(), env.InstallationID)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error getting github client using porter application")
+	}
+
+	actionRun, _, err := ghClient.Actions.GetWorkflowRunByID(ctx, repoOrg, repoName, int64(actionRunID))
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error getting github action run by id")
+	}
+
+	if *actionRun.Status == "completed" {
+		if *actionRun.Conclusion == "success" {
+			event.Status = "SUCCESS"
+		} else {
+			event.Status = "FAILED"
+		}
+	}
+
+	return nil
+}
+
+func getGithubClientFromEnvironment(config *config.Config, installationID int64) (*github.Client, error) {
+	// get the github app client
+	ghAppId, err := strconv.Atoi(config.ServerConf.GithubAppID)
+	if err != nil {
+		return nil, fmt.Errorf("malformed GITHUB_APP_ID in server configuration: %w", err)
+	}
+
+	// authenticate as github app installation
+	itr, err := ghinstallation.New(
+		http.DefaultTransport,
+		int64(ghAppId),
+		installationID,
+		config.ServerConf.GithubAppSecret,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("error in creating github client from preview environment: %w", err)
+	}
+
+	return github.NewClient(&http.Client{Transport: itr}), nil
+}

+ 85 - 36
api/server/handlers/stacks/parse.go → api/server/handlers/porter_app/parse.go

@@ -1,4 +1,4 @@
-package stacks
+package porter_app
 
 import (
 	"fmt"
@@ -22,7 +22,7 @@ type PorterStackYAML struct {
 	Build   *Build            `yaml:"build"`
 	Env     map[string]string `yaml:"env"`
 	Apps    map[string]*App   `yaml:"apps"`
-	Release *string           `yaml:"release"`
+	Release *App              `yaml:"release"`
 }
 
 type Build struct {
@@ -37,7 +37,7 @@ type Build struct {
 type App struct {
 	Run    *string                `yaml:"run" validate:"required"`
 	Config map[string]interface{} `yaml:"config"`
-	Type   *string                `yaml:"type" validate:"required, oneof=web worker job"`
+	Type   *string                `yaml:"type" validate:"oneof=web worker job"`
 }
 
 type SubdomainCreateOpts struct {
@@ -56,29 +56,36 @@ func parse(
 	existingValues map[string]interface{},
 	existingDependencies []*chart.Dependency,
 	opts SubdomainCreateOpts,
-) (*chart.Chart, map[string]interface{}, error) {
+	injectLauncher bool,
+) (*chart.Chart, map[string]interface{}, map[string]interface{}, error) {
 	parsed := &PorterStackYAML{}
 
 	err := yaml.Unmarshal(porterYaml, parsed)
 	if err != nil {
-		return nil, nil, fmt.Errorf("%s: %w", "error parsing porter.yaml", err)
+		return nil, nil, nil, fmt.Errorf("%s: %w", "error parsing porter.yaml", err)
 	}
 
-	values, err := buildStackValues(parsed, imageInfo, existingValues, opts)
+	values, err := buildStackValues(parsed, imageInfo, existingValues, opts, injectLauncher)
 	if err != nil {
-		return nil, nil, fmt.Errorf("%s: %w", "error building values from porter.yaml", err)
+		return nil, nil, nil, fmt.Errorf("%s: %w", "error building values from porter.yaml", err)
 	}
 	convertedValues := convertMap(values).(map[string]interface{})
 
 	chart, err := buildStackChart(parsed, config, projectID, existingDependencies)
 	if err != nil {
-		return nil, nil, fmt.Errorf("%s: %w", "error building chart from porter.yaml", err)
+		return nil, nil, nil, fmt.Errorf("%s: %w", "error building chart from porter.yaml", err)
 	}
 
-	return chart, convertedValues, nil
+	// return the parsed release values for the release job chart, if they exist
+	var releaseJobValues map[string]interface{}
+	if parsed.Release != nil && parsed.Release.Run != nil {
+		releaseJobValues = buildReleaseValues(parsed.Release, parsed.Env, imageInfo, injectLauncher)
+	}
+
+	return chart, convertedValues, releaseJobValues, nil
 }
 
-func buildStackValues(parsed *PorterStackYAML, imageInfo types.ImageInfo, existingValues map[string]interface{}, opts SubdomainCreateOpts) (map[string]interface{}, error) {
+func buildStackValues(parsed *PorterStackYAML, imageInfo types.ImageInfo, existingValues map[string]interface{}, opts SubdomainCreateOpts, injectLauncher bool) (map[string]interface{}, error) {
 	values := make(map[string]interface{})
 
 	if parsed.Apps == nil {
@@ -107,16 +114,40 @@ func buildStackValues(parsed *PorterStackYAML, imageInfo types.ImageInfo, existi
 			return nil, err
 		}
 
+		// just in case this slips by
+		if appType == "web" {
+			if helm_values["ingress"] == nil {
+				helm_values["ingress"] = map[string]interface{}{
+					"enabled": false,
+				}
+			}
+		}
+
 		values[helmName] = helm_values
 	}
 
-	// add back in the existing values that were not overwritten
+	// add back in the existing services that were not overwritten
 	for k, v := range existingValues {
 		if values[k] == nil {
 			values[k] = v
 		}
 	}
 
+	// prepend launcher to all start commands if we need to
+	for _, v := range values {
+		if serviceValues, ok := v.(map[string]interface{}); ok {
+			if serviceValues["container"] != nil {
+				containerMap := serviceValues["container"].(map[string]interface{})
+				if containerMap["command"] != nil {
+					command := containerMap["command"].(string)
+					if injectLauncher && !strings.HasPrefix(command, "launcher") && !strings.HasPrefix(command, "/cnb/lifecycle/launcher") {
+						containerMap["command"] = fmt.Sprintf("/cnb/lifecycle/launcher %s", command)
+					}
+				}
+			}
+		}
+	}
+
 	if imageInfo.Repository != "" && imageInfo.Tag != "" {
 		values["global"] = map[string]interface{}{
 			"image": map[string]interface{}{
@@ -129,6 +160,32 @@ func buildStackValues(parsed *PorterStackYAML, imageInfo types.ImageInfo, existi
 	return values, nil
 }
 
+func buildReleaseValues(release *App, env map[string]string, imageInfo types.ImageInfo, injectLauncher bool) map[string]interface{} {
+	defaultValues := getDefaultValues(release, env, "job")
+	convertedConfig := convertMap(release.Config).(map[string]interface{})
+	helm_values := utils.DeepCoalesceValues(defaultValues, convertedConfig)
+
+	if imageInfo.Repository != "" && imageInfo.Tag != "" {
+		helm_values["image"] = map[string]interface{}{
+			"repository": imageInfo.Repository,
+			"tag":        imageInfo.Tag,
+		}
+	}
+
+	// prepend launcher if we need to
+	if helm_values["container"] != nil {
+		containerMap := helm_values["container"].(map[string]interface{})
+		if containerMap["command"] != nil {
+			command := containerMap["command"].(string)
+			if injectLauncher && !strings.HasPrefix(command, "launcher") && !strings.HasPrefix(command, "/cnb/lifecycle/launcher") {
+				containerMap["command"] = fmt.Sprintf("/cnb/lifecycle/launcher %s", command)
+			}
+		}
+	}
+
+	return helm_values
+}
+
 func getType(name string, app *App) string {
 	if app.Type != nil {
 		return *app.Type
@@ -145,28 +202,15 @@ func getDefaultValues(app *App, env map[string]string, appType string) map[strin
 	if app.Run != nil {
 		runCommand = *app.Run
 	}
-	if appType == "web" {
-		defaultValues = map[string]interface{}{
-			"ingress": map[string]interface{}{
-				"enabled": false,
-			},
-			"container": map[string]interface{}{
-				"command": runCommand,
-				"env": map[string]interface{}{
-					"normal": CopyEnv(env),
-				},
+	defaultValues = map[string]interface{}{
+		"container": map[string]interface{}{
+			"command": runCommand,
+			"env": map[string]interface{}{
+				"normal": CopyEnv(env),
 			},
-		}
-	} else {
-		defaultValues = map[string]interface{}{
-			"container": map[string]interface{}{
-				"command": runCommand,
-				"env": map[string]interface{}{
-					"normal": CopyEnv(env),
-				},
-			},
-		}
+		},
 	}
+
 	return defaultValues
 }
 
@@ -328,13 +372,18 @@ func createSubdomainIfRequired(
 	ingressMap, err := getNestedMap(mergedValues, "ingress")
 	if err == nil {
 		enabledVal, enabledExists := ingressMap["enabled"]
-		customDomVal, customDomExists := ingressMap["custom_domain"]
-
-		if enabledExists && customDomExists {
+		if enabledExists {
 			enabled, eOK := enabledVal.(bool)
-			customDomain, cOK := customDomVal.(bool)
+			if eOK && enabled {
+				// if custom domain, we don't need to create a subdomain
+				customDomVal, customDomExists := ingressMap["custom_domain"]
+				if customDomExists {
+					customDomain, cOK := customDomVal.(bool)
+					if cOK && customDomain {
+						return nil
+					}
+				}
 
-			if eOK && cOK && enabled && !customDomain {
 				// subdomain already exists, no need to create one
 				if porterHosts, ok := ingressMap["porter_hosts"].([]interface{}); ok && len(porterHosts) > 0 {
 					return nil

+ 94 - 0
api/server/handlers/porter_app/rollback.go

@@ -0,0 +1,94 @@
+package porter_app
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+type RollbackPorterAppHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+func NewRollbackPorterAppHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *RollbackPorterAppHandler {
+	return &RollbackPorterAppHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+func (c *RollbackPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-rollback-porter-app")
+	defer span.End()
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &types.RollbackPorterAppRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	stackName, reqErr := requestutils.GetURLParamString(r, types.URLParamStackName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "error getting stack name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "stack-name", Value: stackName})
+	namespace := fmt.Sprintf("porter-stack-%s", stackName)
+
+	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, namespace)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting helm agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	helmRelease, err := helmAgent.GetRelease(ctx, stackName, 0, false)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting helm release")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	imageInfo := attemptToGetImageInfoFromRelease(helmRelease.Config)
+	if imageInfo.Tag == "" {
+		imageInfo.Tag = "latest"
+	}
+
+	porterApp, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting porter app")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	err = helmAgent.RollbackRelease(ctx, helmRelease.Name, request.Revision)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error rolling back release")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	_, err = createPorterAppEvent(ctx, "SUCCESS", porterApp.ID, helmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating porter app event")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+}

+ 1 - 0
api/server/handlers/project/create.go

@@ -42,6 +42,7 @@ func (p *ProjectCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	proj := &models.Project{
 		Name:                   request.Name,
 		CapiProvisionerEnabled: true,
+		SimplifiedViewEnabled:  true,
 	}
 
 	var err error

+ 1 - 0
api/server/handlers/project/create_test.go

@@ -43,6 +43,7 @@ func TestCreateProjectSuccessful(t *testing.T) {
 			},
 		},
 		CapiProvisionerEnabled: true,
+		SimplifiedViewEnabled:  true,
 	}
 
 	gotProject := &types.CreateProjectResponse{}

+ 5 - 4
api/server/handlers/project/get_usage.go

@@ -40,10 +40,11 @@ func (p *ProjectGetUsageHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 	res := &types.GetProjectUsageResponse{}
 
 	currUsage, limit, usageCache, err := usage.GetUsage(&usage.GetUsageOpts{
-		Project:          proj,
-		DOConf:           p.Config().DOConf,
-		Repo:             p.Repo(),
-		WhitelistedUsers: p.Config().WhitelistedUsers,
+		Project:                          proj,
+		DOConf:                           p.Config().DOConf,
+		Repo:                             p.Repo(),
+		WhitelistedUsers:                 p.Config().WhitelistedUsers,
+		ClusterControlPlaneServiceClient: p.Config().ClusterControlPlaneClient,
 	})
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))

+ 18 - 0
api/server/handlers/project_integration/create_azure.go

@@ -3,6 +3,10 @@ package project_integration
 import (
 	"net/http"
 
+	"github.com/bufbuild/connect-go"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -48,6 +52,20 @@ func (p *CreateAzureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		AzureIntegration: az.ToAzureIntegrationType(),
 	}
 
+	req := connect.NewRequest(&porterv1.CreateAzureConnectionRequest{
+		ProjectId:              int64(project.ID),
+		ClientId:               request.AzureClientID,
+		SubscriptionId:         request.AzureSubscriptionID,
+		TenantId:               request.AzureTenantID,
+		ServicePrincipalSecret: []byte(request.ServicePrincipalKey),
+	})
+	_, err = p.Config().ClusterControlPlaneClient.CreateAzureConnection(r.Context(), req)
+
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
 	p.WriteResult(w, r, res)
 }
 

+ 47 - 0
api/server/handlers/project_integration/delete_gitlab.go

@@ -0,0 +1,47 @@
+package project_integration
+
+import (
+	"fmt"
+	"net/http"
+
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+)
+
+type DeleteGitlabIntegration struct {
+	handlers.PorterHandlerReadWriter
+}
+
+func NewDeleteGitlabIntegrationHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *DeleteGitlabIntegration {
+	return &DeleteGitlabIntegration{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (p *DeleteGitlabIntegration) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
+
+	metadata := p.Config().Metadata
+
+	if !metadata.Gitlab {
+		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("gitlab integration endpoints are not enabled")))
+		return
+	}
+
+	err := p.Repo().GitlabIntegration().DeleteGitlabIntegrationByID(gi.ProjectID, gi.ID)
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deleting gitlab integration: %w", err)))
+		return
+	}
+
+	return
+}

+ 89 - 0
api/server/handlers/project_integration/get_gitlab_porter_yaml.go

@@ -0,0 +1,89 @@
+package project_integration
+
+import (
+	b64 "encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/porter-dev/porter/internal/models"
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+	"github.com/xanzy/go-gitlab"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+)
+
+type GitlabRepoPorterYamlContentsHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+func NewGetGitlabRepoPorterYamlContentsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GitlabRepoPorterYamlContentsHandler {
+	return &GitlabRepoPorterYamlContentsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (p *GitlabRepoPorterYamlContentsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
+
+	request := &types.GetGitlabProcfileRequest{}
+
+	ok := p.DecodeAndValidate(w, r, request)
+	if !ok {
+		return
+	}
+
+	path, err := url.QueryUnescape(request.Path)
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("malformed query param path")))
+		return
+	}
+
+	client, err := getGitlabClient(p.Repo(), user.ID, project.ID, gi, p.Config())
+	if err != nil {
+		if errors.Is(err, errUnauthorizedGitlabUser) {
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(errUnauthorizedGitlabUser, http.StatusUnauthorized))
+		}
+
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	file, resp, err := client.RepositoryFiles.GetRawFile(request.RepoPath,
+		strings.TrimPrefix(path, "./"), &gitlab.GetRawFileOptions{
+			Ref: gitlab.String(request.Branch),
+		},
+	)
+
+	if resp.StatusCode == http.StatusUnauthorized {
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unauthorized gitlab user"), http.StatusUnauthorized))
+		return
+	} else if resp.StatusCode == http.StatusNotFound {
+		w.WriteHeader(http.StatusNotFound)
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("no such procfile exists")))
+		return
+	}
+
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	data := b64.StdEncoding.EncodeToString(file)
+
+	p.WriteResult(w, r, data)
+}

+ 4 - 18
api/server/handlers/project_integration/get_gitlab_repo_buildpack.go

@@ -11,7 +11,6 @@ import (
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/integrations/buildpacks"
@@ -39,22 +38,9 @@ func (p *GetGitlabRepoBuildpackHandler) ServeHTTP(w http.ResponseWriter, r *http
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
 
-	request := &types.GetBuildpackRequest{}
+	request := &types.GetGitlabBuildpackRequest{}
 
 	ok := p.DecodeAndValidate(w, r, request)
-
-	if !ok {
-		return
-	}
-
-	owner, name, ok := commonutils.GetOwnerAndNameParams(p, w, r)
-
-	if !ok {
-		return
-	}
-
-	branch, ok := commonutils.GetBranchParam(p, w, r)
-
 	if !ok {
 		return
 	}
@@ -81,9 +67,9 @@ func (p *GetGitlabRepoBuildpackHandler) ServeHTTP(w http.ResponseWriter, r *http
 		dir = "."
 	}
 
-	tree, resp, err := client.Repositories.ListTree(fmt.Sprintf("%s/%s", owner, name), &gitlab.ListTreeOptions{
+	tree, resp, err := client.Repositories.ListTree(request.RepoPath, &gitlab.ListTreeOptions{
 		Path: gitlab.String(dir),
-		Ref:  gitlab.String(branch),
+		Ref:  gitlab.String(request.Branch),
 	})
 
 	if resp.StatusCode == http.StatusUnauthorized {
@@ -111,7 +97,7 @@ func (p *GetGitlabRepoBuildpackHandler) ServeHTTP(w http.ResponseWriter, r *http
 				}
 			}()
 			buildpacks.Runtimes[idx].DetectGitlab(
-				client, tree, owner, name, dir, branch,
+				client, tree, request.RepoPath, dir, request.Branch,
 				builderInfoMap[buildpacks.PaketoBuilder], builderInfoMap[buildpacks.HerokuBuilder],
 			)
 			wg.Done()

+ 3 - 17
api/server/handlers/project_integration/get_gitlab_repo_contents.go

@@ -10,7 +10,6 @@ import (
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
@@ -37,22 +36,9 @@ func (p *GetGitlabRepoContentsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
 
-	request := &types.GetContentsRequest{}
+	request := &types.GetGitlabContentsRequest{}
 
 	ok := p.DecodeAndValidate(w, r, request)
-
-	if !ok {
-		return
-	}
-
-	owner, name, ok := commonutils.GetOwnerAndNameParams(p, w, r)
-
-	if !ok {
-		return
-	}
-
-	branch, ok := commonutils.GetBranchParam(p, w, r)
-
 	if !ok {
 		return
 	}
@@ -79,9 +65,9 @@ func (p *GetGitlabRepoContentsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		return
 	}
 
-	tree, resp, err := client.Repositories.ListTree(fmt.Sprintf("%s/%s", owner, name), &gitlab.ListTreeOptions{
+	tree, resp, err := client.Repositories.ListTree(request.RepoPath, &gitlab.ListTreeOptions{
 		Path: gitlab.String(dir),
-		Ref:  gitlab.String(branch),
+		Ref:  gitlab.String(request.Branch),
 	})
 
 	if resp.StatusCode == http.StatusUnauthorized {

+ 3 - 16
api/server/handlers/project_integration/get_gitlab_repo_procfile.go

@@ -11,7 +11,6 @@ import (
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
@@ -40,7 +39,7 @@ func (p *GetGitlabRepoProcfileHandler) ServeHTTP(w http.ResponseWriter, r *http.
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
 
-	request := &types.GetProcfileRequest{}
+	request := &types.GetGitlabProcfileRequest{}
 
 	ok := p.DecodeAndValidate(w, r, request)
 
@@ -48,18 +47,6 @@ func (p *GetGitlabRepoProcfileHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		return
 	}
 
-	owner, name, ok := commonutils.GetOwnerAndNameParams(p, w, r)
-
-	if !ok {
-		return
-	}
-
-	branch, ok := commonutils.GetBranchParam(p, w, r)
-
-	if !ok {
-		return
-	}
-
 	path, err := url.QueryUnescape(request.Path)
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("malformed query param path")))
@@ -76,9 +63,9 @@ func (p *GetGitlabRepoProcfileHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		return
 	}
 
-	file, resp, err := client.RepositoryFiles.GetRawFile(fmt.Sprintf("%s/%s", owner, name),
+	file, resp, err := client.RepositoryFiles.GetRawFile(request.RepoPath,
 		strings.TrimPrefix(path, "./"), &gitlab.GetRawFileOptions{
-			Ref: gitlab.String(branch),
+			Ref: gitlab.String(request.Branch),
 		},
 	)
 

+ 30 - 2
api/server/handlers/project_integration/list_gitlab.go

@@ -3,6 +3,8 @@ package project_integration
 import (
 	"net/http"
 
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -26,6 +28,7 @@ func NewListGitlabHandler(
 
 func (p *ListGitlabHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
 
 	gitlabInts, err := p.Repo().GitlabIntegration().ListGitlabIntegrationsByProjectID(project.ID)
 	if err != nil {
@@ -33,11 +36,36 @@ func (p *ListGitlabHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	var res types.ListGitlabResponse = make([]*types.GitlabIntegration, 0)
+	var res types.ListGitlabResponse = make([]*types.GitlabIntegrationWithUsername, 0)
 
 	for _, gitlabInt := range gitlabInts {
-		res = append(res, gitlabInt.ToGitlabIntegrationType())
+		username := p.getCurrentUsername(user.ID, project.ID, gitlabInt)
+		glit := gitlabInt.ToGitlabIntegrationType()
+		res = append(res,
+			&types.GitlabIntegrationWithUsername{
+				Username:          username,
+				GitlabIntegration: *glit,
+			},
+		)
 	}
 
 	p.WriteResult(w, r, res)
 }
+
+func (p *ListGitlabHandler) getCurrentUsername(userID uint, projectID uint, gi *ints.GitlabIntegration) string {
+	client, err := getGitlabClient(p.Repo(), userID, projectID, gi, p.Config())
+	if err != nil {
+		return "Unable to connect"
+	}
+
+	currentUser, resp, err := client.Users.CurrentUser()
+	if resp.StatusCode == http.StatusUnauthorized {
+		return "Unable to connect"
+	}
+
+	if err != nil {
+		return "Unable to connect"
+	}
+
+	return currentUser.Username
+}

+ 14 - 5
api/server/handlers/project_integration/list_gitlab_repo_branches.go

@@ -8,7 +8,6 @@ import (
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
@@ -35,12 +34,14 @@ func (p *ListGitlabRepoBranchesHandler) ServeHTTP(w http.ResponseWriter, r *http
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 	gi, _ := r.Context().Value(types.GitlabIntegrationScope).(*ints.GitlabIntegration)
 
-	owner, name, ok := commonutils.GetOwnerAndNameParams(p, w, r)
-
-	if !ok {
+	request := &types.ListGitlabRepoBranchesRequest{}
+	if ok := p.DecodeAndValidate(w, r, request); !ok {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(errors.New("cannot decode and validate request")))
 		return
 	}
 
+	repoPath := request.RepoPath
+
 	client, err := getGitlabClient(p.Repo(), user.ID, project.ID, gi, p.Config())
 	if err != nil {
 		if errors.Is(err, errUnauthorizedGitlabUser) {
@@ -51,7 +52,15 @@ func (p *ListGitlabRepoBranchesHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	branches, resp, err := client.Branches.ListBranches(fmt.Sprintf("%s/%s", owner, name), &gitlab.ListBranchesOptions{})
+	branches, resp, err := client.Branches.ListBranches(repoPath,
+		&gitlab.ListBranchesOptions{
+			ListOptions: gitlab.ListOptions{
+				Page:    1,
+				PerPage: 20,
+			},
+			Search: &request.SearchTerm,
+		},
+	)
 
 	if resp.StatusCode == http.StatusUnauthorized {
 		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unauthorized gitlab user"), http.StatusUnauthorized))

+ 13 - 4
api/server/handlers/project_integration/list_gitlab_repos.go

@@ -50,10 +50,21 @@ func (p *ListGitlabReposHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	giProjects, resp, err := client.Projects.ListProjects(&gitlab.ListProjectsOptions{
+	searchTerm := r.URL.Query().Get("searchTerm")
+
+	opts := &gitlab.ListProjectsOptions{
 		Simple:     gitlab.Bool(true),
 		Membership: gitlab.Bool(true),
-	})
+		ListOptions: gitlab.ListOptions{
+			PerPage: 20,
+			Page:    1,
+		},
+		Search:           gitlab.String(searchTerm),
+		SearchNamespaces: gitlab.Bool(true),
+	}
+
+	var res []string
+	giProjects, resp, err := client.Projects.ListProjects(opts)
 
 	if resp.StatusCode == http.StatusUnauthorized {
 		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unauthorized gitlab user"), http.StatusUnauthorized))
@@ -65,8 +76,6 @@ func (p *ListGitlabReposHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	var res []string
-
 	for _, giProject := range giProjects {
 		res = append(res, giProject.PathWithNamespace)
 	}

+ 34 - 23
api/server/handlers/registry/get_token.go

@@ -18,6 +18,7 @@ import (
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/oauth"
 	"github.com/porter-dev/porter/internal/registry"
+	"github.com/porter-dev/porter/internal/telemetry"
 
 	"github.com/aws/aws-sdk-go/aws/arn"
 )
@@ -151,7 +152,10 @@ func NewRegistryGetGCRTokenHandler(
 }
 
 func (c *RegistryGetGCRTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-registry-get-gcr-token")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
 	request := &types.GetRegistryGCRTokenRequest{}
 
@@ -162,7 +166,8 @@ func (c *RegistryGetGCRTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 	// list registries and find one that matches the region
 	regs, err := c.Repo().Registry().ListRegistriesByProjectID(proj.ID)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := telemetry.Error(ctx, span, err, "error listing registries by project id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 		return
 	}
 
@@ -173,15 +178,16 @@ func (c *RegistryGetGCRTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		if reg.GCPIntegrationID != 0 && strings.Contains(reg.URL, request.ServerURL) {
 			_reg := registry.Registry(*reg)
 
-			oauthTok, err := _reg.GetGCRToken(c.Repo())
-
-			// if the oauth token is not nil, but the error is not nil, we still return the token
-			// but log an error
-			if oauthTok != nil && err != nil {
-				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
-			} else if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-				return
+			oauthTok, err := _reg.GetGCRToken(ctx, c.Repo())
+			if err != nil {
+				// if the oauth token is not nil, we still return the token but log an error
+				if oauthTok == nil {
+					e := telemetry.Error(ctx, span, err, "error getting gcr token")
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
+					return
+				}
+				e := telemetry.Error(ctx, span, err, "error getting gcr token, but token was returned")
+				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(e))
 			}
 
 			token = oauthTok.AccessToken
@@ -213,7 +219,10 @@ func NewRegistryGetGARTokenHandler(
 }
 
 func (c *RegistryGetGARTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-registry-get-gar-token")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
 	request := &types.GetRegistryGCRTokenRequest{}
 
@@ -224,7 +233,8 @@ func (c *RegistryGetGARTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 	// list registries and find one that matches the region
 	regs, err := c.Repo().Registry().ListRegistriesByProjectID(proj.ID)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := telemetry.Error(ctx, span, err, "error listing registries by project id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
 		return
 	}
 
@@ -235,15 +245,16 @@ func (c *RegistryGetGARTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		if reg.GCPIntegrationID != 0 && strings.Contains(reg.URL, request.ServerURL) {
 			_reg := registry.Registry(*reg)
 
-			oauthTok, err := _reg.GetGARToken(c.Repo())
-
-			// if the oauth token is not nil, but the error is not nil, we still return the token
-			// but log an error
-			if oauthTok != nil && err != nil {
-				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
-			} else if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-				return
+			oauthTok, err := _reg.GetGARToken(ctx, c.Repo())
+			if err != nil {
+				// if the oauth token is not nil, we still return the token but log an error
+				if oauthTok == nil {
+					e := telemetry.Error(ctx, span, err, "error getting gar token")
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
+					return
+				}
+				e := telemetry.Error(ctx, span, err, "error getting gar token, but token was returned")
+				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(e))
 			}
 
 			token = oauthTok.AccessToken
@@ -406,9 +417,9 @@ func (c *RegistryGetACRTokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 	for _, reg := range regs {
 		if reg.AzureIntegrationID != 0 && strings.Contains(reg.URL, "azurecr.io") {
 			_reg := registry.Registry(*reg)
-
 			username, pw, err := _reg.GetACRCredentials(c.Repo())
 			if err != nil {
+				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 				continue
 			}
 

+ 13 - 2
api/server/handlers/registry/list_repositories.go

@@ -3,6 +3,8 @@ package registry
 import (
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -26,16 +28,25 @@ func NewRegistryListRepositoriesHandler(
 }
 
 func (c *RegistryListRepositoriesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-registry-list-repositories")
+	defer span.End()
+
 	reg, _ := ctx.Value(types.RegistryScope).(*models.Registry)
 
 	// cast to a registry from registry package
 	_reg := registry.Registry(*reg)
 	regAPI := &_reg
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "registry-name", Value: regAPI.Name},
+		telemetry.AttributeKV{Key: "registry-id", Value: regAPI.ID},
+		telemetry.AttributeKV{Key: "project-id", Value: regAPI.ProjectID},
+	)
+
 	repos, err := regAPI.ListRepositories(ctx, c.Repo(), c.Config())
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err := telemetry.Error(ctx, span, err, "error listing repositories")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 123 - 58
api/server/handlers/release/create.go

@@ -9,6 +9,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -49,12 +51,21 @@ func NewCreateReleaseHandler(
 }
 
 func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	user, _ := ctx.Value(types.UserScope).(*models.User)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-	namespace := ctx.Value(types.NamespaceScope).(string)
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	namespace := r.Context().Value(types.NamespaceScope).(string)
 	operationID := oauth.CreateRandomState()
 
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-release")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: cluster.ProjectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+		telemetry.AttributeKV{Key: "user-email", Value: user.Email},
+		telemetry.AttributeKV{Key: "namespace", Value: namespace},
+	)
+
 	c.Config().AnalyticsClient.Track(analytics.ApplicationLaunchStartTrack(
 		&analytics.ApplicationLaunchStartTrackOpts{
 			ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(user.ID, cluster.ProjectID, cluster.ID),
@@ -62,9 +73,9 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		},
 	))
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting helm agent: %w", err)))
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error getting helm agent")))
 		return
 	}
 
@@ -78,12 +89,14 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		request.RepoURL = c.Config().ServerConf.DefaultApplicationHelmRepoURL
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "repo-url", Value: request.RepoURL})
+
 	// if the repo url is not an addon or application url, validate against the helm repos
 	if request.RepoURL != c.Config().ServerConf.DefaultAddonHelmRepoURL && request.RepoURL != c.Config().ServerConf.DefaultApplicationHelmRepoURL {
 		// load the helm repos in the project
 		hrs, err := c.Repo().HelmRepo().ListHelmReposByProjectID(cluster.ProjectID)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error listing helm repos for project : %w", err)))
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error listing helm repos for project")))
 			return
 		}
 
@@ -91,7 +104,7 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 		if !isValid {
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-				fmt.Errorf("invalid repo_url parameter"),
+				telemetry.Error(ctx, span, err, "invalid repo_url parameter"),
 				http.StatusBadRequest,
 			))
 
@@ -103,15 +116,28 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		request.TemplateVersion = ""
 	}
 
-	chart, err := loader.LoadChartPublic(request.RepoURL, request.TemplateName, request.TemplateVersion)
+	chart, err := loader.LoadChartPublic(ctx, request.RepoURL, request.TemplateName, request.TemplateVersion)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error loading public chart: %w", err)))
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error loading public chart")))
 		return
 	}
 
 	registries, err := c.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error listing registries: %w", err)))
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error listing registries")))
+		return
+	}
+
+	k8sAgent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error getting k8s agent")))
+		return
+	}
+
+	// create the namespace if it does not exist already
+	_, err = k8sAgent.CreateNamespace(namespace, nil)
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error creating namespace")))
 		return
 	}
 
@@ -125,22 +151,16 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		Registries: registries,
 	}
 
-	helmRelease, err := helmAgent.InstallChart(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
+	helmRelease, err := helmAgent.InstallChart(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			fmt.Errorf("error installing a new chart: %s", err.Error()),
+			telemetry.Error(ctx, span, err, "error installing a new chart"),
 			http.StatusBadRequest,
 		))
 
 		return
 	}
 
-	k8sAgent, err := c.GetAgent(r, cluster, "")
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting k8s agent: %w", err)))
-		return
-	}
-
 	configMaps := make([]*v1.ConfigMap, 0)
 
 	if request.SyncedEnvGroups != nil && len(request.SyncedEnvGroups) > 0 {
@@ -148,7 +168,7 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			// read the attached configmap
 			cm, _, err := k8sAgent.GetLatestVersionedConfigMap(envGroupName, namespace)
 			if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("Couldn't find the env group"), http.StatusNotFound))
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(telemetry.Error(ctx, span, err, "Couldn't find the env group"), http.StatusNotFound))
 				return
 			}
 
@@ -156,9 +176,9 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		}
 	}
 
-	release, err := CreateAppReleaseFromHelmRelease(c.Config(), cluster.ProjectID, cluster.ID, 0, helmRelease)
+	release, err := CreateAppReleaseFromHelmRelease(ctx, c.Config(), cluster.ProjectID, cluster.ID, 0, helmRelease)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error creating app release from helm release")))
 		return
 	}
 
@@ -168,7 +188,9 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			_, err = k8sAgent.AddApplicationToVersionedConfigMap(cm, release.Name)
 
 			if err != nil {
-				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(fmt.Errorf("Couldn't add %s to the config map %s", release.Name, cm.Name)))
+				telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "release-name", Value: release.Name})
+				telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "config-map-name", Value: cm.Name})
+				c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "Couldn't add release to the config map")))
 			}
 		}
 	}
@@ -182,11 +204,11 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	}
 
 	if request.BuildConfig != nil {
-		_, err = createBuildConfig(c.Config(), release, request.BuildConfig)
+		_, err = createBuildConfig(ctx, c.Config(), release, request.BuildConfig)
 	}
 
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error building config")))
 		return
 	}
 
@@ -207,12 +229,12 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 			if unwrappedErr != nil {
 				if errors.Is(unwrappedErr, actions.ErrProtectedBranch) {
-					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusConflict))
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(telemetry.Error(ctx, span, err, "error creating git action on protected branch"), http.StatusConflict))
 				} else if errors.Is(unwrappedErr, actions.ErrCreatePRForProtectedBranch) {
-					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusPreconditionFailed))
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(telemetry.Error(ctx, span, err, "error creating PR on protected branch"), http.StatusPreconditionFailed))
 				}
 			} else {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+				c.HandleAPIError(w, r, apierrors.NewErrInternal(telemetry.Error(ctx, span, err, "error creating git action")))
 				return
 			}
 		}
@@ -236,10 +258,21 @@ func (c *CreateReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 }
 
 func CreateAppReleaseFromHelmRelease(
+	ctx context.Context,
 	config *config.Config,
 	projectID, clusterID, stackResourceID uint,
 	helmRelease *release.Release,
 ) (*models.Release, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-app-release-from-helm-release")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: projectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: clusterID},
+		telemetry.AttributeKV{Key: "stack-resource-id", Value: stackResourceID},
+		telemetry.AttributeKV{Key: "helm-release-name", Value: helmRelease.Name},
+	)
+
 	token, err := encryption.GenerateRandomBytes(16)
 	if err != nil {
 		return nil, err
@@ -249,16 +282,18 @@ func CreateAppReleaseFromHelmRelease(
 	image, ok := helmRelease.Config["image"].(map[string]interface{})
 
 	if !ok {
-		return nil, fmt.Errorf("Could not find field image in config")
+		return nil, telemetry.Error(ctx, span, nil, "Could not find field image in config")
 	}
 
 	repository := image["repository"]
 	repoStr, ok := repository.(string)
 
 	if !ok {
-		return nil, fmt.Errorf("Could not find field repository in config")
+		return nil, telemetry.Error(ctx, span, nil, "Could not find field repository in config")
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "repo-uri", Value: repoStr})
+
 	release := &models.Release{
 		ClusterID:       clusterID,
 		ProjectID:       projectID,
@@ -296,12 +331,23 @@ func createGitAction(
 	name, namespace string,
 	release *models.Release,
 ) (*types.GitActionConfig, []byte, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-git-action")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: projectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: clusterID},
+		telemetry.AttributeKV{Key: "user-id", Value: userID},
+		telemetry.AttributeKV{Key: "name", Value: name},
+		telemetry.AttributeKV{Key: "namespace", Value: namespace},
+	)
+
 	// if the registry was provisioned through Porter, create a repository if necessary
 	if release != nil && request.RegistryID != 0 {
 		// read the registry
 		reg, err := config.Repo.Registry().ReadRegistry(projectID, request.RegistryID)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, telemetry.Error(ctx, span, err, "could not read repo registry")
 		}
 
 		_reg := registry.Registry(*reg)
@@ -311,30 +357,28 @@ func createGitAction(
 		nameSpl := strings.Split(request.ImageRepoURI, "/")
 		repoName := nameSpl[len(nameSpl)-1]
 
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "repo-name", Value: repoName})
+
 		err = regAPI.CreateRepository(ctx, config, repoName)
 
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, telemetry.Error(ctx, span, err, "could not create repo")
 		}
 	}
 
 	isDryRun := release == nil
 
-	repoSplit := strings.Split(request.GitRepo, "/")
-
-	if len(repoSplit) != 2 {
-		return nil, nil, fmt.Errorf("invalid formatting of repo name")
-	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "is-dry-run", Value: isDryRun})
 
 	encoded := ""
 	var err error
 
 	// if this isn't a dry run, generate the token
 	if !isDryRun {
-		encoded, err = getToken(config, userID, projectID, clusterID, request)
+		encoded, err = getToken(ctx, config, userID, projectID, clusterID, request)
 
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, telemetry.Error(ctx, span, err, "error getting token")
 		}
 	}
 
@@ -344,8 +388,7 @@ func createGitAction(
 	if request.GitlabIntegrationID != 0 {
 		giRunner := &gitlab.GitlabCI{
 			ServerURL:        config.ServerConf.ServerURL,
-			GitRepoOwner:     repoSplit[0],
-			GitRepoName:      repoSplit[1],
+			GitRepoPath:      request.GitRepo,
 			GitBranch:        request.GitBranch,
 			Repo:             config.Repo,
 			ProjectID:        projectID,
@@ -361,6 +404,12 @@ func createGitAction(
 
 		gitErr = giRunner.Setup()
 	} else {
+		repoSplit := strings.Split(request.GitRepo, "/")
+
+		if len(repoSplit) != 2 {
+			return nil, nil, telemetry.Error(ctx, span, nil, "invalid formatting of repo name")
+		}
+
 		// create the commit in the git repo
 		gaRunner := &actions.GithubActions{
 			InstanceName:           config.ServerConf.InstanceName,
@@ -394,7 +443,7 @@ func createGitAction(
 
 		if gaRunner.DryRun {
 			if gitErr != nil {
-				return nil, nil, gitErr
+				return nil, nil, telemetry.Error(ctx, span, gitErr, "error setting up git")
 			}
 
 			return nil, workflowYAML, nil
@@ -415,26 +464,35 @@ func createGitAction(
 		Version:             "v0.1.0",
 	})
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, telemetry.Error(ctx, span, err, "error creating git action config")
 	}
 
 	// update the release in the db with the image repo uri
 	release.ImageRepoURI = ga.ImageRepoURI
 
 	_, err = config.Repo.Release().UpdateRelease(release)
-
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, telemetry.Error(ctx, span, err, "error updating release")
 	}
 
 	return ga.ToGitActionConfigType(), workflowYAML, gitErr
 }
 
 func getToken(
+	ctx context.Context,
 	config *config.Config,
 	userID, projectID, clusterID uint,
 	request *types.CreateGitActionConfigRequest,
 ) (string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "get-git-action-token")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: projectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: clusterID},
+		telemetry.AttributeKV{Key: "user-id", Value: userID},
+	)
+
 	// create a policy for the token
 	policy := []*types.PolicyDocument{
 		{
@@ -459,12 +517,12 @@ func getToken(
 
 	uid, err := encryption.GenerateRandomBytes(16)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error generating uid")
 	}
 
 	policyBytes, err := json.Marshal(policy)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error marshalling policy into json")
 	}
 
 	policyModel := &models.Policy{
@@ -478,24 +536,24 @@ func getToken(
 	policyModel, err = config.Repo.Policy().CreatePolicy(policyModel)
 
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error creating policy")
 	}
 
 	// create the token in the database
 	tokenUID, err := encryption.GenerateRandomBytes(16)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error generating tokenUID")
 	}
 
 	secretKey, err := encryption.GenerateRandomBytes(16)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error generating secret key")
 	}
 
 	// hash the secret key for storage in the db
 	hashedToken, err := bcrypt.GenerateFromPassword([]byte(secretKey), 8)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error generating hashedToken")
 	}
 
 	expiresAt := time.Now().Add(time.Hour * 24 * 365)
@@ -515,26 +573,30 @@ func getToken(
 	apiToken, err = config.Repo.APIToken().CreateAPIToken(apiToken)
 
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error creating api token")
 	}
 
 	// generate porter jwt token
 	jwt, err := token.GetStoredTokenForAPI(userID, projectID, apiToken.UniqueID, secretKey)
 	if err != nil {
-		return "", err
+		return "", telemetry.Error(ctx, span, err, "error getting stored token for api")
 	}
 
 	return jwt.EncodeToken(config.TokenConf)
 }
 
 func createBuildConfig(
+	ctx context.Context,
 	config *config.Config,
 	release *models.Release,
 	bcRequest *types.CreateBuildConfigRequest,
 ) (*types.BuildConfig, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-build-config")
+	defer span.End()
+
 	data, err := json.Marshal(bcRequest.Config)
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error marshalling build config request")
 	}
 
 	// handle write to the database
@@ -544,14 +606,14 @@ func createBuildConfig(
 		Config:     data,
 	})
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error creating build config")
 	}
 
 	release.BuildConfig = bc.ID
 
 	_, err = config.Repo.Release().UpdateRelease(release)
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error updating release")
 	}
 
 	return bc.ToBuildConfigType(), nil
@@ -566,6 +628,7 @@ type containerEnvConfig struct {
 }
 
 func GetGARunner(
+	ctx context.Context,
 	config *config.Config,
 	userID, projectID, clusterID uint,
 	ga *models.GitActionConfig,
@@ -573,10 +636,12 @@ func GetGARunner(
 	release *models.Release,
 	helmRelease *release.Release,
 ) (*actions.GithubActions, error) {
+	ctx, span := telemetry.NewSpan(ctx, "get-ga-runner")
+	defer span.End()
+
 	cEnv := &containerEnvConfig{}
 
 	rawValues, err := yaml.Marshal(helmRelease.Config)
-
 	if err == nil {
 		err = yaml.Unmarshal(rawValues, cEnv)
 
@@ -589,7 +654,7 @@ func GetGARunner(
 	repoSplit := strings.Split(ga.GitRepo, "/")
 
 	if len(repoSplit) != 2 {
-		return nil, fmt.Errorf("invalid formatting of repo name")
+		return nil, telemetry.Error(ctx, span, nil, "invalid formatting of repo name")
 	}
 
 	// create the commit in the git repo

+ 10 - 8
api/server/handlers/release/create_addon.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 
@@ -48,7 +49,7 @@ func (c *CreateAddonHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		},
 	))
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -91,7 +92,7 @@ func (c *CreateAddonHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		Registries: registries,
 	}
 
-	helmRelease, err := helmAgent.InstallChart(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
+	helmRelease, err := helmAgent.InstallChart(r.Context(), conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 			fmt.Errorf("error installing a new chart: %s", err.Error()),
@@ -124,7 +125,7 @@ type LoadAddonChartOpts struct {
 func LoadChart(config *config.Config, opts *LoadAddonChartOpts) (*chart.Chart, error) {
 	// if the chart repo url is one of the specified application/addon charts, just load public
 	if opts.RepoURL == config.ServerConf.DefaultAddonHelmRepoURL || opts.RepoURL == config.ServerConf.DefaultApplicationHelmRepoURL {
-		return loader.LoadChartPublic(opts.RepoURL, opts.TemplateName, opts.TemplateVersion)
+		return loader.LoadChartPublic(context.Background(), opts.RepoURL, opts.TemplateName, opts.TemplateVersion)
 	} else {
 		// load the helm repos in the project
 		hrs, err := config.Repo.HelmRepo().ListHelmReposByProjectID(opts.ProjectID)
@@ -141,12 +142,13 @@ func LoadChart(config *config.Config, opts *LoadAddonChartOpts) (*chart.Chart, e
 						return nil, err
 					}
 
-					return loader.LoadChart(&loader.BasicAuthClient{
-						Username: string(basic.Username),
-						Password: string(basic.Password),
-					}, hr.RepoURL, opts.TemplateName, opts.TemplateVersion)
+					return loader.LoadChart(context.Background(),
+						&loader.BasicAuthClient{
+							Username: string(basic.Username),
+							Password: string(basic.Password),
+						}, hr.RepoURL, opts.TemplateName, opts.TemplateVersion)
 				} else {
-					return loader.LoadChartPublic(hr.RepoURL, opts.TemplateName, opts.TemplateVersion)
+					return loader.LoadChartPublic(context.Background(), hr.RepoURL, opts.TemplateName, opts.TemplateVersion)
 				}
 			}
 		}

+ 1 - 1
api/server/handlers/release/create_webhook.go

@@ -29,7 +29,7 @@ func (c *CreateWebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
-	release, err := CreateAppReleaseFromHelmRelease(c.Config(), cluster.ProjectID, cluster.ID, 0, helmRelease)
+	release, err := CreateAppReleaseFromHelmRelease(r.Context(), c.Config(), cluster.ProjectID, cluster.ID, 0, helmRelease)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 5 - 12
api/server/handlers/release/delete.go

@@ -1,9 +1,8 @@
 package release
 
 import (
-	"fmt"
+	"context"
 	"net/http"
-	"strings"
 
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -37,13 +36,13 @@ func (c *DeleteReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 	helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
-	_, err = helmAgent.UninstallChart(helmRelease.Name)
+	_, err = helmAgent.UninstallChart(context.Background(), helmRelease.Name)
 
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
@@ -59,17 +58,10 @@ func (c *DeleteReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 			if gitAction != nil && gitAction.ID != 0 {
 				if gitAction.GitlabIntegrationID != 0 {
-					repoSplit := strings.Split(gitAction.GitRepo, "/")
-
-					if len(repoSplit) != 2 {
-						c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("invalid formatting of repo name")))
-						return
-					}
 
 					giRunner := &gitlab.GitlabCI{
 						ServerURL:        c.Config().ServerConf.ServerURL,
-						GitRepoOwner:     repoSplit[0],
-						GitRepoName:      repoSplit[1],
+						GitRepoPath:      gitAction.GitRepo,
 						Repo:             c.Repo(),
 						ProjectID:        cluster.ProjectID,
 						ClusterID:        cluster.ID,
@@ -88,6 +80,7 @@ func (c *DeleteReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 					}
 				} else {
 					gaRunner, err := GetGARunner(
+						r.Context(),
 						c.Config(),
 						user.ID,
 						cluster.ProjectID,

+ 12 - 37
api/server/handlers/release/get_controllers.go

@@ -77,61 +77,40 @@ func getController(controller grapher.Object, agent *kubernetes.Agent) (rc inter
 	case "deployment":
 		obj, err := agent.GetDeployment(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetDeployment(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting deployment: %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting deployment: %w", err)
+			return nil, nil, err
 		}
 
 		return obj, obj.Spec.Selector, nil
 	case "statefulset":
-
 		obj, err := agent.GetStatefulSet(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetStatefulSet(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting stateful set: %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting stateful set: %w", err)
+			return nil, nil, err
 		}
 
 		return obj, obj.Spec.Selector, nil
 	case "daemonset":
 		obj, err := agent.GetDaemonSet(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetDaemonSet(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting daemon set: %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting daemon set: %w", err)
+			return nil, nil, err
 		}
 
 		return obj, obj.Spec.Selector, nil
 	case "replicaset":
 		obj, err := agent.GetReplicaSet(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetReplicaSet(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting replica set: %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting replica set: %w", err)
+			return nil, nil, err
 		}
 
 		return obj, obj.Spec.Selector, nil
 	case "cronjob":
 		obj, err := agent.GetCronJob(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetCronJob(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting cron job %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting cron job %w", err)
+			return nil, nil, err
 		}
 
 		res := &metav1.LabelSelector{
@@ -146,12 +125,8 @@ func getController(controller grapher.Object, agent *kubernetes.Agent) (rc inter
 	case "job":
 		obj, err := agent.GetJob(controller)
 		if err != nil {
-			controller.Namespace = "default"
-			obj, err = agent.GetJob(controller)
-			if err != nil {
-				err = fmt.Errorf("error getting job: %w", err)
-				return nil, nil, err
-			}
+			err = fmt.Errorf("error getting job: %w", err)
+			return nil, nil, err
 		}
 
 		return obj, obj.Spec.Selector, nil

+ 3 - 2
api/server/handlers/release/get_history.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"net/http"
 
 	"github.com/porter-dev/porter/api/server/authz"
@@ -32,7 +33,7 @@ func NewGetReleaseHistoryHandler(
 func (c *GetReleaseHistoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -40,7 +41,7 @@ func (c *GetReleaseHistoryHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 
 	// get the name of the application
 	name, _ := requestutils.GetURLParamString(r, types.URLParamReleaseName)
-	history, err := helmAgent.GetReleaseHistory(name)
+	history, err := helmAgent.GetReleaseHistory(context.Background(), name)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 4 - 3
api/server/handlers/release/update_image_batch.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"strings"
@@ -35,7 +36,7 @@ func NewUpdateImageBatchHandler(
 func (c *UpdateImageBatchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -75,7 +76,7 @@ func (c *UpdateImageBatchHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 		go func() {
 			defer wg.Done()
 			// read release via agent
-			rel, err := helmAgent.GetRelease(releases[index].Name, 0, false)
+			rel, err := helmAgent.GetRelease(context.Background(), releases[index].Name, 0, false)
 			if err != nil {
 				// if this is a release not found error, just return - the release has likely been deleted from the underlying
 				// cluster but has not been deleted from the Porter database yet
@@ -104,7 +105,7 @@ func (c *UpdateImageBatchHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 					Values:     rel.Config,
 				}
 
-				_, err = helmAgent.UpgradeReleaseByValues(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
+				_, err = helmAgent.UpgradeReleaseByValues(context.Background(), conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection, false)
 
 				if err != nil {
 					// if this is a release not found error, just return - the release has likely been deleted from the underlying

+ 4 - 2
api/server/handlers/release/update_rollback.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 
@@ -36,7 +37,7 @@ func (c *RollbackReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 	helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -48,7 +49,7 @@ func (c *RollbackReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 		return
 	}
 
-	err = helmAgent.RollbackRelease(helmRelease.Name, request.Revision)
+	err = helmAgent.RollbackRelease(context.Background(), helmRelease.Name, request.Revision)
 
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
@@ -75,6 +76,7 @@ func (c *RollbackReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 			if gitAction != nil && gitAction.ID != 0 && gitAction.GitlabIntegrationID == 0 {
 				gaRunner, err := GetGARunner(
+					r.Context(),
 					c.Config(),
 					user.ID,
 					cluster.ProjectID,

+ 6 - 4
api/server/handlers/release/upgrade.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -44,7 +45,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 	helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -116,7 +117,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 	// if LatestRevision is set, check that the revision matches the latest revision in the database
 	if request.LatestRevision != 0 {
-		currHelmRelease, err := helmAgent.GetRelease(helmRelease.Name, 0, false)
+		currHelmRelease, err := helmAgent.GetRelease(context.Background(), helmRelease.Name, 0, false)
 		if err != nil {
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("could not retrieve latest revision"),
@@ -153,8 +154,8 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		}
 	}
 
-	newHelmRelease, upgradeErr := helmAgent.UpgradeRelease(conf, request.Values, c.Config().DOConf,
-		c.Config().ServerConf.DisablePullSecretsInjection)
+	newHelmRelease, upgradeErr := helmAgent.UpgradeRelease(context.Background(), conf, request.Values, c.Config().DOConf,
+		c.Config().ServerConf.DisablePullSecretsInjection, request.IgnoreDependencies)
 
 	if upgradeErr == nil && newHelmRelease != nil {
 		helmRelease = newHelmRelease
@@ -233,6 +234,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 			if gitAction != nil && gitAction.ID != 0 && gitAction.GitlabIntegrationID == 0 {
 				gaRunner, err := GetGARunner(
+					r.Context(),
 					c.Config(),
 					user.ID,
 					cluster.ProjectID,

+ 45 - 34
api/server/handlers/release/upgrade_webhook.go

@@ -16,6 +16,7 @@ import (
 	"github.com/porter-dev/porter/internal/helm"
 	"github.com/porter-dev/porter/internal/notifier"
 	"github.com/porter-dev/porter/internal/notifier/slack"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"gorm.io/gorm"
 )
 
@@ -36,57 +37,73 @@ func NewWebhookHandler(
 }
 
 func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-webhook-deploy-with-token-handler")
+	defer span.End()
+
 	token, _ := requestutils.GetURLParamString(r, types.URLParamToken)
 
 	// retrieve release by token
-	release, err := c.Repo().Release().ReadReleaseByWebhookToken(token)
+	dbRelease, err := c.Repo().Release().ReadReleaseByWebhookToken(token)
 	if err != nil {
 		if err == gorm.ErrRecordNotFound {
+			err = telemetry.Error(ctx, span, err, "release not found with given webhook")
 			// throw forbidden error, since we don't want a way to verify if webhooks exist
-			c.HandleAPIError(w, r, apierrors.NewErrForbidden(
-				fmt.Errorf("release not found with given webhook"),
-			))
-
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
 			return
 		}
 
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error with reading release by webhook token")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if dbRelease == nil {
+		err = telemetry.Error(ctx, span, nil, "release is nil with given webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
 		return
 	}
+	release := *dbRelease
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "release-id", Value: release.ID},
+		telemetry.AttributeKV{Key: "release-name", Value: release.Name},
+		telemetry.AttributeKV{Key: "release-namespace", Value: release.Namespace},
+		telemetry.AttributeKV{Key: "cluster-id", Value: release.ClusterID},
+		telemetry.AttributeKV{Key: "project-id", Value: release.ProjectID},
+	)
 
 	cluster, err := c.Repo().Cluster().ReadCluster(release.ProjectID, release.ClusterID)
 	if err != nil {
 		if err == gorm.ErrRecordNotFound {
+			err = telemetry.Error(ctx, span, err, "cluster not found for upgrade webhook")
 			// throw forbidden error, since we don't want a way to verify if the cluster and project
 			// still exist for a cluster that's been deleted
-			c.HandleAPIError(w, r, apierrors.NewErrForbidden(
-				fmt.Errorf("cluster %d in project %d not found for upgrade webhook", release.ClusterID, release.ProjectID),
-			))
-
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
 			return
 		}
 
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error with reading cluster for upgrade webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	// in this case, we retrieve the agent by passing in the namespace field directly, since
 	// it cannot be detected from the URL
-	helmAgent, err := c.GetHelmAgent(r, cluster, release.Namespace)
+	helmAgent, err := c.GetHelmAgent(ctx, r, cluster, release.Namespace)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get helm agent for upgrade webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	request := &types.WebhookRequest{}
-
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
 		return
 	}
 
-	rel, err := helmAgent.GetRelease(release.Name, 0, true)
+	rel, err := helmAgent.GetRelease(ctx, release.Name, 0, true)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get release for upgrade webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -114,17 +131,15 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	rel.Config["image"] = image
 
 	if rel.Config["auto_deploy"] == false {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			fmt.Errorf("Deploy webhook is disabled for this deployment."),
-			http.StatusBadRequest,
-		))
-
+		err = telemetry.Error(ctx, span, nil, "deploy webhook is disabled for this deployment")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	registries, err := c.Repo().Registry().ListRegistriesByProjectID(release.ProjectID)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to list registries for upgrade webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -140,10 +155,11 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	var notifConf *types.NotificationConfig
 	notifConf = nil
-	if release != nil && release.NotificationConfig != 0 {
+	if release.NotificationConfig != 0 {
 		conf, err := c.Repo().NotificationConfig().ReadNotificationConfig(release.NotificationConfig)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+			err = telemetry.Error(ctx, span, err, "unable to read notification config for upgrade webhook")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 			return
 		}
 
@@ -168,8 +184,7 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		),
 	}
 
-	rel, err = helmAgent.UpgradeReleaseByValues(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
-
+	rel, err = helmAgent.UpgradeReleaseByValues(ctx, conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection, false)
 	if err != nil {
 		notifyOpts.Status = notifier.StatusHelmFailed
 		notifyOpts.Info = err.Error()
@@ -177,12 +192,8 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		if !cluster.NotificationsDisabled {
 			deplNotifier.Notify(notifyOpts)
 		}
-
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-			err,
-			http.StatusBadRequest,
-		))
-
+		err = telemetry.Error(ctx, span, err, "unable to upgrade release for upgrade webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -210,9 +221,9 @@ func (c *WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	c.WriteResult(w, r, nil)
 
 	err = postUpgrade(c.Config(), cluster.ProjectID, cluster.ID, rel)
-
 	if err != nil {
-		c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error while running post upgrade hooks")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 }

+ 2 - 2
api/server/handlers/stack/add_application.go

@@ -125,7 +125,7 @@ func (p *StackAddApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	helmAgent, err := p.GetHelmAgent(r, cluster, "")
+	helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -175,7 +175,7 @@ func (p *StackAddApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 
 	for _, resource := range revision.Resources {
 		if rel, exists := helmReleaseMap[fmt.Sprintf("%s/%s", namespace, resource.Name)]; exists {
-			_, err = release.CreateAppReleaseFromHelmRelease(p.Config(), proj.ID, cluster.ID, resource.ID, rel)
+			_, err = release.CreateAppReleaseFromHelmRelease(r.Context(), p.Config(), proj.ID, cluster.ID, resource.ID, rel)
 
 			if err != nil {
 				saveErrs = append(saveErrs, fmt.Sprintf("the resource %s/%s could not be saved right now", namespace, resource.Name))

+ 2 - 2
api/server/handlers/stack/create.go

@@ -171,7 +171,7 @@ func (p *StackCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		helmAgent, err := p.GetHelmAgent(r, cluster, "")
+		helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, "")
 		if err != nil {
 			p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
@@ -221,7 +221,7 @@ func (p *StackCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 		for _, resource := range revision.Resources {
 			if rel, exists := helmReleaseMap[fmt.Sprintf("%s/%s", namespace, resource.Name)]; exists {
-				_, err = release.CreateAppReleaseFromHelmRelease(p.Config(), proj.ID, cluster.ID, resource.ID, rel)
+				_, err = release.CreateAppReleaseFromHelmRelease(r.Context(), p.Config(), proj.ID, cluster.ID, resource.ID, rel)
 
 				if err != nil {
 					saveErrs = append(saveErrs, fmt.Sprintf("the resource %s/%s could not be saved right now", namespace, resource.Name))

+ 1 - 1
api/server/handlers/stack/delete.go

@@ -46,7 +46,7 @@ func (p *StackDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		helmAgent, err := p.GetHelmAgent(r, cluster, namespace)
+		helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, namespace)
 		if err != nil {
 			p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return

+ 9 - 7
api/server/handlers/stack/helpers.go

@@ -1,6 +1,8 @@
 package stack
 
 import (
+	"context"
+
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/helm"
@@ -28,7 +30,7 @@ func applyAppResource(opts *applyAppResourceOpts) (*release.Release, error) {
 		opts.request.TemplateVersion = ""
 	}
 
-	chart, err := loader.LoadChartPublic(opts.request.TemplateRepoURL, opts.request.TemplateName, opts.request.TemplateVersion)
+	chart, err := loader.LoadChartPublic(context.Background(), opts.request.TemplateRepoURL, opts.request.TemplateName, opts.request.TemplateVersion)
 	if err != nil {
 		return nil, err
 	}
@@ -53,7 +55,7 @@ func applyAppResource(opts *applyAppResourceOpts) (*release.Release, error) {
 		"revision": opts.stackRevision,
 	}
 
-	return opts.helmAgent.InstallChart(conf, opts.config.DOConf, opts.config.ServerConf.DisablePullSecretsInjection)
+	return opts.helmAgent.InstallChart(context.Background(), conf, opts.config.DOConf, opts.config.ServerConf.DisablePullSecretsInjection)
 }
 
 type rollbackAppResourceOpts struct {
@@ -63,7 +65,7 @@ type rollbackAppResourceOpts struct {
 }
 
 func rollbackAppResource(opts *rollbackAppResourceOpts) error {
-	return opts.helmAgent.RollbackRelease(opts.name, int(opts.helmRevisionID))
+	return opts.helmAgent.RollbackRelease(context.Background(), opts.name, int(opts.helmRevisionID))
 }
 
 type updateAppResourceTagOpts struct {
@@ -82,7 +84,7 @@ type updateAppResourceTagOpts struct {
 
 func updateAppResourceTag(opts *updateAppResourceTagOpts) error {
 	// read the current release to get the current values
-	rel, err := opts.helmAgent.GetRelease(opts.name, 0, true)
+	rel, err := opts.helmAgent.GetRelease(context.Background(), opts.name, 0, true)
 	if err != nil {
 		return err
 	}
@@ -104,8 +106,8 @@ func updateAppResourceTag(opts *updateAppResourceTagOpts) error {
 		StackRevision: opts.stackRevision,
 	}
 
-	_, err = opts.helmAgent.UpgradeReleaseByValues(conf, opts.config.DOConf,
-		opts.config.ServerConf.DisablePullSecretsInjection)
+	_, err = opts.helmAgent.UpgradeReleaseByValues(context.Background(), conf, opts.config.DOConf,
+		opts.config.ServerConf.DisablePullSecretsInjection, false)
 
 	return err
 }
@@ -116,7 +118,7 @@ type deleteAppResourceOpts struct {
 }
 
 func deleteAppResource(opts *deleteAppResourceOpts) error {
-	_, err := opts.helmAgent.UninstallChart(opts.name)
+	_, err := opts.helmAgent.UninstallChart(context.Background(), opts.name)
 
 	return err
 }

+ 1 - 1
api/server/handlers/stack/remove_application.go

@@ -97,7 +97,7 @@ func (p *StackRemoveApplicationHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	helmAgent, err := p.GetHelmAgent(r, cluster, namespace)
+	helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, namespace)
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 1 - 1
api/server/handlers/stack/rollback.go

@@ -35,7 +35,7 @@ func NewStackRollbackHandler(
 func (p *StackRollbackHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
-	helmAgent, err := p.GetHelmAgent(r, cluster, "")
+	helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 1 - 1
api/server/handlers/stack/update_source_put.go

@@ -38,7 +38,7 @@ func (p *StackPutSourceConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 	namespace, _ := r.Context().Value(types.NamespaceScope).(string)
 	stack, _ := r.Context().Value(types.StackScope).(*models.Stack)
 
-	helmAgent, err := p.GetHelmAgent(r, cluster, "")
+	helmAgent, err := p.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 0 - 249
api/server/handlers/stacks/create_porter_app.go

@@ -1,249 +0,0 @@
-package stacks
-
-import (
-	"encoding/base64"
-	"fmt"
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/helm"
-	"github.com/porter-dev/porter/internal/models"
-	"github.com/stefanmcshane/helm/pkg/chart"
-)
-
-type CreatePorterAppHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-func NewCreatePorterAppHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *CreatePorterAppHandler {
-	return &CreatePorterAppHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	request := &types.CreatePorterAppRequest{}
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error decoding request")))
-		return
-	}
-
-	stackName, reqErr := requestutils.GetURLParamString(r, types.URLParamStackName)
-	if reqErr != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(reqErr, http.StatusBadRequest))
-		return
-	}
-	namespace := fmt.Sprintf("porter-stack-%s", stackName)
-
-	helmAgent, err := c.GetHelmAgent(r, cluster, namespace)
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting helm agent: %w", err)))
-		return
-	}
-
-	k8sAgent, err := c.GetAgent(r, cluster, namespace)
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting k8s agent: %w", err)))
-		return
-	}
-
-	helmRelease, err := helmAgent.GetRelease(stackName, 0, false)
-	shouldCreate := err != nil
-
-	porterYamlBase64 := request.PorterYAMLBase64
-	porterYaml, err := base64.StdEncoding.DecodeString(porterYamlBase64)
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error decoding porter.yaml: %w", err)))
-		return
-	}
-	imageInfo := request.ImageInfo
-	registries, err := c.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error listing registries: %w", err)))
-		return
-	}
-
-	var releaseValues map[string]interface{}
-	var releaseDependencies []*chart.Dependency
-	if shouldCreate || request.OverrideRelease {
-		releaseValues = nil
-		releaseDependencies = nil
-
-		// this is required because when the front-end sends an update request with overrideRelease=true, it is unable to
-		// get the image info from the release. unless it is explicitly provided in the request, we avoid overwriting it
-		// by attempting to get the image info from the release
-		if helmRelease != nil && (imageInfo.Repository == "" || imageInfo.Tag == "") {
-			imageInfo = attemptToGetImageInfoFromRelease(helmRelease.Config)
-		}
-	} else {
-		releaseValues = helmRelease.Config
-		releaseDependencies = helmRelease.Chart.Metadata.Dependencies
-	}
-
-	chart, values, err := parse(
-		porterYaml,
-		imageInfo,
-		c.Config(),
-		cluster.ProjectID,
-		releaseValues,
-		releaseDependencies,
-		SubdomainCreateOpts{
-			k8sAgent:       k8sAgent,
-			dnsRepo:        c.Repo().DNSRecord(),
-			powerDnsClient: c.Config().PowerDNSClient,
-			appRootDomain:  c.Config().ServerConf.AppRootDomain,
-			stackName:      stackName,
-		})
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error parsing porter yaml into chart and values: %w", err)))
-		return
-	}
-
-	if shouldCreate {
-		// create the namespace if it does not exist already
-		_, err = k8sAgent.CreateNamespace(namespace, nil)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error creating namespace: %w", err)))
-			return
-		}
-
-		conf := &helm.InstallChartConfig{
-			Chart:      chart,
-			Name:       stackName,
-			Namespace:  namespace,
-			Values:     values,
-			Cluster:    cluster,
-			Repo:       c.Repo(),
-			Registries: registries,
-		}
-
-		// create the chart
-		_, err = helmAgent.InstallChart(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deploying app: %s", err.Error())))
-
-			_, err = helmAgent.UninstallChart(stackName)
-			if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error uninstalling chart: %w", err)))
-			}
-
-			return
-		}
-
-		existing, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-			return
-		} else if existing.Name != "" {
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
-				fmt.Errorf("porter app with name %s already exists in this environment", existing.Name), http.StatusForbidden))
-			return
-		}
-
-		app := &models.PorterApp{
-			Name:      stackName,
-			ClusterID: cluster.ID,
-			ProjectID: project.ID,
-			RepoName:  request.RepoName,
-			GitRepoID: request.GitRepoID,
-			GitBranch: request.GitBranch,
-
-			BuildContext:   request.BuildContext,
-			Builder:        request.Builder,
-			Buildpacks:     request.Buildpacks,
-			Dockerfile:     request.Dockerfile,
-			ImageRepoURI:   request.ImageRepoURI,
-			PullRequestURL: request.PullRequestURL,
-		}
-
-		// create the db entry
-		porterApp, err := c.Repo().PorterApp().UpdatePorterApp(app)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error writing app to DB: %s", err.Error())))
-			return
-		}
-
-		c.WriteResult(w, r, porterApp.ToPorterAppType())
-	} else {
-		conf := &helm.InstallChartConfig{
-			Chart:      chart,
-			Name:       stackName,
-			Namespace:  namespace,
-			Values:     values,
-			Cluster:    cluster,
-			Repo:       c.Repo(),
-			Registries: registries,
-		}
-
-		// update the chart
-		_, err = helmAgent.UpgradeInstallChart(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deploying app: %s", err.Error())))
-
-			return
-		}
-
-		// update the DB entry
-		app, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, stackName)
-		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-			return
-		}
-
-		if request.RepoName != "" {
-			app.RepoName = request.RepoName
-		}
-		if request.GitBranch != "" {
-			app.GitBranch = request.GitBranch
-		}
-		if request.BuildContext != "" {
-			app.BuildContext = request.BuildContext
-		}
-		if request.Builder != "" {
-			app.Builder = request.Builder
-		}
-		if request.Buildpacks != "" {
-			if request.Buildpacks == "null" {
-				app.Buildpacks = ""
-			} else {
-				app.Buildpacks = request.Buildpacks
-			}
-		}
-		if request.Dockerfile != "" {
-			if request.Dockerfile == "null" {
-				app.Dockerfile = ""
-			} else {
-				app.Dockerfile = request.Dockerfile
-			}
-		}
-		if request.ImageRepoURI != "" {
-			app.ImageRepoURI = request.ImageRepoURI
-		}
-		if request.PullRequestURL != "" {
-			app.PullRequestURL = request.PullRequestURL
-		}
-
-		updatedPorterApp, err := c.Repo().PorterApp().UpdatePorterApp(app)
-		if err != nil {
-			return
-		}
-
-		c.WriteResult(w, r, updatedPorterApp.ToPorterAppType())
-	}
-}

+ 2 - 1
api/server/handlers/template/get.go

@@ -1,6 +1,7 @@
 package template
 
 import (
+	"context"
 	"net/http"
 	"strings"
 
@@ -49,7 +50,7 @@ func (t *TemplateGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		request.RepoURL = t.Config().ServerConf.DefaultApplicationHelmRepoURL
 	}
 
-	chart, err := loader.LoadChartPublic(request.RepoURL, name, version)
+	chart, err := loader.LoadChartPublic(context.Background(), request.RepoURL, name, version)
 	if err != nil {
 		t.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 2 - 1
api/server/handlers/template/get_upgrade_notes.go

@@ -1,6 +1,7 @@
 package template
 
 import (
+	"context"
 	"net/http"
 	"strings"
 
@@ -51,7 +52,7 @@ func (t *TemplateGetUpgradeNotesHandler) ServeHTTP(w http.ResponseWriter, r *htt
 		prevVersion = "v0.0.0"
 	}
 
-	chart, err := loader.LoadChartPublic(request.RepoURL, name, version)
+	chart, err := loader.LoadChartPublic(context.Background(), request.RepoURL, name, version)
 	if err != nil {
 		t.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 17 - 1
api/server/handlers/user/update_onboarding_step.go

@@ -33,9 +33,25 @@ func (v *UpdateOnboardingStepHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 		return
 	}
 
+	if request.Step == "cost-consent-opened" {
+		v.Config().AnalyticsClient.Track(analytics.CostConsentOpenedTrack(&analytics.CostConsentOpenedTrackOpts{
+			UserScopedTrackOpts: analytics.GetUserScopedTrackOpts(user.ID),
+			Provider:            request.Provider,
+			Email:               user.Email,
+			FirstName:           user.FirstName,
+			LastName:            user.LastName,
+			CompanyName:         user.CompanyName,
+		}))
+	}
+
 	if request.Step == "cost-consent-complete" {
-		v.Config().AnalyticsClient.Track(analytics.CostConsentTrack(&analytics.CostConsentTrackOpts{
+		v.Config().AnalyticsClient.Track(analytics.CostConsentCompletedTrack(&analytics.CostConsentCompletedTrackOpts{
 			UserScopedTrackOpts: analytics.GetUserScopedTrackOpts(user.ID),
+			Provider:            request.Provider,
+			Email:               user.Email,
+			FirstName:           user.FirstName,
+			LastName:            user.LastName,
+			CompanyName:         user.CompanyName,
 		}))
 	}
 

+ 3 - 2
api/server/handlers/v1/env_group/create.go

@@ -1,6 +1,7 @@
 package env_group
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"strings"
@@ -69,7 +70,7 @@ func (c *CreateEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 	}
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, namespace)
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, namespace)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -201,7 +202,7 @@ func rolloutApplications(
 				Values:     newConfig,
 			}
 
-			_, err = helmAgent.UpgradeReleaseByValues(conf, config.DOConf, config.ServerConf.DisablePullSecretsInjection)
+			_, err = helmAgent.UpgradeReleaseByValues(context.Background(), conf, config.DOConf, config.ServerConf.DisablePullSecretsInjection, false)
 
 			if err != nil {
 				mu.Lock()

+ 5 - 3
api/server/handlers/v1/release/upgrade.go

@@ -1,6 +1,7 @@
 package release
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -44,7 +45,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 	helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
 
-	helmAgent, err := c.GetHelmAgent(r, cluster, "")
+	helmAgent, err := c.GetHelmAgent(r.Context(), r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
@@ -118,7 +119,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 	// if LatestRevision is set, check that the revision matches the latest revision in the database
 	if request.LatestRevision != 0 {
-		currHelmRelease, err := helmAgent.GetRelease(helmRelease.Name, 0, false)
+		currHelmRelease, err := helmAgent.GetRelease(context.Background(), helmRelease.Name, 0, false)
 		if err != nil {
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("could not retrieve latest revision"),
@@ -138,7 +139,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		}
 	}
 
-	newHelmRelease, upgradeErr := helmAgent.UpgradeReleaseByValues(conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection)
+	newHelmRelease, upgradeErr := helmAgent.UpgradeReleaseByValues(context.Background(), conf, c.Config().DOConf, c.Config().ServerConf.DisablePullSecretsInjection, false)
 
 	if upgradeErr == nil && newHelmRelease != nil {
 		helmRelease = newHelmRelease
@@ -217,6 +218,7 @@ func (c *UpgradeReleaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 			if gitAction != nil && gitAction.ID != 0 && gitAction.GitlabIntegrationID == 0 {
 				gaRunner, err := baseReleaseHandler.GetGARunner(
+					r.Context(),
 					c.Config(),
 					user.ID,
 					cluster.ProjectID,

+ 2 - 1
api/server/handlers/v1/template/get.go

@@ -1,6 +1,7 @@
 package template
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"strings"
@@ -81,7 +82,7 @@ func (t *TemplateGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		version = ""
 	}
 
-	chart, err := loader.LoadChartPublic(request.RepoURL, name, version)
+	chart, err := loader.LoadChartPublic(context.Background(), request.RepoURL, name, version)
 	if err != nil {
 		t.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 2 - 1
api/server/handlers/v1/template/get_upgrade_notes.go

@@ -1,6 +1,7 @@
 package template
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"strings"
@@ -83,7 +84,7 @@ func (t *TemplateGetUpgradeNotesHandler) ServeHTTP(w http.ResponseWriter, r *htt
 		prevVersion = "v0.0.0"
 	}
 
-	chart, err := loader.LoadChartPublic(request.RepoURL, name, version)
+	chart, err := loader.LoadChartPublic(context.Background(), request.RepoURL, name, version)
 	if err != nil {
 		t.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 1 - 1
api/server/router/base.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/credentials"
 	"github.com/porter-dev/porter/api/server/handlers/gitinstallation"
 	"github.com/porter-dev/porter/api/server/handlers/healthcheck"

+ 1 - 1
api/server/router/cluster.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/cluster"
 	"github.com/porter-dev/porter/api/server/handlers/database"
 	"github.com/porter-dev/porter/api/server/handlers/environment"

+ 1 - 1
api/server/router/cluster_integration.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	awsClusterInt "github.com/porter-dev/porter/api/server/handlers/cluster_integration/aws"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

+ 68 - 1
api/server/router/git_installation.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/environment"
 	"github.com/porter-dev/porter/api/server/handlers/gitinstallation"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -735,5 +735,72 @@ func getGitInstallationRoutes(
 		Router:   r,
 	})
 
+	getWorkflowLogsEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent: basePath,
+				RelativePath: fmt.Sprintf(
+					"%s/{%s}/{%s}/clusters/{cluster_id}/get_logs_workflow",
+					relPath,
+					types.URLParamGitRepoOwner,
+					types.URLParamGitRepoName,
+				),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.GitInstallationScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	getWorkflowLogsHandler := gitinstallation.NewGetWorkflowLogsHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getWorkflowLogsEndpoint,
+		Handler:  getWorkflowLogsHandler,
+		Router:   r,
+	})
+
+	getWorkflowLogByIDEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent: basePath,
+				RelativePath: fmt.Sprintf(
+					"%s/{%s}/{%s}/clusters/{cluster_id}/workflow_run_id",
+					relPath,
+					types.URLParamGitRepoOwner,
+					types.URLParamGitRepoName,
+				),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.GitInstallationScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	getWorkflowLogByIDHandler := gitinstallation.NewGetSpecificWorkflowLogsHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getWorkflowLogByIDEndpoint,
+		Handler:  getWorkflowLogByIDHandler,
+		Router:   r,
+	})
 	return routes, newPath
 }

+ 1 - 1
api/server/router/helm_repo.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/helmrepo"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

+ 1 - 1
api/server/router/infra.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/database"
 	"github.com/porter-dev/porter/api/server/handlers/infra"
 	"github.com/porter-dev/porter/api/server/shared"

+ 1 - 1
api/server/router/invite.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/invite"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

+ 21 - 0
api/server/router/middleware/hydrate_trace.go

@@ -0,0 +1,21 @@
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/internal/telemetry"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// HydrateTraces pulls related IDs from requests, and puts them into a span which already exists.
+// If no span already exists, these attributes will not be populated. This should not be used as a replacement for creating your own spans.
+// This should be added as the last middleware in the chain, so that it can pull IDs from the request context.
+func HydrateTraces(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		span := trace.SpanFromContext(ctx)
+		telemetry.AddKnownContextVariablesToSpan(ctx, span)
+		r = r.Clone(ctx)
+		next.ServeHTTP(w, r)
+	})
+}

+ 30 - 21
api/server/router/middleware/usage.go

@@ -1,9 +1,10 @@
 package middleware
 
 import (
-	"fmt"
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
@@ -24,16 +25,24 @@ var UsageErrFmt = "usage limit reached for metric %s: limit %d, requested %d"
 
 func (b *UsageMiddleware) Middleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+		ctx, span := telemetry.NewSpan(r.Context(), "middleware-usage")
+		defer span.End()
+
+		proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "project-id", Value: proj.ID})
 
 		// get the project usage limits
 		currentUsage, limit, _, err := usage.GetUsage(&usage.GetUsageOpts{
-			Project:          proj,
-			DOConf:           b.config.DOConf,
-			Repo:             b.config.Repo,
-			WhitelistedUsers: b.config.WhitelistedUsers,
+			Project:                          proj,
+			DOConf:                           b.config.DOConf,
+			Repo:                             b.config.Repo,
+			WhitelistedUsers:                 b.config.WhitelistedUsers,
+			ClusterControlPlaneServiceClient: b.config.ClusterControlPlaneClient,
 		})
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error getting usage")
+
 			apierrors.HandleAPIError(
 				b.config.Logger,
 				b.config.Alerter,
@@ -45,25 +54,25 @@ func (b *UsageMiddleware) Middleware(next http.Handler) http.Handler {
 			return
 		}
 
+		telemetry.WithAttributes(span,
+			telemetry.AttributeKV{Key: "users-current-usage", Value: currentUsage.Users},
+			telemetry.AttributeKV{Key: "users-limit", Value: limit.Users},
+			telemetry.AttributeKV{Key: "cpu-current-usage", Value: currentUsage.ResourceCPU},
+			telemetry.AttributeKV{Key: "cpu-limit", Value: limit.ResourceCPU},
+			telemetry.AttributeKV{Key: "memory-current-usage", Value: currentUsage.ResourceMemory},
+			telemetry.AttributeKV{Key: "memory-limit", Value: limit.ResourceMemory},
+			telemetry.AttributeKV{Key: "clusters-current-usage", Value: currentUsage.Clusters},
+			telemetry.AttributeKV{Key: "clusters-limit", Value: limit.Clusters},
+		)
+
 		// check the usage limits
 		allowed := allowUsage(limit, currentUsage, b.metric)
 
-		if allowed {
-			next.ServeHTTP(w, r)
-		} else {
-			limit, curr := getMetricUsage(limit, currentUsage, b.metric)
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "allowed", Value: allowed})
 
-			apierrors.HandleAPIError(
-				b.config.Logger,
-				b.config.Alerter,
-				w, r,
-				apierrors.NewErrPassThroughToClient(
-					fmt.Errorf(UsageErrFmt, b.metric, limit, curr),
-					http.StatusBadRequest,
-				),
-				true,
-			)
-		}
+		r = r.Clone(ctx)
+
+		next.ServeHTTP(w, r)
 	})
 }
 

+ 1 - 1
api/server/router/namespace.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 
 	"github.com/porter-dev/porter/api/server/handlers/job"
 	"github.com/porter-dev/porter/api/server/handlers/namespace"

+ 1 - 1
api/server/router/oauth_callback.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/oauth_callback"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

+ 346 - 0
api/server/router/porter_app.go

@@ -0,0 +1,346 @@
+package router
+
+import (
+	"fmt"
+
+	"github.com/go-chi/chi/v5"
+	"github.com/porter-dev/porter/api/server/handlers/porter_app"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/router"
+	"github.com/porter-dev/porter/api/types"
+)
+
+func NewStackScopedRegisterer(children ...*router.Registerer) *router.Registerer {
+	return &router.Registerer{
+		GetRoutes: GetStackScopedRoutes,
+		Children:  children,
+	}
+}
+
+func GetStackScopedRoutes(
+	r chi.Router,
+	config *config.Config,
+	basePath *types.Path,
+	factory shared.APIEndpointFactory,
+	children ...*router.Registerer,
+) []*router.Route {
+	routes, projPath := getStackRoutes(r, config, basePath, factory)
+
+	if len(children) > 0 {
+		r.Route(projPath.RelativePath, func(r chi.Router) {
+			for _, child := range children {
+				childRoutes := child.GetRoutes(r, config, basePath, factory, child.Children...)
+
+				routes = append(routes, childRoutes...)
+			}
+		})
+	}
+
+	return routes
+}
+
+func getStackRoutes(
+	r chi.Router,
+	config *config.Config,
+	basePath *types.Path,
+	factory shared.APIEndpointFactory,
+) ([]*router.Route, *types.Path) {
+	relPath := "/stacks"
+
+	newPath := &types.Path{
+		Parent:       basePath,
+		RelativePath: relPath,
+	}
+
+	var routes []*router.Route
+
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/stacks/{name} -> porter_app.NewPorterAppGetHandler
+	getPorterAppEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	getPorterAppHandler := porter_app.NewGetPorterAppHandler(
+		config,
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getPorterAppEndpoint,
+		Handler:  getPorterAppHandler,
+		Router:   r,
+	})
+
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/stacks/{name} -> porter_app.NewPorterAppListHandler
+	listPorterAppEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbList,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: relPath,
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	listPorterAppHandler := porter_app.NewPorterAppListHandler(
+		config,
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: listPorterAppEndpoint,
+		Handler:  listPorterAppHandler,
+		Router:   r,
+	})
+
+	// DELETE /api/projects/{project_id}/clusters/{cluster_id}/stacks -> release.NewDeletePorterAppByNameHandler
+	deletePorterAppByNameEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbDelete,
+			Method: types.HTTPVerbDelete,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	deletePorterAppByNameHandler := porter_app.NewDeletePorterAppByNameHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: deletePorterAppByNameEndpoint,
+		Handler:  deletePorterAppByNameHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/stacks/{stack} -> porter_app.NewCreatePorterAppHandler
+	createPorterAppEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbCreate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	createPorterAppHandler := porter_app.NewCreatePorterAppHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: createPorterAppEndpoint,
+		Handler:  createPorterAppHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/stacks/{stack}/rollback -> porter_app.NewRollbackPorterAppHandler
+	rollbackPorterAppEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbCreate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}/rollback", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	rollbackPorterAppHandler := porter_app.NewRollbackPorterAppHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: rollbackPorterAppEndpoint,
+		Handler:  rollbackPorterAppHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/stacks/{stack}/pr -> porter_app.NewOpenStackPRHandler
+	createSecretAndOpenGitHubPullRequestEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbCreate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}/pr", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	createSecretAndOpenGitHubPullRequestHandler := porter_app.NewOpenStackPRHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: createSecretAndOpenGitHubPullRequestEndpoint,
+		Handler:  createSecretAndOpenGitHubPullRequestHandler,
+		Router:   r,
+	})
+
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/stacks/{name}/events -> porter_app.NewPorterAppEventListHandler
+	listPorterAppEventsEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbList,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}/events", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	listPorterAppEventsHandler := porter_app.NewPorterAppEventListHandler(
+		config,
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: listPorterAppEventsEndpoint,
+		Handler:  listPorterAppEventsHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/stacks/{name}/events -> porter_app.NewCreatePorterAppEventEndpoint
+	createPorterAppEventEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbCreate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/{%s}/events", relPath, types.URLParamStackName),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	createPorterAppEventHandler := porter_app.NewCreateUpdatePorterAppEventHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: createPorterAppEventEndpoint,
+		Handler:  createPorterAppEventHandler,
+		Router:   r,
+	})
+
+	// POST /api/projects/{project_id}/clusters/{cluster_id}/stacks/analytics -> porter_app.NewPorterAppAnalyticsHandler
+	porterAppAnalyticsEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbUpdate,
+			Method: types.HTTPVerbPost,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/analytics", relPath),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	porterAppAnalyticsHandler := porter_app.NewPorterAppAnalyticsHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: porterAppAnalyticsEndpoint,
+		Handler:  porterAppAnalyticsHandler,
+		Router:   r,
+	})
+
+	// GET /api/projects/{project_id}/clusters/{cluster_id}/stacks/logs -> cluster.NewGetChartLogsWithinTimeRangeHandler
+	getChartLogsWithinTimeRangeEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: fmt.Sprintf("%s/logs", relPath),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.ClusterScope,
+			},
+		},
+	)
+
+	getChartLogsWithinTimeRangeHandler := porter_app.NewGetLogsWithinTimeRangeHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getChartLogsWithinTimeRangeEndpoint,
+		Handler:  getChartLogsWithinTimeRangeHandler,
+		Router:   r,
+	})
+
+	return routes, newPath
+}

+ 1 - 1
api/server/router/project.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	apiContract "github.com/porter-dev/porter/api/server/handlers/api_contract"
 	"github.com/porter-dev/porter/api/server/handlers/api_token"
 	"github.com/porter-dev/porter/api/server/handlers/billing"

+ 70 - 16
api/server/router/project_integration.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	project_integration "github.com/porter-dev/porter/api/server/handlers/project_integration"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"
@@ -416,6 +416,33 @@ func getProjectIntegrationRoutes(
 	// PATCH /api/projects/{project_id}/integrations/gitlab/{integration_id}
 
 	// DELETE /api/projects/{project_id}/integrations/gitlab/{integration_id}
+	deleteGitlabEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbDelete,
+			Method: types.HTTPVerbDelete,
+			Path: &types.Path{
+				Parent:       basePath,
+				RelativePath: relPath + "/gitlab/{integration_id}",
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.GitlabIntegrationScope,
+			},
+		},
+	)
+
+	deleteGitlabHandler := project_integration.NewDeleteGitlabIntegrationHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: deleteGitlabEndpoint,
+		Handler:  deleteGitlabHandler,
+		Router:   r,
+	})
 
 	// GET /api/projects/{project_id}/integrations/git
 	listGitIntegrationsEndpoint := factory.NewAPIEndpoint(
@@ -474,15 +501,15 @@ func getProjectIntegrationRoutes(
 		Router:   r,
 	})
 
-	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/{owner}/{name}/branches
+	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/branches
 	listGitlabRepoBranchesEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
 			Verb:   types.APIVerbGet,
 			Method: types.HTTPVerbGet,
 			Path: &types.Path{
 				Parent: basePath,
-				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/{%s}/{%s}/branches",
-					relPath, types.URLParamIntegrationID, types.URLParamGitRepoOwner, types.URLParamGitRepoName),
+				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/branches",
+					relPath, types.URLParamIntegrationID),
 			},
 			Scopes: []types.PermissionScope{
 				types.UserScope,
@@ -504,16 +531,15 @@ func getProjectIntegrationRoutes(
 		Router:   r,
 	})
 
-	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/{owner}/{name}/{branch}/contents
+	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/contents
 	getGitlabRepoContentsEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
 			Verb:   types.APIVerbGet,
 			Method: types.HTTPVerbGet,
 			Path: &types.Path{
 				Parent: basePath,
-				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/{%s}/{%s}/{%s}/contents", relPath,
-					types.URLParamIntegrationID, types.URLParamGitRepoOwner,
-					types.URLParamGitRepoName, types.URLParamGitBranch),
+				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/contents", relPath,
+					types.URLParamIntegrationID),
 			},
 			Scopes: []types.PermissionScope{
 				types.UserScope,
@@ -535,16 +561,15 @@ func getProjectIntegrationRoutes(
 		Router:   r,
 	})
 
-	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/{owner}/{name}/{branch}/buildpack/detect
+	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/buildpack/detect
 	getGitlabRepoBuildpackEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
 			Verb:   types.APIVerbGet,
 			Method: types.HTTPVerbGet,
 			Path: &types.Path{
 				Parent: basePath,
-				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/{%s}/{%s}/{%s}/buildpack/detect", relPath,
-					types.URLParamIntegrationID, types.URLParamGitRepoOwner,
-					types.URLParamGitRepoName, types.URLParamGitBranch),
+				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/buildpack/detect", relPath,
+					types.URLParamIntegrationID),
 			},
 			Scopes: []types.PermissionScope{
 				types.UserScope,
@@ -566,16 +591,15 @@ func getProjectIntegrationRoutes(
 		Router:   r,
 	})
 
-	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/{owner}/{name}/{branch}/procfile
+	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/procfile
 	getGitlabRepoProcfileEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
 			Verb:   types.APIVerbGet,
 			Method: types.HTTPVerbGet,
 			Path: &types.Path{
 				Parent: basePath,
-				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/{%s}/{%s}/{%s}/procfile", relPath,
-					types.URLParamIntegrationID, types.URLParamGitRepoOwner,
-					types.URLParamGitRepoName, types.URLParamGitBranch),
+				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/procfile", relPath,
+					types.URLParamIntegrationID),
 			},
 			Scopes: []types.PermissionScope{
 				types.UserScope,
@@ -597,5 +621,35 @@ func getProjectIntegrationRoutes(
 		Router:   r,
 	})
 
+	// GET /api/projects/{project_id}/integrations/gitlab/{integration_id}/repos/porteryaml
+	getGitlabRepoPorterYamlContentsEndpoint := factory.NewAPIEndpoint(
+		&types.APIRequestMetadata{
+			Verb:   types.APIVerbGet,
+			Method: types.HTTPVerbGet,
+			Path: &types.Path{
+				Parent: basePath,
+				RelativePath: fmt.Sprintf("%s/gitlab/{%s}/repos/porteryaml", relPath,
+					types.URLParamIntegrationID),
+			},
+			Scopes: []types.PermissionScope{
+				types.UserScope,
+				types.ProjectScope,
+				types.GitlabIntegrationScope,
+			},
+		},
+	)
+
+	getGitlabRepoPorterYamlContentsHandler := project_integration.NewGetGitlabRepoPorterYamlContentsHandler(
+		config,
+		factory.GetDecoderValidator(),
+		factory.GetResultWriter(),
+	)
+
+	routes = append(routes, &router.Route{
+		Endpoint: getGitlabRepoPorterYamlContentsEndpoint,
+		Handler:  getGitlabRepoPorterYamlContentsHandler,
+		Router:   r,
+	})
+
 	return routes, newPath
 }

+ 1 - 1
api/server/router/project_oauth.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 
 	"github.com/porter-dev/porter/api/server/handlers/project_oauth"
 	"github.com/porter-dev/porter/api/server/shared"

+ 1 - 1
api/server/router/registry.go

@@ -3,7 +3,7 @@ package router
 import (
 	"fmt"
 
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/registry"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

+ 1 - 1
api/server/router/release.go

@@ -1,7 +1,7 @@
 package router
 
 import (
-	"github.com/go-chi/chi"
+	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers/release"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/config"

部分文件因文件數量過多而無法顯示