Explorar o código

Merge branch 'master' of github.com:porter-dev/porter into status-tab

Feroze Mohideen hai 2 anos
pai
achega
45e2a5c18d
Modificáronse 100 ficheiros con 5465 adicións e 2150 borrados
  1. 1 1
      .github/actions/porter-deploy/action.yml
  2. 3 1
      .github/golangci-lint.yaml
  3. 60 0
      .github/workflows/app_tests_base.yml
  4. 19 0
      .github/workflows/app_tests_internal_tools.yml
  5. 19 0
      .github/workflows/app_tests_production.yml
  6. 19 0
      .github/workflows/app_tests_sandbox.yml
  7. 0 107
      .github/workflows/dev_cli.yml
  8. 54 0
      .github/workflows/porter_stack_porter-sandbox.yml
  9. 6 32
      .github/workflows/production.yml
  10. 1 1
      .gitignore
  11. 1 1
      CONTRIBUTING.md
  12. 0 0
      Procfile
  13. 3 3
      README.md
  14. 29 0
      api/client/datastore.go
  15. 28 16
      api/client/deployment_target.go
  16. 31 0
      api/client/env_groups.go
  17. 175 159
      api/client/porter_app.go
  18. 29 0
      api/server/authn/handler.go
  19. 79 0
      api/server/authz/deployment_target.go
  20. 2 0
      api/server/authz/policy.go
  21. 106 0
      api/server/handlers/addons/list.go
  22. 35 13
      api/server/handlers/api_contract/list.go
  23. 94 0
      api/server/handlers/api_contract/preflight.go
  24. 0 24
      api/server/handlers/billing/billing_ce.go
  25. 0 22
      api/server/handlers/billing/billing_ee.go
  26. 102 0
      api/server/handlers/billing/create.go
  27. 76 0
      api/server/handlers/billing/customer.go
  28. 63 0
      api/server/handlers/billing/delete.go
  29. 81 0
      api/server/handlers/billing/list.go
  30. 0 9
      api/server/handlers/billing/redirect_billing.go
  31. 119 0
      api/server/handlers/cloud_provider/list_aws.go
  32. 13 15
      api/server/handlers/cluster/cluster_status.go
  33. 136 0
      api/server/handlers/cluster/compliance_checks.go
  34. 14 10
      api/server/handlers/cluster/delete.go
  35. 1 2
      api/server/handlers/cluster/get.go
  36. 21 3
      api/server/handlers/cluster/get_logs.go
  37. 5 0
      api/server/handlers/cluster/notify_new_incident.go
  38. 5 0
      api/server/handlers/cluster/notify_resolved_incident.go
  39. 9 2
      api/server/handlers/cluster/rename.go
  40. 121 0
      api/server/handlers/datastore/create_proxy.go
  41. 109 0
      api/server/handlers/datastore/credential.go
  42. 102 0
      api/server/handlers/datastore/delete.go
  43. 244 0
      api/server/handlers/datastore/get.go
  44. 177 0
      api/server/handlers/datastore/list.go
  45. 0 104
      api/server/handlers/datastore/status.go
  46. 520 0
      api/server/handlers/datastore/update.go
  47. 16 17
      api/server/handlers/deployment_target/create.go
  48. 51 46
      api/server/handlers/deployment_target/get.go
  49. 21 17
      api/server/handlers/deployment_target/list.go
  50. 36 24
      api/server/handlers/environment_groups/create.go
  51. 64 30
      api/server/handlers/environment_groups/list.go
  52. 16 5
      api/server/handlers/gitinstallation/get_accounts.go
  53. 23 5
      api/server/handlers/gitinstallation/oauth_callback.go
  54. 18 5
      api/server/handlers/gitinstallation/webhook.go
  55. 12 6
      api/server/handlers/handler.go
  56. 5 1
      api/server/handlers/namespace/stream_pod_logs_loki.go
  57. 152 0
      api/server/handlers/notifications/get_notification_config.go
  58. 100 0
      api/server/handlers/notifications/notification.go
  59. 233 0
      api/server/handlers/notifications/update_notification_config.go
  60. 19 6
      api/server/handlers/oauth_callback/slack.go
  61. 10 4
      api/server/handlers/porter_app/app_env_variables.go
  62. 104 0
      api/server/handlers/porter_app/app_instances.go
  63. 1 27
      api/server/handlers/porter_app/app_notifications.go
  64. 0 117
      api/server/handlers/porter_app/app_run.go
  65. 0 268
      api/server/handlers/porter_app/apply.go
  66. 100 0
      api/server/handlers/porter_app/attach_env_group.go
  67. 174 0
      api/server/handlers/porter_app/cloudsql.go
  68. 4 19
      api/server/handlers/porter_app/create_and_update_events.go
  69. 15 1
      api/server/handlers/porter_app/create_secret_and_open_pr.go
  70. 25 38
      api/server/handlers/porter_app/current_app_revision.go
  71. 58 30
      api/server/handlers/porter_app/default_deployment_target.go
  72. 2 0
      api/server/handlers/porter_app/delete.go
  73. 28 0
      api/server/handlers/porter_app/get_app_revision_status.go
  74. 3 15
      api/server/handlers/porter_app/get_app_template.go
  75. 47 9
      api/server/handlers/porter_app/get_build.go
  76. 1 1
      api/server/handlers/porter_app/get_logs_within_time_range.go
  77. 1 0
      api/server/handlers/porter_app/helm_values_v2.go
  78. 105 0
      api/server/handlers/porter_app/job_run_cancel.go
  79. 57 43
      api/server/handlers/porter_app/job_status.go
  80. 133 0
      api/server/handlers/porter_app/job_status_by_name.go
  81. 1 0
      api/server/handlers/porter_app/list_app_revisions.go
  82. 90 40
      api/server/handlers/porter_app/logs_apply_v2.go
  83. 112 0
      api/server/handlers/porter_app/manifests.go
  84. 12 3
      api/server/handlers/porter_app/parse_yaml.go
  85. 21 1
      api/server/handlers/porter_app/pod_status.go
  86. 31 19
      api/server/handlers/porter_app/rollback_revision.go
  87. 158 0
      api/server/handlers/porter_app/run_app_job.go
  88. 255 0
      api/server/handlers/porter_app/run_app_job_status.go
  89. 1 0
      api/server/handlers/porter_app/service_status.go
  90. 27 11
      api/server/handlers/porter_app/stream_logs.go
  91. 88 18
      api/server/handlers/porter_app/update_app.go
  92. 0 476
      api/server/handlers/porter_app/update_app_environment_group.go
  93. 26 5
      api/server/handlers/porter_app/update_image.go
  94. 0 63
      api/server/handlers/porter_app/use_new_apply_logic.go
  95. 0 216
      api/server/handlers/porter_app/validate.go
  96. 112 7
      api/server/handlers/porter_app/yaml_from_revision.go
  97. 63 0
      api/server/handlers/project/connect.go
  98. 31 18
      api/server/handlers/project/create.go
  99. 44 14
      api/server/handlers/project/delete.go
  100. 47 0
      api/server/handlers/project/update_onboarding_step.go

+ 1 - 1
.github/actions/porter-deploy/action.yml

@@ -39,7 +39,7 @@ runs:
     - name: Deploy stack
       uses: porter-dev/porter-cli-action@v0.1.0
       with:
-        command: apply
+        command: apply --wait
       env:
         PORTER_CLUSTER: "${{ inputs.cluster }}"
         PORTER_HOST: "${{ inputs.host }}"

+ 3 - 1
.github/golangci-lint.yaml

@@ -4,6 +4,8 @@ run:
   issues-exit-code: 1
   build-tags:
     - codeanalysis
+  skip-dirs:
+    - internal/repository/test
 
 issues:
   new-from-rev: origin/master # default: HEAD, this will only show linting changes in the current change
@@ -62,4 +64,4 @@ output:
   path-prefix: ""
 
   # sorts results by: filepath, line and column
-  sort-results: false
+  sort-results: false

+ 60 - 0
.github/workflows/app_tests_base.yml

@@ -0,0 +1,60 @@
+on:
+  workflow_call:
+    inputs:
+      stage:
+        required: true
+        type: string
+      project:
+        required: true
+        type: string
+      cluster:
+        required: true
+        type: string
+      host:
+        required: true
+        type: string
+    secrets:
+      token:
+        required: true
+      slack_webhook_url:
+        required: true
+
+env:
+  repo: porter
+
+name: Run app tests
+jobs:
+  integration-tests:
+    name: Run app tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 7
+    strategy:
+      matrix:
+        yaml: ['js-test-app-buildpack', 'js-test-app-dockerfile', 'nginx', 'next-test-app-dockerfile']
+      fail-fast: false
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          repository: porter-dev/app-integration-tests
+          ref: refs/heads/main
+      - name: Run test
+        uses: ./.github/actions
+        with:
+          host: ${{ inputs.host }}
+          project: ${{ inputs.project }}
+          cluster: ${{ inputs.cluster }}
+          token: ${{ secrets.token }}
+          yaml_file: ./test-yamls/${{ matrix.yaml }}.yaml
+          app_name: ${{ env.repo }}-${{ matrix.yaml }}
+  notify-on-failure:
+    name: Notify on failure
+    needs: integration-tests
+    runs-on: ubuntu-latest
+    if: failure()
+    steps:
+      - name: Notify Slack on failure
+        env:
+          RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+        run: |
+          curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"<!subteam^S05MGRLKF33> \`${{ env.repo }}\` integration tests failed in \`${{ inputs.STAGE }}\`: $RUN_URL \"}" ${{ secrets.slack_webhook_url }}

+ 19 - 0
.github/workflows/app_tests_internal_tools.yml

@@ -0,0 +1,19 @@
+on:
+  workflow_run:
+    workflows: ["Deploy Porter to Internal Tooling"]
+    branches: [master]
+    types:
+      - completed
+
+name: Run internal tools app tests
+jobs:
+  call-base-workflow:
+    uses: ./.github/workflows/app_tests_base.yml
+    with:
+      stage: internal-tools
+      project: "301"
+      cluster: "142"
+      host: https://dashboard.internal-tools.porter.run
+    secrets:
+      token: ${{ secrets.APP_INTEGRATION_PROJECT_TOKEN }}
+      slack_webhook_url: ${{ secrets.APP_INTEGRATION_SLACK_WEBHOOK }}

+ 19 - 0
.github/workflows/app_tests_production.yml

@@ -0,0 +1,19 @@
+on:
+  workflow_run:
+    workflows: ["Deploy Porter to Production"]
+    branches: [master]
+    types:
+      - completed
+
+name: Run production app tests
+jobs:
+  call-base-workflow:
+    uses: ./.github/workflows/app_tests_base.yml
+    with:
+      stage: production
+      project: "11646"
+      cluster: "3618"
+      host: https://dashboard.getporter.dev
+    secrets:
+      token: ${{ secrets.APP_TESTS_PRODUCTION_TOKEN }}
+      slack_webhook_url: ${{ secrets.APP_INTEGRATION_SLACK_WEBHOOK }}

+ 19 - 0
.github/workflows/app_tests_sandbox.yml

@@ -0,0 +1,19 @@
+on:
+  workflow_run:
+    workflows: ['Deploy to porter-sandbox']
+    branches: [master]
+    types:
+      - completed
+
+name: Run sandbox app tests
+jobs:
+  call-base-workflow:
+    uses: ./.github/workflows/app_tests_base.yml
+    with:
+      stage: sandbox
+      project: '242'
+      cluster: '240'
+      host: https://cloud.porter.run
+    secrets:
+      token: ${{ secrets.APP_TESTS_SANDBOX_TOKEN }}
+      slack_webhook_url: ${{ secrets.APP_INTEGRATION_SLACK_WEBHOOK }}

+ 0 - 107
.github/workflows/dev_cli.yml

@@ -1,107 +0,0 @@
-on:
-  workflow_run:
-    workflows: ["Deploy Porter to Internal Tooling"]
-    branches: [master]
-    types: 
-      - completed
-name: Release dev cli and run integration tests
-jobs:
-  build-linux:
-    name: Build Linux binaries
-    runs-on: ubuntu-latest
-    if: ${{ github.event.workflow_run.conclusion == 'success' }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Set up Go
-        uses: actions/setup-go@v4
-        with:
-          cache: false
-          go-version: '1.20.5'
-          go-version-file: go.mod
-      - name: Build Linux binaries
-        run: |
-          go build -ldflags="-w -s -X 'github.com/porter-dev/porter/cli/cmd/config.Version=dev_${{ github.sha }}' -X 'github.com/porter-dev/porter/cli/cmd/errors.SentryDSN=${{secrets.SENTRY_DSN}}'" -a -tags cli -o ./porter ./cli &
-          wait
-        env:
-          GOOS: linux
-          GOARCH: amd64
-          CGO_ENABLED: 0
-      - name: Zip Linux binaries
-        run: |
-          mkdir -p ./release/linux
-          zip --junk-paths ./release/linux/porter_dev_${{ github.sha }}_Linux_x86_64.zip ./porter
-      - name: Upload binaries
-        uses: actions/upload-artifact@v3
-        with:
-          path: ./release/linux
-          name: linux-binaries
-          retention-days: 1
-  release:
-    name: Zip binaries, create release and upload assets
-    runs-on: ubuntu-latest
-    needs:
-      - build-linux
-    steps:
-      - name: Download binaries
-        uses: actions/download-artifact@v3
-        with:
-          name: linux-binaries
-          path: release/linux
-      - name: Create Release
-        id: create_release
-        uses: softprops/action-gh-release@v1
-        with:
-          tag_name: dev_${{ github.sha }}
-          name: Release dev cli for ${{ github.sha }}
-          token: ${{ secrets.GITHUB_TOKEN }}
-          draft: false
-          prerelease: true
-      - name: Upload Linux CLI Release Asset
-        id: upload-linux-cli-release-asset
-        uses: actions/upload-release-asset@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          GITHUB_TAG: dev_${{ github.sha }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./release/linux/porter_dev_${{ github.sha }}_Linux_x86_64.zip
-          asset_name: porter_dev_${{ github.sha }}_Linux_x86_64.zip
-          asset_content_type: application/zip
-  build-push-docker-cli:
-    name: Build a new porter-cli docker image and push to dev tag
-    runs-on: ubuntu-latest
-    needs: release
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Login to GHCR
-        id: login-ghcr
-        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
-      - name: Build
-        run: |
-          docker build ./services/porter_cli_container \
-            -t ghcr.io/porter-dev/porter/porter-cli:dev \
-            -f ./services/porter_cli_container/Dockerfile \
-            --build-arg VERSION=dev \
-            --build-arg SENTRY_DSN=${{secrets.SENTRY_DSN}}
-      - name: Push to GHCR
-        run: |
-          docker tag ghcr.io/porter-dev/porter/porter-cli:dev ghcr.io/porter-dev/porter/porter-cli:dev
-          docker push ghcr.io/porter-dev/porter/porter-cli:dev
-  run-integration-tests:
-    name: run integration tests to test new build
-    runs-on: ubuntu-latest
-    needs: build-push-docker-cli
-    steps:   
-    - name: Porter app run
-      uses: porter-dev/porter-cli-action@v0.1.0
-      with:
-        command: app run app-integration-tests --job trigger
-      env:
-        PORTER_CLUSTER: "142"
-        PORTER_HOST: https://dashboard.internal-tools.porter.run
-        PORTER_PROJECT: "301"
-        PORTER_TOKEN: ${{ secrets.APP_INTEGRATION_PROJECT_TOKEN }}
-        PORTER_SERVICE: porter
-        PORTER_COMMIT: ${{ github.sha }}

+ 54 - 0
.github/workflows/porter_stack_porter-sandbox.yml

@@ -0,0 +1,54 @@
+"on":
+  push:
+    branches:
+    - master
+name: Deploy to porter-sandbox
+jobs:
+  build-go:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: build-go
+        uses: ./.github/actions/build-go
+
+  build-npm:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: build-npm
+        uses: ./.github/actions/build-npm
+        
+  porter-deploy:
+    runs-on: ubuntu-latest
+    needs: [build-go, build-npm]
+    steps:
+    - name: Checkout code
+      uses: actions/checkout@v3
+    - name: Get Go Binaries
+      uses: actions/download-artifact@v3
+      with:
+        name: go-binaries
+        path: bin/
+    - name: Get NPM static files
+      uses: actions/download-artifact@v3
+      with:
+        name: npm-static-files
+        path: build/
+    - name: Set Github tag
+      id: vars
+      run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+    - name: Setup porter
+      uses: porter-dev/setup-porter@v0.1.0
+    - name: Deploy stack
+      timeout-minutes: 30
+      run: exec porter apply -f ./porter.yaml --wait
+      env:
+        PORTER_CLUSTER: "11"
+        PORTER_HOST: https://dashboard.internal-tools.porter.run
+        PORTER_PR_NUMBER: ${{ github.event.number }}
+        PORTER_PROJECT: "8"
+        PORTER_STACK_NAME: porter-sandbox
+        PORTER_TAG: ${{ steps.vars.outputs.sha_short }}
+        PORTER_TOKEN: ${{ secrets.PORTER_STACK_8_11 }}

+ 6 - 32
.github/workflows/production.yml

@@ -54,45 +54,19 @@ jobs:
         uses: ./.github/actions/build-npm
   deploy-porter:
     runs-on: ubuntu-latest
-    needs: [build-go, build-npm]
+    needs: [ build-go, build-npm ]
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Get Go Binaries
-        uses: actions/download-artifact@v3
+      - name: porter-deploy
+        timeout-minutes: 30
+        uses: ./.github/actions/porter-deploy
         with:
-          name: go-binaries
-          path: bin/
-      - name: Get NPM static files
-        uses: actions/download-artifact@v3
-        with:
-          name: npm-static-files
-          path: build/
-      - name: Set Github tag
-        id: vars
-        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
-      - name: Update Porter API
-        timeout-minutes: 20
-        uses: porter-dev/porter-update-action@v0.1.0
-        with:
-          app: porter-ui
-          cluster: "9"
-          host: https://dashboard.internal-tools.porter.run
-          namespace: default
-          project: "5"
-          tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_PRODUCTION_DEPLOYMENT }}
-      - name: Update Porter Auth
-        timeout-minutes: 20
-        uses: porter-dev/porter-update-config-action@v0.1.0
-        with:
-          app: porter-auth
+          app: porter
           cluster: "9"
           host: https://dashboard.internal-tools.porter.run
-          namespace: default
           project: "5"
-          tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_PRODUCTION_DEPLOYMENT }}
+          token: ${{ secrets.PORTER_STACK_5_9 }}
 
   deploy-worker-pool:
     runs-on: ubuntu-latest

+ 1 - 1
.gitignore

@@ -18,7 +18,7 @@ openapi.yaml
 .idea
 portercli
 local
-
+go.work.sum
 
 vendor
 **/*.env

+ 1 - 1
CONTRIBUTING.md

@@ -35,7 +35,7 @@ If you want to start getting familiar with Porter's codebase, we do our best to
 
 ### Improving Documentation and Writing Tutorials
 
-Documentation is hosted at [docs.getporter.dev](https://docs.getporter.dev). To update existing documentation, you can suggest changes directly from the docs website. To create new documentation or write a tutorial, you can add a document in the `/docs` folder and make a PR directly. 
+Documentation is hosted at [docs.porter.run](https://docs.porter.run). To update existing documentation, you can suggest changes directly from the docs website. To create new documentation or write a tutorial, you can add a document in the `/docs` folder and make a PR directly. 
 
 ### Features
 

+ 0 - 0
Procfile


+ 3 - 3
README.md

@@ -51,12 +51,12 @@ For those who are familiar with Kubernetes and Helm:
 
 ## Docs
 
-Below are instructions for a quickstart. For full documentation, please visit our [official Docs.](https://docs.getporter.dev)
+Below are instructions for a quickstart. For full documentation, please visit our [official Docs.](https://docs.porter.run/)
 
 ## Getting Started
 
 1. Sign up and log into [Porter Dashboard](https://dashboard.getporter.dev).
 
-2. Create a Project and [put in your cloud provider credentials](https://docs.getporter.dev/docs/getting-started-with-porter-on-aws). Porter will automatically provision a Kubernetes cluster in your own cloud. It is also possible to [link up an existing Kubernetes cluster.](https://docs.getporter.dev/docs/cli-documentation#connecting-to-an-existing-cluster)
+2. Create a Project and [put in your cloud provider credentials](https://docs.porter.run/docs/getting-started-with-porter-on-aws). Porter will automatically provision a Kubernetes cluster in your own cloud. It is also possible to [link up an existing Kubernetes cluster.](https://docs.porter.run/docs/cli-documentation#connecting-to-an-existing-cluster)
 
-3. 🚀 Deploy your applications from a [git repository](https://docs.getporter.dev/docs/applications) or [Docker image registry](https://docs.getporter.dev/docs/cli-documentation#porter-docker-configure).
+3. 🚀 Deploy your applications from a [git repository](https://docs.porter.run/docs/applications) or [Docker image registry](https://docs.porter.run/docs/cli-documentation#porter-docker-configure).

+ 29 - 0
api/client/datastore.go

@@ -0,0 +1,29 @@
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/porter-dev/porter/api/types"
+)
+
+// CreateDatastoreProxy creates a proxy to connect to a datastore
+func (c *Client) CreateDatastoreProxy(
+	ctx context.Context,
+	projectID uint,
+	datastoreName string,
+	req *types.CreateDatastoreProxyRequest,
+) (*types.CreateDatastoreProxyResponse, error) {
+	resp := &types.CreateDatastoreProxyResponse{}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/datastores/%s/create-proxy",
+			projectID, datastoreName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}

+ 28 - 16
api/client/deployment_target.go

@@ -4,28 +4,40 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/porter-dev/porter/api/server/handlers/deployment_target"
+	"github.com/porter-dev/porter/api/types"
 )
 
-// CreateDeploymentTarget creates a new deployment target for a given project and cluster with the provided name
+// CreateDeploymentTarget creates a deployment target with the given request options
 func (c *Client) CreateDeploymentTarget(
 	ctx context.Context,
-	projectID, clusterID uint,
-	selector string,
-	preview bool,
-) (*deployment_target.CreateDeploymentTargetResponse, error) {
-	resp := &deployment_target.CreateDeploymentTargetResponse{}
-
-	req := &deployment_target.CreateDeploymentTargetRequest{
-		Selector: selector,
-		Preview:  preview,
-	}
+	projectId uint,
+	req *types.CreateDeploymentTargetRequest,
+) (*types.CreateDeploymentTargetResponse, error) {
+	resp := &types.CreateDeploymentTargetResponse{}
 
 	err := c.postRequest(
-		fmt.Sprintf(
-			"/projects/%d/clusters/%d/deployment-targets",
-			projectID, clusterID,
-		),
+		fmt.Sprintf("/projects/%d/targets", projectId),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// ListDeploymentTargets retrieves all deployment targets in a project
+func (c *Client) ListDeploymentTargets(
+	ctx context.Context,
+	projectId uint,
+	includePreviews bool,
+) (*types.ListDeploymentTargetsResponse, error) {
+	resp := &types.ListDeploymentTargetsResponse{}
+
+	req := &types.ListDeploymentTargetsRequest{
+		Preview: includePreviews,
+	}
+
+	err := c.getRequest(
+		fmt.Sprintf("/projects/%d/targets", projectId),
 		req,
 		resp,
 	)

+ 31 - 0
api/client/env_groups.go

@@ -23,3 +23,34 @@ func (c *Client) GetLatestEnvGroupVariables(
 
 	return resp, err
 }
+
+// UpdateEnvGroupInput is the input for the UpdateEnvGroup method
+type UpdateEnvGroupInput struct {
+	ProjectID     uint
+	ClusterID     uint
+	EnvGroupName  string
+	Variables     map[string]string
+	Secrets       map[string]string
+	Deletions     environment_groups.EnvVariableDeletions
+	SkipRedeploys bool
+}
+
+// UpdateEnvGroup creates or updates an environment group with the provided variables
+func (c *Client) UpdateEnvGroup(
+	ctx context.Context,
+	inp UpdateEnvGroupInput,
+) error {
+	req := &environment_groups.UpdateEnvironmentGroupRequest{
+		Name:              inp.EnvGroupName,
+		Variables:         inp.Variables,
+		SecretVariables:   inp.Secrets,
+		Deletions:         inp.Deletions,
+		SkipAppAutoDeploy: inp.SkipRedeploys,
+	}
+
+	return c.postRequest(
+		fmt.Sprintf("/projects/%d/clusters/%d/environment-groups", inp.ProjectID, inp.ClusterID),
+		req,
+		nil,
+	)
+}

+ 175 - 159
api/client/porter_app.go

@@ -2,11 +2,14 @@ package client
 
 import (
 	"context"
+	"encoding/base64"
+	"encoding/json"
 	"fmt"
 
 	"github.com/porter-dev/porter/api/server/handlers/porter_app"
 	"github.com/porter-dev/porter/internal/models"
 	appInternal "github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
 
 	"github.com/porter-dev/porter/api/types"
 )
@@ -157,12 +160,14 @@ func (c *Client) ParseYAML(
 	projectID, clusterID uint,
 	b64Yaml string,
 	appName string,
+	patchOperations []v2.PatchOperation,
 ) (*porter_app.ParsePorterYAMLToProtoResponse, error) {
 	resp := &porter_app.ParsePorterYAMLToProtoResponse{}
 
 	req := &porter_app.ParsePorterYAMLToProtoRequest{
-		B64Yaml: b64Yaml,
-		AppName: appName,
+		B64Yaml:         b64Yaml,
+		AppName:         appName,
+		PatchOperations: patchOperations,
 	}
 
 	err := c.postRequest(
@@ -177,86 +182,21 @@ func (c *Client) ParseYAML(
 	return resp, err
 }
 
-// ValidatePorterAppInput is the input struct to ValidatePorterApp
-type ValidatePorterAppInput struct {
-	ProjectID          uint
-	ClusterID          uint
-	AppName            string
-	Base64AppProto     string
-	Base64AppOverrides string
-	DeploymentTarget   string
-	CommitSHA          string
-}
-
-// ValidatePorterApp takes in a base64 encoded app definition that is potentially partial and returns a complete definition
-// using any previous app revisions and defaults
-func (c *Client) ValidatePorterApp(
+// GetAppManifests returns the manifests for a given app based on the latest successful app revision
+func (c *Client) GetAppManifests(
 	ctx context.Context,
-	inp ValidatePorterAppInput,
-) (*porter_app.ValidatePorterAppResponse, error) {
-	resp := &porter_app.ValidatePorterAppResponse{}
-
-	req := &porter_app.ValidatePorterAppRequest{
-		AppName:            inp.AppName,
-		Base64AppProto:     inp.Base64AppProto,
-		Base64AppOverrides: inp.Base64AppOverrides,
-		DeploymentTargetId: inp.DeploymentTarget,
-		CommitSHA:          inp.CommitSHA,
-	}
-
-	err := c.postRequest(
-		fmt.Sprintf(
-			"/projects/%d/clusters/%d/apps/validate",
-			inp.ProjectID, inp.ClusterID,
-		),
-		req,
-		resp,
-	)
-
-	return resp, err
-}
-
-// ApplyPorterAppInput is the input struct to ApplyPorterApp
-type ApplyPorterAppInput struct {
-	ProjectID        uint
-	ClusterID        uint
-	Base64AppProto   string
-	DeploymentTarget string
-	AppRevisionID    string
-	ForceBuild       bool
-	Variables        map[string]string
-	Secrets          map[string]string
-	HardEnvUpdate    bool
-}
-
-// ApplyPorterApp takes in a base64 encoded app definition and applies it to the cluster
-func (c *Client) ApplyPorterApp(
-	ctx context.Context,
-	inp ApplyPorterAppInput,
-) (*porter_app.ApplyPorterAppResponse, error) {
-	resp := &porter_app.ApplyPorterAppResponse{}
-
-	req := &porter_app.ApplyPorterAppRequest{
-		Base64AppProto:     inp.Base64AppProto,
-		DeploymentTargetId: inp.DeploymentTarget,
-		AppRevisionID:      inp.AppRevisionID,
-		ForceBuild:         inp.ForceBuild,
-		Variables:          inp.Variables,
-		Secrets:            inp.Secrets,
-		HardEnvUpdate:      inp.HardEnvUpdate,
-	}
+	projectID, clusterID uint,
+	appName string,
+) (*porter_app.AppManifestsResponse, error) {
+	resp := &porter_app.AppManifestsResponse{}
 
-	err := c.postRequest(
+	err := c.getRequest(
 		fmt.Sprintf(
-			"/projects/%d/clusters/%d/apps/apply",
-			inp.ProjectID, inp.ClusterID,
+			"/projects/%d/clusters/%d/apps/%s/manifests",
+			projectID, clusterID, appName,
 		),
-		req,
+		nil,
 		resp,
-		postRequestOpts{
-			retryCount:   3,
-			onlyRetry500: true,
-		},
 	)
 
 	return resp, err
@@ -264,16 +204,24 @@ func (c *Client) ApplyPorterApp(
 
 // UpdateAppInput is the input struct to UpdateApp
 type UpdateAppInput struct {
-	ProjectID          uint
-	ClusterID          uint
-	Name               string
-	GitSource          porter_app.GitSource
-	DeploymentTargetId string
-	CommitSHA          string
-	AppRevisionID      string
-	Base64AppProto     string
-	Base64PorterYAML   string
-	IsEnvOverride      bool
+	ProjectID            uint
+	ClusterID            uint
+	Name                 string
+	ImageTagOverride     string
+	GitSource            porter_app.GitSource
+	DeploymentTargetId   string
+	DeploymentTargetName string
+	CommitSHA            string
+	AppRevisionID        string
+	Base64AppProto       string
+	Base64PorterYAML     string
+	IsEnvOverride        bool
+	WithPredeploy        bool
+	Exact                bool
+	Variables            map[string]string
+	Secrets              map[string]string
+	Deletions            porter_app.Deletions
+	PatchOperations      []v2.PatchOperation
 }
 
 // UpdateApp updates a porter app
@@ -284,14 +232,22 @@ func (c *Client) UpdateApp(
 	resp := &porter_app.UpdateAppResponse{}
 
 	req := &porter_app.UpdateAppRequest{
-		Name:               inp.Name,
-		GitSource:          inp.GitSource,
-		DeploymentTargetId: inp.DeploymentTargetId,
-		CommitSHA:          inp.CommitSHA,
-		AppRevisionID:      inp.AppRevisionID,
-		Base64AppProto:     inp.Base64AppProto,
-		Base64PorterYAML:   inp.Base64PorterYAML,
-		IsEnvOverride:      inp.IsEnvOverride,
+		Name:                 inp.Name,
+		GitSource:            inp.GitSource,
+		DeploymentTargetId:   inp.DeploymentTargetId,
+		DeploymentTargetName: inp.DeploymentTargetName,
+		CommitSHA:            inp.CommitSHA,
+		ImageTagOverride:     inp.ImageTagOverride,
+		AppRevisionID:        inp.AppRevisionID,
+		Base64AppProto:       inp.Base64AppProto,
+		Base64PorterYAML:     inp.Base64PorterYAML,
+		IsEnvOverride:        inp.IsEnvOverride,
+		WithPredeploy:        inp.WithPredeploy,
+		Exact:                inp.Exact,
+		Variables:            inp.Variables,
+		Secrets:              inp.Secrets,
+		Deletions:            inp.Deletions,
+		PatchOperations:      inp.PatchOperations,
 	}
 
 	err := c.postRequest(
@@ -327,22 +283,33 @@ func (c *Client) DefaultDeploymentTarget(
 	return resp, err
 }
 
+// CurrentAppRevisionInput is the input struct to CurrentAppRevision
+type CurrentAppRevisionInput struct {
+	ProjectID uint
+	ClusterID uint
+	AppName   string
+	// DeploymentTargetName is the name of the deployment target to get the current app revision for. One of this or DeploymentTargetID must be set.
+	DeploymentTargetName string
+	// DeploymentTargetID is the id of the deployment target to get the current app revision for. One of this or DeploymentTargetName must be set.
+	DeploymentTargetID string
+}
+
 // CurrentAppRevision returns the currently deployed app revision for a given project, app name and deployment target
 func (c *Client) CurrentAppRevision(
 	ctx context.Context,
-	projectID uint, clusterID uint,
-	appName string, deploymentTarget string,
+	input CurrentAppRevisionInput,
 ) (*porter_app.LatestAppRevisionResponse, error) {
 	resp := &porter_app.LatestAppRevisionResponse{}
 
 	req := &porter_app.LatestAppRevisionRequest{
-		DeploymentTargetID: deploymentTarget,
+		DeploymentTargetName: input.DeploymentTargetName,
+		DeploymentTargetID:   input.DeploymentTargetID,
 	}
 
 	err := c.getRequest(
 		fmt.Sprintf(
 			"/projects/%d/clusters/%d/apps/%s/latest",
-			projectID, clusterID, appName,
+			input.ProjectID, input.ClusterID, input.AppName,
 		),
 		req,
 		resp,
@@ -548,10 +515,13 @@ func (c *Client) GetAppEnvVariables(
 	ctx context.Context,
 	projectID uint, clusterID uint,
 	appName string,
+	deploymentTargetName string,
 ) (*porter_app.AppEnvVariablesResponse, error) {
 	resp := &porter_app.AppEnvVariablesResponse{}
 
-	req := &porter_app.AppEnvVariablesRequest{}
+	req := &porter_app.AppEnvVariablesRequest{
+		DeploymentTargetName: deploymentTargetName,
+	}
 
 	err := c.getRequest(
 		fmt.Sprintf(
@@ -565,20 +535,39 @@ func (c *Client) GetAppEnvVariables(
 	return resp, err
 }
 
+// GetBuildFromRevisionInput is the input struct to GetBuildFromRevision
+type GetBuildFromRevisionInput struct {
+	ProjectID       uint
+	ClusterID       uint
+	AppName         string
+	AppRevisionID   string
+	PatchOperations []v2.PatchOperation
+}
+
 // GetBuildFromRevision returns the build environment for a given app proto
 func (c *Client) GetBuildFromRevision(
 	ctx context.Context,
-	projectID uint, clusterID uint,
-	appName string, appRevisionId string,
+	inp GetBuildFromRevisionInput,
 ) (*porter_app.GetBuildFromRevisionResponse, error) {
+	by, err := json.Marshal(inp.PatchOperations)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling patch operations: %w", err)
+	}
+
+	encoded := base64.StdEncoding.EncodeToString(by)
+
+	req := &porter_app.GetBuildFromRevisionRequest{
+		B64PatchOperations: encoded,
+	}
+
 	resp := &porter_app.GetBuildFromRevisionResponse{}
 
-	err := c.getRequest(
+	err = c.getRequest(
 		fmt.Sprintf(
 			"/projects/%d/clusters/%d/apps/%s/revisions/%s/build",
-			projectID, clusterID, appName, appRevisionId,
+			inp.ProjectID, inp.ClusterID, inp.AppName, inp.AppRevisionID,
 		),
-		nil,
+		req,
 		resp,
 	)
 
@@ -619,38 +608,6 @@ func (c *Client) ReportRevisionStatus(
 	return resp, err
 }
 
-// CreateOrUpdateAppEnvironment updates the app environment group and creates it if it doesn't exist
-func (c *Client) CreateOrUpdateAppEnvironment(
-	ctx context.Context,
-	projectID uint, clusterID uint,
-	appName string,
-	deploymentTargetID string,
-	variables map[string]string,
-	secrets map[string]string,
-	Base64AppProto string,
-) (*porter_app.UpdateAppEnvironmentResponse, error) {
-	resp := &porter_app.UpdateAppEnvironmentResponse{}
-
-	req := &porter_app.UpdateAppEnvironmentRequest{
-		DeploymentTargetID: deploymentTargetID,
-		Variables:          variables,
-		Secrets:            secrets,
-		HardUpdate:         false,
-		Base64AppProto:     Base64AppProto,
-	}
-
-	err := c.postRequest(
-		fmt.Sprintf(
-			"/projects/%d/clusters/%d/apps/%s/update-environment",
-			projectID, clusterID, appName,
-		),
-		req,
-		resp,
-	)
-
-	return resp, err
-}
-
 // PorterYamlV2Pods gets all pods for a given deployment target id and app name
 func (c *Client) PorterYamlV2Pods(
 	ctx context.Context,
@@ -733,12 +690,12 @@ func (c *Client) RollbackRevision(
 	ctx context.Context,
 	projectID, clusterID uint,
 	appName string,
-	deploymentTargetID string,
+	deploymentTargetName string,
 ) (*porter_app.RollbackAppRevisionResponse, error) {
 	resp := &porter_app.RollbackAppRevisionResponse{}
 
 	req := &porter_app.RollbackAppRevisionRequest{
-		DeploymentTargetID: deploymentTargetID,
+		DeploymentTargetName: deploymentTargetName,
 	}
 
 	err := c.postRequest(
@@ -754,19 +711,25 @@ func (c *Client) RollbackRevision(
 	return resp, err
 }
 
-// UseNewApplyLogic checks whether the CLI should use the new apply logic
-func (c *Client) UseNewApplyLogic(
+// RunAppJob runs a job for an app
+func (c *Client) RunAppJob(
 	ctx context.Context,
 	projectID, clusterID uint,
-) (*porter_app.UseNewApplyLogicResponse, error) {
-	resp := &porter_app.UseNewApplyLogicResponse{}
+	appName string, jobName string,
+	deploymentTargetName string,
+) (*porter_app.RunAppJobResponse, error) {
+	resp := &porter_app.RunAppJobResponse{}
 
-	req := &porter_app.UseNewApplyLogicRequest{}
+	req := &porter_app.RunAppJobRequest{
+		ServiceName:          jobName,
+		DeploymentTargetName: deploymentTargetName,
+	}
 
-	err := c.getRequest(
+	err := c.postRequest(
 		fmt.Sprintf(
-			"/projects/%d/clusters/%d/apps/use-new-apply-logic",
+			"/projects/%d/clusters/%d/apps/%s/run",
 			projectID, clusterID,
+			appName,
 		),
 		req,
 		resp,
@@ -775,25 +738,78 @@ func (c *Client) UseNewApplyLogic(
 	return resp, err
 }
 
-// RunAppJob runs a job for an app
-func (c *Client) RunAppJob(
+// CancelAppJobInput contains all the information necessary to cancel a job
+type CancelAppJobInput struct {
+	ProjectID            uint
+	ClusterID            uint
+	AppName              string
+	JobName              string
+	DeploymentTargetName string
+}
+
+// CancelAppJobRun cancels an in-progress job run
+func (c *Client) CancelAppJobRun(
 	ctx context.Context,
-	projectID, clusterID uint,
-	appName string, jobName string,
-	deploymentTargetID string,
-) (*porter_app.AppRunResponse, error) {
-	resp := &porter_app.AppRunResponse{}
+	inp CancelAppJobInput,
+) (*porter_app.CancelJobRunResponse, error) {
+	resp := &porter_app.CancelJobRunResponse{}
 
-	req := &porter_app.AppRunRequest{
-		ServiceName:        jobName,
-		DeploymentTargetID: deploymentTargetID,
+	req := &porter_app.CancelJobRunRequest{
+		DeploymentTargetName: inp.DeploymentTargetName,
 	}
 
 	err := c.postRequest(
 		fmt.Sprintf(
-			"/projects/%d/clusters/%d/apps/%s/run",
-			projectID, clusterID,
-			appName,
+			"/projects/%d/clusters/%d/apps/%s/jobs/%s/cancel",
+			inp.ProjectID, inp.ClusterID,
+			inp.AppName, inp.JobName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// RunAppJobStatusInput contains all the information necessary to check the status of a job
+type RunAppJobStatusInput struct {
+	// AppName is the name of the app associated with the job
+	AppName string
+
+	// ClusterID is the id of the cluster against which to retrieve a helm agent
+	ClusterID uint
+
+	// DeploymentTargetName is the name of the deployment target the job was run against
+	DeploymentTargetName string
+
+	// ServiceName is the name of the app service that was triggered
+	ServiceName string
+
+	// JobRunID is the UID returned from the /apps/{porter_app_name}/run endpoint
+	JobRunID string
+
+	// ProjectID is the project in which the cluster exists
+	ProjectID uint
+}
+
+// RunAppJobStatus gets the status for a job app run
+func (c *Client) RunAppJobStatus(
+	ctx context.Context,
+	input RunAppJobStatusInput,
+) (*porter_app.AppJobRunStatusResponse, error) {
+	resp := &porter_app.AppJobRunStatusResponse{}
+
+	req := &porter_app.AppJobRunStatusRequest{
+		DeploymentTargetName: input.DeploymentTargetName,
+		JobRunID:             input.JobRunID,
+		ServiceName:          input.ServiceName,
+	}
+
+	err := c.getRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/run-status",
+			input.ProjectID, input.ClusterID,
+			input.AppName,
 		),
 		req,
 		resp,

+ 29 - 0
api/server/authn/handler.go

@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
+	"time"
 
 	"github.com/gorilla/sessions"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -81,6 +82,34 @@ func (authn *AuthN) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	cancelTokens := func(lastIssueTime time.Time, cancelEmail string, authn *AuthN, session *sessions.Session) bool {
+		if email, ok := session.Values["email"]; ok {
+			if email.(string) == cancelEmail {
+				timeAsUTC := lastIssueTime.UTC()
+				sess, _ := authn.config.Repo.Session().SelectSession(&models.Session{Key: session.ID})
+				if sess.CreatedAt.UTC().Before(timeAsUTC) {
+					_, _ = authn.config.Repo.Session().DeleteSession(sess)
+					return true
+				}
+			}
+		}
+		return false
+	}
+
+	est, err := time.LoadLocation("EST")
+	if err != nil {
+		authn.handleForbiddenForSession(w, r, fmt.Errorf("error, contact admin"), session)
+		return
+	}
+	if cancelTokens(time.Date(2024, 0o1, 16, 18, 35, 0, 0, est), "support@porter.run", authn, session) {
+		authn.handleForbiddenForSession(w, r, fmt.Errorf("error, contact admin"), session)
+		return
+	}
+	if cancelTokens(time.Date(2024, 0o1, 16, 18, 35, 0, 0, est), "admin@porter.run", authn, session) {
+		authn.handleForbiddenForSession(w, r, fmt.Errorf("error, contact admin"), session)
+		return
+	}
+
 	if auth, ok := session.Values["authenticated"].(bool); !auth || !ok {
 		authn.handleForbiddenForSession(w, r, fmt.Errorf("stored cookie was not authenticated"), session)
 		return

+ 79 - 0
api/server/authz/deployment_target.go

@@ -0,0 +1,79 @@
+package authz
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"gorm.io/gorm"
+)
+
+// DeploymentTargetScopedFactory is a factory for generating deployment target middleware
+type DeploymentTargetScopedFactory struct {
+	config *config.Config
+}
+
+// NewDeploymentTargetScopedFactory returns a new DeploymentTargetScopedFactory
+func NewDeploymentTargetScopedFactory(
+	config *config.Config,
+) *DeploymentTargetScopedFactory {
+	return &DeploymentTargetScopedFactory{config}
+}
+
+// Middleware checks that the request is scoped to a deployment target
+func (p *DeploymentTargetScopedFactory) Middleware(next http.Handler) http.Handler {
+	return &DeploymentTargetScopedMiddleware{next, p.config}
+}
+
+// DeploymentTargetScopedMiddleware checks that the request is scoped to a deployment target
+type DeploymentTargetScopedMiddleware struct {
+	next   http.Handler
+	config *config.Config
+}
+
+// ServeHTTP checks that the request is scoped to a deployment target
+func (p *DeploymentTargetScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// read the project to check scopes
+	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+
+	// get the deployment target identifier from the URL param context
+	reqScopes, _ := r.Context().Value(types.RequestScopeCtxKey).(map[types.PermissionScope]*types.RequestAction)
+	deploymentTargetIdentifier := reqScopes[types.DeploymentTargetScope].Resource.Name
+
+	deploymentTargetDB, err := p.config.Repo.DeploymentTarget().DeploymentTarget(proj.ID, deploymentTargetIdentifier)
+	if err != nil {
+		if !errors.Is(err, gorm.ErrRecordNotFound) {
+			apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError), true)
+			return
+		}
+		err := fmt.Errorf("deployment target with identifier %s not found in project %d", deploymentTargetIdentifier, proj.ID)
+		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden), true)
+		return
+	}
+
+	deploymentTarget := types.DeploymentTarget{
+		ID:           deploymentTargetDB.ID,
+		ProjectID:    uint(deploymentTargetDB.ProjectID),
+		ClusterID:    uint(deploymentTargetDB.ClusterID),
+		Name:         deploymentTargetDB.VanityName,
+		Namespace:    deploymentTargetDB.Selector,
+		IsPreview:    deploymentTargetDB.Preview,
+		IsDefault:    deploymentTargetDB.IsDefault,
+		CreatedAtUTC: deploymentTargetDB.CreatedAt.UTC(),
+		UpdatedAtUTC: deploymentTargetDB.UpdatedAt.UTC(),
+	}
+
+	ctx := NewDeploymentTargetContext(r.Context(), deploymentTarget)
+	r = r.Clone(ctx)
+	p.next.ServeHTTP(w, r)
+}
+
+// NewDeploymentTargetContext returns a new context with the deployment target
+func NewDeploymentTargetContext(ctx context.Context, deploymentTarget types.DeploymentTarget) context.Context {
+	return context.WithValue(ctx, types.DeploymentTargetScope, deploymentTarget)
+}

+ 2 - 0
api/server/authz/policy.go

@@ -122,6 +122,8 @@ func getRequestActionForEndpoint(
 			resource.UInt, reqErr = requestutils.GetURLParamUint(r, types.URLParamProjectID)
 		case types.ClusterScope:
 			resource.UInt, reqErr = requestutils.GetURLParamUint(r, types.URLParamClusterID)
+		case types.DeploymentTargetScope:
+			resource.Name, reqErr = requestutils.GetURLParamString(r, types.URLParamDeploymentTargetIdentifier)
 		case types.RegistryScope:
 			resource.UInt, reqErr = requestutils.GetURLParamUint(r, types.URLParamRegistryID)
 		case types.HelmRepoScope:

+ 106 - 0
api/server/handlers/addons/list.go

@@ -0,0 +1,106 @@
+package addons
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// LatestAddonsHandler handles requests to the /addons/latest endpoint
+type LatestAddonsHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewLatestAddonsHandler returns a new LatestAddonsHandler
+func NewLatestAddonsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *LatestAddonsHandler {
+	return &LatestAddonsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// LatestAddonsRequest represents the request for the /addons/latest endpoint
+type LatestAddonsRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+}
+
+// LatestAddonsResponse represents the response from the /addons/latest endpoint
+type LatestAddonsResponse struct {
+	Base64Addons []string `json:"base64_addons"`
+}
+
+func (c *LatestAddonsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-addons")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	request := &LatestAddonsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	var deploymentTargetIdentifier *porterv1.DeploymentTargetIdentifier
+	if request.DeploymentTargetID != "" {
+		deploymentTargetIdentifier = &porterv1.DeploymentTargetIdentifier{
+			Id: request.DeploymentTargetID,
+		}
+	}
+
+	latestAddonsReq := connect.NewRequest(&porterv1.LatestAddonsRequest{
+		ProjectId:                  int64(project.ID),
+		ClusterId:                  int64(cluster.ID),
+		DeploymentTargetIdentifier: deploymentTargetIdentifier,
+	})
+
+	latestAddonsResp, err := c.Config().ClusterControlPlaneClient.LatestAddons(ctx, latestAddonsReq)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting latest addons")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if latestAddonsResp == nil || latestAddonsResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "latest addons response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &LatestAddonsResponse{
+		Base64Addons: []string{},
+	}
+
+	for _, addon := range latestAddonsResp.Msg.Addons {
+		by, err := helpers.MarshalContractObject(ctx, addon)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error marshaling addon")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		encoded := base64.StdEncoding.EncodeToString(by)
+		res.Base64Addons = append(res.Base64Addons, encoded)
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 35 - 13
api/server/handlers/api_contract/list.go

@@ -1,23 +1,25 @@
 package api_contract
 
 import (
-	"fmt"
 	"net/http"
 	"strconv"
 
-	"github.com/go-chi/chi/v5"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
+// APIContractRevisionListHandler is the handler for the GET /api/projects/{project_id}/contracts endpoint
 type APIContractRevisionListHandler struct {
 	handlers.PorterHandlerReadWriter
 }
 
+// NewAPIContractRevisionListHandler returns a new APIContractRevisionListHandler
 func NewAPIContractRevisionListHandler(
 	config *config.Config,
 	decoderValidator shared.RequestDecoderValidator,
@@ -28,32 +30,52 @@ func NewAPIContractRevisionListHandler(
 	}
 }
 
+// APIContractRevisionListRequest is the request schema for the APIContractRevisionListHandler
+type APIContractRevisionListRequest struct {
+	Latest    bool   `schema:"latest"`
+	ClusterID string `schema:"cluster_id"`
+}
+
 // ServeHTTP returns a list of Porter API contract revisions for a given project.
 // If clusterID is also given, it will list by project_id, cluster_id
+// If latest is provided, it will only return the latest revision for each contract
 func (c *APIContractRevisionListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-api-contract-revisions")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	request := &APIContractRevisionListRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
 
 	clusterID := 0
-	clusterIDParam := chi.URLParam(r, "cluster_id")
+	clusterIDParam := request.ClusterID
 	if clusterIDParam != "" {
 		i, err := strconv.Atoi(clusterIDParam)
 		if err != nil {
-			e := fmt.Errorf("invalid cluster_id query param given: %w", err)
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+			err = telemetry.Error(ctx, span, err, "error parsing cluster id")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 			return
 		}
 		clusterID = i
 	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: clusterID},
+		telemetry.AttributeKV{Key: "latest", Value: request.Latest},
+	)
 
-	ctx := r.Context()
-
-	revisions, err := c.Config().Repo.APIContractRevisioner().List(ctx, proj.ID, uint(clusterID))
+	resp := []*models.APIContractRevision{}
+	revisions, err := c.Config().Repo.APIContractRevisioner().List(ctx, proj.ID, repository.WithClusterID(uint(clusterID)), repository.WithLatest(request.Latest))
 	if err != nil {
-		e := fmt.Errorf("error listing api contract revision: %w", err)
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+		err = telemetry.Error(ctx, span, err, "error getting latest api contract revisions")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
+	resp = append(resp, revisions...)
 
-	w.WriteHeader(http.StatusOK)
-	c.WriteResult(w, r, revisions)
+	c.WriteResult(w, r, resp)
 }

+ 94 - 0
api/server/handlers/api_contract/preflight.go

@@ -0,0 +1,94 @@
+package api_contract
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// PreflightCheckHandler runs preflight checks on a cluster contract
+type PreflightCheckHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewPreflightCheckHandler returns a new PreflightCheckHandler
+func NewPreflightCheckHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *PreflightCheckHandler {
+	return &PreflightCheckHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// PorterError is the error object embedded in the preflight check endpoint's response
+type PorterError struct {
+	Code     string            `json:"code"`
+	Message  string            `json:"message"`
+	Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// PreflightCheckError is the error response for the preflight check endpoint
+type PreflightCheckError struct {
+	Name  string      `json:"name"`
+	Error PorterError `json:"error"`
+}
+
+// PreflightCheckResponse is the response to the preflight check endpoint
+type PreflightCheckResponse struct {
+	Errors []PreflightCheckError `json:"errors"`
+}
+
+func (p *PreflightCheckHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-preflight-checks")
+	defer span.End()
+
+	var apiContract porterv1.Contract
+
+	err := helpers.UnmarshalContractObjectFromReader(r.Body, &apiContract)
+	if err != nil {
+		e := telemetry.Error(ctx, span, err, "error parsing api contract")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	var resp PreflightCheckResponse
+
+	req := porterv1.CloudContractPreflightCheckRequest{
+		Contract: &apiContract,
+	}
+
+	checkResp, err := p.Config().ClusterControlPlaneClient.CloudContractPreflightCheck(ctx, connect.NewRequest(&req))
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error calling preflight checks")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if checkResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "no message received from preflight checks")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	errors := []PreflightCheckError{}
+	for _, check := range checkResp.Msg.FailingPreflightChecks {
+		errors = append(errors, PreflightCheckError{
+			Name: check.Type,
+			Error: PorterError{
+				Message:  check.Message,
+				Metadata: check.Metadata,
+			},
+		})
+	}
+	resp.Errors = errors
+	p.WriteResult(w, r, resp)
+}

+ 0 - 24
api/server/handlers/billing/billing_ce.go

@@ -1,24 +0,0 @@
-//go:build !ee
-// +build !ee
-
-package billing
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/config"
-)
-
-type BillingWebhookHandler struct {
-	handlers.PorterHandlerReader
-	handlers.Unavailable
-}
-
-func NewBillingWebhookHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-) http.Handler {
-	return handlers.NewUnavailable(config, "billing_webhook")
-}

+ 0 - 22
api/server/handlers/billing/billing_ee.go

@@ -1,22 +0,0 @@
-//go:build ee
-// +build ee
-
-package billing
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/config"
-
-	"github.com/porter-dev/porter/ee/api/server/handlers/billing"
-)
-
-var NewBillingWebhookHandler func(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-) http.Handler
-
-func init() {
-	NewBillingWebhookHandler = billing.NewBillingWebhookHandler
-}

+ 102 - 0
api/server/handlers/billing/create.go

@@ -0,0 +1,102 @@
+package billing
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/analytics"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CreateBillingHandler is a handler for creating payment methods
+type CreateBillingHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// SetDefaultBillingHandler is a handler for setting default payment method
+type SetDefaultBillingHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// NewCreateBillingHandler will create a new CreateBillingHandler
+func NewCreateBillingHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateBillingHandler {
+	return &CreateBillingHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (c *CreateBillingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "create-billing-endpoint")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	clientSecret, err := c.Config().BillingManager.CreatePaymentMethod(ctx, proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error creating payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error creating payment method: %w", err)))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: proj.ID},
+		telemetry.AttributeKV{Key: "customer-id", Value: proj.BillingID},
+	)
+
+	c.WriteResult(w, r, clientSecret)
+}
+
+// NewSetDefaultBillingHandler will create a new SetDefaultBillingHandler
+func NewSetDefaultBillingHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *SetDefaultBillingHandler {
+	return &SetDefaultBillingHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+func (c *SetDefaultBillingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "set-default-billing-endpoint")
+	defer span.End()
+
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	paymentMethodID, reqErr := requestutils.GetURLParamString(r, types.URLParamPaymentMethodID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "error setting default payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error setting default payment method: %w", err)))
+		return
+	}
+
+	err := c.Config().BillingManager.SetDefaultPaymentMethod(ctx, paymentMethodID, proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error setting default payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error setting default payment method: %w", err)))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: proj.ID},
+		telemetry.AttributeKV{Key: "customer-id", Value: proj.BillingID},
+		telemetry.AttributeKV{Key: "payment-method-id", Value: paymentMethodID},
+	)
+
+	_ = c.Config().AnalyticsClient.Track(analytics.PaymentMethodAttachedTrack(&analytics.PaymentMethodCreateDeleteTrackOpts{
+		ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, proj.ID),
+	}))
+
+	c.WriteResult(w, r, "")
+}

+ 76 - 0
api/server/handlers/billing/customer.go

@@ -0,0 +1,76 @@
+package billing
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CreateBillingCustomerHandler will create a new handler
+// for creating customers in the billing provider
+type CreateBillingCustomerHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewCreateBillingCustomerIfNotExists will create a new CreateBillingCustomerHandler
+func NewCreateBillingCustomerIfNotExists(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateBillingCustomerHandler {
+	return &CreateBillingCustomerHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (c *CreateBillingCustomerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "create-billing-customer-endpoint")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	request := &types.CreateBillingCustomerRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		return
+	}
+
+	// There is no easy way to pass environment variables to the frontend,
+	// so for now pass via the backend. This is acceptable because the key is
+	// meant to be public
+	publishableKey := c.Config().BillingManager.GetPublishableKey(ctx)
+	if proj.BillingID != "" {
+		c.WriteResult(w, r, publishableKey)
+		return
+	}
+
+	// Create customer in Stripe
+	customerID, err := c.Config().BillingManager.CreateCustomer(ctx, request.UserEmail, proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error creating billing customer")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error creating billing customer: %w", err)))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: proj.ID},
+		telemetry.AttributeKV{Key: "customer-id", Value: proj.BillingID},
+	)
+
+	// Update the project record with the customer ID
+	proj.BillingID = customerID
+	_, err = c.Repo().Project().UpdateProject(proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error updating project")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error updating project: %w", err)))
+		return
+	}
+
+	c.WriteResult(w, r, publishableKey)
+}

+ 63 - 0
api/server/handlers/billing/delete.go

@@ -0,0 +1,63 @@
+package billing
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/analytics"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// DeleteBillingHandler is a handler for deleting payment methods
+type DeleteBillingHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// NewDeleteBillingHandler will create a new DeleteBillingHandler
+func NewDeleteBillingHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *DeleteBillingHandler {
+	return &DeleteBillingHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+func (c *DeleteBillingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "delete-billing-endpoint")
+	defer span.End()
+
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	paymentMethodID, reqErr := requestutils.GetURLParamString(r, types.URLParamPaymentMethodID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "error deleting payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deleting payment method: %w", err)))
+		return
+	}
+
+	err := c.Config().BillingManager.DeletePaymentMethod(ctx, paymentMethodID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error deleting payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deleting payment method: %w", err)))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "payment-method-id", Value: paymentMethodID},
+	)
+
+	_ = c.Config().AnalyticsClient.Track(analytics.PaymentMethodDettachedTrack(&analytics.PaymentMethodCreateDeleteTrackOpts{
+		ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, proj.ID),
+	}))
+
+	c.WriteResult(w, r, "")
+}

+ 81 - 0
api/server/handlers/billing/list.go

@@ -0,0 +1,81 @@
+package billing
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ListBillingHandler is a handler for listing payment methods
+type ListBillingHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// CheckPaymentEnabledHandler is a handler for checking if payment is set up
+type CheckPaymentEnabledHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// NewListBillingHandler will create a new ListBillingHandler
+func NewListBillingHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *ListBillingHandler {
+	return &ListBillingHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+func (c *ListBillingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "list-payment-endpoint")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	paymentMethods, err := c.Config().BillingManager.ListPaymentMethod(ctx, proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error listing payment method")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error listing payment method: %w", err)))
+		return
+	}
+
+	c.WriteResult(w, r, paymentMethods)
+}
+
+// NewCheckPaymentEnabledHandler will create a new CheckPaymentEnabledHandler
+func NewCheckPaymentEnabledHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *CheckPaymentEnabledHandler {
+	return &CheckPaymentEnabledHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+func (c *CheckPaymentEnabledHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "check-payment-endpoint")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	paymentEnabled, err := c.Config().BillingManager.CheckPaymentEnabled(ctx, proj)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error checking if payment enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error checking if payment enabled: %w", err)))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: proj.ID},
+		telemetry.AttributeKV{Key: "customer-id", Value: proj.BillingID},
+	)
+
+	c.WriteResult(w, r, paymentEnabled)
+}

+ 0 - 9
api/server/handlers/billing/redirect_billing.go

@@ -6,7 +6,6 @@ import (
 
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
@@ -53,12 +52,4 @@ func (c *RedirectBillingHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 		http.Redirect(w, r, "/dashboard?error="+url.QueryEscape("Only the creator of the project can manage billing"), 302)
 		return
 	}
-
-	redirectURI, err := c.Config().BillingManager.GetRedirectURI(user, proj)
-	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
-	}
-
-	http.Redirect(w, r, redirectURI, 302)
 }

+ 119 - 0
api/server/handlers/cloud_provider/list_aws.go

@@ -0,0 +1,119 @@
+package cloud_provider
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ListAwsAccountsResponse describes an outbound response for listing AWS accounts on
+// a given project.
+type ListAwsAccountsResponse struct {
+	// Accounts is a list of aws account objects
+	Accounts []AwsAccount `json:"accounts"`
+}
+
+// AwsAccount describes a single AWS account associated with a
+// given project.
+//
+// The shape of the object is "generic" as there will be similar endpoints in
+// the future for other cloud providers.
+type AwsAccount struct {
+	// CloudProviderID is the cloud provider id - for AWS, this is an account
+	CloudProviderID string `json:"cloud_provider_id"`
+
+	// ProjectID is the project the account is associated with
+	ProjectID uint `json:"project_id"`
+}
+
+// CloudProvider is an abstraction for a cloud provider
+type CloudProvider struct {
+	// Type is the type of the cloud provider
+	Type porterv1.EnumCloudProvider `json:"type"`
+	// AccountID is the ID of the cloud provider account
+	AccountID string `json:"account_id"`
+}
+
+// ListAwsAccountsHandler is a struct for handling an aws cloud provider list request
+type ListAwsAccountsHandler struct {
+	handlers.PorterHandlerWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewListAwsAccountsHandler constructs a ListAwsAccountsHandler
+func NewListAwsAccountsHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *ListAwsAccountsHandler {
+	return &ListAwsAccountsHandler{
+		PorterHandlerWriter:   handlers.NewDefaultPorterHandler(config, nil, writer),
+		KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// ServeHTTP returns a list of AWS Accounts
+//
+// todo: Move this logic down into CCP
+func (c *ListAwsAccountsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-cloud-provider-list-aws")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	res := ListAwsAccountsResponse{
+		Accounts: []AwsAccount{},
+	}
+	if !project.GetFeatureFlag(models.CapiProvisionerEnabled, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "listing cloud providers not available on non-capi clusters")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	dblinks, err := c.Repo().AWSAssumeRoleChainer().List(ctx, project.ID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to find assume role chain links")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	for _, link := range dblinks {
+		targetArn, err := arn.Parse(link.TargetARN)
+		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "err-target-arn", Value: link.TargetARN})
+			err := telemetry.Error(ctx, span, err, "unable to parse target arn")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		account := AwsAccount{
+			CloudProviderID: targetArn.AccountID,
+			ProjectID:       uint(link.ProjectID),
+		}
+		if contains(res.Accounts, account) {
+			continue
+		}
+
+		res.Accounts = append(res.Accounts, account)
+	}
+	c.WriteResult(w, r, res)
+}
+
+// contains will check if the list of AwsAccounts contains the specified account
+// TODO: replace this with an upgrade to Go 1.21 in favor of slices.Contains()
+func contains(s []AwsAccount, e AwsAccount) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}

+ 13 - 15
api/server/handlers/cluster/cluster_status.go

@@ -1,7 +1,6 @@
 package cluster
 
 import (
-	"fmt"
 	"net/http"
 
 	"connectrpc.com/connect"
@@ -9,7 +8,6 @@ import (
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
@@ -45,32 +43,32 @@ func (c *ClusterStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	defer span.End()
 
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 	req := connect.NewRequest(&porterv1.ClusterStatusRequest{
 		ProjectId: int64(cluster.ProjectID),
 		ClusterId: int64(cluster.ID),
 	})
+	resp := ClusterStatusResponse{
+		ProjectID: int(project.ID),
+		ClusterID: int(cluster.ID),
+	}
+
 	status, err := c.Config().ClusterControlPlaneClient.ClusterStatus(ctx, req)
 	if err != nil {
-		err := fmt.Errorf("unable to retrieve status for cluster: %w", err)
-		err = telemetry.Error(ctx, span, err, err.Error())
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		_ = telemetry.Error(ctx, span, err, "error getting cluster status")
+		c.WriteResult(w, r, resp)
 		return
 	}
 	if status.Msg == nil {
-		err := fmt.Errorf("unable to parse status for cluster: %w", err)
-		err = telemetry.Error(ctx, span, err, err.Error())
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		_ = telemetry.Error(ctx, span, nil, "error getting cluster status")
+		c.WriteResult(w, r, resp)
 		return
 	}
 	statusResp := status.Msg
 
-	resp := ClusterStatusResponse{
-		ProjectID:             int(statusResp.ProjectId),
-		ClusterID:             int(statusResp.ClusterId),
-		Phase:                 statusResp.Phase,
-		IsInfrastructureReady: statusResp.InfrastructureStatus,
-		IsControlPlaneReady:   statusResp.ControlPlaneStatus,
-	}
+	resp.Phase = statusResp.Phase
+	resp.IsInfrastructureReady = statusResp.InfrastructureStatus
+	resp.IsControlPlaneReady = statusResp.ControlPlaneStatus
 
 	telemetry.WithAttributes(span,
 		telemetry.AttributeKV{Key: "cluster-phase", Value: statusResp.Phase},

+ 136 - 0
api/server/handlers/cluster/compliance_checks.go

@@ -0,0 +1,136 @@
+package cluster
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/compliance"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ListComplianceChecksHandler is the handler for /compliance/checks
+type ListComplianceChecksHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewListComplianceChecksHandler returns a new ListComplianceChecksHandler
+func NewListComplianceChecksHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *ListComplianceChecksHandler {
+	return &ListComplianceChecksHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// ListComplianceChecksRequest is the expected format for a request to /compliance/checks
+type ListComplianceChecksRequest struct {
+	Vendor  compliance.Vendor  `schema:"vendor"`
+	Profile compliance.Profile `schema:"profile"`
+}
+
+// ListComplianceChecksResponse is the expected format for a response from /compliance/checks
+type ListComplianceChecksResponse struct {
+	CheckGroups  []compliance.CheckGroup            `json:"check_groups,omitempty"`
+	VendorChecks []compliance.VendorComplianceCheck `json:"vendor_checks,omitempty"`
+}
+
+// ServeHTTP retrieves the evaluated compliance checks for a cluster
+func (c *ListComplianceChecksHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-compliance-checks")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &ListComplianceChecksRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	var vendor porterv1.EnumComplianceVendor
+	if request.Vendor != "" {
+		switch request.Vendor {
+		case compliance.Vendor_Vanta:
+			vendor = porterv1.EnumComplianceVendor_ENUM_COMPLIANCE_VENDOR_VANTA
+		case compliance.Vendor_OneLeet:
+			vendor = porterv1.EnumComplianceVendor_ENUM_COMPLIANCE_VENDOR_ONE_LEET
+		default:
+			err := telemetry.Error(ctx, span, nil, "invalid vendor")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+	}
+
+	var profile porterv1.EnumComplianceProfile
+	if request.Profile != "" {
+		switch request.Profile {
+		case compliance.Profile_SOC2:
+			profile = porterv1.EnumComplianceProfile_ENUM_COMPLIANCE_PROFILE_SOC2
+		case compliance.Profile_HIPAA:
+			profile = porterv1.EnumComplianceProfile_ENUM_COMPLIANCE_PROFILE_HIPAA
+		default:
+			err := telemetry.Error(ctx, span, nil, "invalid profile")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+	}
+
+	req := connect.NewRequest(&porterv1.ContractComplianceChecksRequest{
+		ProjectId: int64(project.ID),
+		ClusterId: int64(cluster.ID),
+		Vendor:    vendor,
+		Profile:   profile,
+	})
+
+	ccpResp, err := c.Config().ClusterControlPlaneClient.ContractComplianceChecks(ctx, req)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling ccp for contract compliance checks")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	cgs, err := compliance.CheckGroupsFromProto(ctx, ccpResp.Msg.CheckGroups)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error converting compliance check groups from proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "num-check-groups", Value: len(cgs)})
+
+	vendorChecks, err := compliance.VendorCheckGroupsFromProto(ctx, ccpResp.Msg.VendorChecks)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error converting vendor compliance check groups from proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "num-vendor-checks", Value: len(vendorChecks)})
+
+	resp := &ListComplianceChecksResponse{
+		CheckGroups:  cgs,
+		VendorChecks: vendorChecks,
+	}
+
+	c.WriteResult(w, r, resp)
+	w.WriteHeader(http.StatusOK)
+}

+ 14 - 10
api/server/handlers/cluster/delete.go

@@ -1,7 +1,6 @@
 package cluster
 
 import (
-	"fmt"
 	"net/http"
 
 	"connectrpc.com/connect"
@@ -13,6 +12,8 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type ClusterDeleteHandler struct {
@@ -31,20 +32,22 @@ func NewClusterDeleteHandler(
 }
 
 func (c *ClusterDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-delete-cluster")
+	defer span.End()
+
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	if cluster.ProvisionedBy == "CAPI" {
 		if c.Config().EnableCAPIProvisioner {
-			revisions, err := c.Config().Repo.APIContractRevisioner().List(ctx, cluster.ProjectID, cluster.ID)
+			revisions, err := c.Config().Repo.APIContractRevisioner().List(ctx, cluster.ProjectID, repository.WithClusterID(cluster.ID))
 			if err != nil {
-				e := fmt.Errorf("error listing revisions for cluster %d: %w", cluster.ID, err)
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+				err = telemetry.Error(ctx, span, err, "error listing revisions for cluster")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			}
 			if cluster.Status == types.UpdatingUnavailable || cluster.Status == types.Updating {
-				e := fmt.Errorf("unable to delete cluster %d that is updating", cluster.ID)
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+				err = telemetry.Error(ctx, span, nil, "unable to delete cluster that is updating")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			}
 			var revisionID string
@@ -63,8 +66,8 @@ func (c *ClusterDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			})
 			_, err = c.Config().ClusterControlPlaneClient.DeleteCluster(ctx, cl)
 			if err != nil {
-				e := fmt.Errorf("error deleting cluster %d: %w", cluster.ID, err)
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+				err = telemetry.Error(ctx, span, err, "error deleting cluster")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			}
 		}
@@ -72,7 +75,8 @@ func (c *ClusterDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 	err := c.Repo().Cluster().DeleteCluster(cluster)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error deleting cluster")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 1 - 2
api/server/handlers/cluster/get.go

@@ -6,7 +6,6 @@ import (
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/kubernetes"
@@ -38,7 +37,7 @@ func (c *ClusterGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	agent, err := c.GetAgent(r, cluster, "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		c.WriteResult(w, r, res)
 		return
 	}
 

+ 21 - 3
api/server/handlers/cluster/get_logs.go

@@ -11,6 +11,7 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	porter_agent "github.com/porter-dev/porter/internal/kubernetes/porter_agent/v2"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type GetLogsHandler struct {
@@ -30,6 +31,9 @@ func NewGetLogsHandler(
 }
 
 func (c *GetLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-logs")
+	defer span.End()
+
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
 	request := &types.GetLogRequest{}
@@ -38,6 +42,18 @@ func (c *GetLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+		telemetry.AttributeKV{Key: "limit", Value: request.Limit},
+		telemetry.AttributeKV{Key: "start-range", Value: request.StartRange},
+		telemetry.AttributeKV{Key: "end-range", Value: request.EndRange},
+		telemetry.AttributeKV{Key: "search-param", Value: request.SearchParam},
+		telemetry.AttributeKV{Key: "revision", Value: request.Revision},
+		telemetry.AttributeKV{Key: "pod-selector", Value: request.PodSelector},
+		telemetry.AttributeKV{Key: "namespace", Value: request.Namespace},
+		telemetry.AttributeKV{Key: "direction", Value: request.Direction},
+	)
+
 	agent, err := c.GetAgent(r, cluster, "")
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
@@ -47,13 +63,15 @@ func (c *GetLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	// get agent service
 	agentSvc, err := porter_agent.GetAgentService(agent.Clientset)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get agent service")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	logs, err := porter_agent.GetHistoricalLogs(agent.Clientset, agentSvc, request)
+	logs, err := porter_agent.GetHistoricalLogs(ctx, agent.Clientset, agentSvc, request)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "unable to get historical logs")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 5 - 0
api/server/handlers/cluster/notify_new_incident.go

@@ -37,8 +37,13 @@ func NewNotifyNewIncidentHandler(
 }
 
 func (c *NotifyNewIncidentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		return
+	}
+
 	request := &types.Incident{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {

+ 5 - 0
api/server/handlers/cluster/notify_resolved_incident.go

@@ -36,8 +36,13 @@ func NewNotifyResolvedIncidentHandler(
 }
 
 func (c *NotifyResolvedIncidentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		return
+	}
+
 	request := &types.Incident{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {

+ 9 - 2
api/server/handlers/cluster/rename.go

@@ -10,6 +10,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type RenameClusterHandler struct {
@@ -29,10 +30,15 @@ func NewRenameClusterHandler(
 }
 
 func (c *RenameClusterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-rename-cluster")
+	defer span.End()
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	request := &types.UpdateClusterRequest{}
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -42,7 +48,8 @@ func (c *RenameClusterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 	cluster, err := c.Repo().Cluster().UpdateCluster(cluster, c.Config().LaunchDarklyClient)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error updating cluster")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 121 - 0
api/server/handlers/datastore/create_proxy.go

@@ -0,0 +1,121 @@
+package datastore
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/datastore"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CreateDatastoreProxyResponse is the response body for the create datastore proxy endpoint.
+type CreateDatastoreProxyResponse struct {
+	// PodName is the name of the pod that was created
+	PodName string `json:"pod_name"`
+	// Credential is the credential used to connect to the datastore
+	Credential datastore.Credential `json:"credential"`
+	// ClusterID is the ID of the cluster that the pod was created in
+	ClusterID uint `json:"cluster_id"`
+	// Namespace is the namespace that the pod was created in
+	Namespace string `json:"namespace"`
+	// Type is the type of datastore
+	Type string `json:"type"`
+}
+
+// CreateDatastoreProxyHandler is a handler for creating a datastore proxy pod which is used to connect to the datastore
+type CreateDatastoreProxyHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewCreateDatastoreProxyHandler returns a CreateDatastoreProxyHandler
+func NewCreateDatastoreProxyHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateDatastoreProxyHandler {
+	return &CreateDatastoreProxyHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// ServeHTTP creates a datastore proxy pod
+func (c *CreateDatastoreProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-datastore-proxy")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	if project.ID == 0 {
+		err := telemetry.Error(ctx, span, nil, "project not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	projectId := int64(project.ID)
+
+	var resp CreateDatastoreProxyResponse
+
+	datastoreName, reqErr := requestutils.GetURLParamString(r, types.URLParamDatastoreName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing datastore name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-name", Value: datastoreName})
+
+	datastoreRecord, err := c.Repo().Datastore().GetByProjectIDAndName(ctx, project.ID, datastoreName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "datastore record not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if datastoreRecord == nil || datastoreRecord.ID == uuid.Nil {
+		err = telemetry.Error(ctx, span, nil, "datastore record does not exist")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	message := porterv1.CreateDatastoreProxyRequest{
+		ProjectId:   projectId,
+		DatastoreId: datastoreRecord.ID.String(),
+	}
+	req := connect.NewRequest(&message)
+	ccpResp, err := c.Config().ClusterControlPlaneClient.CreateDatastoreProxy(ctx, req)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating datastore proxy")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "error creating datastore proxy")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	resp = CreateDatastoreProxyResponse{
+		PodName: ccpResp.Msg.PodName,
+		Credential: datastore.Credential{
+			Host:         ccpResp.Msg.Credential.Host,
+			Port:         int(ccpResp.Msg.Credential.Port),
+			Username:     ccpResp.Msg.Credential.Username,
+			Password:     ccpResp.Msg.Credential.Password,
+			DatabaseName: ccpResp.Msg.Credential.DatabaseName,
+		},
+		ClusterID: uint(ccpResp.Msg.ClusterId),
+		Namespace: ccpResp.Msg.Namespace,
+		Type:      datastoreRecord.Type,
+	}
+
+	c.WriteResult(w, r, resp)
+}

+ 109 - 0
api/server/handlers/datastore/credential.go

@@ -0,0 +1,109 @@
+package datastore
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/datastore"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// GetDatastoreCredentialResponse describes the datastore credential response body.
+type GetDatastoreCredentialResponse struct {
+	// Credential is the credential that has been retrieved for this datastore.
+	Credential datastore.Credential `json:"credential"`
+}
+
+// GetDatastoreCredentialHandler is a struct for retrieving credentials for datastore
+type GetDatastoreCredentialHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewGetDatastoreCredentialHandler returns a GetDatastoreCredentialHandler
+func NewGetDatastoreCredentialHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetDatastoreCredentialHandler {
+	return &GetDatastoreCredentialHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// ServeHTTP retrieves the credentials for a datastore
+func (c *GetDatastoreCredentialHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-datastore-credential")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	if project.ID == 0 {
+		err := telemetry.Error(ctx, span, nil, "project not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	projectId := int64(project.ID)
+
+	var resp GetDatastoreCredentialResponse
+
+	datastoreName, reqErr := requestutils.GetURLParamString(r, types.URLParamDatastoreName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing datastore name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-name", Value: datastoreName})
+
+	datastoreRecord, err := c.Repo().Datastore().GetByProjectIDAndName(ctx, project.ID, datastoreName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "datastore record not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if datastoreRecord == nil || datastoreRecord.ID == uuid.Nil {
+		err = telemetry.Error(ctx, span, nil, "datastore record does not exist")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	message := porterv1.DatastoreCredentialRequest{
+		ProjectId:   projectId,
+		DatastoreId: datastoreRecord.ID.String(),
+	}
+	req := connect.NewRequest(&message)
+	ccpResp, err := c.Config().ClusterControlPlaneClient.DatastoreCredential(ctx, req)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting datastore credential")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "datastore credential not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	resp = GetDatastoreCredentialResponse{
+		Credential: datastore.Credential{
+			Host:         ccpResp.Msg.Credential.Host,
+			Port:         int(ccpResp.Msg.Credential.Port),
+			Username:     ccpResp.Msg.Credential.Username,
+			Password:     ccpResp.Msg.Credential.Password,
+			DatabaseName: ccpResp.Msg.Credential.DatabaseName,
+		},
+	}
+
+	c.WriteResult(w, r, resp)
+}

+ 102 - 0
api/server/handlers/datastore/delete.go

@@ -0,0 +1,102 @@
+package datastore
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// DeleteDatastoreHandler handles requests to delete a datastore from a
+// project. It embeds the standard Porter read/write handler plumbing plus a
+// kubernetes agent getter (provided for parity with the other datastore
+// handlers in this package).
+type DeleteDatastoreHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewDeleteDatastoreHandler constructs a datastore DeleteDatastoreHandler
+func NewDeleteDatastoreHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *DeleteDatastoreHandler {
+	handler := &DeleteDatastoreHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+	return handler
+}
+
+// ServeHTTP handles a datastore deletion request. Datastores provisioned on
+// the management cluster are removed by patching the project's cloud
+// contract; legacy datastores on customer clusters are instead marked
+// awaiting deletion and an update is requested from CCP. Responds with
+// 202 Accepted once deletion has been initiated.
+func (h *DeleteDatastoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-delete-datastore")
+	defer span.End()
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	datastoreName, reqErr := requestutils.GetURLParamString(r, types.URLParamDatastoreName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing datastore name")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-name", Value: datastoreName})
+
+	datastoreRecord, err := h.Repo().Datastore().GetByProjectIDAndName(ctx, project.ID, datastoreName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "datastore record not found")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// BUGFIX: the nil/zero-ID guard must run before any dereference of
+	// datastoreRecord. Previously OnManagementCluster was read first, which
+	// panics when the repository returns a nil record.
+	if datastoreRecord == nil || datastoreRecord.ID == uuid.Nil {
+		err = telemetry.Error(ctx, span, nil, "datastore record does not exist")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+		return
+	}
+
+	// Legacy path: datastore lives on a customer cluster rather than the
+	// management cluster. Mark it awaiting deletion, then ask CCP to process
+	// the update.
+	if !datastoreRecord.OnManagementCluster {
+		_, err = h.Repo().Datastore().UpdateStatus(ctx, datastoreRecord, models.DatastoreStatus_AwaitingDeletion)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error updating datastore status")
+			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		updateReq := connect.NewRequest(&porterv1.UpdateDatastoreRequest{
+			ProjectId:   int64(project.ID),
+			DatastoreId: datastoreRecord.ID.String(),
+		})
+
+		_, err = h.Config().ClusterControlPlaneClient.UpdateDatastore(ctx, updateReq)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error calling ccp update datastore")
+			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		w.WriteHeader(http.StatusAccepted)
+		return
+	}
+
+	// Management-cluster path: deletion is expressed as a cloud-contract patch.
+	req := connect.NewRequest(&porterv1.PatchCloudContractRequest{
+		ProjectId:    int64(project.ID),
+		Operation:    porterv1.EnumPatchCloudContractOperation_ENUM_PATCH_CLOUD_CONTRACT_OPERATION_DELETE,
+		ResourceType: porterv1.EnumPatchCloudContractType_ENUM_PATCH_CLOUD_CONTRACT_TYPE_DATASTORE,
+		ResourceId:   datastoreRecord.ID.String(),
+	})
+	_, err = h.Config().ClusterControlPlaneClient.PatchCloudContract(ctx, req)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error patching cloud contract")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+}

+ 244 - 0
api/server/handlers/datastore/get.go

@@ -0,0 +1,244 @@
+package datastore
+
+import (
+	"context"
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/handlers/cloud_provider"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/datastore"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// GetDatastoreResponse describes the get datastore response body
+// (the original comment said "list datastores", which belongs to list.go).
+type GetDatastoreResponse struct {
+	// Datastore is the datastore that has been retrieved
+	Datastore datastore.Datastore `json:"datastore"`
+}
+
+// GetDatastoreHandler retrieves a single datastore within a project.
+// It embeds the standard Porter read/write handler plumbing plus a
+// kubernetes agent getter.
+type GetDatastoreHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewGetDatastoreHandler returns a GetDatastoreHandler
+func NewGetDatastoreHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetDatastoreHandler {
+	handler := &GetDatastoreHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+	return handler
+}
+
+const (
+	// SupportedDatastoreCloudProvider_AWS identifies AWS as a supported
+	// datastore cloud provider; used to populate the CloudProvider field on
+	// management-cluster datastore responses below.
+	SupportedDatastoreCloudProvider_AWS string = "AWS"
+)
+
+// ServeHTTP retrieves the datastore in the given project. Legacy datastores
+// (on customer clusters) are resolved through CCP's ListDatastores API;
+// management-cluster datastores are built from the local record and hydrated
+// with connected clusters and credentials from CCP.
+func (c *GetDatastoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-datastore")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	resp := GetDatastoreResponse{}
+
+	datastoreName, reqErr := requestutils.GetURLParamString(r, types.URLParamDatastoreName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing datastore name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-name", Value: datastoreName})
+
+	datastoreRecord, err := c.Repo().Datastore().GetByProjectIDAndName(ctx, project.ID, datastoreName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "datastore record not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if datastoreRecord == nil || datastoreRecord.ID == uuid.Nil {
+		err = telemetry.Error(ctx, span, nil, "datastore record does not exist")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+		return
+	}
+
+	// TODO: delete this branch once all datastores are on the management cluster
+	if !datastoreRecord.OnManagementCluster {
+		awsArn, err := arn.Parse(datastoreRecord.CloudProviderCredentialIdentifier)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error parsing aws account id")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		// FIX: renamed the local from "datastore" to avoid shadowing the
+		// imported datastore package.
+		legacyDatastore, err := c.LEGACY_handleGetDatastore(ctx, project.ID, awsArn.AccountID, datastoreName, datastoreRecord.ID)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error retrieving datastore")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		resp.Datastore = legacyDatastore
+		c.WriteResult(w, r, resp)
+		return
+	}
+
+	ds := datastore.Datastore{
+		Name:                              datastoreRecord.Name,
+		Type:                              datastoreRecord.Type,
+		Engine:                            datastoreRecord.Engine,
+		CreatedAtUTC:                      datastoreRecord.CreatedAt,
+		Status:                            string(datastoreRecord.Status),
+		CloudProvider:                     SupportedDatastoreCloudProvider_AWS,
+		CloudProviderCredentialIdentifier: datastoreRecord.CloudProviderCredentialIdentifier,
+		OnManagementCluster:               true,
+	}
+
+	// this is done for backwards compatibility; eventually we will just return proto
+	ds.ConnectedClusterIds = c.connectedClusters(ctx, project, datastoreRecord.ID)
+	ds.Credential = c.credential(ctx, project, datastoreRecord.ID)
+
+	resp.Datastore = ds
+
+	c.WriteResult(w, r, resp)
+}
+
+// LEGACY_handleGetDatastore retrieves the datastore in the given project for datastores that are on the customer clusters rather than the management cluster.
+// It lists datastores through CCP filtered to the given name, expects exactly
+// one match, and then best-effort attaches the datastore credential.
+func (c *GetDatastoreHandler) LEGACY_handleGetDatastore(ctx context.Context, projectId uint, accountId string, datastoreName string, datastoreId uuid.UUID) (datastore.Datastore, error) {
+	ctx, span := telemetry.NewSpan(ctx, "legacy-handle-get-datastore")
+	defer span.End()
+
+	var ds datastore.Datastore
+
+	datastores, err := Datastores(ctx, DatastoresInput{
+		ProjectID: projectId,
+		CloudProvider: cloud_provider.CloudProvider{
+			AccountID: accountId,
+			Type:      porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS,
+		},
+		Name:                datastoreName,
+		IncludeEnvGroup:     true,
+		IncludeMetadata:     true,
+		CCPClient:           c.Config().ClusterControlPlaneClient,
+		DatastoreRepository: c.Repo().Datastore(),
+	})
+	if err != nil {
+		return ds, telemetry.Error(ctx, span, err, "error listing datastores")
+	}
+
+	if len(datastores) != 1 {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-count", Value: len(datastores)})
+		if len(datastores) == 0 {
+			return ds, telemetry.Error(ctx, span, nil, "datastore not found")
+		}
+		return ds, telemetry.Error(ctx, span, nil, "unexpected number of datastores found matching filters")
+	}
+
+	ds = datastores[0]
+
+	message := porterv1.DatastoreCredentialRequest{
+		ProjectId:   int64(projectId),
+		DatastoreId: datastoreId.String(),
+	}
+	req := connect.NewRequest(&message)
+	ccpResp, err := c.Config().ClusterControlPlaneClient.DatastoreCredential(ctx, req)
+	// the credential may not exist because the datastore is not yet ready.
+	// BUGFIX: also guard against a nil Credential inside the message before
+	// dereferencing its fields.
+	if err == nil && ccpResp != nil && ccpResp.Msg != nil && ccpResp.Msg.Credential != nil {
+		ds.Credential = datastore.Credential{
+			Host:         ccpResp.Msg.Credential.Host,
+			Port:         int(ccpResp.Msg.Credential.Port),
+			Username:     ccpResp.Msg.Credential.Username,
+			Password:     ccpResp.Msg.Credential.Password,
+			DatabaseName: ccpResp.Msg.Credential.DatabaseName,
+		}
+	}
+
+	return ds, nil
+}
+
+// connectedClusters reads the project's cloud contract from CCP and returns
+// the cluster IDs connected to the given datastore. Any failure along the
+// way yields an empty (non-nil) slice — this hydration is best-effort.
+func (c *GetDatastoreHandler) connectedClusters(ctx context.Context, project *models.Project, datastoreID uuid.UUID) []uint {
+	ctx, span := telemetry.NewSpan(ctx, "hydrate-connected-clusters")
+	defer span.End()
+
+	connected := make([]uint, 0)
+
+	req := connect.NewRequest(&porterv1.ReadCloudContractRequest{
+		ProjectId: int64(project.ID),
+	})
+	ccpResp, err := c.Config().ClusterControlPlaneClient.ReadCloudContract(ctx, req)
+	if err != nil || ccpResp.Msg == nil || ccpResp.Msg.CloudContract == nil {
+		return connected
+	}
+
+	// Find the contract entry for this datastore, if present.
+	var match *porterv1.ManagedDatastore
+	for _, candidate := range ccpResp.Msg.CloudContract.Datastores {
+		if candidate.Id == datastoreID.String() {
+			match = candidate
+			break
+		}
+	}
+
+	if match == nil || match.ConnectedClusters == nil {
+		return connected
+	}
+	for _, clusterID := range match.ConnectedClusters.ConnectedClusterIds {
+		connected = append(connected, uint(clusterID))
+	}
+
+	return connected
+}
+
+// credential fetches the datastore's connection credential from CCP. Any
+// failure or missing data yields a zero-value Credential — this hydration is
+// best-effort.
+func (c *GetDatastoreHandler) credential(ctx context.Context, project *models.Project, datastoreID uuid.UUID) datastore.Credential {
+	ctx, span := telemetry.NewSpan(ctx, "hydrate-credential")
+	defer span.End()
+
+	message := porterv1.DatastoreCredentialRequest{
+		ProjectId:   int64(project.ID),
+		DatastoreId: datastoreID.String(),
+	}
+	req := connect.NewRequest(&message)
+	ccpResp, err := c.Config().ClusterControlPlaneClient.DatastoreCredential(ctx, req)
+	if err != nil {
+		return datastore.Credential{}
+	}
+
+	// BUGFIX: also guard against a nil Credential inside the message before
+	// dereferencing its fields.
+	if ccpResp == nil || ccpResp.Msg == nil || ccpResp.Msg.Credential == nil {
+		return datastore.Credential{}
+	}
+
+	return datastore.Credential{
+		Host:         ccpResp.Msg.Credential.Host,
+		Port:         int(ccpResp.Msg.Credential.Port),
+		Username:     ccpResp.Msg.Credential.Username,
+		Password:     ccpResp.Msg.Credential.Password,
+		DatabaseName: ccpResp.Msg.Credential.DatabaseName,
+	}
+}

+ 177 - 0
api/server/handlers/datastore/list.go

@@ -0,0 +1,177 @@
+package datastore
+
+import (
+	"context"
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/api-contracts/generated/go/porter/v1/porterv1connect"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/handlers/cloud_provider"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/datastore"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ListDatastoresRequest is a struct that represents the various filter options used for
+// retrieving the datastores.
+// NOTE(review): the ServeHTTP handler in this file does not currently decode
+// this request, so these filters appear unused there — confirm whether the
+// filters are consumed elsewhere or should be wired up.
+type ListDatastoresRequest struct {
+	// Name is the name of the datastore to filter by
+	Name string `schema:"name"`
+
+	// Type is the type of the datastore to filter by
+	Type string `schema:"type"`
+
+	// IncludeEnvGroup controls whether to include the datastore env group or not
+	IncludeEnvGroup bool `schema:"include_env_group"`
+
+	// IncludeMetadata controls whether to include datastore metadata or not
+	IncludeMetadata bool `schema:"include_metadata"`
+}
+
+// ListDatastoresResponse describes the list datastores response body
+type ListDatastoresResponse struct {
+	// Datastores is a list of datastore entries for the http response
+	Datastores []datastore.Datastore `json:"datastores"`
+}
+
+// ListDatastoresHandler is a struct for listing all datastores for a given
+// project. It embeds the standard Porter read/write handler plumbing plus a
+// kubernetes agent getter.
+type ListDatastoresHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewListDatastoresHandler constructs a datastore ListDatastoresHandler
+func NewListDatastoresHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *ListDatastoresHandler {
+	handler := &ListDatastoresHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+	return handler
+}
+
+// ServeHTTP returns a list of datastores associated with the specified
+// project, mapped from the local datastore records into response entries.
+func (h *ListDatastoresHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-datastores")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	resp := ListDatastoresResponse{}
+
+	datastores, err := h.Repo().Datastore().ListByProjectID(ctx, project.ID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting datastores")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// Pre-size the slice to avoid repeated growth; keep it non-nil so an
+	// empty result encodes as [] rather than null in the JSON response.
+	datastoreList := make([]datastore.Datastore, 0, len(datastores))
+	for _, ds := range datastores {
+		datastoreList = append(datastoreList, datastore.Datastore{
+			Name:                              ds.Name,
+			Type:                              ds.Type,
+			Engine:                            ds.Engine,
+			CreatedAtUTC:                      ds.CreatedAt,
+			Status:                            string(ds.Status),
+			CloudProvider:                     ds.CloudProvider,
+			CloudProviderCredentialIdentifier: ds.CloudProviderCredentialIdentifier,
+		})
+	}
+
+	resp.Datastores = datastoreList
+
+	h.WriteResult(w, r, resp)
+}
+
+// DatastoresInput is the input to the Datastores function.
+type DatastoresInput struct {
+	// ProjectID is the project to list datastores for (required).
+	ProjectID uint
+	// CloudProvider scopes the listing to one cloud provider account
+	// (type and account id are both required).
+	CloudProvider cloud_provider.CloudProvider
+	// Name optionally filters to a single datastore name.
+	Name string
+	// Type optionally filters by datastore type; the zero (unspecified)
+	// value means no type filter.
+	Type porterv1.EnumDatastore
+	// IncludeEnvGroup and IncludeMetadata are forwarded as telemetry
+	// attributes; see the Datastores function body for current usage.
+	IncludeEnvGroup bool
+	IncludeMetadata bool
+
+	// CCPClient is the cluster control plane client used for the listing RPC.
+	CCPClient porterv1connect.ClusterControlPlaneServiceClient
+	// DatastoreRepository resolves local datastore records by project and name.
+	DatastoreRepository repository.DatastoreRepository
+}
+
+// Datastores returns a list of datastores associated with the specified
+// project/cloud-provider. It validates the input, lists datastores through
+// CCP, and enriches each entry with its local repository record. An error is
+// returned if any listed datastore has no local record.
+func Datastores(ctx context.Context, inp DatastoresInput) ([]datastore.Datastore, error) {
+	ctx, span := telemetry.NewSpan(ctx, "datastores-for-cloud-provider")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "datastore-name", Value: inp.Name},
+		telemetry.AttributeKV{Key: "datastore-type", Value: int(inp.Type)},
+		telemetry.AttributeKV{Key: "include-env-group", Value: inp.IncludeEnvGroup},
+		telemetry.AttributeKV{Key: "include-metadata", Value: inp.IncludeMetadata},
+		telemetry.AttributeKV{Key: "cloud-provider-type", Value: int(inp.CloudProvider.Type)},
+		telemetry.AttributeKV{Key: "cloud-provider-id", Value: inp.CloudProvider.AccountID},
+		telemetry.AttributeKV{Key: "project-id", Value: inp.ProjectID},
+	)
+
+	datastores := []datastore.Datastore{}
+
+	if inp.ProjectID == 0 {
+		return datastores, telemetry.Error(ctx, span, nil, "project id must be specified")
+	}
+	if inp.CloudProvider.Type == porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_UNSPECIFIED {
+		return datastores, telemetry.Error(ctx, span, nil, "cloud provider type must be specified")
+	}
+	if inp.CloudProvider.AccountID == "" {
+		return datastores, telemetry.Error(ctx, span, nil, "cloud provider account id must be specified")
+	}
+
+	message := porterv1.ListDatastoresRequest{
+		ProjectId:              int64(inp.ProjectID),
+		CloudProvider:          inp.CloudProvider.Type,
+		CloudProviderAccountId: inp.CloudProvider.AccountID,
+		Name:                   inp.Name,
+	}
+	if inp.Type != porterv1.EnumDatastore_ENUM_DATASTORE_UNSPECIFIED {
+		message.Type = &inp.Type
+	}
+	req := connect.NewRequest(&message)
+	resp, ccpErr := inp.CCPClient.ListDatastores(ctx, req)
+	if ccpErr != nil {
+		return datastores, telemetry.Error(ctx, span, ccpErr, "error listing datastores from ccp")
+	}
+	// CONSISTENCY FIX: guard against a nil response as well as a nil message,
+	// matching the `resp == nil || resp.Msg == nil` pattern used by the other
+	// datastore handlers.
+	if resp == nil || resp.Msg == nil {
+		return datastores, telemetry.Error(ctx, span, nil, "missing response message from ccp")
+	}
+
+	for _, ds := range resp.Msg.Datastores {
+		datastoreRecord, err := inp.DatastoreRepository.GetByProjectIDAndName(ctx, inp.ProjectID, ds.Name)
+		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "err-datastore-name", Value: ds.Name})
+			return datastores, telemetry.Error(ctx, span, err, "datastore record not found")
+		}
+
+		encodedDatastore := datastore.Datastore{
+			Name:                              ds.Name,
+			Type:                              datastoreRecord.Type,
+			Engine:                            datastoreRecord.Engine,
+			CreatedAtUTC:                      datastoreRecord.CreatedAt,
+			Status:                            string(datastoreRecord.Status),
+			CloudProvider:                     datastoreRecord.CloudProvider,
+			CloudProviderCredentialIdentifier: datastoreRecord.CloudProviderCredentialIdentifier,
+			ConnectedClusterIds:               []uint{uint(ds.ConnectedClusterId)},
+			OnManagementCluster:               false,
+		}
+		datastores = append(datastores, encodedDatastore)
+	}
+
+	return datastores, nil
+}

+ 0 - 104
api/server/handlers/datastore/status.go

@@ -1,104 +0,0 @@
-package datastore
-
-import (
-	"net/http"
-
-	"connectrpc.com/connect"
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-	"github.com/porter-dev/porter/internal/telemetry"
-)
-
-// StatusRequest describes an inbound datastore status request
-type StatusRequest struct {
-	Type string `json:"type"`
-	Name string `json:"name"`
-}
-
-// StatusResponse describes an outbound datastore status response
-type StatusResponse struct {
-	Status string `json:"status"`
-}
-
-// StatusHandler is a struct for handling datastore status requests
-type StatusHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-// NewStatusHandler constructs a datastore StatusHandler
-func NewStatusHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *StatusHandler {
-	return &StatusHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-func (h *StatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-datastore-status")
-	defer span.End()
-	// read the project from context
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	request := &StatusRequest{}
-	if ok := h.DecodeAndValidate(w, r, request); !ok {
-		err := telemetry.Error(ctx, span, nil, "error decoding request")
-		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "datastore-name", Value: request.Name},
-		telemetry.AttributeKV{Key: "datastore-type", Value: request.Type},
-	)
-
-	var datastoreType porterv1.EnumDatastore
-	switch request.Type {
-	case "rds-postgresql":
-		datastoreType = porterv1.EnumDatastore_ENUM_DATASTORE_RDS_POSTGRESQL
-	case "rds-postgresql-aurora":
-		datastoreType = porterv1.EnumDatastore_ENUM_DATASTORE_RDS_AURORA_POSTGRESQL
-	case "elasticache-redis":
-		datastoreType = porterv1.EnumDatastore_ENUM_DATASTORE_ELASTICACHE_REDIS
-	default:
-		err := telemetry.Error(ctx, span, nil, "invalid datastore specified")
-		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	req := connect.NewRequest(&porterv1.DatastoreStatusRequest{
-		ProjectId: int64(project.ID),
-		ClusterId: int64(cluster.ID),
-		Type:      datastoreType,
-		Name:      request.Name,
-	})
-
-	resp, err := h.Config().ClusterControlPlaneClient.DatastoreStatus(ctx, req)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error fetching datastore status from ccp")
-		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if resp.Msg == nil {
-		err := telemetry.Error(ctx, span, err, "missing response message from ccp")
-		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-status", Value: resp.Msg.Status})
-	h.WriteResult(w, r, StatusResponse{
-		Status: resp.Msg.Status,
-	})
-}

+ 520 - 0
api/server/handlers/datastore/update.go

@@ -0,0 +1,520 @@
+package datastore
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"strings"
+
+	"connectrpc.com/connect"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/handlers/release"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/datastore"
+	"github.com/porter-dev/porter/internal/helm"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/pointer"
+)
+
+// UpdateDatastoreHandler is a struct for updating datastores.
+// Currently, this is expected to be used once (on create) and then not again, however the 'update' terminology was proactively used
+// so we can reuse this handler when we support updates in the future.
+type UpdateDatastoreHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewUpdateDatastoreHandler constructs a datastore UpdateDatastoreHandler
+func NewUpdateDatastoreHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *UpdateDatastoreHandler {
+	handler := &UpdateDatastoreHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+	return handler
+}
+
+// UpdateDatastoreRequest is the expected format of the request body
+type UpdateDatastoreRequest struct {
+	// Name is the datastore name (used directly by the legacy flow).
+	Name string `json:"name"`
+	// Type selects the datastore family, e.g. "RDS" or "ELASTICACHE".
+	Type string `json:"type"`
+	// Engine selects the engine within the family, e.g. "POSTGRES".
+	Engine string `json:"engine"`
+	// Values carries free-form configuration; see the config struct decoded
+	// in ServeHTTP for the fields read from it.
+	Values map[string]interface{} `json:"values"`
+}
+
+// UpdateDatastoreResponse is the expected format of the response body
+type UpdateDatastoreResponse struct{}
+
+// ServeHTTP updates a datastore using the decoded values.
+// When the project's beta-features flag is off it falls back to the legacy
+// helm-install flow on the customer cluster; otherwise it builds a
+// ManagedDatastore proto from the request values and submits it to CCP as a
+// cloud-contract patch.
+func (h *UpdateDatastoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-datastore")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &UpdateDatastoreRequest{}
+	if ok := h.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding update datastore request")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	betaFeaturesEnabled := project.GetFeatureFlag(models.BetaFeaturesEnabled, h.Config().LaunchDarklyClient)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "name", Value: request.Name},
+		telemetry.AttributeKV{Key: "type", Value: request.Type},
+		telemetry.AttributeKV{Key: "engine", Value: request.Engine},
+		telemetry.AttributeKV{Key: "beta-features-enabled", Value: betaFeaturesEnabled},
+	)
+
+	// Legacy path: helm-install the datastore on the customer cluster.
+	if !betaFeaturesEnabled {
+		err := h.legacy_DatastoreCreateFlow(ctx, request, project, cluster, r)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error creating datastore")
+			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		h.WriteResult(w, r, UpdateDatastoreResponse{})
+		return
+	}
+
+	// Region is used to populate the Region field on the datastore proto below.
+	region, err := h.getClusterRegion(ctx, project.ID, cluster.ID)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting cluster region")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// assume we are creating for now; will add update support later
+	datastoreProto := &porterv1.ManagedDatastore{
+		CloudProvider:                     porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS,
+		CloudProviderCredentialIdentifier: cluster.CloudProviderCredentialIdentifier,
+		Region:                            region,
+		ConnectedClusters: &porterv1.ConnectedClusters{
+			ConnectedClusterIds: []int64{int64(cluster.ID)},
+		},
+	}
+	marshaledValues, err := json.Marshal(request.Values)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error marshaling values")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// datastoreValues.Config is a superset of the fields used by the RDS and
+	// Elasticache cases below; fields absent from the request decode to their
+	// zero values.
+	var datastoreValues struct {
+		Config struct {
+			Name               string `json:"name"`
+			DatabaseName       string `json:"databaseName"`
+			MasterUsername     string `json:"masterUsername"`
+			MasterUserPassword string `json:"masterUserPassword"`
+			AllocatedStorage   int64  `json:"allocatedStorage"`
+			InstanceClass      string `json:"instanceClass"`
+			EngineVersion      string `json:"engineVersion"`
+		} `json:"config"`
+	}
+	err = json.Unmarshal(marshaledValues, &datastoreValues)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error unmarshaling rds postgres values")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if datastoreValues.Config.Name == "" {
+		err = telemetry.Error(ctx, span, nil, "datastore name is required")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	// NOTE(review): this path takes the name from values.config.name, while
+	// the legacy path uses request.Name — confirm callers keep them in sync.
+	datastoreProto.Name = datastoreValues.Config.Name
+
+	// Map the request's type/engine strings onto the proto kind values.
+	switch request.Type {
+	case "RDS":
+		var engine porterv1.EnumAwsRdsEngine
+		switch request.Engine {
+		case "POSTGRES":
+			engine = porterv1.EnumAwsRdsEngine_ENUM_AWS_RDS_ENGINE_POSTGRESQL
+		case "AURORA-POSTGRES":
+			engine = porterv1.EnumAwsRdsEngine_ENUM_AWS_RDS_ENGINE_AURORA_POSTGRESQL
+		default:
+			err = telemetry.Error(ctx, span, nil, "invalid rds engine")
+			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+		datastoreProto.Kind = porterv1.EnumDatastoreKind_ENUM_DATASTORE_KIND_AWS_RDS
+		datastoreProto.KindValues = &porterv1.ManagedDatastore_AwsRdsKind{
+			AwsRdsKind: &porterv1.AwsRds{
+				DatabaseName:              pointer.String(datastoreValues.Config.DatabaseName),
+				MasterUsername:            pointer.String(datastoreValues.Config.MasterUsername),
+				MasterUserPasswordLiteral: pointer.String(datastoreValues.Config.MasterUserPassword),
+				AllocatedStorageGigabytes: pointer.Int64(datastoreValues.Config.AllocatedStorage),
+				InstanceClass:             pointer.String(datastoreValues.Config.InstanceClass),
+				Engine:                    engine,
+				EngineVersion:             pointer.String(datastoreValues.Config.EngineVersion),
+			},
+		}
+	case "ELASTICACHE":
+		// Elasticache currently only supports the Redis engine here.
+		datastoreProto.Kind = porterv1.EnumDatastoreKind_ENUM_DATASTORE_KIND_AWS_ELASTICACHE
+		datastoreProto.KindValues = &porterv1.ManagedDatastore_AwsElasticacheKind{
+			AwsElasticacheKind: &porterv1.AwsElasticache{
+				Engine:                    porterv1.EnumAwsElasticacheEngine_ENUM_AWS_ELASTICACHE_ENGINE_REDIS,
+				InstanceClass:             pointer.String(datastoreValues.Config.InstanceClass),
+				MasterUserPasswordLiteral: pointer.String(datastoreValues.Config.MasterUserPassword),
+				EngineVersion:             pointer.String(datastoreValues.Config.EngineVersion),
+			},
+		}
+	default:
+		err = telemetry.Error(ctx, span, nil, "invalid datastore type")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	// Submit the datastore as an UPDATE patch to the project's cloud contract.
+	req := connect.NewRequest(&porterv1.PatchCloudContractRequest{
+		ProjectId:    int64(project.ID),
+		Operation:    porterv1.EnumPatchCloudContractOperation_ENUM_PATCH_CLOUD_CONTRACT_OPERATION_UPDATE,
+		ResourceType: porterv1.EnumPatchCloudContractType_ENUM_PATCH_CLOUD_CONTRACT_TYPE_DATASTORE,
+		ResourceValues: &porterv1.PatchCloudContractRequest_Datastore{
+			Datastore: datastoreProto,
+		},
+	})
+	_, err = h.Config().ClusterControlPlaneClient.PatchCloudContract(ctx, req)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error patching cloud contract")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	h.WriteResult(w, r, UpdateDatastoreResponse{})
+}
+
+func (h *UpdateDatastoreHandler) legacy_DatastoreCreateFlow(
+	ctx context.Context,
+	request *UpdateDatastoreRequest,
+	project *models.Project,
+	cluster *models.Cluster,
+	r *http.Request,
+) error {
+	ctx, span := telemetry.NewSpan(ctx, "legacy-datastore-create")
+	defer span.End()
+
+	err := h.InstallDatastore(ctx, InstallDatastoreInput{
+		Name:    request.Name,
+		Type:    request.Type,
+		Engine:  request.Engine,
+		Values:  request.Values,
+		Request: r,
+	})
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error installing datastore")
+	}
+
+	record, err := datastore.CreateOrGetRecord(ctx, datastore.CreateOrGetRecordInput{
+		ProjectID:           project.ID,
+		ClusterID:           cluster.ID,
+		Name:                request.Name,
+		Type:                request.Type,
+		Engine:              request.Engine,
+		DatastoreRepository: h.Repo().Datastore(),
+		ClusterRepository:   h.Repo().Cluster(),
+	})
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error retrieving datastore record")
+	}
+
+	updateReq := connect.NewRequest(&porterv1.UpdateDatastoreRequest{
+		ProjectId:   int64(project.ID),
+		DatastoreId: record.ID.String(),
+	})
+
+	_, err = h.Config().ClusterControlPlaneClient.UpdateDatastore(ctx, updateReq)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error calling ccp update datastore")
+	}
+
+	return nil
+}
+
+// InstallDatastoreInput is the input type for InstallDatastore
+type InstallDatastoreInput struct {
+	Name    string
+	Type    string
+	Engine  string
+	Values  map[string]interface{}
+	Request *http.Request
+}
+
+// InstallDatastore installs a datastore by helm installing a template with the provided values
+func (h *UpdateDatastoreHandler) InstallDatastore(ctx context.Context, inp InstallDatastoreInput) error {
+	ctx, span := telemetry.NewSpan(ctx, "datastore-install")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "name", Value: inp.Name},
+		telemetry.AttributeKV{Key: "type", Value: inp.Type},
+		telemetry.AttributeKV{Key: "engine", Value: inp.Engine},
+	)
+
+	templateName, err := templateNameFromDatastoreTypeAndEngine(inp.Type, inp.Engine)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error getting template name from datastore type and engine")
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "template-name", Value: templateName})
+
+	helmAgent, err := h.GetHelmAgent(ctx, inp.Request, cluster, release.Namespace_ACKSystem)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error creating helm agent")
+	}
+
+	chart, err := release.LoadChart(ctx, h.Config(), &release.LoadAddonChartOpts{
+		ProjectID:    proj.ID,
+		RepoURL:      h.Config().Metadata.DefaultAddonHelmRepoURL,
+		TemplateName: templateName,
+	})
+	if err != nil {
+		return telemetry.Error(ctx, span, nil, "error loading chart")
+	}
+
+	registries, err := h.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error retrieving project registry")
+	}
+
+	vpcConfig, err := h.getVPCConfig(ctx, templateName, proj, cluster)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error retrieving vpc config")
+	}
+
+	if err := h.performAddonPreinstall(ctx, inp.Request, templateName, cluster); err != nil {
+		return telemetry.Error(ctx, span, err, "error performing addon preinstall")
+	}
+
+	values := inp.Values
+	values["vpcConfig"] = vpcConfig
+
+	conf := &helm.InstallChartConfig{
+		Chart:      chart,
+		Name:       inp.Name,
+		Namespace:  release.Namespace_ACKSystem,
+		Values:     values,
+		Cluster:    cluster,
+		Repo:       h.Repo(),
+		Registries: registries,
+	}
+
+	_, err = helmAgent.InstallChart(ctx, conf, h.Config().DOConf, h.Config().ServerConf.DisablePullSecretsInjection)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error installing chart")
+	}
+
+	return nil
+}
+
+func (h *UpdateDatastoreHandler) getVPCConfig(ctx context.Context, templateName string, project *models.Project, cluster *models.Cluster) (map[string]any, error) {
+	ctx, span := telemetry.NewSpan(ctx, "datastore-get-vpc-config")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
+		telemetry.AttributeKV{Key: "template-name", Value: templateName},
+	)
+
+	vpcConfig := map[string]any{}
+	if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
+		return vpcConfig, nil
+	}
+
+	awsTemplates := map[string]string{
+		"elasticache-redis":     "elasticache",
+		"rds-postgresql":        "rds",
+		"rds-postgresql-aurora": "rds",
+	}
+
+	serviceType, ok := awsTemplates[templateName]
+	if !ok {
+		return vpcConfig, nil
+	}
+
+	req := connect.NewRequest(&porterv1.SharedNetworkSettingsRequest{
+		ProjectId:   int64(project.ID),
+		ClusterId:   int64(cluster.ID),
+		ServiceType: serviceType,
+	})
+
+	resp, err := h.Config().ClusterControlPlaneClient.SharedNetworkSettings(ctx, req)
+	if err != nil {
+		return vpcConfig, telemetry.Error(ctx, span, err, "error fetching cluster network settings from ccp")
+	}
+
+	vpcConfig["cidrBlock"] = resp.Msg.CidrRange
+	vpcConfig["subnetIDs"] = resp.Msg.SubnetIds
+	switch resp.Msg.CloudProvider {
+	case *porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS.Enum():
+		vpcConfig["awsRegion"] = resp.Msg.Region
+		vpcConfig["vpcID"] = resp.Msg.GetEksCloudProviderNetwork().Id
+		telemetry.WithAttributes(span,
+			telemetry.AttributeKV{Key: "aws-region", Value: resp.Msg.Region},
+			telemetry.AttributeKV{Key: "vpc-id", Value: resp.Msg.GetEksCloudProviderNetwork().Id},
+		)
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cidr-block", Value: resp.Msg.CidrRange},
+		telemetry.AttributeKV{Key: "subnet-ids", Value: strings.Join(resp.Msg.SubnetIds, ",")},
+	)
+
+	return vpcConfig, nil
+}
+
+func (h *UpdateDatastoreHandler) scaleAckChartDeployment(ctx context.Context, chart string, agent *kubernetes.Agent) error {
+	ctx, span := telemetry.NewSpan(ctx, "scale-ack-chart")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "chart-name", Value: chart},
+	)
+
+	scale, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).GetScale(ctx, chart, metav1.GetOptions{})
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "failed getting deployment")
+	}
+	if scale.Spec.Replicas > 0 {
+		return nil
+	}
+
+	scale.Spec.Replicas = 1
+	if _, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).UpdateScale(ctx, chart, scale, metav1.UpdateOptions{}); err != nil {
+		return telemetry.Error(ctx, span, err, "failed scaling deployment up")
+	}
+
+	return nil
+}
+
+func (h *UpdateDatastoreHandler) performAddonPreinstall(ctx context.Context, r *http.Request, templateName string, cluster *models.Cluster) error {
+	ctx, span := telemetry.NewSpan(ctx, "datastore-addon-preinstall")
+	defer span.End()
+
+	awsTemplates := map[string][]string{
+		"elasticache-redis":     {"ack-chart-ec2", "ack-chart-elasticache"},
+		"rds-postgresql":        {"ack-chart-ec2", "ack-chart-rds"},
+		"rds-postgresql-aurora": {"ack-chart-ec2", "ack-chart-rds"},
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "template-name", Value: templateName},
+		telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
+	)
+
+	if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
+		return nil
+	}
+
+	if _, ok := awsTemplates[templateName]; !ok {
+		return nil
+	}
+
+	agent, err := h.GetAgent(r, cluster, "")
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "failed to get k8s agent")
+	}
+
+	if _, err = agent.GetNamespace(release.Namespace_EnvironmentGroups); err != nil {
+		if _, err := agent.CreateNamespace(release.Namespace_EnvironmentGroups, map[string]string{}); err != nil {
+			return telemetry.Error(ctx, span, err, "failed creating porter-env-group namespace")
+		}
+	}
+
+	for _, chart := range awsTemplates[templateName] {
+		if err := h.scaleAckChartDeployment(ctx, chart, agent); err != nil {
+			return telemetry.Error(ctx, span, err, "failed scaling ack chart deployment")
+		}
+	}
+
+	return nil
+}
+
+func templateNameFromDatastoreTypeAndEngine(databaseType string, databaseEngine string) (string, error) {
+	switch databaseType {
+	case "RDS":
+		switch databaseEngine {
+		case "POSTGRES":
+			return "rds-postgresql", nil
+		case "AURORA-POSTGRES":
+			return "rds-postgresql-aurora", nil
+		default:
+			return "", errors.New("invalid database engine")
+		}
+	case "ELASTICACHE":
+		switch databaseEngine {
+		case "REDIS":
+			return "elasticache-redis", nil
+		default:
+			return "", errors.New("invalid database engine")
+		}
+	default:
+		return "", errors.New("invalid database type")
+	}
+}
+
+// getClusterRegion is a very hacky way of getting the region of the cluster; this will be replaced once we allow the user to specify region from the frontend
+func (h *UpdateDatastoreHandler) getClusterRegion(
+	ctx context.Context,
+	projectId uint,
+	clusterId uint,
+) (string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "get-cluster-region")
+	defer span.End()
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: projectId},
+		telemetry.AttributeKV{Key: "cluster-id", Value: clusterId},
+	)
+
+	var region string
+
+	var clusterContractRecord *models.APIContractRevision
+	clusterContractRevisions, err := h.Config().Repo.APIContractRevisioner().List(ctx, projectId, repository.WithClusterID(clusterId), repository.WithLatest(true))
+	if err != nil {
+		return region, telemetry.Error(ctx, span, err, "error getting latest cluster contract revisions")
+	}
+	if len(clusterContractRevisions) == 0 {
+		return region, telemetry.Error(ctx, span, nil, "no cluster contract revisions found")
+	}
+	clusterContractRecord = clusterContractRevisions[0]
+	var clusterContractProto porterv1.Contract
+	decoded, err := base64.StdEncoding.DecodeString(clusterContractRecord.Base64Contract)
+	if err != nil {
+		return region, telemetry.Error(ctx, span, err, "error decoding cluster contract")
+	}
+	err = helpers.UnmarshalContractObject(decoded, &clusterContractProto)
+	if err != nil {
+		return region, telemetry.Error(ctx, span, err, "error unmarshalling cluster contract")
+	}
+	clusterProto := clusterContractProto.Cluster
+	if clusterProto == nil {
+		return region, telemetry.Error(ctx, span, nil, "cluster contract proto is nil")
+	}
+	eksKindValues := clusterProto.GetEksKind()
+	if eksKindValues == nil {
+		return region, telemetry.Error(ctx, span, nil, "eks kind values are nil")
+	}
+	region = eksKindValues.Region
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "region", Value: region})
+
+	return region, nil
+}

+ 16 - 17
api/server/handlers/deployment_target/create.go

@@ -30,34 +30,21 @@ func NewCreateDeploymentTargetHandler(
 	}
 }
 
-// CreateDeploymentTargetRequest is the request object for the /deployment-targets POST endpoint
-type CreateDeploymentTargetRequest struct {
-	// Deprecated: use name instead
-	Selector string `json:"selector"`
-	Name     string `json:"name,omitempty"`
-	Preview  bool   `json:"preview"`
-}
-
-// CreateDeploymentTargetResponse is the response object for the /deployment-targets POST endpoint
-type CreateDeploymentTargetResponse struct {
-	DeploymentTargetID string `json:"deployment_target_id"`
-}
-
 // ServeHTTP handles POST requests to create a new deployment target
 func (c *CreateDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-deployment-target")
 	defer span.End()
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
+	cluster, clusterOk := ctx.Value(types.ClusterScope).(*models.Cluster)
 	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
 		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
 		return
 	}
 
-	request := &CreateDeploymentTargetRequest{}
+	request := &types.CreateDeploymentTargetRequest{}
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
 		err := telemetry.Error(ctx, span, nil, "error decoding request")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
@@ -69,6 +56,16 @@ func (c *CreateDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
+	clusterId := request.ClusterId
+	if clusterOk {
+		clusterId = cluster.ID
+	}
+	if clusterId == 0 {
+		err := telemetry.Error(ctx, span, nil, "cluster id is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
 	name := request.Name
 	if name == "" {
 		name = request.Selector
@@ -76,7 +73,7 @@ func (c *CreateDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http
 
 	createReq := connect.NewRequest(&porterv1.CreateDeploymentTargetRequest{
 		ProjectId: int64(project.ID),
-		ClusterId: int64(cluster.ID),
+		ClusterId: int64(clusterId),
 		Name:      name,
 		Namespace: name,
 		IsPreview: request.Preview,
@@ -99,7 +96,9 @@ func (c *CreateDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	res := &CreateDeploymentTargetResponse{
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: ccpResp.Msg.DeploymentTargetId})
+
+	res := &types.CreateDeploymentTargetResponse{
 		DeploymentTargetID: ccpResp.Msg.DeploymentTargetId,
 	}
 

+ 51 - 46
api/server/handlers/deployment_target/get.go

@@ -49,6 +49,7 @@ func (c *GetDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	deploymentTarget, deploymentTargetOK := ctx.Value(types.DeploymentTargetScope).(types.DeploymentTarget)
 
 	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
@@ -56,55 +57,59 @@ func (c *GetDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	deploymentTargetID, reqErr := requestutils.GetURLParamString(r, types.URLParamDeploymentTargetID)
-	if reqErr != nil {
-		err := telemetry.Error(ctx, span, reqErr, "error parsing deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if deploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "deployment target id cannot be empty")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-		ProjectID:          int64(project.ID),
-		ClusterID:          int64(cluster.ID),
-		DeploymentTargetID: deploymentTargetID,
-		CCPClient:          c.Config().ClusterControlPlaneClient,
-	})
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	id, err := uuid.Parse(deploymentTarget.ID)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if id == uuid.Nil {
-		err := telemetry.Error(ctx, span, err, "deployment target id is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+	if !deploymentTargetOK {
+		deploymentTargetID, reqErr := requestutils.GetURLParamString(r, types.URLParamDeploymentTargetID)
+		if reqErr != nil {
+			err := telemetry.Error(ctx, span, reqErr, "error parsing deployment target id")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+		if deploymentTargetID == "" {
+			err := telemetry.Error(ctx, span, nil, "deployment target id cannot be empty")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		deploymentTargetDetails, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
+			ProjectID:          int64(project.ID),
+			ClusterID:          int64(cluster.ID),
+			DeploymentTargetID: deploymentTargetID,
+			CCPClient:          c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		id, err := uuid.Parse(deploymentTargetDetails.ID)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		if id == uuid.Nil {
+			err := telemetry.Error(ctx, span, err, "deployment target id is nil")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		deploymentTarget = types.DeploymentTarget{
+			ID:           id,
+			ProjectID:    project.ID,
+			ClusterID:    cluster.ID,
+			Name:         deploymentTargetDetails.Name,
+			Namespace:    deploymentTargetDetails.Namespace,
+			IsPreview:    deploymentTargetDetails.IsPreview,
+			IsDefault:    deploymentTargetDetails.IsDefault,
+			CreatedAtUTC: time.Time{}, // not provided by deployment target details response
+			UpdatedAtUTC: time.Time{}, // not provided by deployment target details response
+		}
 	}
 
 	res := &GetDeploymentTargetResponse{
-		DeploymentTarget: types.DeploymentTarget{
-			ID:        id,
-			ProjectID: project.ID,
-			ClusterID: cluster.ID,
-			Name:      deploymentTarget.Name,
-			Namespace: deploymentTarget.Namespace,
-			IsPreview: deploymentTarget.IsPreview,
-			IsDefault: deploymentTarget.IsDefault,
-			CreatedAt: time.Time{}, // not provided by deployment target details response
-			UpdatedAt: time.Time{}, // not provided by deployment target details response
-		},
+		DeploymentTarget: deploymentTarget,
 	}
 
 	c.WriteResult(w, r, res)

+ 21 - 17
api/server/handlers/deployment_target/list.go

@@ -28,16 +28,6 @@ func NewListDeploymentTargetsHandler(
 	}
 }
 
-// ListDeploymentTargetsRequest is the request object for the /deployment-targets GET endpoint
-type ListDeploymentTargetsRequest struct {
-	Preview bool `json:"preview"`
-}
-
-// ListDeploymentTargetsResponse is the response object for the /deployment-targets GET endpoint
-type ListDeploymentTargetsResponse struct {
-	DeploymentTargets []types.DeploymentTarget `json:"deployment_targets"`
-}
-
 func (c *ListDeploymentTargetsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-deployment-targets")
 	defer span.End()
@@ -45,27 +35,41 @@ func (c *ListDeploymentTargetsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "cluster-provided", Value: cluster != nil})
+
 	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
 		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
 		return
 	}
 
-	request := &ListDeploymentTargetsRequest{}
+	request := &types.ListDeploymentTargetsRequest{}
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
 		err := telemetry.Error(ctx, span, nil, "error decoding request")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
-	deploymentTargets, err := c.Repo().DeploymentTarget().List(project.ID, cluster.ID, request.Preview)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error retrieving deployment targets")
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
-		return
+	var deploymentTargets []*models.DeploymentTarget
+	var err error
+
+	if cluster != nil {
+		deploymentTargets, err = c.Repo().DeploymentTarget().ListForCluster(project.ID, cluster.ID, request.Preview)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error retrieving deployment targets for cluster")
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+			return
+		}
+	} else {
+		deploymentTargets, err = c.Repo().DeploymentTarget().List(project.ID, request.Preview)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error retrieving deployment targets for project")
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+			return
+		}
 	}
 
-	response := ListDeploymentTargetsResponse{
+	response := types.ListDeploymentTargetsResponse{
 		DeploymentTargets: make([]types.DeploymentTarget, 0),
 	}
 

+ 36 - 24
api/server/handlers/environment_groups/create.go

@@ -13,7 +13,6 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/telemetry"
 )
@@ -34,6 +33,14 @@ func NewUpdateEnvironmentGroupHandler(
 	}
 }
 
+// EnvVariableDeletions is the set of keys to delete from the environment group
+type EnvVariableDeletions struct {
+	// Variables is a set of variable keys to delete from the environment group
+	Variables []string `json:"variables"`
+	// Secrets is a set of secret variable keys to delete from the environment group
+	Secrets []string `json:"secrets"`
+}
+
 type UpdateEnvironmentGroupRequest struct {
 	// Name of the env group to create or update
 	Name string `json:"name"`
@@ -49,6 +56,15 @@ type UpdateEnvironmentGroupRequest struct {
 
 	// SecretVariables are sensitive values. All values must be a string due to a kubernetes limitation.
 	SecretVariables map[string]string `json:"secret_variables"`
+
+	// IsEnvOverride is a flag to determine if provided variables should override or merge with existing variables
+	IsEnvOverride bool `json:"is_env_override"`
+
+	// Deletions is a set of keys to delete from the environment group
+	Deletions EnvVariableDeletions `json:"deletions"`
+
+	// SkipAppAutoDeploy is a flag to determine if the app should be auto deployed
+	SkipAppAutoDeploy bool `json:"skip_app_auto_deploy"`
 }
 type UpdateEnvironmentGroupResponse struct {
 	// Name of the env group to create or update
@@ -78,14 +94,6 @@ func (c *UpdateEnvironmentGroupHandler) ServeHTTP(w http.ResponseWriter, r *http
 		telemetry.AttributeKV{Key: "environment-group-type", Value: request.Type},
 	)
 
-	agent, err := c.GetAgent(r, cluster, "")
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "unable to connect to kubernetes cluster")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	var envGroup environment_groups.EnvironmentGroup
 	switch request.Type {
 	case "doppler":
 		_, err := c.Config().ClusterControlPlaneClient.CreateOrUpdateEnvGroup(ctx, connect.NewRequest(&porterv1.CreateOrUpdateEnvGroupRequest{
@@ -101,29 +109,33 @@ func (c *UpdateEnvironmentGroupHandler) ServeHTTP(w http.ResponseWriter, r *http
 			return
 		}
 
-		envGroup = environment_groups.EnvironmentGroup{
-			Name:         request.Name,
-			CreatedAtUTC: time.Now().UTC(),
-		}
 	default:
-		envGroup := environment_groups.EnvironmentGroup{
-			Name:            request.Name,
-			Variables:       request.Variables,
-			SecretVariables: request.SecretVariables,
-			CreatedAtUTC:    time.Now().UTC(),
-		}
-
-		err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup, nil)
+		_, err := c.Config().ClusterControlPlaneClient.CreateOrUpdateEnvGroup(ctx, connect.NewRequest(&porterv1.CreateOrUpdateEnvGroupRequest{
+			ProjectId:            int64(cluster.ProjectID),
+			ClusterId:            int64(cluster.ID),
+			EnvGroupProviderType: porterv1.EnumEnvGroupProviderType_ENUM_ENV_GROUP_PROVIDER_TYPE_PORTER,
+			EnvGroupName:         request.Name,
+			EnvVars: &porterv1.EnvGroupVariables{
+				Normal: request.Variables,
+				Secret: request.SecretVariables,
+			},
+			EnvVariableDeletions: &porterv1.EnvVariableDeletions{
+				Variables: request.Deletions.Variables,
+				Secrets:   request.Deletions.Secrets,
+			},
+			IsEnvOverride:     request.IsEnvOverride,
+			SkipAppAutoDeploy: request.SkipAppAutoDeploy,
+		}))
 		if err != nil {
-			err := telemetry.Error(ctx, span, err, "unable to create or update environment group")
+			err := telemetry.Error(ctx, span, err, "unable to create environment group")
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 			return
 		}
 	}
 
 	envGroupResponse := &UpdateEnvironmentGroupResponse{
-		Name:      envGroup.Name,
-		CreatedAt: envGroup.CreatedAtUTC,
+		Name:      request.Name,
+		CreatedAt: time.Now().UTC(),
 	}
 	c.WriteResult(w, r, envGroupResponse)
 }

+ 64 - 30
api/server/handlers/environment_groups/list.go

@@ -5,6 +5,10 @@ import (
 	"strings"
 	"time"
 
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -68,6 +72,43 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-type", Value: request.Type})
 
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		listEnvGroupsReq := connect.NewRequest(&porterv1.ListEnvGroupsRequest{
+			ProjectId:      int64(project.ID),
+			ClusterId:      int64(cluster.ID),
+			IncludeSecrets: false,
+		})
+
+		listEnvGroupResp, err := c.Config().ClusterControlPlaneClient.ListEnvGroups(ctx, listEnvGroupsReq)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "unable to get linked applications")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		if listEnvGroupResp == nil || listEnvGroupResp.Msg == nil {
+			err = telemetry.Error(ctx, span, err, "ccp resp is nil")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		var envGroups []EnvironmentGroupListItem
+		for _, envGroup := range listEnvGroupResp.Msg.EnvGroups {
+			envGroups = append(envGroups, EnvironmentGroupListItem{
+				Name:               envGroup.Name,
+				Type:               translateProtoTypeToEnvGroupType[envGroup.Type],
+				LatestVersion:      int(envGroup.Version),
+				Variables:          envGroup.Variables,
+				SecretVariables:    envGroup.SecretVariables,
+				CreatedAtUTC:       envGroup.CreatedAt.AsTime(),
+				LinkedApplications: envGroup.LinkedApplications,
+			})
+		}
+
+		// return early for cleaner change
+		c.WriteResult(w, r, ListEnvironmentGroupsResponse{EnvironmentGroups: envGroups})
+		return
+	}
+
 	agent, err := c.GetAgent(r, cluster, "")
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "unable to connect to cluster")
@@ -75,7 +116,7 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		return
 	}
 
-	allEnvGroupVersions, err := environmentgroups.ListEnvironmentGroups(ctx, agent, environmentgroups.WithNamespace(environmentgroups.Namespace_EnvironmentGroups), environmentgroups.WithoutDefaultAppEnvironmentGroups())
+	allEnvGroupVersions, err := environmentgroups.ListEnvironmentGroups(ctx, agent, environmentgroups.WithNamespace(environmentgroups.Namespace_EnvironmentGroups), environmentgroups.WithoutDefaultAppEnvironmentGroups(), environmentgroups.WithoutDefaultAddonEnvironmentGroups())
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "unable to list all environment groups")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
@@ -112,39 +153,26 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		}
 
 		var linkedApplications []string
-		if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
-			applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name, true)
-			if err != nil {
-				err = telemetry.Error(ctx, span, err, "unable to get linked applications")
-				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-				return
-			}
+		applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name, true)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "unable to get linked applications")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
 
-			applicationSetForEnvGroup := make(map[string]struct{})
-			for _, app := range applications {
-				if app.Namespace == "" {
-					continue
-				}
-				if _, ok := applicationSetForEnvGroup[app.Namespace]; !ok {
-					applicationSetForEnvGroup[app.Namespace] = struct{}{}
-				}
-			}
-			for appNamespace := range applicationSetForEnvGroup {
-				porterAppName := strings.TrimPrefix(appNamespace, "porter-stack-")
-				linkedApplications = append(linkedApplications, porterAppName)
+		applicationSetForEnvGroup := make(map[string]struct{})
+		for _, app := range applications {
+			if app.Namespace == "" {
+				continue
 			}
-		} else {
-			applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name, false)
-			if err != nil {
-				err = telemetry.Error(ctx, span, err, "unable to get linked applications")
-				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-				return
-			}
-
-			for _, app := range applications {
-				linkedApplications = append(linkedApplications, app.Name)
+			if _, ok := applicationSetForEnvGroup[app.Namespace]; !ok {
+				applicationSetForEnvGroup[app.Namespace] = struct{}{}
 			}
 		}
+		for appNamespace := range applicationSetForEnvGroup {
+			porterAppName := strings.TrimPrefix(appNamespace, "porter-stack-")
+			linkedApplications = append(linkedApplications, porterAppName)
+		}
 
 		secrets := make(map[string]string)
 		for k, v := range latestVersion.SecretVariables {
@@ -163,3 +191,9 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 
 	c.WriteResult(w, r, ListEnvironmentGroupsResponse{EnvironmentGroups: envGroups})
 }
+
+var translateProtoTypeToEnvGroupType = map[porterv1.EnumEnvGroupProviderType]string{
+	porterv1.EnumEnvGroupProviderType_ENUM_ENV_GROUP_PROVIDER_TYPE_DATASTORE: "datastore",
+	porterv1.EnumEnvGroupProviderType_ENUM_ENV_GROUP_PROVIDER_TYPE_DOPPLER:   "doppler",
+	porterv1.EnumEnvGroupProviderType_ENUM_ENV_GROUP_PROVIDER_TYPE_PORTER:    "porter",
+}

+ 16 - 5
api/server/handlers/gitinstallation/get_accounts.go

@@ -6,6 +6,8 @@ import (
 	"sort"
 	"time"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/google/go-github/v41/github"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -70,9 +72,15 @@ func (c *GetGithubAppAccountsHandler) getOrgList(ctx context.Context,
 }
 
 func (c *GetGithubAppAccountsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-github-app-accounts")
+	defer span.End()
+
+	r = r.Clone(ctx)
+
 	tok, err := GetGithubAppOauthTokenFromRequest(c.Config(), r)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		err = telemetry.Error(ctx, span, err, "error getting github app oauth token from request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
 		return
 	}
 
@@ -82,7 +90,7 @@ func (c *GetGithubAppAccountsHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 	resultChannel := make(chan *github.Organization, 10)
 	errChan := make(chan error)
 
-	ctx, cancel := context.WithTimeout(r.Context(), 3*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
 	defer cancel()
 
 	go c.getOrgList(ctx, client, resultChannel, errChan)
@@ -99,7 +107,8 @@ resultOrErrorReader:
 			}
 		case err, ok := <-errChan:
 			if ok {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+				err = telemetry.Error(ctx, span, err, "error getting org list")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			} else {
 				// nothing in error, must be a close event
@@ -110,7 +119,8 @@ resultOrErrorReader:
 
 	authUser, _, err := client.Users.Get(r.Context(), "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting authenticated user")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -120,7 +130,8 @@ resultOrErrorReader:
 	installation, err := c.Repo().GithubAppInstallation().ReadGithubAppInstallationByAccountID(*authUser.ID)
 
 	if err != nil && err != gorm.ErrRecordNotFound {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error reading github app installation")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 23 - 5
api/server/handlers/gitinstallation/oauth_callback.go

@@ -5,6 +5,8 @@ import (
 	"net/http"
 	"net/url"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -31,21 +33,34 @@ func NewGithubAppOAuthCallbackHandler(
 }
 
 func (c *GithubAppOAuthCallbackHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-github-app-oauth-callback")
+	defer span.End()
+
+	r = r.Clone(ctx)
+
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "user-id", Value: user.ID})
+
 	session, err := c.Config().Store.Get(r, c.Config().ServerConf.CookieName)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting session")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	token, err := c.Config().GithubAppConf.Exchange(oauth2.NoContext, r.URL.Query().Get("code"))
-
 	if err != nil || !token.Valid() {
+		telemetry.WithAttributes(span,
+			telemetry.AttributeKV{Key: "token-valid", Value: token.Valid()},
+			telemetry.AttributeKV{Key: "token-error", Value: err.Error()},
+		)
 		if redirectStr, ok := session.Values["redirect_uri"].(string); ok && redirectStr != "" {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "redirect-uri", Value: redirectStr})
 			// attempt to parse the redirect uri, if it fails just redirect to dashboard
 			redirectURI, err := url.Parse(redirectStr)
 			if err != nil {
+				telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "redirect-uri-parse-error", Value: err.Error()})
 				http.Redirect(w, r, "/dashboard", 302)
 			}
 
@@ -69,16 +84,17 @@ func (c *GithubAppOAuthCallbackHandler) ServeHTTP(w http.ResponseWriter, r *http
 	oauthInt, err = c.Repo().GithubAppOAuthIntegration().CreateGithubAppOAuthIntegration(oauthInt)
 
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error creating github app oauth integration")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	user.GithubAppIntegrationID = oauthInt.ID
 
 	user, err = c.Repo().User().UpdateUser(user)
-
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error updating user")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -89,9 +105,11 @@ func (c *GithubAppOAuthCallbackHandler) ServeHTTP(w http.ResponseWriter, r *http
 	))
 
 	if redirectStr, ok := session.Values["redirect_uri"].(string); ok && redirectStr != "" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "redirect-uri", Value: redirectStr})
 		// attempt to parse the redirect uri, if it fails just redirect to dashboard
 		redirectURI, err := url.Parse(redirectStr)
 		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "redirect-uri-parse-error", Value: err.Error()})
 			http.Redirect(w, r, "/dashboard", 302)
 		}
 

+ 18 - 5
api/server/handlers/gitinstallation/webhook.go

@@ -3,6 +3,8 @@ package gitinstallation
 import (
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/google/go-github/v41/github"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -30,15 +32,22 @@ func NewGithubAppWebhookHandler(
 }
 
 func (c *GithubAppWebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-new-github-app-webhook")
+	defer span.End()
+
+	r = r.Clone(ctx)
+
 	payload, err := github.ValidatePayload(r, []byte(c.Config().GithubAppConf.WebhookSecret))
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error validating payload")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	event, err := github.ParseWebHook(github.WebHookType(r), payload)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error parsing webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -54,19 +63,23 @@ func (c *GithubAppWebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 					InstallationID: *e.Installation.ID,
 				})
 				if err != nil {
-					c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+					err = telemetry.Error(ctx, span, err, "error creating github app installation")
+					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+					return
 				}
 
 				return
 			} else if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+				err = telemetry.Error(ctx, span, err, "error reading github app installation")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			}
 		}
 		if *e.Action == "deleted" {
 			err := c.Repo().GithubAppInstallation().DeleteGithubAppInstallationByAccountID(*e.Installation.Account.ID)
 			if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+				err = telemetry.Error(ctx, span, err, "error deleting github app installation")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 				return
 			}
 		}

+ 12 - 6
api/server/handlers/handler.go

@@ -1,9 +1,12 @@
 package handlers
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
@@ -18,6 +21,7 @@ type PorterHandler interface {
 	HandleAPIError(w http.ResponseWriter, r *http.Request, err apierrors.RequestError)
 	HandleAPIErrorNoWrite(w http.ResponseWriter, r *http.Request, err apierrors.RequestError)
 	PopulateOAuthSession(
+		ctx context.Context,
 		w http.ResponseWriter,
 		r *http.Request,
 		state string,
@@ -89,6 +93,7 @@ func IgnoreAPIError(w http.ResponseWriter, r *http.Request, err apierrors.Reques
 }
 
 func (d *DefaultPorterHandler) PopulateOAuthSession(
+	ctx context.Context,
 	w http.ResponseWriter,
 	r *http.Request,
 	state string,
@@ -96,9 +101,12 @@ func (d *DefaultPorterHandler) PopulateOAuthSession(
 	integrationClient types.OAuthIntegrationClient,
 	integrationID uint,
 ) error {
+	ctx, span := telemetry.NewSpan(ctx, "handler-populate-oauth-session")
+	defer span.End()
+
 	session, err := d.Config().Store.Get(r, d.Config().ServerConf.CookieName)
 	if err != nil {
-		return err
+		return telemetry.Error(ctx, span, err, "could not get session")
 	}
 
 	// need state parameter to validate when redirected
@@ -111,9 +119,8 @@ func (d *DefaultPorterHandler) PopulateOAuthSession(
 
 	if isProject {
 		project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-
 		if project == nil {
-			return fmt.Errorf("could not read project")
+			return telemetry.Error(ctx, span, nil, "could not read project")
 		}
 
 		session.Values["project_id"] = project.ID
@@ -121,9 +128,8 @@ func (d *DefaultPorterHandler) PopulateOAuthSession(
 
 	if isUser {
 		user, _ := r.Context().Value(types.UserScope).(*models.User)
-
 		if user == nil {
-			return fmt.Errorf("could not read user")
+			return telemetry.Error(ctx, span, nil, "could not read user")
 		}
 
 		session.Values["user_id"] = user.ID
@@ -135,7 +141,7 @@ func (d *DefaultPorterHandler) PopulateOAuthSession(
 	}
 
 	if err := session.Save(r, w); err != nil {
-		return err
+		return telemetry.Error(ctx, span, err, "could not save session")
 	}
 
 	return nil

+ 5 - 1
api/server/handlers/namespace/stream_pod_logs_loki.go

@@ -13,6 +13,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/websocket"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type StreamPodLogsLokiHandler struct {
@@ -32,6 +33,9 @@ func NewStreamPodLogsLokiHandler(
 }
 
 func (c *StreamPodLogsLokiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-stream-pod-logs")
+	defer span.End()
+
 	request := &types.GetLogRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
@@ -59,7 +63,7 @@ func (c *StreamPodLogsLokiHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	err = agent.StreamPorterAgentLokiLog([]string{
+	err = agent.StreamPorterAgentLokiLog(ctx, []string{
 		fmt.Sprintf("pod=%s", request.PodSelector),
 		fmt.Sprintf("namespace=%s", request.Namespace),
 	}, string(startTime), request.SearchParam, 0, safeRW)

+ 152 - 0
api/server/handlers/notifications/get_notification_config.go

@@ -0,0 +1,152 @@
+package notifications
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"connectrpc.com/connect"
+
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
+// GetNotificationConfigHandler is the handler for the /notifications/config/{notification_config_id} endpoint.
+// NOTE(review): this handler only reads a config; original comment said POST — confirm the route's method in the router.
+type GetNotificationConfigHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewNotificationConfigHandler returns a new GetNotificationConfigHandler
+// wired with the shared server config, request decoder/validator, and result writer.
+func NewNotificationConfigHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetNotificationConfigHandler {
+	return &GetNotificationConfigHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// GetNotificationConfigRequest is the request object for the /notifications/config/{notification_config_id} endpoint.
+// It carries no fields; the config id comes from the URL path.
+type GetNotificationConfigRequest struct{}
+
+// GetNotificationConfigResponse is the response object for the /notifications/config/{notification_config_id} endpoint
+type GetNotificationConfigResponse struct {
+	// Config is the notification config translated from the cluster control plane proto.
+	Config Config `json:"config"`
+}
+
+// ServeHTTP fetches a notification config by id from the cluster control
+// plane and returns it translated into the API Config shape.
+// (Original comment said "updates"; this handler only reads.)
+func (n *GetNotificationConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-notification-config")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	notificationConfigID, reqErr := requestutils.GetURLParamUint(r, types.URLParamNotificationConfigID)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, nil, "error parsing notification config id from url")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "notification-config-id", Value: notificationConfigID},
+	)
+
+	request := &GetNotificationConfigRequest{}
+	if ok := n.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	configReq := connect.NewRequest(&porterv1.NotificationConfigRequest{
+		ProjectId:            int64(project.ID),
+		NotificationConfigId: int64(notificationConfigID),
+	})
+	ccpResp, err := n.Config().ClusterControlPlaneClient.NotificationConfig(ctx, configReq)
+	if err != nil {
+		// was: "error calling ccp apply porter app" — copy-paste from the apply handler
+		err := telemetry.Error(ctx, span, err, "error calling ccp notification config")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "ccp response or msg is nil")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	config, err := configFromProto(ccpResp.Msg.Config)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting config from proto")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	response := &GetNotificationConfigResponse{
+		Config: config,
+	}
+
+	n.WriteResult(w, r, response)
+}
+
+// configFromProto translates a proto NotificationConfig into the API Config
+// shape. Any status or type missing from the proto defaults to enabled.
+func configFromProto(proto *porterv1.NotificationConfig) (Config, error) {
+	// initializing the map to true for all statuses and types
+	// ensures that the default behavior is to notify for missing statuses and types
+	statuses := trueMap(allStatuses)
+	types := trueMap(allTypes)
+
+	for _, protoStatus := range proto.EnabledStatuses {
+		if status, ok := transformProtoToStatusString[protoStatus.Status]; ok {
+			statuses[status] = protoStatus.Enabled
+		}
+	}
+	for _, protoType := range proto.EnabledTypes {
+		if t, ok := transformProtoToTypeString[protoType.Type]; ok {
+			types[t] = protoType.Enabled
+		}
+	}
+
+	// round-trip through JSON to convert the string-keyed maps into the
+	// strongly-typed StatusesEnabled / TypesEnabled structs via their json tags
+	statusesStruct := StatusesEnabled{}
+	by, err := json.Marshal(statuses)
+	if err != nil {
+		return Config{}, fmt.Errorf("error marshalling statuses: %s", err)
+	}
+	err = json.Unmarshal(by, &statusesStruct)
+	if err != nil {
+		return Config{}, fmt.Errorf("error unmarshalling statuses: %s", err)
+	}
+
+	typesStruct := TypesEnabled{}
+	by, err = json.Marshal(types)
+	if err != nil {
+		return Config{}, fmt.Errorf("error marshalling types: %s", err)
+	}
+	err = json.Unmarshal(by, &typesStruct)
+	if err != nil {
+		return Config{}, fmt.Errorf("error unmarshalling types: %s", err)
+	}
+
+	// only the first configured slack mention is surfaced in the API shape
+	var mention string
+	if proto.SlackConfig != nil && len(proto.SlackConfig.Mentions) > 0 {
+		mention = proto.SlackConfig.Mentions[0]
+	}
+
+	config := Config{
+		Statuses: statusesStruct,
+		Mention:  mention,
+		Types:    typesStruct,
+	}
+
+	return config, nil
+}

+ 100 - 0
api/server/handlers/notifications/notification.go

@@ -0,0 +1,100 @@
+package notifications
+
+import (
+	"net/http"
+
+	"github.com/google/uuid"
+
+	"github.com/porter-dev/porter/internal/models"
+
+	"github.com/porter-dev/porter/internal/porter_app/notifications"
+
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
+// GetNotificationHandler is the handler for the /notifications/{notification_id} endpoint.
+// NOTE(review): original comment said POST /notifications/{notification_config_id}; this handler
+// reads a single notification by its id param — confirm method and param name in the router.
+type GetNotificationHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewNotificationHandler returns a new GetNotificationHandler
+// wired with the shared server config, request decoder/validator, and result writer.
+func NewNotificationHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetNotificationHandler {
+	return &GetNotificationHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// GetNotificationRequest is the request object for the /notifications/{notification_id} endpoint.
+// It carries no fields; the notification id comes from the URL path.
+type GetNotificationRequest struct{}
+
+// NotificationResponse is the response object for the notifications endpoint
+type NotificationResponse struct {
+	// Notification is the single notification matching the requested id
+	Notification notifications.Notification `json:"notification"`
+}
+
+// ServeHTTP returns a notification by id. Project scope is enforced
+// indirectly by checking the notification's deployment target belongs to the
+// requesting project.
+func (n *GetNotificationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-notification")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	notificationID, reqErr := requestutils.GetURLParamString(r, types.URLParamNotificationID)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, nil, "error parsing notification id from url")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "notification-id", Value: notificationID},
+	)
+
+	request := &GetNotificationRequest{}
+	if ok := n.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	event, err := n.Repo().PorterAppEvent().NotificationByID(ctx, notificationID)
+	if err != nil {
+		// record the underlying error on the span (was dropped by passing nil)
+		e := telemetry.Error(ctx, span, err, "error getting notification by id")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	// check project scope indirectly with deployment target
+	deploymentTarget, err := n.Repo().DeploymentTarget().DeploymentTarget(project.ID, event.DeploymentTargetID.String())
+	if err != nil || deploymentTarget == nil || deploymentTarget.ID == uuid.Nil {
+		e := telemetry.Error(ctx, span, err, "notification is not in project scope")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	notification, err := notifications.NotificationFromPorterAppEvent(event)
+	if err != nil {
+		// record the underlying error on the span (was dropped by passing nil)
+		e := telemetry.Error(ctx, span, err, "error converting app event to notification")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusInternalServerError))
+		return
+	}
+
+	resp := &NotificationResponse{
+		Notification: *notification,
+	}
+
+	n.WriteResult(w, r, resp)
+}

+ 233 - 0
api/server/handlers/notifications/update_notification_config.go

@@ -0,0 +1,233 @@
+package notifications
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"connectrpc.com/connect"
+
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
+// UpdateNotificationConfigHandler is the handler for the POST /notifications/config/{notification_config_id} endpoint
+type UpdateNotificationConfigHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewUpdateNotificationConfigHandler returns a new UpdateNotificationConfigHandler
+// wired with the shared server config, request decoder/validator, and result writer.
+func NewUpdateNotificationConfigHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *UpdateNotificationConfigHandler {
+	return &UpdateNotificationConfigHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// UpdateNotificationConfigRequest is the request object for the /notifications/config/{notification_config_id} endpoint
+type UpdateNotificationConfigRequest struct {
+	// Config holds the per-status/per-type enablement flags and slack mention to apply
+	Config             Config `json:"config"`
+	// SlackIntegrationID is the id of the slack integration the config is attached to
+	SlackIntegrationID uint   `json:"slack_integration_id"`
+}
+
+// Config is the config object for the /notifications endpoint
+type Config struct {
+	// Mention is the single slack mention surfaced by this config
+	Mention  string          `json:"mention"`
+	Statuses StatusesEnabled `json:"statuses"`
+	Types    TypesEnabled    `json:"types"`
+}
+
+// StatusesEnabled is a struct that signifies whether a status is enabled or not.
+// Field json tags must match the keys of transformStatusStringToProto.
+type StatusesEnabled struct {
+	Successful  bool `json:"successful"`
+	Failed      bool `json:"failed"`
+	Progressing bool `json:"progressing"`
+}
+
+// TypesEnabled is a struct that signifies whether a type is enabled or not.
+// Field json tags must match the keys of transformTypeStringToProto.
+type TypesEnabled struct {
+	Deploy    bool `json:"deploy"`
+	Build     bool `json:"build"`
+	PreDeploy bool `json:"predeploy"`
+	Alert     bool `json:"alert"`
+}
+
+// UpdateNotificationConfigResponse is the response object for the /notifications/config/{notification_config_id} endpoint
+type UpdateNotificationConfigResponse struct {
+	// ID is the id of the created or updated notification config
+	ID uint `json:"id"`
+}
+
+// ServeHTTP updates a notification config via the cluster control plane and
+// returns the id of the updated config.
+func (n *UpdateNotificationConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-notification-config-update")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+	)
+
+	notificationConfigID, reqErr := requestutils.GetURLParamUint(r, types.URLParamNotificationConfigID)
+	if reqErr != nil {
+		// was: "error parsing event id from url" — copy-paste from an event handler
+		e := telemetry.Error(ctx, span, nil, "error parsing notification config id from url")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "notification-config-id", Value: notificationConfigID},
+	)
+
+	request := &UpdateNotificationConfigRequest{}
+	if ok := n.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	configProto, err := configToProto(request.Config)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error converting config to proto")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	updateReq := connect.NewRequest(&porterv1.UpdateNotificationConfigRequest{
+		ProjectId:            int64(project.ID),
+		NotificationConfigId: int64(notificationConfigID),
+		Config:               configProto,
+		SlackIntegrationId:   int64(request.SlackIntegrationID),
+	})
+	updateResp, err := n.Config().ClusterControlPlaneClient.UpdateNotificationConfig(ctx, updateReq)
+	if err != nil {
+		// was: "error calling ccp apply porter app" — copy-paste from the apply handler
+		err := telemetry.Error(ctx, span, err, "error calling ccp update notification config")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if updateResp == nil || updateResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "ccp response or msg is nil")
+		n.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	response := &UpdateNotificationConfigResponse{
+		ID: uint(updateResp.Msg.NotificationConfigId),
+	}
+
+	n.WriteResult(w, r, response)
+}
+
+// configToProto translates the API Config shape into a proto
+// NotificationConfig for the cluster control plane.
+func configToProto(config Config) (*porterv1.NotificationConfig, error) {
+	// round-trip through JSON to turn the typed Statuses struct into a
+	// string-keyed map via its json tags
+	statusMap := map[string]bool{}
+
+	by, err := json.Marshal(config.Statuses)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling statuses: %s", err)
+	}
+
+	err = json.Unmarshal(by, &statusMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling statuses: %s", err)
+	}
+
+	// keys without a proto mapping are silently dropped
+	var statuses []*porterv1.NotificationStatusEnabled
+	for status, enabled := range statusMap {
+		if protoStatus, ok := transformStatusStringToProto[status]; ok {
+			statuses = append(statuses, &porterv1.NotificationStatusEnabled{
+				Status:  protoStatus,
+				Enabled: enabled,
+			})
+		}
+	}
+
+	// same JSON round-trip for the typed Types struct
+	typeMap := map[string]bool{}
+
+	by, err = json.Marshal(config.Types)
+	if err != nil {
+		return nil, fmt.Errorf("error marshalling types: %s", err)
+	}
+
+	err = json.Unmarshal(by, &typeMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling types: %s", err)
+	}
+
+	var types []*porterv1.NotificationTypeEnabled
+	for t, enabled := range typeMap {
+		if protoType, ok := transformTypeStringToProto[t]; ok {
+			types = append(types, &porterv1.NotificationTypeEnabled{
+				Type:    protoType,
+				Enabled: enabled,
+			})
+		}
+	}
+
+	// the single Mention string becomes a one-element slack mentions list
+	protoConfig := &porterv1.NotificationConfig{
+		EnabledStatuses: statuses,
+		EnabledTypes:    types,
+		SlackConfig:     &porterv1.SlackConfig{Mentions: []string{config.Mention}},
+	}
+
+	return protoConfig, nil
+}
+
+// transformStatusStringToProto maps API status strings (the json tags of
+// StatusesEnabled) to proto notification statuses.
+var transformStatusStringToProto = map[string]porterv1.EnumNotificationStatus{
+	"successful":  porterv1.EnumNotificationStatus_ENUM_NOTIFICATION_STATUS_SUCCESSFUL,
+	"failed":      porterv1.EnumNotificationStatus_ENUM_NOTIFICATION_STATUS_FAILED,
+	"progressing": porterv1.EnumNotificationStatus_ENUM_NOTIFICATION_STATUS_PROGRESSING,
+}
+
+// transformTypeStringToProto maps API event-type strings (the json tags of
+// TypesEnabled) to proto notification event types.
+var transformTypeStringToProto = map[string]porterv1.EnumNotificationEventType{
+	"deploy":    porterv1.EnumNotificationEventType_ENUM_NOTIFICATION_EVENT_TYPE_DEPLOY,
+	"build":     porterv1.EnumNotificationEventType_ENUM_NOTIFICATION_EVENT_TYPE_BUILD,
+	"predeploy": porterv1.EnumNotificationEventType_ENUM_NOTIFICATION_EVENT_TYPE_PREDEPLOY,
+	"alert":     porterv1.EnumNotificationEventType_ENUM_NOTIFICATION_EVENT_TYPE_ALERT,
+}
+
+// reverseMap returns a map with the keys and values swapped.
+// If multiple keys share a value, which key survives is unspecified
+// (map iteration order is random).
+func reverseMap[K comparable, V comparable](m map[K]V) map[V]K {
+	result := map[V]K{}
+	for k, v := range m {
+		result[v] = k
+	}
+	return result
+}
+
+// mapKeys returns the keys of a map as a slice, in unspecified order.
+func mapKeys[K comparable, V any](m map[K]V) []K {
+	var keys []K
+	for k := range m {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// trueMap returns a map with every key in keys set to true.
+func trueMap[K comparable](keys []K) map[K]bool {
+	m := map[K]bool{}
+	for _, k := range keys {
+		m[k] = true
+	}
+	return m
+}
+
+// Derived lookup tables: proto->string reverses of the maps above, plus the
+// full key sets used by configFromProto to default every status/type to enabled.
+var (
+	transformProtoToStatusString = reverseMap(transformStatusStringToProto)
+	transformProtoToTypeString   = reverseMap(transformTypeStringToProto)
+	allStatuses                  = mapKeys(transformStatusStringToProto)
+	allTypes                     = mapKeys(transformTypeStringToProto)
+)

+ 19 - 6
api/server/handlers/oauth_callback/slack.go

@@ -6,6 +6,8 @@ import (
 	"net/http"
 	"net/url"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -28,31 +30,41 @@ func NewOAuthCallbackSlackHandler(
 }
 
 func (p *OAuthCallbackSlackHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-oauth-callback-slack")
+	defer span.End()
+
+	r = r.Clone(ctx)
+
 	session, err := p.Config().Store.Get(r, p.Config().ServerConf.CookieName)
 	if err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "session could not be retrieved")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	if _, ok := session.Values["state"]; !ok {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, nil, "state not found in session")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	if r.URL.Query().Get("state") != session.Values["state"] {
-		p.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		err = telemetry.Error(ctx, span, nil, "state does not match")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	token, err := p.Config().SlackConf.Exchange(context.TODO(), r.URL.Query().Get("code"))
 	if err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "exchange failed")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	slackInt, err := slack.TokenToSlackIntegration(token)
 	if err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "token to slack integration failed")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -63,7 +75,8 @@ func (p *OAuthCallbackSlackHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 	slackInt.ProjectID = projID
 
 	if _, err = p.Repo().SlackIntegration().CreateSlackIntegration(slackInt); err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "create slack integration failed")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 10 - 4
api/server/handlers/porter_app/app_env_variables.go

@@ -39,7 +39,8 @@ type EnvVariables struct {
 
 // AppEnvVariablesRequest is the request object for the /apps/{porter_app_name}/env-variables endpoint
 type AppEnvVariablesRequest struct {
-	DeploymentTargetID string `schema:"deployment_target_id"`
+	DeploymentTargetID   string `schema:"deployment_target_id"`
+	DeploymentTargetName string `schema:"deployment_target_name"`
 }
 
 // AppEnvVariablesResponse is the response object for the /apps/{porter_app_name}/env-variables endpoint
@@ -73,12 +74,17 @@ func (c *AppEnvVariablesHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 	// optional deployment target id - if not provided, use the cluster's default
 	deploymentTargetID := request.DeploymentTargetID
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID})
+	deploymentTargetName := request.DeploymentTargetName
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+	)
 
 	var deploymentTargetIdentifer *porterv1.DeploymentTargetIdentifier
-	if deploymentTargetID != "" {
+	if deploymentTargetID != "" || deploymentTargetName != "" {
 		deploymentTargetIdentifer = &porterv1.DeploymentTargetIdentifier{
-			Id: deploymentTargetID,
+			Id:   deploymentTargetID,
+			Name: deploymentTargetName,
 		}
 	}
 

+ 104 - 0
api/server/handlers/porter_app/app_instances.go

@@ -0,0 +1,104 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// AppInstancesHandler is the handler for the /apps/instances endpoint
+type AppInstancesHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewAppInstancesHandler handles GET requests to the /apps/instances endpoint
+func NewAppInstancesHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppInstancesHandler {
+	return &AppInstancesHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// AppInstancesRequest is the request object for the /apps/instances endpoint
+type AppInstancesRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+}
+
+// AppInstancesResponse is the response object for the /apps/instances endpoint
+type AppInstancesResponse struct {
+	AppInstances []porter_app.AppInstance `json:"app_instances"`
+}
+
+// ServeHTTP translates the request into a ListAppInstancesRequest to the cluster control plane
+func (c *AppInstancesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-app-instances")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	request := &AppInstancesRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	var deploymentTargetIdentifier *porterv1.DeploymentTargetIdentifier
+	if request.DeploymentTargetID != "" {
+		deploymentTargetIdentifier = &porterv1.DeploymentTargetIdentifier{
+			Id: request.DeploymentTargetID,
+		}
+	}
+
+	listAppInstancesReq := connect.NewRequest(&porterv1.ListAppInstancesRequest{
+		ProjectId:                  int64(project.ID),
+		DeploymentTargetIdentifier: deploymentTargetIdentifier,
+	})
+
+	latestAppInstancesResp, err := c.Config().ClusterControlPlaneClient.ListAppInstances(ctx, listAppInstancesReq)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting latest app revisions")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if latestAppInstancesResp == nil || latestAppInstancesResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "latest app revisions response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	var appInstances []porter_app.AppInstance
+
+	for _, instance := range latestAppInstancesResp.Msg.AppInstances {
+		appInstances = append(appInstances, porter_app.AppInstance{
+			Id: instance.Id,
+			DeploymentTarget: porter_app.DeploymentTarget{
+				ID:   instance.DeploymentTargetId,
+				Name: "",
+			},
+			Name: instance.Name,
+		})
+	}
+
+	c.WriteResult(w, r, AppInstancesResponse{AppInstances: appInstances})
+}

+ 1 - 27
api/server/handlers/porter_app/app_notifications.go

@@ -85,36 +85,10 @@ func (c *AppNotificationsHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
 
-	porterApps, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, appName)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting porter apps")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if len(porterApps) == 0 {
-		err := telemetry.Error(ctx, span, err, "no porter apps returned")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if len(porterApps) > 1 {
-		err := telemetry.Error(ctx, span, err, "multiple porter apps returned; unable to determine which one to use")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	appId := porterApps[0].ID
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-id", Value: appId})
-
-	if appId == 0 {
-		err := telemetry.Error(ctx, span, err, "porter app id is missing")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
 	listAppRevisionsReq := connect.NewRequest(&porterv1.ListAppRevisionsRequest{
 		ProjectId:                  int64(project.ID),
-		AppId:                      int64(appId),
 		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{Id: request.DeploymentTargetID},
+		AppName:                    appName,
 	})
 
 	listAppRevisionsResp, err := c.Config().ClusterControlPlaneClient.ListAppRevisions(ctx, listAppRevisionsReq)

+ 0 - 117
api/server/handlers/porter_app/app_run.go

@@ -1,117 +0,0 @@
-package porter_app
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-
-	"connectrpc.com/connect"
-
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-
-	"github.com/porter-dev/porter/internal/telemetry"
-
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-// AppRunHandler handles requests to the /apps/{porter_app_name}/run endpoint
-type AppRunHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-// NewAppRunHandler returns a new AppRunHandler
-func NewAppRunHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *AppRunHandler {
-	return &AppRunHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-// AppRunRequest is the request object for the /apps/{porter_app_name}/run endpoint
-type AppRunRequest struct {
-	ServiceName        string `json:"service_name"`
-	DeploymentTargetID string `json:"deployment_target_id"`
-}
-
-// AppRunResponse is the response object for the /apps/{porter_app_name}/run endpoint
-type AppRunResponse struct {
-	JobRunID string `json:"job_run_id"`
-}
-
-// ServeHTTP runs a one-off command in the same environment as the provided service, app and deployment target
-func (c *AppRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-run")
-	defer span.End()
-
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-
-	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
-	if reqErr != nil {
-		e := telemetry.Error(ctx, span, reqErr, "error parsing app name from url")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
-		return
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
-
-	request := &AppRunRequest{}
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		err := telemetry.Error(ctx, span, nil, "error decoding request")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	if request.ServiceName == "" {
-		err := telemetry.Error(ctx, span, nil, "service name is required")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
-
-	if request.DeploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "deployment target id is required")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
-
-	manualServiceRunReq := connect.NewRequest(&porterv1.ManualServiceRunRequest{
-		ProjectId:   int64(project.ID),
-		AppName:     appName,
-		ServiceName: request.ServiceName,
-		Command:     nil, // use default command for job
-		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
-			Id: request.DeploymentTargetID,
-		},
-	})
-
-	serviceResp, err := c.Config().ClusterControlPlaneClient.ManualServiceRun(ctx, manualServiceRunReq)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting app helm values from cluster control plane client")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if serviceResp == nil || serviceResp.Msg == nil {
-		err := telemetry.Error(ctx, span, err, "app helm values resp is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	response := AppRunResponse{
-		JobRunID: serviceResp.Msg.JobRunId,
-	}
-
-	c.WriteResult(w, r, response)
-}

+ 0 - 268
api/server/handlers/porter_app/apply.go

@@ -1,268 +0,0 @@
-package porter_app
-
-import (
-	"context"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"net/http"
-
-	"connectrpc.com/connect"
-
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-
-	"github.com/porter-dev/api-contracts/generated/go/helpers"
-
-	"github.com/porter-dev/porter/internal/deployment_target"
-	"github.com/porter-dev/porter/internal/porter_app"
-	"github.com/porter-dev/porter/internal/telemetry"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-// ApplyPorterAppHandler is the handler for the /apps/parse endpoint
-type ApplyPorterAppHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-// NewApplyPorterAppHandler handles POST requests to the endpoint /apps/apply
-func NewApplyPorterAppHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *ApplyPorterAppHandler {
-	return &ApplyPorterAppHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-// ApplyPorterAppRequest is the request object for the /apps/apply endpoint
-type ApplyPorterAppRequest struct {
-	Base64AppProto     string            `json:"b64_app_proto"`
-	DeploymentTargetId string            `json:"deployment_target_id"`
-	AppRevisionID      string            `json:"app_revision_id"`
-	ForceBuild         bool              `json:"force_build"`
-	Variables          map[string]string `json:"variables"`
-	Secrets            map[string]string `json:"secrets"`
-	// HardEnvUpdate is used to remove any variables that are not specified in the request.  If false, the request will only update the variables specified in the request,
-	// and leave all other variables untouched.
-	HardEnvUpdate bool `json:"hard_env_update"`
-}
-
-// ApplyPorterAppResponse is the response object for the /apps/apply endpoint
-type ApplyPorterAppResponse struct {
-	AppRevisionId string                 `json:"app_revision_id"`
-	CLIAction     porterv1.EnumCLIAction `json:"cli_action"`
-}
-
-// ServeHTTP translates the request into a ApplyPorterApp request, forwards to the cluster control plane, and returns the response
-func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-apply-porter-app")
-	defer span.End()
-
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
-		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
-	)
-
-	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
-		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
-		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
-		return
-	}
-
-	request := &ApplyPorterAppRequest{}
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		err := telemetry.Error(ctx, span, nil, "error decoding request")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	var appRevisionID string
-	var appProto *porterv1.PorterApp
-	var deploymentTargetID string
-
-	if request.AppRevisionID != "" {
-		appRevisionID = request.AppRevisionID
-		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: request.AppRevisionID})
-	} else {
-		if request.Base64AppProto == "" {
-			err := telemetry.Error(ctx, span, nil, "b64 yaml is empty")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error decoding base yaml")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		appProto = &porterv1.PorterApp{}
-		err = helpers.UnmarshalContractObject(decoded, appProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		if request.DeploymentTargetId == "" {
-			err := telemetry.Error(ctx, span, nil, "deployment target id is empty")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-		deploymentTargetID = request.DeploymentTargetId
-
-		telemetry.WithAttributes(span,
-			telemetry.AttributeKV{Key: "app-name", Value: appProto.Name},
-			telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetId},
-		)
-
-		deploymentTargetDetails, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-			ProjectID:          int64(project.ID),
-			ClusterID:          int64(cluster.ID),
-			DeploymentTargetID: deploymentTargetID,
-			CCPClient:          c.Config().ClusterControlPlaneClient,
-		})
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error getting deployment target details")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-			return
-		}
-
-		agent, err := c.GetAgent(r, cluster, "")
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error getting kubernetes agent")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-			return
-		}
-
-		subdomainCreateInput := porter_app.CreatePorterSubdomainInput{
-			AppName:             appProto.Name,
-			RootDomain:          c.Config().ServerConf.AppRootDomain,
-			DNSClient:           c.Config().DNSClient,
-			DNSRecordRepository: c.Repo().DNSRecord(),
-			KubernetesAgent:     agent,
-		}
-
-		appProto, err = addPorterSubdomainsIfNecessary(ctx, appProto, deploymentTargetDetails, subdomainCreateInput)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error adding porter subdomains")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-	}
-
-	applyReq := connect.NewRequest(&porterv1.ApplyPorterAppRequest{
-		ProjectId:           int64(project.ID),
-		DeploymentTargetId:  deploymentTargetID,
-		App:                 appProto,
-		PorterAppRevisionId: appRevisionID,
-		ForceBuild:          request.ForceBuild,
-		AppEnv: &porterv1.EnvGroupVariables{
-			Normal: request.Variables,
-			Secret: request.Secrets,
-		},
-		IsHardEnvUpdate: request.HardEnvUpdate,
-	})
-	ccpResp, err := c.Config().ClusterControlPlaneClient.ApplyPorterApp(ctx, applyReq)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error calling ccp apply porter app")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if ccpResp == nil {
-		err := telemetry.Error(ctx, span, err, "ccp resp is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-	if ccpResp.Msg == nil {
-		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if ccpResp.Msg.PorterAppRevisionId == "" {
-		err := telemetry.Error(ctx, span, err, "ccp resp app revision id is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "resp-app-revision-id", Value: ccpResp.Msg.PorterAppRevisionId})
-
-	if ccpResp.Msg.CliAction == porterv1.EnumCLIAction_ENUM_CLI_ACTION_UNSPECIFIED {
-		err := telemetry.Error(ctx, span, err, "ccp resp cli action is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "cli-action", Value: ccpResp.Msg.CliAction.String()})
-
-	response := &ApplyPorterAppResponse{
-		AppRevisionId: ccpResp.Msg.PorterAppRevisionId,
-		CLIAction:     ccpResp.Msg.CliAction,
-	}
-
-	c.WriteResult(w, r, response)
-}
-
-// addPorterSubdomainsIfNecessary adds porter subdomains to the app proto if a web service is changed to private and has no domains
-func addPorterSubdomainsIfNecessary(ctx context.Context, appProto *porterv1.PorterApp, deploymentTarget deployment_target.DeploymentTarget, createSubdomainInput porter_app.CreatePorterSubdomainInput) (*porterv1.PorterApp, error) {
-	ctx, span := telemetry.NewSpan(ctx, "add-porter-subdomains-if-necessary")
-	defer span.End()
-
-	// use deprecated services if service list is empty
-	if len(appProto.ServiceList) == 0 {
-		for _, service := range appProto.Services { // nolint:staticcheck
-			appProto.ServiceList = append(appProto.ServiceList, service)
-		}
-	}
-
-	for _, service := range appProto.ServiceList {
-		if service == nil {
-			continue
-		}
-		if service.Type == porterv1.ServiceType_SERVICE_TYPE_WEB {
-			webConfig := service.GetWebConfig()
-			if webConfig != nil && !webConfig.GetPrivate() && len(webConfig.Domains) == 0 {
-				if deploymentTarget.Namespace != DeploymentTargetSelector_Default {
-					createSubdomainInput.AppName = fmt.Sprintf("%s-%s", createSubdomainInput.AppName, deploymentTarget.ID[:6])
-				}
-
-				subdomain, err := porter_app.CreatePorterSubdomain(ctx, createSubdomainInput)
-				if err != nil {
-					return appProto, fmt.Errorf("error creating subdomain: %w", err)
-				}
-
-				if subdomain == "" {
-					return appProto, errors.New("response subdomain is empty")
-				}
-
-				webConfig.Domains = []*porterv1.Domain{
-					{Name: subdomain},
-				}
-			}
-		}
-	}
-
-	serviceMap := make(map[string]*porterv1.Service)
-	for _, service := range appProto.ServiceList {
-		serviceMap[service.Name] = service
-	}
-	appProto.Services = serviceMap // nolint:staticcheck
-
-	return appProto, nil
-}

+ 100 - 0
api/server/handlers/porter_app/attach_env_group.go

@@ -0,0 +1,100 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// AttachEnvGroupHandler is the handler for the /apps/attach-env-group endpoint
+type AttachEnvGroupHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAttachEnvGroupHandler handles POST requests to the endpoint /apps/attach-env-group
+func NewAttachEnvGroupHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AttachEnvGroupHandler {
+	return &AttachEnvGroupHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// AttachEnvGroupRequest is the request object for the /apps/attach-env-group endpoint
+type AttachEnvGroupRequest struct {
+	EnvGroupName   string   `json:"env_group_name"`
+	AppInstanceIDs []string `json:"app_instance_ids"`
+}
+
+// ServeHTTP translates the request into a AttachEnvGroup request, then calls update on the app with the env group
+// The latest version of the env group will be attached (ccp makes sure of that)
+func (c *AttachEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-attach-env-group")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	request := &AttachEnvGroupRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-name", Value: request.EnvGroupName})
+
+	if request.EnvGroupName == "" {
+		err := telemetry.Error(ctx, span, nil, "env group name cannot be empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	for _, appInstanceId := range request.AppInstanceIDs {
+		appInstance, err := c.Repo().AppInstance().Get(ctx, appInstanceId)
+		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-instance-id", Value: appInstanceId})
+			err := telemetry.Error(ctx, span, err, "error getting app instance")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		updateReq := connect.NewRequest(&porterv1.UpdateAppRequest{
+			ProjectId: int64(project.ID),
+			DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+				Id: appInstance.DeploymentTargetID.String(),
+			},
+			App: &porterv1.PorterApp{
+				Name: appInstance.Name,
+				EnvGroups: []*porterv1.EnvGroup{
+					{
+						Name: request.EnvGroupName,
+					},
+				},
+			},
+		})
+
+		_, err = c.Config().ClusterControlPlaneClient.UpdateApp(ctx, updateReq)
+		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-instance-id", Value: appInstanceId})
+			err := telemetry.Error(ctx, span, err, "error calling ccp update app")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+	}
+
+	c.WriteResult(w, r, nil)
+}

+ 174 - 0
api/server/handlers/porter_app/cloudsql.go

@@ -0,0 +1,174 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// GetCloudSqlSecretHandler is a handler to get the cloudsql secret
+type GetCloudSqlSecretHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewGetCloudSqlSecretHandler returns a GetCloudSqlSecretHandler
+func NewGetCloudSqlSecretHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *GetCloudSqlSecretHandler {
+	return &GetCloudSqlSecretHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// GetCloudSqlSecretResponse is the response payload for the GetCloudSqlSecretHandler
+type GetCloudSqlSecretResponse struct {
+	SecretName string `json:"secret_name"`
+}
+
+// ServeHTTP retrieves the cloudsql secret
+func (c *GetCloudSqlSecretHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	ctx, span := telemetry.NewSpan(ctx, "serve-get-cloudsql-secret")
+	defer span.End()
+
+	deploymentTarget, _ := ctx.Value(types.DeploymentTargetScope).(types.DeploymentTarget)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "application-name", Value: appName})
+
+	cluster, err := c.Repo().Cluster().ReadCluster(deploymentTarget.ProjectID, deploymentTarget.ClusterID)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error reading cluster")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, deploymentTarget.Namespace)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	secret, err := agent.GetSecret(fmt.Sprintf("cloudsql-secret-%s", appName), deploymentTarget.Namespace)
+	if err != nil && !k8serrors.IsNotFound(err) {
+		err = telemetry.Error(ctx, span, err, "error getting secret")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	var secretName string
+	if secret != nil {
+		secretName = secret.Name
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "secret-name", Value: secretName})
+
+	c.WriteResult(w, r, GetCloudSqlSecretResponse{SecretName: secretName})
+}
+
+// CreateCloudSqlSecretHandler is a handler to create the cloudsql secret
+type CreateCloudSqlSecretHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewCreateCloudSqlSecretHandler returns a CreateCloudSqlSecretHandler
+func NewCreateCloudSqlSecretHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateCloudSqlSecretHandler {
+	return &CreateCloudSqlSecretHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// CreateCloudSqlSecretRequest is the request payload for the CreateCloudSqlSecretHandler
+type CreateCloudSqlSecretRequest struct {
+	B64ServiceAccountJson string `json:"b64_service_account_json"`
+}
+
+// ServeHTTP creates the cloudsql secret
+func (c *CreateCloudSqlSecretHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	ctx, span := telemetry.NewSpan(ctx, "serve-create-cloudsql-secret")
+	defer span.End()
+
+	deploymentTarget, _ := ctx.Value(types.DeploymentTargetScope).(types.DeploymentTarget)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "application-name", Value: appName})
+
+	request := &CreateCloudSqlSecretRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	cluster, err := c.Repo().Cluster().ReadCluster(deploymentTarget.ProjectID, deploymentTarget.ClusterID)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error reading cluster")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, deploymentTarget.Namespace)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(request.B64ServiceAccountJson)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error decoding base64 service account json")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	secret := &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: fmt.Sprintf("cloudsql-secret-%s", appName),
+		},
+		Data: map[string][]byte{
+			"service_account.json": decoded,
+		},
+	}
+
+	_, err = agent.CreateSecret(secret, deploymentTarget.Namespace)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating secret")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+}

+ 4 - 19
api/server/handlers/porter_app/create_and_update_events.go

@@ -193,25 +193,7 @@ func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Contex
 				}
 				return event, nil
 			} else {
-				betaFeaturesEnabled := project.GetFeatureFlag(models.BetaFeaturesEnabled, p.Config().LaunchDarklyClient)
-				telemetry.WithAttributes(span,
-					telemetry.AttributeKV{Key: "beta_features_enabled", Value: betaFeaturesEnabled},
-				)
-				// if beta features are not enabled, then porter makes a request to ccp to update the deploy status
-				// if beta features are enabled, ccp is checking the deploy status, so this request is not necessary
-				// TODO remove this entire branch once beta features are enabled by default
-				if !betaFeaturesEnabled {
-					err := p.updateDeployEventV2(ctx, updateDeployEventV2Input{
-						projectID:             cluster.ProjectID,
-						appName:               porterAppName,
-						appID:                 app.ID,
-						deploymentTargetID:    deploymentTargetID,
-						updatedStatusMetadata: requestMetadata,
-					})
-					if err != nil {
-						return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error updating v2 deploy event")
-					}
-				}
+				// v2 handles its own deploy events
 				return types.PorterAppEvent{}, nil
 			}
 		}
@@ -240,6 +222,7 @@ func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Contex
 			ProjectId:          int64(cluster.ProjectID),
 			AppId:              int64(app.ID),
 			DeploymentTargetId: deploymentTargetID,
+			AppName:            porterAppName,
 		}))
 		if err != nil {
 			return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error getting current app revision from cluster control plane client")
@@ -573,6 +556,7 @@ func (p *CreateUpdatePorterAppEventHandler) handleNotification(ctx context.Conte
 	if agentEventMetadata == nil {
 		return telemetry.Error(ctx, span, nil, "app event metadata is nil")
 	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-event-type", Value: agentEventMetadata.AppEventType.String()})
 
 	createNotificationRequest := connect.NewRequest(&porterv1.CreateNotificationRequest{
 		ProjectId: int64(projectId),
@@ -584,6 +568,7 @@ func (p *CreateUpdatePorterAppEventHandler) handleNotification(ctx context.Conte
 		ServiceName:        agentEventMetadata.ServiceName,
 		AppRevisionId:      agentEventMetadata.AppRevisionID,
 		PorterAgentEventId: int64(agentEventMetadata.AgentEventID),
+		AppEventType:       agentEventMetadata.AppEventType,
 		RawSummary:         agentEventMetadata.Summary,
 		RawDetail:          agentEventMetadata.Detail,
 		JobRunId:           agentEventMetadata.JobRunID,

+ 15 - 1
api/server/handlers/porter_app/create_secret_and_open_pr.go

@@ -15,6 +15,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/auth/token"
+	"github.com/porter-dev/porter/internal/encryption"
 	"github.com/porter-dev/porter/internal/integrations/ci/actions"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/telemetry"
@@ -117,6 +118,19 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		prRequestBody = "Hello 👋 from Porter! Please merge this PR to enable preview environments for your application."
 	}
 
+	randStr, err := encryption.GenerateRandomBytes(4)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error generating random bytes")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	prBranchName := fmt.Sprintf("porter-stack-%s-%s", appName, randStr)
+	// limit branch name to 100 characters for safety
+	if len(prBranchName) > 100 {
+		prBranchName = prBranchName[:100]
+	}
+
 	if request.OpenPr || request.DeleteWorkflowFilename != "" {
 		openPRInput := &actions.GithubPROpts{
 			PRAction:       actions.GithubPRAction_NewAppWorkflow,
@@ -131,7 +145,7 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			SecretName:     secretName,
 			PorterYamlPath: request.PorterYamlPath,
 			Body:           prRequestBody,
-			PRBranch:       "porter-stack",
+			PRBranch:       prBranchName,
 		}
 		if request.DeleteWorkflowFilename != "" {
 			openPRInput.PRAction = actions.GithubPRAction_DeleteAppWorkflow

+ 25 - 38
api/server/handlers/porter_app/current_app_revision.go

@@ -10,8 +10,6 @@ import (
 
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 
-	"github.com/google/uuid"
-
 	"github.com/porter-dev/porter/internal/porter_app"
 	"github.com/porter-dev/porter/internal/telemetry"
 
@@ -43,7 +41,8 @@ func NewLatestAppRevisionHandler(
 
 // LatestAppRevisionRequest is the request object for the /apps/{porter_app_name}/latest endpoint
 type LatestAppRevisionRequest struct {
-	DeploymentTargetID string `schema:"deployment_target_id"`
+	DeploymentTargetID   string `schema:"deployment_target_id,omitempty"`
+	DeploymentTargetName string `schema:"deployment_target_name,omitempty"`
 }
 
 // LatestAppRevisionResponse is the response object for the /apps/{porter_app_name}/latest endpoint
@@ -82,44 +81,32 @@ func (c *LatestAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	_, err := uuid.Parse(request.DeploymentTargetID)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
-
-	porterApps, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, appName)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting porter app from repo")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if len(porterApps) == 0 {
-		err := telemetry.Error(ctx, span, err, "no porter apps returned")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if len(porterApps) > 1 {
-		err := telemetry.Error(ctx, span, err, "multiple porter apps returned; unable to determine which one to use")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	appId := porterApps[0].ID
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-id", Value: appId})
-
-	if appId == 0 {
-		err := telemetry.Error(ctx, span, err, "porter app id is missing")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
 	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
 
 	currentAppRevisionReq := connect.NewRequest(&porterv1.CurrentAppRevisionRequest{
-		ProjectId:          int64(project.ID),
-		AppId:              int64(appId),
-		DeploymentTargetId: request.DeploymentTargetID,
+		ProjectId: int64(project.ID),
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+		AppName: appName,
 	})
 
 	currentAppRevisionResp, err := c.Config().ClusterControlPlaneClient.CurrentAppRevision(ctx, currentAppRevisionReq)

+ 58 - 30
api/server/handlers/porter_app/default_deployment_target.go

@@ -1,9 +1,12 @@
 package porter_app
 
 import (
+	"context"
 	"net/http"
 	"time"
 
+	"github.com/porter-dev/api-contracts/generated/go/porter/v1/porterv1connect"
+
 	"github.com/google/uuid"
 
 	"connectrpc.com/connect"
@@ -65,53 +68,78 @@ func (c *DefaultDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *htt
 		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
 	)
 
-	defaultDeploymentTargetReq := connect.NewRequest(&porterv1.DefaultDeploymentTargetRequest{
-		ProjectId: int64(project.ID),
-		ClusterId: int64(cluster.ID),
+	defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+		ProjectID:                 project.ID,
+		ClusterID:                 cluster.ID,
+		ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
 	})
-
-	defaultDeploymentTargetResp, err := c.Config().ClusterControlPlaneClient.DefaultDeploymentTarget(ctx, defaultDeploymentTargetReq)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error getting default deployment target")
 		c.WriteResult(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
+	response := &DefaultDeploymentTargetResponse{
+		DeploymentTargetID: defaultDeploymentTarget.ID.String(),
+		DeploymentTarget:   defaultDeploymentTarget,
+	}
+
+	c.WriteResult(w, r, response)
+}
+
+type defaultDeploymentTargetInput struct {
+	ProjectID                 uint
+	ClusterID                 uint
+	ClusterControlPlaneClient porterv1connect.ClusterControlPlaneServiceClient
+}
+
+func defaultDeploymentTarget(ctx context.Context, input defaultDeploymentTargetInput) (types.DeploymentTarget, error) {
+	ctx, span := telemetry.NewSpan(ctx, "default-deployment-target")
+	defer span.End()
+
+	var defaultDeploymentTarget types.DeploymentTarget
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: input.ProjectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: input.ClusterID},
+	)
+
+	defaultDeploymentTargetReq := connect.NewRequest(&porterv1.DefaultDeploymentTargetRequest{
+		ProjectId: int64(input.ProjectID),
+		ClusterId: int64(input.ClusterID),
+	})
+
+	defaultDeploymentTargetResp, err := input.ClusterControlPlaneClient.DefaultDeploymentTarget(ctx, defaultDeploymentTargetReq)
+	if err != nil {
+		return defaultDeploymentTarget, telemetry.Error(ctx, span, err, "error getting default deployment target")
+	}
+
 	if defaultDeploymentTargetResp == nil || defaultDeploymentTargetResp.Msg == nil {
-		err := telemetry.Error(ctx, span, nil, "default deployment target response is nil")
-		c.WriteResult(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+		return defaultDeploymentTarget, telemetry.Error(ctx, span, nil, "default deployment target response is nil")
 	}
 
-	defaultDeploymentTarget := defaultDeploymentTargetResp.Msg.DeploymentTarget
+	deploymentTargetProto := defaultDeploymentTargetResp.Msg.DeploymentTarget
 
-	id, err := uuid.Parse(defaultDeploymentTarget.Id)
+	id, err := uuid.Parse(deploymentTargetProto.Id)
 	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error parsing default deployment target id")
-		c.WriteResult(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+		return defaultDeploymentTarget, telemetry.Error(ctx, span, err, "error parsing default deployment target id")
 	}
 
 	if id == uuid.Nil {
-		err := telemetry.Error(ctx, span, nil, "default deployment target id is nil")
-		c.WriteResult(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+		return defaultDeploymentTarget, telemetry.Error(ctx, span, nil, "default deployment target id is nil")
 	}
 
-	response := &DefaultDeploymentTargetResponse{
-		DeploymentTargetID: defaultDeploymentTarget.Id,
-		DeploymentTarget: types.DeploymentTarget{
-			ID:        id,
-			ProjectID: uint(defaultDeploymentTarget.ProjectId),
-			ClusterID: uint(defaultDeploymentTarget.ClusterId),
-			Name:      defaultDeploymentTarget.Name,
-			Namespace: defaultDeploymentTarget.Namespace,
-			IsPreview: defaultDeploymentTarget.IsPreview,
-			IsDefault: defaultDeploymentTarget.IsDefault,
-			CreatedAt: time.Time{}, // not provided by default deployment target response
-			UpdatedAt: time.Time{}, // not provided by default deployment target response
-		},
+	defaultDeploymentTarget = types.DeploymentTarget{
+		ID:           id,
+		ProjectID:    uint(deploymentTargetProto.ProjectId),
+		ClusterID:    uint(deploymentTargetProto.ClusterId),
+		Name:         deploymentTargetProto.Name,
+		Namespace:    deploymentTargetProto.Namespace,
+		IsPreview:    deploymentTargetProto.IsPreview,
+		IsDefault:    deploymentTargetProto.IsDefault,
+		CreatedAtUTC: time.Time{}, // not provided by default deployment target response
+		UpdatedAtUTC: time.Time{}, // not provided by default deployment target response
 	}
 
-	c.WriteResult(w, r, response)
+	return defaultDeploymentTarget, nil
 }

+ 2 - 0
api/server/handlers/porter_app/delete.go

@@ -37,6 +37,7 @@ func (c *DeletePorterAppByNameHandler) ServeHTTP(w http.ResponseWriter, r *http.
 	defer span.End()
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
 	if reqErr != nil {
@@ -55,6 +56,7 @@ func (c *DeletePorterAppByNameHandler) ServeHTTP(w http.ResponseWriter, r *http.
 
 	deleteReq := connect.NewRequest[porterv1.DeletePorterAppRequest](&porterv1.DeletePorterAppRequest{
 		ProjectId: int64(project.ID),
+		ClusterId: int64(cluster.ID),
 		AppName:   appName,
 	})
 	ccpResp, err := c.Config().ClusterControlPlaneClient.DeletePorterApp(r.Context(), deleteReq)

+ 28 - 0
api/server/handlers/porter_app/get_app_revision_status.go

@@ -35,9 +35,23 @@ func NewGetAppRevisionStatusHandler(
 	}
 }
 
+// HighLevelStatus is a high level status that can be used to determine whether the revisions is progressing, successful or failed
+type HighLevelStatus string
+
+const (
+	// HighLevelStatus_Progressing indicates that the revision is still in progress
+	HighLevelStatus_Progressing HighLevelStatus = "progressing"
+	// HighLevelStatus_Successful indicates that the revision has completed successfully
+	HighLevelStatus_Successful HighLevelStatus = "successful"
+	// HighLevelStatus_Failed indicates that the revision has failed
+	HighLevelStatus_Failed HighLevelStatus = "failed"
+)
+
 // GetAppRevisionStatusResponse represents the response from the /apps/{porter_app_name}/revisions/{app_revision_id}/status endpoint
 type GetAppRevisionStatusResponse struct {
 	AppRevisionStatus porter_app.RevisionProgress `json:"app_revision_status"`
+	// HighLevelStatus is a high level status that can be used to determine whether the revisions is progressing, successful or failed
+	HighLevelStatus HighLevelStatus `json:"status"`
 }
 
 // GetAppRevisionStatusHandler returns the status of an app revision
@@ -84,8 +98,22 @@ func (c *GetAppRevisionStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 		IsInTerminalStatus:   ccpResp.Msg.IsInTerminalStatus,
 	}
 
+	statusTransform := map[porterv1.EnumAppRevisionStatus]HighLevelStatus{
+		porterv1.EnumAppRevisionStatus_ENUM_APP_REVISION_STATUS_PROGRESSING: HighLevelStatus_Progressing,
+		porterv1.EnumAppRevisionStatus_ENUM_APP_REVISION_STATUS_SUCCESSFUL:  HighLevelStatus_Successful,
+		porterv1.EnumAppRevisionStatus_ENUM_APP_REVISION_STATUS_FAILED:      HighLevelStatus_Failed,
+	}
+
+	status, ok := statusTransform[ccpResp.Msg.Status]
+	if !ok {
+		err = telemetry.Error(ctx, span, nil, "unsupported revision status status")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
 	res := &GetAppRevisionStatusResponse{
 		AppRevisionStatus: revisionStatus,
+		HighLevelStatus:   status,
 	}
 
 	c.WriteResult(w, r, res)

+ 3 - 15
api/server/handlers/porter_app/get_app_template.go

@@ -51,6 +51,7 @@ func (c *GetAppTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	defer span.End()
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
@@ -67,31 +68,18 @@ func (c *GetAppTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
 
-	porterApps, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, appName)
+	app, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, appName)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error getting porter app from repo")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
-	if len(porterApps) == 0 {
-		err := telemetry.Error(ctx, span, err, "no porter apps returned")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if len(porterApps) > 1 {
-		err := telemetry.Error(ctx, span, err, "multiple porter apps returned; unable to determine which one to use")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	if porterApps[0].ID == 0 {
+	if app.ID == 0 {
 		err := telemetry.Error(ctx, span, err, "porter app id is missing")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	app := porterApps[0]
-
 	templateReq := connect.NewRequest(&porterv1.AppTemplateRequest{
 		ProjectId: int64(project.ID),
 		AppId:     int64(app.ID),

+ 47 - 9
api/server/handlers/porter_app/get_build.go

@@ -2,6 +2,7 @@ package porter_app
 
 import (
 	"encoding/base64"
+	"encoding/json"
 	"net/http"
 
 	"github.com/google/uuid"
@@ -17,6 +18,7 @@ import (
 	"github.com/porter-dev/porter/internal/deployment_target"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
 	"github.com/porter-dev/porter/internal/telemetry"
 )
 
@@ -54,6 +56,11 @@ type BuildSettings struct {
 	CommitSHA  string   `json:"commit_sha"`
 }
 
+// GetBuildFromRevisionRequest is the request object for the /apps/{porter_app_name}/revisions/{app_revision_id}/build endpoint
+type GetBuildFromRevisionRequest struct {
+	B64PatchOperations string `json:"b64_patch_operations"`
+}
+
 // GetBuildFromRevisionResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id}/build endpoint
 type GetBuildFromRevisionResponse struct {
 	BuildEnvVariables map[string]string `json:"build_env_variables"`
@@ -100,6 +107,30 @@ func (c *GetBuildFromRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: appRevisionUuid.String()})
 
+	request := &GetBuildFromRevisionRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	var patchOps []v2.PatchOperation
+	if request.B64PatchOperations != "" {
+		decodedPatchOps, err := base64.StdEncoding.DecodeString(request.B64PatchOperations)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error decoding patch operations")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		err = json.Unmarshal(decodedPatchOps, &patchOps)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error unmarshalling patch operations")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+	}
+
 	revision, err := porter_app.GetAppRevision(ctx, porter_app.GetAppRevisionInput{
 		AppRevisionID: appRevisionUuid,
 		ProjectID:     project.ID,
@@ -134,23 +165,30 @@ func (c *GetBuildFromRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 		return
 	}
 
+	patchedProto, err := v2.PatchApp(ctx, appProto, patchOps)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error patching app proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
 	resp.Image = Image{
-		Repository: appProto.Image.Repository,
-		Tag:        appProto.Image.Tag,
+		Repository: patchedProto.Image.Repository,
+		Tag:        patchedProto.Image.Tag,
 	}
 
-	if appProto.Build == nil {
+	if patchedProto.Build == nil {
 		c.WriteResult(w, r, resp)
 		return
 	}
 
 	resp.Build = BuildSettings{
-		Method:     appProto.Build.Method,
-		Context:    appProto.Build.Context,
-		Builder:    appProto.Build.Builder,
-		Buildpacks: appProto.Build.Buildpacks,
-		Dockerfile: appProto.Build.Dockerfile,
-		CommitSHA:  appProto.Build.CommitSha,
+		Method:     patchedProto.Build.Method,
+		Context:    patchedProto.Build.Context,
+		Builder:    patchedProto.Build.Builder,
+		Buildpacks: patchedProto.Build.Buildpacks,
+		Dockerfile: patchedProto.Build.Dockerfile,
+		CommitSHA:  patchedProto.Build.CommitSha,
 	}
 
 	agent, err := c.GetAgent(r, cluster, "")

+ 1 - 1
api/server/handlers/porter_app/get_logs_within_time_range.go

@@ -144,7 +144,7 @@ func (c *GetLogsWithinTimeRangeHandler) ServeHTTP(w http.ResponseWriter, r *http
 		SearchParam: request.SearchParam,
 	}
 
-	logs, err := porter_agent.GetHistoricalLogs(agent.Clientset, agentSvc, logRequest)
+	logs, err := porter_agent.GetHistoricalLogs(ctx, agent.Clientset, agentSvc, logRequest)
 	if err != nil {
 		_ = telemetry.Error(ctx, span, err, "unable to get logs")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get logs for pod selector %s", podSelector), http.StatusInternalServerError))

+ 1 - 0
api/server/handlers/porter_app/helm_values_v2.go

@@ -103,6 +103,7 @@ func (c *AppHelmValuesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		AppId:              int64(request.AppID),
 		DeploymentTargetId: request.DeploymentTargetID,
 		WithDefaults:       request.WithDefaults,
+		AppName:            appName,
 	})
 
 	helmValuesResp, err := c.Config().ClusterControlPlaneClient.AppHelmValues(ctx, helmValuesReq)

+ 105 - 0
api/server/handlers/porter_app/job_run_cancel.go

@@ -0,0 +1,105 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CancelJobRunHandler is the handler for POST /apps/jobs/{porter_app_name}/jobs/{job_run_name}/cancel
+type CancelJobRunHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewCancelJobRunHandler returns a new CancelJobRunHandler
+func NewCancelJobRunHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CancelJobRunHandler {
+	return &CancelJobRunHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// CancelJobRunRequest is the expected format for a request body on POST /apps/jobs/{porter_app_name}/jobs/{job_run_name}/cancel
+type CancelJobRunRequest struct {
+	DeploymentTargetID   string `json:"deployment_target_id,omitempty" validate:"optional"`
+	DeploymentTargetName string `json:"deployment_target_name,omitempty" validate:"optional"`
+}
+
+// CancelJobRunResponse is the response format for POST /apps/jobs/{porter_app_name}/jobs/{job_run_name}/cancel
+type CancelJobRunResponse struct{}
+
+// ServeHTTP handles the cancel job run request
+func (c *CancelJobRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-cancel-job-run")
+	defer span.End()
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	request := &CancelJobRunRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	name, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "invalid porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: name})
+
+	jobRunName, reqErr := requestutils.GetURLParamString(r, types.URLParamJobRunName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "invalid job run name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargetID := request.DeploymentTargetID
+	deploymentTargetName := request.DeploymentTargetName
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+	)
+
+	var deploymentTargetIdentifer *porterv1.DeploymentTargetIdentifier
+	if deploymentTargetID != "" || deploymentTargetName != "" {
+		deploymentTargetIdentifer = &porterv1.DeploymentTargetIdentifier{
+			Id:   deploymentTargetID,
+			Name: deploymentTargetName,
+		}
+	}
+
+	cancelJobRunRequest := connect.NewRequest(&porterv1.CancelJobRunRequest{
+		ProjectId:                  int64(project.ID),
+		ClusterId:                  int64(cluster.ID),
+		DeploymentTargetIdentifier: deploymentTargetIdentifer,
+		JobRunName:                 jobRunName,
+	})
+
+	_, err := c.Config().ClusterControlPlaneClient.CancelJobRun(ctx, cancelJobRunRequest)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error canceling job run")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &CancelJobRunResponse{}
+
+	c.WriteResult(w, r, res)
+}

+ 57 - 43
api/server/handlers/porter_app/job_status.go

@@ -3,6 +3,8 @@ package porter_app
 import (
 	"net/http"
 
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -10,9 +12,8 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/deployment_target"
-	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
 	"github.com/porter-dev/porter/internal/telemetry"
 )
 
@@ -36,8 +37,14 @@ func NewJobStatusHandler(
 
 // JobStatusRequest is the expected format for a request body on GET /apps/jobs
 type JobStatusRequest struct {
-	DeploymentTargetID string `schema:"deployment_target_id"`
-	JobName            string `schema:"job_name"`
+	DeploymentTargetID   string `schema:"deployment_target_id,omitempty"`
+	DeploymentTargetName string `schema:"deployment_target_name,omitempty"`
+	JobName              string `schema:"job_name"`
+}
+
+// JobStatusResponse is the response format for GET /apps/jobs
+type JobStatusResponse struct {
+	JobRuns []porter_app.JobRun `json:"job_runs"`
 }
 
 func (c *JobStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -63,57 +70,64 @@ func (c *JobStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: name})
 
-	if request.DeploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
 	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
 
-	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-		ProjectID:          int64(project.ID),
-		ClusterID:          int64(cluster.ID),
-		DeploymentTargetID: request.DeploymentTargetID,
-		CCPClient:          c.Config().ClusterControlPlaneClient,
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	jobRunsRequest := connect.NewRequest(&porterv1.JobRunsRequest{
+		ProjectId: int64(project.ID),
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+		AppName:        name,
+		JobServiceName: request.JobName,
 	})
+
+	jobRunsResp, err := c.Config().ClusterControlPlaneClient.JobRuns(ctx, jobRunsRequest)
 	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		err := telemetry.Error(ctx, span, err, "error getting job runs from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
-	namespace := deploymentTarget.Namespace
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
-
-	agent, err := c.GetAgent(r, cluster, "")
-	if err != nil {
-		err = telemetry.Error(ctx, span, err, "unable to get agent")
+	if jobRunsResp == nil || jobRunsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "job runs response is nil")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	labels := []kubernetes.Label{
-		{
-			Key: "porter.run/deployment-target-id",
-			Val: request.DeploymentTargetID,
-		},
-		{
-			Key: "porter.run/app-name",
-			Val: name,
-		},
-	}
-	if request.JobName != "" {
-		labels = append(labels, kubernetes.Label{
-			Key: "porter.run/service-name",
-			Val: request.JobName,
-		})
+	runs := []porter_app.JobRun{}
+	for _, jobRun := range jobRunsResp.Msg.JobRuns {
+		run, err := porter_app.JobRunFromProto(ctx, jobRun)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error converting job run from proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		runs = append(runs, run)
 	}
-	jobs, err := agent.ListJobsByLabel(namespace, labels...)
-	if err != nil {
-		err = telemetry.Error(ctx, span, err, "error listing jobs")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
+
+	res := JobStatusResponse{
+		JobRuns: runs,
 	}
 
-	c.WriteResult(w, r, jobs)
+	c.WriteResult(w, r, res)
 }

+ 133 - 0
api/server/handlers/porter_app/job_status_by_name.go

@@ -0,0 +1,133 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// JobStatusByNameHandler is the handler for GET /apps/jobs/{porter_app_name}/{job_run_name}
+type JobStatusByNameHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewJobStatusByNameHandler returns a new JobStatusByNameHandler
+func NewJobStatusByNameHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *JobStatusByNameHandler {
+	return &JobStatusByNameHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// JobStatusByNameRequest is the expected format for the query parameters on GET /apps/jobs/{porter_app_name}/{job_run_name}
+type JobStatusByNameRequest struct {
+	DeploymentTargetID   string `schema:"deployment_target_id,omitempty"`
+	DeploymentTargetName string `schema:"deployment_target_name,omitempty"`
+	JobRunName           string `schema:"job_run_name"`
+}
+
+// JobStatusByNameResponse is the response format for GET /apps/jobs/{porter_app_name}/{job_run_name}
+type JobStatusByNameResponse struct {
+	JobRun porter_app.JobRun `json:"job_run"`
+}
+
+func (c *JobStatusByNameHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-job-status")
+	defer span.End()
+
+	request := &JobStatusByNameRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	name, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "invalid porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: name})
+
+	jobRunName, reqErr := requestutils.GetURLParamString(r, types.URLParamJobRunName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "invalid job run name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	jobRunsRequest := connect.NewRequest(&porterv1.JobRunStatusRequest{
+		ProjectId: int64(project.ID),
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+		JobRunName: jobRunName,
+	})
+
+	jobRunResp, err := c.Config().ClusterControlPlaneClient.JobRunStatus(ctx, jobRunsRequest)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting job run from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if jobRunResp == nil || jobRunResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "job run response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	run, err := porter_app.JobRunFromProto(ctx, jobRunResp.Msg.JobRun)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error converting job run from proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := JobStatusByNameResponse{
+		JobRun: run,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 1 - 0
api/server/handlers/porter_app/list_app_revisions.go

@@ -98,6 +98,7 @@ func (c *ListAppRevisionsHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 		ProjectId:          int64(project.ID),
 		AppId:              int64(app.ID),
 		DeploymentTargetId: request.DeploymentTargetID,
+		AppName:            appName,
 	})
 
 	listAppRevisionsResp, err := c.Config().ClusterControlPlaneClient.ListAppRevisions(ctx, listAppRevisionsReq)

+ 90 - 40
api/server/handlers/porter_app/logs_apply_v2.go

@@ -15,6 +15,7 @@ import (
 	"github.com/porter-dev/porter/internal/deployment_target"
 	porter_agent "github.com/porter-dev/porter/internal/kubernetes/porter_agent/v2"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
 	"github.com/porter-dev/porter/internal/telemetry"
 )
 
@@ -38,15 +39,17 @@ func NewAppLogsHandler(
 
 // AppLogsRequest represents the accepted fields on a request to the /apps/logs endpoint
 type AppLogsRequest struct {
-	DeploymentTargetID string    `schema:"deployment_target_id"`
-	ServiceName        string    `schema:"service_name"`
-	AppID              uint      `schema:"app_id"`
-	Limit              uint      `schema:"limit"`
-	StartRange         time.Time `schema:"start_range,omitempty"`
-	EndRange           time.Time `schema:"end_range,omitempty"`
-	SearchParam        string    `schema:"search_param"`
-	Direction          string    `schema:"direction"`
-	AppRevisionID      string    `schema:"app_revision_id"`
+	DeploymentTargetID   string    `schema:"deployment_target_id"`
+	DeploymentTargetName string    `schema:"deployment_target_name"`
+	ServiceName          string    `schema:"service_name"`
+	AppID                uint      `schema:"app_id"`
+	Limit                uint      `schema:"limit"`
+	StartRange           time.Time `schema:"start_range,omitempty"`
+	EndRange             time.Time `schema:"end_range,omitempty"`
+	SearchParam          string    `schema:"search_param"`
+	Direction            string    `schema:"direction"`
+	AppRevisionID        string    `schema:"app_revision_id"`
+	JobRunName           string    `schema:"job_run_name"`
 }
 
 const (
@@ -55,9 +58,19 @@ const (
 	lokiLabel_PorterServiceName   = "porter_run_service_name"
 	lokiLabel_PorterAppRevisionID = "porter_run_app_revision_id"
 	lokiLabel_DeploymentTargetId  = "porter_run_deployment_target_id"
+	lokiLabel_JobRunName          = "job_name"
 	lokiLabel_Namespace           = "namespace"
 )
 
+const defaultLogLineLimit = 1000
+
+// AppLogsResponse represents the response to the /apps/logs endpoint
+type AppLogsResponse struct {
+	BackwardContinueTime *time.Time                 `json:"backward_continue_time,omitempty"`
+	ForwardContinueTime  *time.Time                 `json:"forward_continue_time,omitempty"`
+	Logs                 []porter_app.StructuredLog `json:"logs"`
+}
+
 // ServeHTTP gets logs for a given app, service, and deployment target
 func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-logs")
@@ -81,12 +94,6 @@ func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
 
-	if request.AppID == 0 {
-		err := telemetry.Error(ctx, span, nil, "must provide app id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
 	if request.ServiceName == "" {
 		err := telemetry.Error(ctx, span, nil, "must provide service name")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
@@ -94,18 +101,32 @@ func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
 
-	if request.DeploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
 	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
 
 	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-		ProjectID:          int64(project.ID),
-		ClusterID:          int64(cluster.ID),
-		DeploymentTargetID: request.DeploymentTargetID,
-		CCPClient:          c.Config().ClusterControlPlaneClient,
+		ProjectID:            int64(project.ID),
+		ClusterID:            int64(cluster.ID),
+		DeploymentTargetID:   request.DeploymentTargetID,
+		DeploymentTargetName: deploymentTargetName,
+		CCPClient:            c.Config().ClusterControlPlaneClient,
 	})
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
@@ -116,14 +137,32 @@ func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	namespace := deploymentTarget.Namespace
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
 
-	if request.StartRange.IsZero() || request.EndRange.IsZero() {
-		err := telemetry.Error(ctx, span, nil, "must provide start and end range")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
+	startRange := request.StartRange
+	if request.StartRange.IsZero() {
+		dayAgo := time.Now().Add(-24 * time.Hour).UTC()
+		startRange = dayAgo
 	}
+
+	endRange := request.EndRange
+	if request.EndRange.IsZero() {
+		endRange = time.Now().UTC()
+	}
+
+	limit := request.Limit
+	if request.Limit == 0 {
+		limit = defaultLogLineLimit
+	}
+
+	direction := request.Direction
+	if request.Direction == "" {
+		direction = "backward"
+	}
+
 	telemetry.WithAttributes(span,
 		telemetry.AttributeKV{Key: "start-range", Value: request.StartRange.String()},
 		telemetry.AttributeKV{Key: "end-range", Value: request.EndRange.String()},
+		telemetry.AttributeKV{Key: "limit", Value: limit},
+		telemetry.AttributeKV{Key: "direction", Value: direction},
 	)
 
 	k8sAgent, err := c.GetAgent(r, cluster, "")
@@ -141,36 +180,47 @@ func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	matchLabels := map[string]string{
-		lokiLabel_Namespace:     namespace,
-		lokiLabel_PorterAppName: appName,
-		lokiLabel_PorterAppID:   fmt.Sprintf("%d", request.AppID),
+		lokiLabel_Namespace:          namespace,
+		lokiLabel_PorterAppName:      appName,
+		lokiLabel_DeploymentTargetId: request.DeploymentTargetID,
 	}
 
 	if request.ServiceName != "all" {
 		matchLabels[lokiLabel_PorterServiceName] = request.ServiceName
 	}
-
 	if request.AppRevisionID != "" {
 		matchLabels[lokiLabel_PorterAppRevisionID] = request.AppRevisionID
 	}
-
-	matchLabels[lokiLabel_DeploymentTargetId] = request.DeploymentTargetID
+	if request.JobRunName != "" {
+		matchLabels[lokiLabel_JobRunName] = request.JobRunName
+	}
 
 	logRequest := &types.LogRequest{
-		Limit:       request.Limit,
-		StartRange:  &request.StartRange,
-		EndRange:    &request.EndRange,
+		Limit:       limit,
+		StartRange:  &startRange,
+		EndRange:    &endRange,
 		MatchLabels: matchLabels,
-		Direction:   request.Direction,
+		Direction:   direction,
 		SearchParam: request.SearchParam,
 	}
 
-	logs, err := porter_agent.Logs(k8sAgent.Clientset, agentSvc, logRequest)
+	logs, err := porter_agent.Logs(ctx, k8sAgent.Clientset, agentSvc, logRequest)
 	if err != nil {
 		_ = telemetry.Error(ctx, span, err, "unable to get logs")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get logs"), http.StatusInternalServerError))
 		return
 	}
+	if logs == nil {
+		err := telemetry.Error(ctx, span, nil, "logs response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := AppLogsResponse{
+		Logs:                 porter_app.AgentLogToStructuredLog(logs.Logs),
+		ForwardContinueTime:  logs.ForwardContinueTime,
+		BackwardContinueTime: logs.BackwardContinueTime,
+	}
 
-	c.WriteResult(w, r, logs)
+	c.WriteResult(w, r, res)
 }

+ 112 - 0
api/server/handlers/porter_app/manifests.go

@@ -0,0 +1,112 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// AppManifestsHandler handles requests to the /apps/{porter_app_name}/manifests endpoint
+type AppManifestsHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAppManifestsHandler returns a new AppManifestsHandler
+func NewAppManifestsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppManifestsHandler {
+	return &AppManifestsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// AppManifestsRequest is the request object for the /apps/{porter_app_name}/manifests endpoint
+type AppManifestsRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+}
+
+// AppManifestsResponse is the response object for the /apps/{porter_app_name}/manifests endpoint
+type AppManifestsResponse struct {
+	// Base64Manifests is the base64 encoded manifests
+	Base64Manifests string `json:"base64_manifests"`
+}
+
+// ServeHTTP translates the request into a TemplateAppManifests grpc request, forwards to the cluster control plane, and returns the response.
+func (c *AppManifestsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-manifests")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing stack name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &AppManifestsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	// optional deployment target id - if not provided, use the cluster's default
+	deploymentTargetID := request.DeploymentTargetID
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID})
+
+	var deploymentTargetIdentifier *porterv1.DeploymentTargetIdentifier
+	if deploymentTargetID != "" {
+		deploymentTargetIdentifier = &porterv1.DeploymentTargetIdentifier{
+			Id: deploymentTargetID,
+		}
+	}
+
+	appManifestsReq := connect.NewRequest(&porterv1.TemplateAppManifestsRequest{
+		ProjectId:                  int64(project.ID),
+		ClusterId:                  int64(cluster.ID),
+		AppName:                    appName,
+		DeploymentTargetIdentifier: deploymentTargetIdentifier,
+	})
+
+	appManifestsRes, err := c.Config().ClusterControlPlaneClient.TemplateAppManifests(ctx, appManifestsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting current app manifests from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if appManifestsRes == nil || appManifestsRes.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "current app manifests resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	response := AppManifestsResponse{
+		Base64Manifests: appManifestsRes.Msg.Base64Manifests,
+	}
+
+	c.WriteResult(w, r, response)
+}

+ 12 - 3
api/server/handlers/porter_app/parse_yaml.go

@@ -9,6 +9,7 @@ import (
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 
 	"github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
 
 	"github.com/porter-dev/porter/internal/telemetry"
 
@@ -38,8 +39,9 @@ func NewParsePorterYAMLToProtoHandler(
 
 // ParsePorterYAMLToProtoRequest is the request object for the /apps/parse endpoint
 type ParsePorterYAMLToProtoRequest struct {
-	B64Yaml string `json:"b64_yaml"`
-	AppName string `json:"app_name"`
+	B64Yaml         string              `json:"b64_yaml"`
+	AppName         string              `json:"app_name"`
+	PatchOperations []v2.PatchOperation `json:"patch_operations"`
 }
 
 // EncodedAppWithEnv is a struct that contains a base64-encoded app proto object and a map of env variables
@@ -107,9 +109,16 @@ func (c *ParsePorterYAMLToProtoHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
+	patchedProto, err := v2.PatchApp(ctx, appDefinition.AppProto, request.PatchOperations)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error patching app proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
 	response := &ParsePorterYAMLToProtoResponse{}
 
-	encodedApp, err := encodeAppProto(ctx, appDefinition.AppProto)
+	encodedApp, err := encodeAppProto(ctx, patchedProto)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error encoding app proto")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))

+ 21 - 1
api/server/handlers/porter_app/pod_status.go

@@ -70,11 +70,31 @@ func (c *PodStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		telemetry.AttributeKV{Key: "input-deployment-target-name", Value: request.DeploymentTargetName},
 	)
 
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+	)
+
 	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
 		ProjectID:            int64(project.ID),
 		ClusterID:            int64(cluster.ID),
 		DeploymentTargetID:   request.DeploymentTargetID,
-		DeploymentTargetName: request.DeploymentTargetName,
+		DeploymentTargetName: deploymentTargetName,
 		CCPClient:            c.Config().ClusterControlPlaneClient,
 	})
 	if err != nil {

+ 31 - 19
api/server/handlers/porter_app/rollback_revision.go

@@ -4,7 +4,6 @@ import (
 	"net/http"
 
 	"connectrpc.com/connect"
-	"github.com/google/uuid"
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -37,8 +36,9 @@ func NewRollbackAppRevisionHandler(
 
 // RollbackAppRevisionRequest is the request body for the /apps/{porter_app_name}/rollback endpoint
 type RollbackAppRevisionRequest struct {
-	DeploymentTargetID string `json:"deployment_target_id"`
-	AppRevisionID      string `json:"app_revision_id"`
+	DeploymentTargetID   string `json:"deployment_target_id"`
+	DeploymentTargetName string `json:"deployment_target_name"`
+	AppRevisionID        string `json:"app_revision_id"`
 }
 
 // RollbackAppRevisionResponse is the response body for the /apps/{porter_app_name}/rollback endpoint
@@ -67,18 +67,6 @@ func (c *RollbackAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	deploymentTargetID, err := uuid.Parse(request.DeploymentTargetID)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if deploymentTargetID == uuid.Nil {
-		err := telemetry.Error(ctx, span, nil, "deployment target id is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
 	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
 	if reqErr != nil {
 		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
@@ -99,11 +87,35 @@ func (c *RollbackAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+	)
+
 	rollbackReq := connect.NewRequest(&porterv1.RollbackRevisionRequest{
-		ProjectId:          int64(project.ID),
-		AppId:              int64(app.ID),
-		DeploymentTargetId: deploymentTargetID.String(),
-		AppRevisionId:      request.AppRevisionID,
+		ProjectId: int64(project.ID),
+		AppId:     int64(app.ID),
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+		AppRevisionId: request.AppRevisionID,
+		AppName:       appName,
 	})
 	ccpResp, err := c.Config().ClusterControlPlaneClient.RollbackRevision(ctx, rollbackReq)
 	if err != nil {

+ 158 - 0
api/server/handlers/porter_app/run_app_job.go

@@ -0,0 +1,158 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// RunAppJobHandler handles requests to the /apps/{porter_app_name}/run endpoint
+type RunAppJobHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewRunAppJobHandler returns a new AppJobRunHandler
+func NewRunAppJobHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *RunAppJobHandler {
+	return &RunAppJobHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// RunAppJobRequest is the request object for the /apps/{porter_app_name}/run endpoint
+type RunAppJobRequest struct {
+	ServiceName string `json:"service_name"`
+	// DeploymentTargetID is the id of the deployment target the job should be run against. One of DeploymentTargetID or DeploymentTargetName is required
+	DeploymentTargetID string `json:"deployment_target_id"`
+	// DeploymentTargetName is the name of the deployment target the job should be run against. One of DeploymentTargetID or DeploymentTargetName is required
+	DeploymentTargetName string `json:"deployment_target_name"`
+	// Optional field to override the default run command for the job
+	RunCommand string `json:"run_command"`
+	// Image is an optional field to override the image used for the job
+	Image Image `json:"image,omitempty"`
+}
+
+// RunAppJobResponse is the response object for the /apps/{porter_app_name}/run endpoint
+type RunAppJobResponse struct {
+	JobRunID   string `json:"job_run_id"`
+	JobRunName string `json:"job_run_name"`
+}
+
+// ServeHTTP runs a one-off command in the same environment as the provided service, app and deployment target
+func (c *RunAppJobHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-job-run")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing app name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &RunAppJobRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.ServiceName == "" {
+		err := telemetry.Error(ctx, span, nil, "service name is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
+
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	var commandOptional *string
+	if request.RunCommand != "" {
+		commandOptional = &request.RunCommand
+	}
+
+	var imageOverrideOptional *porterv1.AppImage
+	if request.Image.Tag != "" {
+		telemetry.WithAttributes(span,
+			telemetry.AttributeKV{Key: "image-override-repo", Value: request.Image.Repository},
+			telemetry.AttributeKV{Key: "image-override-tag", Value: request.Image.Tag},
+		)
+		imageOverrideOptional = &porterv1.AppImage{
+			Repository: request.Image.Repository,
+			Tag:        request.Image.Tag,
+		}
+	}
+
+	manualServiceRunReq := connect.NewRequest(&porterv1.ManualServiceRunRequest{
+		ProjectId:   int64(project.ID),
+		AppName:     appName,
+		ServiceName: request.ServiceName,
+		Command:     commandOptional,
+		Image:       imageOverrideOptional,
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+	})
+
+	serviceResp, err := c.Config().ClusterControlPlaneClient.ManualServiceRun(ctx, manualServiceRunReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling manual service run on cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if serviceResp == nil || serviceResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "manual service run response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	response := RunAppJobResponse{
+		JobRunID:   serviceResp.Msg.JobRunId,
+		JobRunName: serviceResp.Msg.JobRunName,
+	}
+
+	c.WriteResult(w, r, response)
+}

+ 255 - 0
api/server/handlers/porter_app/run_app_job_status.go

@@ -0,0 +1,255 @@
+package porter_app
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// AppJobRunStatusHandler handles requests to the /apps/{porter_app_name}/run-status endpoint
+type AppJobRunStatusHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAppJobRunStatusHandler returns a new AppRunJobStatusHandler
+func NewAppJobRunStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppJobRunStatusHandler {
+	return &AppJobRunStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
// AppJobRunStatusRequest is the request object for the /apps/{porter_app_name}/run-status endpoint.
// Either DeploymentTargetID or DeploymentTargetName may be provided; when both are empty the
// handler falls back to the cluster's default deployment target.
type AppJobRunStatusRequest struct {
	// DeploymentTargetID is the id of the deployment target the job was run against
	DeploymentTargetID string `json:"deployment_target_id"`

	// DeploymentTargetName is the name of the deployment target the job was run against
	DeploymentTargetName string `json:"deployment_target_name"`

	// JobRunID is the UID returned from the /apps/{porter_app_name}/run endpoint.
	// NOTE: this is serialized as "job_id" in the request body, not "job_run_id".
	JobRunID string `json:"job_id"`

	// ServiceName is the name of the app service that was triggered
	ServiceName string `json:"service_name"`
}
+
// AppJobRunStatusResponse is the response object for the /apps/{porter_app_name}/run-status endpoint
type AppJobRunStatusResponse struct {
	// Status is the instance status descriptor derived from the job run's pod
	Status porter_app.InstanceStatusDescriptor `json:"status"`
}
+
+// ServeHTTP gets the status of a one-off command in the same environment as the provided service, app and deployment target
+func (c *AppJobRunStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-job-run-status")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing app name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &AppJobRunStatusRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.JobRunID == "" {
+		err := telemetry.Error(ctx, span, nil, "job id is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "job-run-id", Value: request.JobRunID})
+
+	if request.ServiceName == "" {
+		err := telemetry.Error(ctx, span, nil, "service name is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
+
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	details, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId: int64(project.ID),
+		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
+		},
+	}))
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if details == nil || details.Msg == nil || details.Msg.DeploymentTarget == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details are nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := details.Msg.DeploymentTarget.Namespace
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to get kubernetes agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if agent == nil {
+		err := telemetry.Error(ctx, span, nil, "no kubernetes agent returned")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	status, err := c.getJobStatus(ctx, getJobStatusInput{
+		AppName:            appName,
+		DeploymentTargetID: details.Msg.DeploymentTarget.Id,
+		ClusterK8sAgent:    *agent,
+		JobRunID:           request.JobRunID,
+		Namespace:          namespace,
+		ServiceName:        request.ServiceName,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting job status")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	response := AppJobRunStatusResponse{
+		Status: status,
+	}
+
+	c.WriteResult(w, r, response)
+}
+
// getJobStatusInput bundles everything getJobStatus needs to locate and inspect a job run's pod.
type getJobStatusInput struct {
	// AppName is the name of the app associated with the job
	AppName string

	// DeploymentTargetID is the id of the deployment target the job was run against
	DeploymentTargetID string

	// ClusterK8sAgent is a kubernetes agent connected to the cluster the job ran in
	ClusterK8sAgent kubernetes.Agent

	// JobRunID is the UID returned from the /apps/{porter_app_name}/run endpoint
	JobRunID string

	// Namespace is the namespace in which the job was deployed
	Namespace string

	// ServiceName is the name of the app service that was triggered
	ServiceName string
}
+
+func (c *AppJobRunStatusHandler) getJobStatus(ctx context.Context, input getJobStatusInput) (porter_app.InstanceStatusDescriptor, error) {
+	ctx, span := telemetry.NewSpan(ctx, "get-job-status")
+	defer span.End()
+
+	if input.AppName == "" {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "missing app name in input")
+	}
+	if input.DeploymentTargetID == "" {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "missing deployment target id in input")
+	}
+	if input.JobRunID == "" {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "missing job run id in input")
+	}
+	if input.Namespace == "" {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "missing namespace in input")
+	}
+	if input.ServiceName == "" {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "missing service name in input")
+	}
+
+	selectors := []string{
+		fmt.Sprintf("batch.kubernetes.io/controller-uid=%s", input.JobRunID),
+		fmt.Sprintf("porter.run/app-name=%s", input.AppName),
+		fmt.Sprintf("porter.run/deployment-target-id=%s", input.DeploymentTargetID),
+		fmt.Sprintf("porter.run/service-name=%s", input.ServiceName),
+	}
+	labelSelector := strings.Join(selectors, ",")
+
+	podsList, err := input.ClusterK8sAgent.GetPodsByLabel(labelSelector, input.Namespace)
+	if err != nil {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, err, "error getting jobs from cluster")
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "pod-count", Value: len(podsList.Items)})
+
+	if len(podsList.Items) == 0 {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, err, "no matching jobs found for specified job id")
+	}
+
+	if len(podsList.Items) != 1 {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, err, "too many pods found for specified job id")
+	}
+
+	status, err := porter_app.InstanceStatusFromPod(ctx, porter_app.InstanceStatusFromPodInput{
+		Pod:         podsList.Items[0],
+		AppName:     input.AppName,
+		ServiceName: input.ServiceName,
+	})
+	if err != nil {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, err, "unable to fetch instance status from job pod")
+	}
+
+	if status.Status == porter_app.InstanceStatusDescriptor_Unknown {
+		return porter_app.InstanceStatusDescriptor_Unknown, telemetry.Error(ctx, span, nil, "unknown status for job")
+	}
+
+	return status.Status, nil
+}

+ 1 - 0
api/server/handlers/porter_app/service_status.go

@@ -122,6 +122,7 @@ func (c *ServiceStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		ProjectId:          int64(project.ID),
 		AppId:              int64(app.ID),
 		DeploymentTargetId: request.DeploymentTargetID,
+		AppName:            appName,
 	})
 
 	listAppRevisionsResp, err := c.Config().ClusterControlPlaneClient.ListAppRevisions(ctx, listAppRevisionsReq)

+ 27 - 11
api/server/handlers/porter_app/stream_logs.go

@@ -68,18 +68,32 @@ func (c *StreamLogsLokiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
 
-	if request.DeploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
 	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
 
 	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-		ProjectID:          int64(project.ID),
-		ClusterID:          int64(cluster.ID),
-		DeploymentTargetID: request.DeploymentTargetID,
-		CCPClient:          c.Config().ClusterControlPlaneClient,
+		ProjectID:            int64(project.ID),
+		ClusterID:            int64(cluster.ID),
+		DeploymentTargetID:   request.DeploymentTargetID,
+		DeploymentTargetName: deploymentTargetName,
+		CCPClient:            c.Config().ClusterControlPlaneClient,
 	})
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
@@ -122,14 +136,16 @@ func (c *StreamLogsLokiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	if request.ServiceName != "all" {
 		labels = append(labels, fmt.Sprintf("%s=%s", lokiLabel_PorterServiceName, request.ServiceName))
 	}
-
 	if request.AppRevisionID != "" {
 		labels = append(labels, fmt.Sprintf("%s=%s", lokiLabel_PorterAppRevisionID, request.AppRevisionID))
 	}
+	if request.JobRunName != "" {
+		labels = append(labels, fmt.Sprintf("%s=%s", lokiLabel_JobRunName, request.JobRunName))
+	}
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "labels", Value: strings.Join(labels, ",")})
 
-	err = agent.StreamPorterAgentLokiLog(labels, string(startTime), request.SearchParam, 0, safeRW)
+	err = agent.StreamPorterAgentLokiLog(ctx, labels, string(startTime), request.SearchParam, 0, safeRW)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error streaming logs")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))

+ 88 - 18
api/server/handlers/porter_app/update_app.go

@@ -16,6 +16,7 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
 	"github.com/porter-dev/porter/internal/telemetry"
 )
 
@@ -37,6 +38,29 @@ func NewUpdateAppHandler(
 	}
 }
 
// ServiceDeletions are deletions to apply to a specific service
type ServiceDeletions struct {
	// DomainNames is the set of custom domain names to remove from the service
	DomainNames []string `json:"domain_names"`
	// IngressAnnotationKeys is the set of ingress annotation keys to remove from the service
	IngressAnnotationKeys []string `json:"ingress_annotation_keys"`
}
+
// EnvVariableDeletions is the set of keys to delete from the environment group
type EnvVariableDeletions struct {
	// Variables is a set of variable keys to delete from the environment group
	Variables []string `json:"variables"`
	// Secrets is a set of secret variable keys to delete from the environment group
	Secrets []string `json:"secrets"`
}
+
// Deletions are the names of services and env variables to delete
type Deletions struct {
	// ServiceNames is the set of service names to delete from the app
	ServiceNames []string `json:"service_names"`
	// Predeploy is the set of predeploy job names to delete
	Predeploy []string `json:"predeploy"`
	// EnvGroupNames is the set of environment group names to detach from the app
	EnvGroupNames []string `json:"env_group_names"`
	// ServiceDeletions maps a service name to the deletions to apply within that service
	ServiceDeletions map[string]ServiceDeletions `json:"service_deletions"`
	// EnvVariableDeletions is the set of env variable and secret keys to delete
	EnvVariableDeletions EnvVariableDeletions `json:"env_variable_deletions"`
}
+
 // UpdateAppRequest is the request object for the POST /apps/update endpoint
 type UpdateAppRequest struct {
 	// Name is the name of the app to update. If not specified, the name will be inferred from the porter yaml
@@ -45,6 +69,8 @@ type UpdateAppRequest struct {
 	GitSource GitSource `json:"git_source,omitempty"`
 	// DeploymentTargetId is the ID of the deployment target to apply the update to
 	DeploymentTargetId string `json:"deployment_target_id"`
+	// DeploymentTargetName is the name of the deployment target to apply the update to
+	DeploymentTargetName string `json:"deployment_target_name"`
 	// Variables is a map of environment variable names to values
 	Variables map[string]string `json:"variables"`
 	// Secrets is a map of secret names to values
@@ -53,6 +79,8 @@ type UpdateAppRequest struct {
 	Deletions Deletions `json:"deletions"`
 	// CommitSHA is the commit sha of the git commit that triggered this update, indicating a source change and triggering a build
 	CommitSHA string `json:"commit_sha"`
+	// ImageTagOverride is the image tag to override the image tag in the porter.yaml (it will override the image tag in the porter.yaml if specified)
+	ImageTagOverride string `json:"image_tag_override"`
 	// PorterYAMLPath is the path to the porter yaml file in the git repo
 	PorterYAMLPath string `json:"porter_yaml_path"`
 	// AppRevisionID is the ID of the revision to perform follow up actions on after the initial apply
@@ -64,9 +92,15 @@ type UpdateAppRequest struct {
 	Base64AddonProtos []string `json:"b64_addon_protos"`
 	// Base64PorterYAML is a base64 encoded porter yaml to apply representing a potentially partial porter app contract
 	Base64PorterYAML string `json:"b64_porter_yaml"`
+	// PatchOperations is a set of patch operations to apply to the porter.yaml if specified
+	PatchOperations []v2.PatchOperation `json:"patch_operations"`
 	// IsEnvOverride is used to remove any variables that are not specified in the request.  If false, the request will only update the variables specified in the request,
 	// and leave all other variables untouched.
 	IsEnvOverride bool `json:"is_env_override"`
+	// WithPredeploy is a flag to indicate whether to run the predeploy job
+	WithPredeploy bool `json:"with_predeploy"`
+	// Exact is a flag to indicate whether to apply the update exactly as specified in the request (default is to merge with existing app)
+	Exact bool `json:"exact"`
 }
 
 // UpdateAppResponse is the response object for the POST /apps/update endpoint
@@ -95,12 +129,21 @@ func (c *UpdateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
-	if request.DeploymentTargetId == "" {
-		err := telemetry.Error(ctx, span, nil, "deployment target id is empty")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
+
 	deploymentTargetID := request.DeploymentTargetId
+	deploymentTargetName := request.DeploymentTargetName
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: deploymentTargetName},
+	)
+
+	var deploymentTargetIdentifer *porterv1.DeploymentTargetIdentifier
+	if deploymentTargetID != "" || deploymentTargetName != "" {
+		deploymentTargetIdentifer = &porterv1.DeploymentTargetIdentifier{
+			Id:   deploymentTargetID,
+			Name: deploymentTargetName,
+		}
+	}
 
 	telemetry.WithAttributes(span,
 		telemetry.AttributeKV{Key: "name", Value: request.Name},
@@ -109,12 +152,14 @@ func (c *UpdateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		telemetry.AttributeKV{Key: "commit-sha", Value: request.CommitSHA},
 		telemetry.AttributeKV{Key: "porter-yaml-path", Value: request.PorterYAMLPath},
 		telemetry.AttributeKV{Key: "is-env-override", Value: request.IsEnvOverride},
+		telemetry.AttributeKV{Key: "with-predeploy", Value: request.WithPredeploy},
 	)
 
 	var addons, addonOverrides []*porterv1.Addon
 	var overrides *porterv1.PorterApp
 	appProto := &porterv1.PorterApp{}
 
+	var previewEnvVariables map[string]string
 	envVariables := request.Variables
 
 	// get app definition from either base64 yaml or base64 porter app proto
@@ -167,6 +212,7 @@ func (c *UpdateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 			return
 		}
+
 		appProto = appFromYaml.AppProto
 
 		// only public variables can be defined in porter.yaml
@@ -175,12 +221,22 @@ func (c *UpdateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		if appFromYaml.PreviewApp != nil {
 			overrides = appFromYaml.PreviewApp.AppProto
 			addonOverrides = appFromYaml.PreviewApp.Addons
-			envVariables = mergeEnvVariables(envVariables, appFromYaml.PreviewApp.EnvVariables)
+			previewEnvVariables = appFromYaml.PreviewApp.EnvVariables
 		}
 
 		addons = appFromYaml.Addons
 	}
 
+	if appProto != nil {
+		patchedProto, err := v2.PatchApp(ctx, appProto, request.PatchOperations)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error patching app proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		appProto = patchedProto
+	}
+
 	if appProto.Name == "" {
 		if request.Name == "" {
 			err := telemetry.Error(ctx, span, nil, "app name is empty")
@@ -227,29 +283,43 @@ func (c *UpdateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
+	if request.ImageTagOverride != "" {
+		if appProto.Image == nil {
+			appProto.Image = &porterv1.AppImage{}
+		}
+		appProto.Image.Tag = request.ImageTagOverride
+	}
+
 	updateReq := connect.NewRequest(&porterv1.UpdateAppRequest{
-		ProjectId: int64(project.ID),
-		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
-			Id: deploymentTargetID,
-		},
-		App:           appProto,
-		AppRevisionId: request.AppRevisionID,
+		ProjectId:                  int64(project.ID),
+		ClusterId:                  int64(cluster.ID),
+		DeploymentTargetIdentifier: deploymentTargetIdentifer,
+		App:                        appProto,
+		AppRevisionId:              request.AppRevisionID,
 		AppEnv: &porterv1.EnvGroupVariables{
 			Normal: envVariables,
 			Secret: request.Secrets,
 		},
+		AppEnvOverrides: &porterv1.EnvGroupVariables{
+			Normal: previewEnvVariables,
+		},
 		Deletions: &porterv1.Deletions{
 			ServiceNames:     request.Deletions.ServiceNames,
 			PredeployNames:   request.Deletions.Predeploy,
-			EnvVariableNames: request.Deletions.EnvVariableNames,
 			EnvGroupNames:    request.Deletions.EnvGroupNames,
 			ServiceDeletions: serviceDeletions,
+			EnvVariableDeletions: &porterv1.EnvVariableDeletions{
+				Variables: request.Deletions.EnvVariableDeletions.Variables,
+				Secrets:   request.Deletions.EnvVariableDeletions.Secrets,
+			},
 		},
-		AppOverrides:  overrides,
-		CommitSha:     request.CommitSHA,
-		IsEnvOverride: request.IsEnvOverride,
-		Addons:        addons,
-		AddonOverrides: addonOverrides,
+		AppOverrides:        overrides,
+		CommitSha:           request.CommitSHA,
+		IsEnvOverride:       request.IsEnvOverride,
+		Addons:              addons,
+		AddonOverrides:      addonOverrides,
+		IsPredeployEligible: request.WithPredeploy,
+		Exact:               request.Exact,
 	})
 
 	ccpResp, err := c.Config().ClusterControlPlaneClient.UpdateApp(ctx, updateReq)

+ 0 - 476
api/server/handlers/porter_app/update_app_environment_group.go

@@ -1,476 +0,0 @@
-package porter_app
-
-import (
-	"context"
-	"encoding/base64"
-	"net/http"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/porter-dev/porter/internal/deployment_target"
-	"github.com/porter-dev/porter/internal/kubernetes"
-	"github.com/porter-dev/porter/internal/porter_app"
-
-	"github.com/porter-dev/porter/api/server/shared/requestutils"
-	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
-
-	"github.com/porter-dev/api-contracts/generated/go/helpers"
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-	"github.com/porter-dev/porter/internal/telemetry"
-)
-
// UpdateAppEnvironmentHandler handles the /apps/{porter_app_name}/update-environment endpoint
type UpdateAppEnvironmentHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}
-
// NewUpdateAppEnvironmentHandler returns a new UpdateAppEnvironmentHandler
func NewUpdateAppEnvironmentHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *UpdateAppEnvironmentHandler {
	return &UpdateAppEnvironmentHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}
-
// Label keys applied to porter-managed kubernetes resources.
const (
	// LabelKey_AppName is the label key for the app name
	LabelKey_AppName = "porter.run/app-name"
	// LabelKey_DeploymentTargetID is the label key for the deployment target id
	LabelKey_DeploymentTargetID = "porter.run/deployment-target-id"
	// LabelKey_PorterManaged is the label key signifying the resource is managed by porter
	LabelKey_PorterManaged = "porter.run/managed"
)
-
// UpdateAppEnvironmentRequest represents the accepted fields on a request to the /apps/{porter_app_name}/environment-group endpoint
type UpdateAppEnvironmentRequest struct {
	// Base64AppProto is a base64-encoded porter app contract proto
	Base64AppProto string `json:"b64_app_proto"`
	// DeploymentTargetID is the id of the deployment target whose env group is updated
	DeploymentTargetID string `json:"deployment_target_id"`
	// Variables is a map of plain-text environment variable names to values
	Variables map[string]string `json:"variables"`
	// Secrets is a map of secret environment variable names to values
	Secrets map[string]string `json:"secrets"`
	// HardUpdate is used to remove any variables that are not specified in the request.  If false, the request will only update the variables specified in the request,
	// and leave all other variables untouched.
	// NOTE: serialized as "remove_missing" in the request body.
	HardUpdate bool `json:"remove_missing"`
}
-
// UpdateAppEnvironmentResponse represents the fields on the response object from the /apps/{porter_app_name}/environment-group endpoint
type UpdateAppEnvironmentResponse struct {
	// Base64AppProto is the base64-encoded app proto after the env group update
	Base64AppProto string `json:"b64_app_proto"`
	// EnvGroups is the set of environment groups now linked to the app
	EnvGroups []environment_groups.EnvironmentGroup `json:"env_groups"`
}
-
-// ServeHTTP updates or creates the environment group for an app
-func (c *UpdateAppEnvironmentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-app-env-group")
-	defer span.End()
-	r = r.Clone(ctx)
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
-	if reqErr != nil {
-		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
-
-	request := &UpdateAppEnvironmentRequest{}
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		err := telemetry.Error(ctx, span, nil, "invalid request")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	porterApp, err := c.Config().Repo.PorterApp().ReadPorterAppByName(cluster.ID, appName)
-	if err != nil {
-		err := telemetry.Error(ctx, span, nil, "error getting porter app by name")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	if porterApp.ID == 0 {
-		err := telemetry.Error(ctx, span, nil, "porter app not found")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-id", Value: porterApp.ID})
-
-	if request.DeploymentTargetID == "" {
-		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
-
-	appProto := &porterv1.PorterApp{}
-
-	if request.Base64AppProto == "" {
-		if appName == "" {
-			err := telemetry.Error(ctx, span, nil, "app name is empty and no base64 proto provided")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		appProto.Name = appName
-	} else {
-		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error decoding base yaml")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		err = helpers.UnmarshalContractObject(decoded, appProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-	}
-
-	if appProto.Name == "" {
-		err := telemetry.Error(ctx, span, nil, "app proto name is empty")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
-		ProjectID:          int64(project.ID),
-		ClusterID:          int64(cluster.ID),
-		DeploymentTargetID: request.DeploymentTargetID,
-		CCPClient:          c.Config().ClusterControlPlaneClient,
-	})
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	namespace := deploymentTarget.Namespace
-	isPreview := deploymentTarget.IsPreview
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "is-preview", Value: isPreview})
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "hard-update", Value: request.HardUpdate})
-
-	appEnvGroupName, err := porter_app.AppEnvGroupName(ctx, appName, request.DeploymentTargetID, cluster.ID, c.Repo().PorterApp())
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting app env group name")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	agent, err := c.GetAgent(r, cluster, "")
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "unable to connect to kubernetes cluster")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	latestEnvironmentGroup, err := environment_groups.LatestBaseEnvironmentGroup(ctx, agent, appEnvGroupName)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "unable to get latest base environment group")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-exists", Value: latestEnvironmentGroup.Name != ""})
-
-	previewTemplateEnvName, err := porter_app.AppTemplateEnvGroupName(ctx, appName, cluster.ID, c.Repo().PorterApp())
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error getting preview template env name")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	// filter out preview template and app env groups
-	filteredEnvGroups := []*porterv1.EnvGroup{}
-	for _, envGroup := range appProto.EnvGroups {
-		if envGroup.GetName() != previewTemplateEnvName && envGroup.GetName() != appEnvGroupName {
-			filteredEnvGroups = append(filteredEnvGroups, envGroup)
-		}
-	}
-
-	if latestEnvironmentGroup.Name != "" {
-		sameEnvGroup := true
-		for key, newValue := range request.Variables {
-			if existingValue, ok := latestEnvironmentGroup.Variables[key]; !ok || existingValue != newValue {
-				sameEnvGroup = false
-			}
-		}
-		for key, newValue := range request.Secrets {
-			// We cannot check if the values are the same because the existing secrets are substituted with dummy values. However, if the new value is a dummy value, then it is unchanged.
-			if _, ok := latestEnvironmentGroup.SecretVariables[key]; !ok || newValue != environment_groups.EnvGroupSecretDummyValue {
-				sameEnvGroup = false
-			}
-		}
-		if request.HardUpdate {
-			for key := range latestEnvironmentGroup.Variables {
-				if _, ok := request.Variables[key]; !ok {
-					sameEnvGroup = false
-				}
-			}
-			for key := range latestEnvironmentGroup.SecretVariables {
-				if _, ok := request.Secrets[key]; !ok {
-					sameEnvGroup = false
-				}
-			}
-		}
-		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "same-env-group", Value: sameEnvGroup})
-
-		if sameEnvGroup {
-			// even if the env group is the same, we still need to sync the latest versions of the other env groups
-			syncInp := syncLatestEnvGroupVersionsInput{
-				envGroups:          filteredEnvGroups,
-				appName:            appName,
-				namespace:          namespace,
-				deploymentTargetID: request.DeploymentTargetID,
-				k8sAgent:           agent,
-			}
-			latestEnvGroups, err := syncLatestEnvGroupVersions(ctx, syncInp)
-			if err != nil {
-				err := telemetry.Error(ctx, span, err, "error syncing latest env group versions")
-				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-				return
-			}
-
-			latestEnvGroups = append(latestEnvGroups, environment_groups.EnvironmentGroup{
-				Name:    latestEnvironmentGroup.Name,
-				Version: latestEnvironmentGroup.Version,
-			})
-
-			var protoEnvGroups []*porterv1.EnvGroup
-			for _, envGroup := range latestEnvGroups {
-				protoEnvGroups = append(protoEnvGroups, &porterv1.EnvGroup{
-					Name:    envGroup.Name,
-					Version: int64(envGroup.Version),
-				})
-			}
-			appProto.EnvGroups = protoEnvGroups
-
-			encodedApp, err := encodeAppProto(ctx, appProto)
-			if err != nil {
-				err := telemetry.Error(ctx, span, err, "error encoding app proto")
-				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-				return
-			}
-
-			res := &UpdateAppEnvironmentResponse{
-				EnvGroups:      latestEnvGroups,
-				Base64AppProto: encodedApp,
-			}
-
-			c.WriteResult(w, r, res)
-			return
-		}
-	}
-
-	// if this app does not have a default env group for this deployment target and is a preview
-	// then use the preview template env group as the default
-	// this should only run when the app is first deployed to a given deployment target
-	if latestEnvironmentGroup.Name == "" && isPreview {
-		latestEnvironmentGroup, err = environment_groups.LatestBaseEnvironmentGroup(ctx, agent, previewTemplateEnvName)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "unable to get latest base environment group")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-			return
-		}
-	}
-
-	variables := make(map[string]string)
-	secrets := make(map[string]string)
-
-	if !request.HardUpdate {
-		for key, value := range latestEnvironmentGroup.Variables {
-			variables[key] = value
-		}
-		for key, value := range latestEnvironmentGroup.SecretVariables {
-			secrets[key] = value
-		}
-	}
-
-	for key, value := range request.Variables {
-		if len(key) > 0 && len(value) > 0 {
-			variables[key] = value
-		}
-	}
-	for key, value := range request.Secrets {
-		if len(key) > 0 && len(value) > 0 {
-			secrets[key] = value
-		}
-	}
-
-	envGroup := environment_groups.EnvironmentGroup{
-		Name:            appEnvGroupName,
-		Variables:       variables,
-		SecretVariables: secrets,
-		CreatedAtUTC:    time.Now().UTC(),
-	}
-
-	additionalEnvGroupLabels := map[string]string{
-		LabelKey_AppName:                                  appName,
-		LabelKey_DeploymentTargetID:                       request.DeploymentTargetID,
-		environment_groups.LabelKey_DefaultAppEnvironment: "true",
-		LabelKey_PorterManaged:                            "true",
-	}
-
-	err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup, additionalEnvGroupLabels)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "unable to create or update base environment group")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	inp := environment_groups.SyncLatestVersionToNamespaceInput{
-		BaseEnvironmentGroupName: appEnvGroupName,
-		TargetNamespace:          namespace,
-	}
-
-	syncedAppEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, agent, inp, additionalEnvGroupLabels)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "unable to create or update synced environment group")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-versioned-name", Value: syncedAppEnvironment.EnvironmentGroupVersionedName})
-
-	syncInp := syncLatestEnvGroupVersionsInput{
-		envGroups:          filteredEnvGroups,
-		appName:            appName,
-		namespace:          namespace,
-		deploymentTargetID: request.DeploymentTargetID,
-		k8sAgent:           agent,
-	}
-	latestEnvGroups, err := syncLatestEnvGroupVersions(ctx, syncInp)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error syncing latest env group versions")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	split := strings.Split(syncedAppEnvironment.EnvironmentGroupVersionedName, ".")
-	if len(split) != 2 {
-		err := telemetry.Error(ctx, span, err, "unexpected environment group versioned name")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	version, err := strconv.Atoi(split[1])
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error converting environment group version to int")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	latestEnvGroups = append(latestEnvGroups, environment_groups.EnvironmentGroup{
-		Name:    split[0],
-		Version: version,
-	})
-
-	var protoEnvGroups []*porterv1.EnvGroup
-	for _, envGroup := range latestEnvGroups {
-		protoEnvGroups = append(protoEnvGroups, &porterv1.EnvGroup{
-			Name:    envGroup.Name,
-			Version: int64(envGroup.Version),
-		})
-	}
-	appProto.EnvGroups = protoEnvGroups
-
-	encodedApp, err := encodeAppProto(ctx, appProto)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error encoding app proto")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	res := &UpdateAppEnvironmentResponse{
-		EnvGroups:      latestEnvGroups,
-		Base64AppProto: encodedApp,
-	}
-
-	c.WriteResult(w, r, res)
-}
-
-type syncLatestEnvGroupVersionsInput struct {
-	// envGroups is the list of env groups to sync. We only need the names and will get the latest version of each from the porter-env-group ns
-	envGroups []*porterv1.EnvGroup
-	// appName is the name of the app
-	appName string
-	// namespace is the namespace to sync the latest versions to
-	namespace string
-	// deploymentTargetID is the id of the deployment target
-	deploymentTargetID string
-	// k8sAgent is the kubernetes agent
-	k8sAgent *kubernetes.Agent
-}
-
-// syncLatestEnvGroupVersions syncs the latest versions of the env groups to the namespace where an app is deployed
-func syncLatestEnvGroupVersions(ctx context.Context, inp syncLatestEnvGroupVersionsInput) ([]environment_groups.EnvironmentGroup, error) {
-	ctx, span := telemetry.NewSpan(ctx, "sync-latest-env-group-versions")
-	defer span.End()
-
-	var envGroups []environment_groups.EnvironmentGroup
-
-	if inp.deploymentTargetID == "" {
-		return envGroups, telemetry.Error(ctx, span, nil, "deployment target id is empty")
-	}
-	if inp.appName == "" {
-		return envGroups, telemetry.Error(ctx, span, nil, "app name is empty")
-	}
-	if inp.namespace == "" {
-		return envGroups, telemetry.Error(ctx, span, nil, "namespace is empty")
-	}
-	if inp.k8sAgent == nil {
-		return envGroups, telemetry.Error(ctx, span, nil, "k8s agent is nil")
-	}
-
-	for _, envGroup := range inp.envGroups {
-		if envGroup == nil {
-			continue
-		}
-
-		additionalEnvGroupLabels := map[string]string{
-			LabelKey_AppName:            inp.appName,
-			LabelKey_DeploymentTargetID: inp.deploymentTargetID,
-			LabelKey_PorterManaged:      "true",
-		}
-
-		syncedEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, inp.k8sAgent, environment_groups.SyncLatestVersionToNamespaceInput{
-			TargetNamespace:          inp.namespace,
-			BaseEnvironmentGroupName: envGroup.GetName(),
-		}, additionalEnvGroupLabels)
-		if err != nil {
-			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-name", Value: envGroup.GetName()})
-			return envGroups, telemetry.Error(ctx, span, err, "error syncing latest version to namespace")
-		}
-
-		split := strings.Split(syncedEnvironment.EnvironmentGroupVersionedName, ".")
-		if len(split) != 2 {
-			return envGroups, telemetry.Error(ctx, span, err, "unexpected environment group versioned name")
-		}
-
-		version, err := strconv.Atoi(split[1])
-		if err != nil {
-			return envGroups, telemetry.Error(ctx, span, err, "error converting environment group version to int")
-		}
-
-		envGroups = append(envGroups, environment_groups.EnvironmentGroup{
-			Name:    split[0],
-			Version: version,
-		})
-	}
-
-	return envGroups, nil
-}

+ 26 - 5
api/server/handlers/porter_app/update_image.go

@@ -36,7 +36,7 @@ func NewUpdateImageHandler(
 
 // UpdateImageRequest is the request object for the /apps/{porter_app_name}/update-image endpoint
 type UpdateImageRequest struct {
-	DeploymentTargetId   string `json:"deployment_target_id"`
+	DeploymentTargetID   string `json:"deployment_target_id"`
 	DeploymentTargetName string `json:"deployment_target_name"`
 	Repository           string `json:"repository"`
 	Tag                  string `json:"tag"`
@@ -46,6 +46,7 @@ type UpdateImageRequest struct {
 type UpdateImageResponse struct {
 	Repository string `json:"repository"`
 	Tag        string `json:"tag"`
+	RevisionID string `json:"revision_id"`
 }
 
 func (c *UpdateImageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -53,6 +54,7 @@ func (c *UpdateImageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	defer span.End()
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
@@ -76,20 +78,38 @@ func (c *UpdateImageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetId},
-		telemetry.AttributeKV{Key: "deployment-target-name", Value: request.DeploymentTargetName},
 		telemetry.AttributeKV{Key: "repository", Value: request.Repository},
 		telemetry.AttributeKV{Key: "tag", Value: request.Tag},
 	)
 
+	deploymentTargetName := request.DeploymentTargetName
+	if request.DeploymentTargetName == "" && request.DeploymentTargetID == "" {
+		defaultDeploymentTarget, err := defaultDeploymentTarget(ctx, defaultDeploymentTargetInput{
+			ProjectID:                 project.ID,
+			ClusterID:                 cluster.ID,
+			ClusterControlPlaneClient: c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting default deployment target")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		deploymentTargetName = defaultDeploymentTarget.Name
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: request.DeploymentTargetName},
+	)
+
 	updateImageReq := connect.NewRequest(&porterv1.UpdateAppImageRequest{
 		ProjectId:     int64(project.ID),
 		RepositoryUrl: request.Repository,
 		Tag:           request.Tag,
 		AppName:       appName,
 		DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
-			Id:   request.DeploymentTargetId,
-			Name: request.DeploymentTargetName,
+			Id:   request.DeploymentTargetID,
+			Name: deploymentTargetName,
 		},
 	})
 	ccpResp, err := c.Config().ClusterControlPlaneClient.UpdateAppImage(ctx, updateImageReq)
@@ -102,6 +122,7 @@ func (c *UpdateImageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	res := &UpdateImageResponse{
 		Repository: ccpResp.Msg.RepositoryUrl,
 		Tag:        ccpResp.Msg.Tag,
+		RevisionID: ccpResp.Msg.RevisionId,
 	}
 
 	c.WriteResult(w, r, res)

+ 0 - 63
api/server/handlers/porter_app/use_new_apply_logic.go

@@ -1,63 +0,0 @@
-package porter_app
-
-import (
-	"net/http"
-
-	"github.com/porter-dev/porter/api/server/authz"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-	"github.com/porter-dev/porter/internal/telemetry"
-)
-
-// UseNewApplyLogicHandler returns whether the CLI should use the new apply logic or not
-type UseNewApplyLogicHandler struct {
-	handlers.PorterHandlerReadWriter
-	authz.KubernetesAgentGetter
-}
-
-// NewUseNewApplyLogicHandler returns a new UseNewApplyLogicHandler
-func NewUseNewApplyLogicHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *UseNewApplyLogicHandler {
-	return &UseNewApplyLogicHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
-	}
-}
-
-// UseNewApplyLogicRequest is the request body for the /apps/use-new-apply-logic endpoint
-type UseNewApplyLogicRequest struct{}
-
-// UseNewApplyLogicResponse is the response body for the /apps/use-new-apply-logic endpoint
-type UseNewApplyLogicResponse struct {
-	UseNewApplyLogic bool `json:"use_new_apply_logic"`
-}
-
-// ServeHTTP handles the request on the /apps/use-new-apply-logic endpoint, allowing the server to tell the CLI whether to use the new apply logic or not
-func (c *UseNewApplyLogicHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-use-new-apply-logic")
-	defer span.End()
-
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "project_id", Value: project.ID},
-		telemetry.AttributeKV{Key: "cluster_id", Value: cluster.ID},
-	)
-
-	betaFeaturesEnabled := project.GetFeatureFlag(models.BetaFeaturesEnabled, c.Config().LaunchDarklyClient)
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "beta_features_enabled", Value: betaFeaturesEnabled},
-	)
-
-	c.WriteResult(w, r, &UseNewApplyLogicResponse{
-		UseNewApplyLogic: betaFeaturesEnabled,
-	})
-}

+ 0 - 216
api/server/handlers/porter_app/validate.go

@@ -1,216 +0,0 @@
-package porter_app
-
-import (
-	"encoding/base64"
-	"net/http"
-
-	"connectrpc.com/connect"
-
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-
-	"github.com/porter-dev/api-contracts/generated/go/helpers"
-
-	"github.com/porter-dev/porter/internal/telemetry"
-
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-// ValidatePorterAppHandler is handles requests to the /apps/validate endpoint
-type ValidatePorterAppHandler struct {
-	handlers.PorterHandlerReadWriter
-}
-
-// NewValidatePorterAppHandler returns a new ValidatePorterAppHandler
-func NewValidatePorterAppHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *ValidatePorterAppHandler {
-	return &ValidatePorterAppHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-	}
-}
-
-// ServiceDeletions are deletions to apply to a specific service
-type ServiceDeletions struct {
-	DomainNames           []string `json:"domain_names"`
-	IngressAnnotationKeys []string `json:"ingress_annotation_keys"`
-}
-
-// Deletions are the names of services and env variables to delete
-type Deletions struct {
-	ServiceNames     []string                    `json:"service_names"`
-	Predeploy        []string                    `json:"predeploy"`
-	EnvVariableNames []string                    `json:"env_variable_names"`
-	EnvGroupNames    []string                    `json:"env_group_names"`
-	ServiceDeletions map[string]ServiceDeletions `json:"service_deletions"`
-}
-
-// ValidatePorterAppRequest is the request object for the /apps/validate endpoint
-type ValidatePorterAppRequest struct {
-	AppName            string    `json:"app_name"`
-	Base64AppProto     string    `json:"b64_app_proto"`
-	Base64AppOverrides string    `json:"b64_app_overrides"`
-	DeploymentTargetId string    `json:"deployment_target_id"`
-	CommitSHA          string    `json:"commit_sha"`
-	Deletions          Deletions `json:"deletions"`
-}
-
-// ValidatePorterAppResponse is the response object for the /apps/validate endpoint
-type ValidatePorterAppResponse struct {
-	ValidatedBase64AppProto string `json:"validate_b64_app_proto"`
-}
-
-// ServeHTTP translates requests into protobuf objects and forwards them to the cluster control plane, returning the result
-func (c *ValidatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx, span := telemetry.NewSpan(r.Context(), "serve-validate-porter-app")
-	defer span.End()
-
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
-		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
-	)
-
-	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
-		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
-		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
-		return
-	}
-
-	request := &ValidatePorterAppRequest{}
-	if ok := c.DecodeAndValidate(w, r, request); !ok {
-		err := telemetry.Error(ctx, span, nil, "error decoding request")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	appProto := &porterv1.PorterApp{}
-
-	if request.Base64AppProto == "" {
-		if request.AppName == "" {
-			err := telemetry.Error(ctx, span, nil, "app name is empty and no base64 proto provided")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		appProto.Name = request.AppName
-	} else {
-		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error decoding base yaml")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		err = helpers.UnmarshalContractObject(decoded, appProto)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-	}
-
-	if appProto.Name == "" {
-		err := telemetry.Error(ctx, span, nil, "app proto name is empty")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	telemetry.WithAttributes(span,
-		telemetry.AttributeKV{Key: "app-name", Value: appProto.Name},
-		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetId},
-		telemetry.AttributeKV{Key: "commit-sha", Value: request.CommitSHA},
-	)
-
-	var overrides *porterv1.PorterApp
-
-	if request.Base64AppOverrides != "" {
-		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppOverrides)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error decoding base  yaml")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		overrides = &porterv1.PorterApp{}
-		err = helpers.UnmarshalContractObject(decoded, overrides)
-		if err != nil {
-			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "validated-with-overrides", Value: true})
-	}
-
-	var serviceDeletions map[string]*porterv1.ServiceDeletions
-	if request.Deletions.ServiceDeletions != nil {
-		serviceDeletions = make(map[string]*porterv1.ServiceDeletions)
-		for k, v := range request.Deletions.ServiceDeletions {
-			serviceDeletions[k] = &porterv1.ServiceDeletions{
-				DomainNames:        v.DomainNames,
-				IngressAnnotations: v.IngressAnnotationKeys,
-			}
-		}
-	}
-
-	validateReq := connect.NewRequest(&porterv1.ValidatePorterAppRequest{
-		ProjectId:          int64(project.ID),
-		DeploymentTargetId: request.DeploymentTargetId,
-		CommitSha:          request.CommitSHA,
-		App:                appProto,
-		AppOverrides:       overrides,
-		Deletions: &porterv1.Deletions{
-			ServiceNames:     request.Deletions.ServiceNames,
-			PredeployNames:   request.Deletions.Predeploy,
-			EnvVariableNames: request.Deletions.EnvVariableNames,
-			EnvGroupNames:    request.Deletions.EnvGroupNames,
-			ServiceDeletions: serviceDeletions,
-		},
-	})
-	ccpResp, err := c.Config().ClusterControlPlaneClient.ValidatePorterApp(ctx, validateReq)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error calling ccp validate porter app")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if ccpResp == nil {
-		err := telemetry.Error(ctx, span, err, "ccp resp is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-	if ccpResp.Msg == nil {
-		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	if ccpResp.Msg.App == nil {
-		err := telemetry.Error(ctx, span, err, "ccp resp app is nil")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	encoded, err := helpers.MarshalContractObject(ctx, ccpResp.Msg.App)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error marshalling app proto back to json")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-		return
-	}
-
-	b64 := base64.StdEncoding.EncodeToString(encoded)
-	response := &ValidatePorterAppResponse{
-		ValidatedBase64AppProto: b64,
-	}
-
-	c.WriteResult(w, r, response)
-}

+ 112 - 7
api/server/handlers/porter_app/yaml_from_revision.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/base64"
 	"net/http"
+	"sort"
 	"strings"
 
 	"github.com/porter-dev/porter/internal/kubernetes"
@@ -167,6 +168,41 @@ func (c *PorterYAMLFromRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http
 		app = formatForExport(app, c.Config().ServerConf.AppRootDomain)
 	}
 
+	// sort services by name
+	sortedServices := app.Services
+	sort.Slice(sortedServices, func(i, j int) bool {
+		serviceTypeSortPriorityA, ok := serviceTypeSortPriority[sortedServices[i].Type]
+		if !ok {
+			return false
+		}
+		serviceTypeSortPriorityB, ok := serviceTypeSortPriority[sortedServices[j].Type]
+		if !ok {
+			return false
+		}
+		if serviceTypeSortPriorityA != serviceTypeSortPriorityB {
+			return serviceTypeSortPriorityA < serviceTypeSortPriorityB
+		}
+		return sortedServices[i].Name < sortedServices[j].Name
+	})
+	app.Services = sortedServices
+
+	servicesWithDomainsSorted := app.Services
+	for i := range servicesWithDomainsSorted {
+		sortedDomains := servicesWithDomainsSorted[i].Domains
+		sort.Slice(sortedDomains, func(i, j int) bool {
+			return sortedDomains[i].Name < sortedDomains[j].Name
+		})
+		servicesWithDomainsSorted[i].Domains = sortedDomains
+	}
+	app.Services = servicesWithDomainsSorted
+
+	// sort env variables by key
+	sortedEnv := app.Env
+	sort.Slice(sortedEnv, func(i, j int) bool {
+		return sortedEnv[i].Key < sortedEnv[j].Key
+	})
+	app.Env = sortedEnv
+
 	porterYAMLString, err := yaml.Marshal(app)
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "error marshaling porter yaml")
@@ -194,11 +230,11 @@ type formatDefaultEnvGroupInput struct {
 	PorterAppRepository       repository.PorterAppRepository
 }
 
-func defaultEnvGroup(ctx context.Context, input formatDefaultEnvGroupInput) (map[string]string, string, error) {
+func defaultEnvGroup(ctx context.Context, input formatDefaultEnvGroupInput) ([]v2.EnvVariableDefinition, string, error) {
 	ctx, span := telemetry.NewSpan(ctx, "format-default-env-group")
 	defer span.End()
 
-	env := map[string]string{}
+	var env []v2.EnvVariableDefinition
 
 	revision, err := porter_app.GetAppRevision(ctx, porter_app.GetAppRevisionInput{
 		AppRevisionID: input.AppRevisionID,
@@ -243,10 +279,46 @@ func defaultEnvGroup(ctx context.Context, input formatDefaultEnvGroupInput) (map
 	}
 
 	for key, val := range revisionWithEnv.Env.Variables {
-		env[key] = val
+		env = append(env, v2.EnvVariableDefinition{
+			Key:    key,
+			Source: v2.EnvVariableSource_Value,
+			Value: v2.EnvValueOptional{
+				Value: val,
+				IsSet: true,
+			},
+		})
 	}
 	for key, val := range revisionWithEnv.Env.SecretVariables {
-		env[key] = val
+		env = append(env, v2.EnvVariableDefinition{
+			Key:    key,
+			Source: v2.EnvVariableSource_Value,
+			Value: v2.EnvValueOptional{
+				Value: val,
+				IsSet: true,
+			},
+		})
+	}
+
+	for _, ev := range appProto.Env {
+		if ev.Source == porterv1.EnvVariableSource_ENV_VARIABLE_SOURCE_FROM_APP {
+			fromAppProto := ev.GetFromApp()
+			if fromAppProto == nil {
+				continue
+			}
+
+			fromApp, err := v2.EnvVarFromAppFromProto(fromAppProto)
+			if err != nil {
+				return env, "", telemetry.Error(ctx, span, err, "error converting env var from app to proto")
+			}
+
+			envVar := v2.EnvVariableDefinition{
+				Key:     ev.Key,
+				Source:  v2.EnvVariableSource_FromApp,
+				FromApp: fromApp,
+			}
+
+			env = append(env, envVar)
+		}
 	}
 
 	return env, revisionWithEnv.Env.Name, nil
@@ -280,11 +352,13 @@ func formatForExport(app v2.PorterApp, appRootDomain string) v2.PorterApp {
 	}
 
 	// remove env secrets from env
-	for key, val := range app.Env {
-		if val == "********" {
-			delete(app.Env, key)
+	var filtered []v2.EnvVariableDefinition
+	for _, ev := range app.Env {
+		if ev.Value.Value != "********" {
+			filtered = append(filtered, ev)
 		}
 	}
+	app.Env = filtered
 
 	// don't show env group versions
 	for i := range app.EnvGroups {
@@ -403,5 +477,36 @@ func zeroOutValues(app v2.PorterApp) v2.PorterApp {
 		app.Predeploy.TimeoutSeconds = 0
 	}
 
+	if app.InitialDeploy != nil {
+		// remove name
+		app.InitialDeploy.Name = ""
+		// remove type
+		app.InitialDeploy.Type = ""
+		// remove smart optimization
+		app.InitialDeploy.SmartOptimization = nil
+		// remove launcher
+		if app.InitialDeploy.Run != nil {
+			launcherLess := strings.TrimPrefix(*app.InitialDeploy.Run, "launcher ")
+			launcherLess = strings.TrimPrefix(launcherLess, "/cnb/lifecycle/launcher ")
+			app.InitialDeploy.Run = &launcherLess
+		}
+		// remove port
+		app.InitialDeploy.Port = 0
+		// remove instances
+		app.InitialDeploy.Instances = nil
+		// remove suspendCron
+		app.InitialDeploy.SuspendCron = nil
+		// remove allowConcurrency
+		app.InitialDeploy.AllowConcurrent = nil
+		// remove timeout
+		app.InitialDeploy.TimeoutSeconds = 0
+	}
+
 	return app
 }
+
+var serviceTypeSortPriority = map[v2.ServiceType]int{
+	v2.ServiceType_Web:    0,
+	v2.ServiceType_Worker: 1,
+	v2.ServiceType_Job:    2,
+}

+ 63 - 0
api/server/handlers/project/connect.go

@@ -0,0 +1,63 @@
+package project
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/analytics"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ConnectHandler is the handler for the POST /projects/{project_id}/connect endpoint
+type ConnectHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewConnectHandler returns a new ConnectHandler
+func NewConnectHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *ConnectHandler {
+	return &ConnectHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// ServeHTTP connects a project to the hosted cluster
+func (p *ConnectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "connect-project-to-hosted")
+	defer span.End()
+
+	user, _ := r.Context().Value(types.UserScope).(*models.User)
+	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+
+	var err error
+	resp, err := p.Config().ClusterControlPlaneClient.ConnectHostedProject(ctx, connect.NewRequest(&porterv1.ConnectHostedProjectRequest{
+		ProjectId: int64(proj.ID),
+	}))
+	if err != nil {
+		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	if resp == nil || resp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "connect to hosted response is nil")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	p.WriteResult(w, r, resp.Msg.ClusterId)
+
+	_ = p.Config().AnalyticsClient.Track(analytics.ProjectConnectTrack(&analytics.ProjectCreateDeleteTrackOpts{
+		ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, proj.ID),
+	}))
+}

+ 31 - 18
api/server/handlers/project/create.go

@@ -11,6 +11,7 @@ import (
 	"github.com/porter-dev/porter/internal/analytics"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type ProjectCreateHandler struct {
@@ -28,11 +29,14 @@ func NewProjectCreateHandler(
 }
 
 func (p *ProjectCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	request := &types.CreateProjectRequest{}
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-project")
+	defer span.End()
 
-	ok := p.DecodeAndValidate(w, r, request)
+	request := &types.CreateProjectRequest{}
 
-	if !ok {
+	if ok := p.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding create project request")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -46,23 +50,42 @@ func (p *ProjectCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		HelmValuesEnabled:      false,
 		MultiCluster:           false,
 		EnableReprovision:      false,
+		EnableSandbox:          p.Config().ServerConf.EnableSandbox,
 	}
 
 	var err error
-	proj, _, err = CreateProjectWithUser(p.Repo().Project(), proj, user)
 
+	if p.Config().ServerConf.StripeSecretKey != "" && p.Config().ServerConf.StripePublishableKey != "" {
+		// Create billing customer for project and set the billing ID
+		billingID, err := p.Config().BillingManager.CreateCustomer(ctx, user.Email, proj)
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error creating billing customer")
+			p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+			return
+		}
+		proj.BillingID = billingID
+	}
+
+	proj, _, err = CreateProjectWithUser(p.Repo().Project(), proj, user)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating project with user")
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
-	// create onboarding flow set to the first step
+	step := types.StepConnectSource
+
+	if p.Config().ServerConf.EnableSandbox {
+		step = types.StepCleanUp
+	}
+
+	// create onboarding flow set to the first step. Read in env var
 	_, err = p.Repo().Onboarding().CreateProjectOnboarding(&models.Onboarding{
 		ProjectID:   proj.ID,
-		CurrentStep: types.StepConnectSource,
+		CurrentStep: step,
 	})
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating project onboarding")
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
@@ -75,23 +98,14 @@ func (p *ProjectCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		Clusters:       types.BasicPlan.Clusters,
 		Users:          types.BasicPlan.Users,
 	})
-
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating project usage")
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 
 	p.WriteResult(w, r, proj.ToProjectType(p.Config().LaunchDarklyClient))
 
-	// add project to billing team
-	_, err = p.Config().BillingManager.CreateTeam(user, proj)
-
-	if err != nil {
-		// we do not write error response, since setting up billing error can be
-		// resolved later and may not be fatal
-		p.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
-	}
-
 	p.Config().AnalyticsClient.Track(analytics.ProjectCreateTrack(&analytics.ProjectCreateDeleteTrackOpts{
 		ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, proj.ID),
 	}))
@@ -121,7 +135,6 @@ func CreateProjectWithUser(
 
 	// read the project again to get the model with the role attached
 	proj, err = projectRepo.ReadProject(proj.ID)
-
 	if err != nil {
 		return nil, nil, err
 	}

+ 44 - 14
api/server/handlers/project/delete.go

@@ -1,7 +1,6 @@
 package project
 
 import (
-	"fmt"
 	"net/http"
 
 	"connectrpc.com/connect"
@@ -13,6 +12,8 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/notifier"
+	"github.com/porter-dev/porter/internal/repository"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type ProjectDeleteHandler struct {
@@ -29,14 +30,18 @@ func NewProjectDeleteHandler(
 }
 
 func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
+	ctx, span := telemetry.NewSpan(r.Context(), "delete-project")
+	defer span.End()
+
 	user, _ := ctx.Value(types.UserScope).(*models.User)
 	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
 	if proj.GetFeatureFlag(models.CapiProvisionerEnabled, p.Config().LaunchDarklyClient) {
 		clusters, err := p.Config().Repo.Cluster().ListClustersByProjectID(proj.ID)
 		if err != nil {
-			p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error finding clusters for project: %w", err)))
+			e := "error finding clusters for project"
+			err = telemetry.Error(ctx, span, err, e)
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 			return
 		}
 
@@ -46,9 +51,11 @@ func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 					continue
 				}
 
-				contractRevision, err := p.Config().Repo.APIContractRevisioner().List(ctx, proj.ID, cluster.ID)
+				contractRevision, err := p.Config().Repo.APIContractRevisioner().List(ctx, proj.ID, repository.WithClusterID(cluster.ID))
 				if err != nil {
-					p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error finding contract revisions for cluster: %w", err)))
+					e := "error finding contract revisions for cluster"
+					err = telemetry.Error(ctx, span, err, e)
+					p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 					return
 				}
 				if len(contractRevision) == 0 {
@@ -64,7 +71,9 @@ func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 				})
 				_, err = p.Config().ClusterControlPlaneClient.DeleteCluster(ctx, req)
 				if err != nil {
-					p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error deleting cluster: %w", err)))
+					e := "error deleting cluster"
+					err = telemetry.Error(ctx, span, err, e)
+					p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 					return
 				}
 			}
@@ -77,22 +86,43 @@ func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		},
 	)
 	if err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := "error sending project deletion email"
+		err = telemetry.Error(ctx, span, err, e)
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	err = p.Config().BillingManager.DeleteCustomer(ctx, proj)
+	if err != nil {
+		e := "error deleting project in billing provider"
+		err = telemetry.Error(ctx, span, err, e)
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	deletedProject, err := p.Repo().Project().DeleteProject(proj)
 	if err != nil {
-		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		e := "error deleting project"
+		err = telemetry.Error(ctx, span, err, e)
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	p.WriteResult(w, r, deletedProject.ToProjectType(p.Config().LaunchDarklyClient))
+	err = p.Repo().AWSAssumeRoleChainer().Delete(ctx, proj.ID)
+	if err != nil {
+		e := "error deleting assume role chain"
+		err = telemetry.Error(ctx, span, err, e)
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
 
-	// delete the billing team
-	if err := p.Config().BillingManager.DeleteTeam(user, proj); err != nil {
-		// we do not write error response, since setting up billing error can be
-		// resolved later and may not be fatal
-		p.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(err))
+	err = p.Repo().Project().DeleteRolesForProject(proj.ID)
+	if err != nil {
+		e := "error deleting roles for project"
+		err = telemetry.Error(ctx, span, err, e)
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
 	}
+
+	p.WriteResult(w, r, deletedProject.ToProjectType(p.Config().LaunchDarklyClient))
 }

+ 47 - 0
api/server/handlers/project/update_onboarding_step.go

@@ -236,5 +236,52 @@ func (v *UpdateOnboardingStepHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 		}
 	}
 
+	if request.Step == "cloud-provider-permissions-granted" {
+		err := v.Config().AnalyticsClient.Track(analytics.CloudProviderPermissionsGrantedTrack(&analytics.CloudProviderPermissionsGrantedTrackOpts{
+			ProjectScopedTrackOpts:            analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			Email:                             user.Email,
+			FirstName:                         user.FirstName,
+			LastName:                          user.LastName,
+			CompanyName:                       user.CompanyName,
+			CloudProvider:                     request.Provider,
+			CloudProviderCredentialIdentifier: request.CloudProviderCredentialIdentifier,
+		}))
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error tracking cloud provider permissions granted")
+		}
+	}
+
+	if request.Step == "cluster-preflight-checks-failed" {
+		err := v.Config().AnalyticsClient.Track(analytics.ClusterPreflightChecksFailedTrack(&analytics.ClusterPreflightChecksFailedTrackOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+			ErrorMessage:           request.ErrorMessage,
+			ClusterName:            request.ClusterName,
+			CloudProvider:          request.Provider,
+		}))
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error tracking cluster preflight checks failed")
+		}
+	}
+
+	if request.Step == "cluster-update-failed" {
+		err := v.Config().AnalyticsClient.Track(analytics.ClusterUpdateFailedTrack(&analytics.ClusterUpdateFailedTrackOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+			ErrorMessage:           request.ErrorMessage,
+			ClusterName:            request.ClusterName,
+			CloudProvider:          request.Provider,
+		}))
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error tracking cluster update failed")
+		}
+	}
+
 	v.WriteResult(w, r, user.ToUserType())
 }

Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio