Forráskód Böngészése

Merge branch 'master' of github.com:porter-dev/porter into stacks-notifications-tab

Feroze Mohideen 2 éve
szülő
commit
3f7e1c357e
100 módosított fájl, 5757 hozzáadás és 653 törlés
  1. 39 0
      .github/actions/build-go/action.yml
  2. 27 0
      .github/actions/build-npm/action.yml
  3. 49 0
      .github/actions/porter-deploy/action.yml
  4. 13 70
      .github/workflows/porter_stack_porter-ui.yml
  5. 0 87
      .github/workflows/pr_push_checks.yaml
  6. 111 0
      .github/workflows/pr_push_checks_go.yaml
  7. 61 0
      .github/workflows/pr_push_checks_node.yaml
  8. 8 4
      .github/workflows/prerelease.yaml
  9. 101 50
      .github/workflows/production.yml
  10. 1 0
      .gitignore
  11. 3 8
      README.md
  12. 27 0
      Taskfile.yaml
  13. 16 7
      Tiltfile
  14. 42 7
      api/client/api.go
  15. 34 0
      api/client/deployment_target.go
  16. 1 0
      api/client/k8s.go
  17. 270 34
      api/client/porter_app.go
  18. 2 1
      api/server/authn/handler.go
  19. 11 5
      api/server/authz/policy.go
  20. 2 2
      api/server/authz/policy_test.go
  21. 1 1
      api/server/authz/preview_environment.go
  22. 10 6
      api/server/authz/release.go
  23. 1 1
      api/server/handlers/api_token/create.go
  24. 1 1
      api/server/handlers/api_token/get.go
  25. 1 1
      api/server/handlers/api_token/list.go
  26. 1 1
      api/server/handlers/api_token/revoke.go
  27. 16 6
      api/server/handlers/cluster/cluster_status.go
  28. 4 2
      api/server/handlers/cluster/create.go
  29. 1 1
      api/server/handlers/cluster/create_candidate.go
  30. 2 2
      api/server/handlers/cluster/get_pod_metrics.go
  31. 1 1
      api/server/handlers/cluster/list_nginx_ingresses.go
  32. 1 1
      api/server/handlers/cluster/rename.go
  33. 1 1
      api/server/handlers/cluster/resolve_candidate.go
  34. 1 1
      api/server/handlers/cluster/update.go
  35. 102 0
      api/server/handlers/datastore/status.go
  36. 100 0
      api/server/handlers/deployment_target/create.go
  37. 69 0
      api/server/handlers/deployment_target/delete.go
  38. 85 0
      api/server/handlers/deployment_target/get.go
  39. 81 0
      api/server/handlers/deployment_target/list.go
  40. 14 5
      api/server/handlers/environment/common.go
  41. 2 7
      api/server/handlers/environment_groups/create.go
  42. 34 20
      api/server/handlers/environment_groups/list.go
  43. 82 0
      api/server/handlers/environment_groups/update_linked_apps.go
  44. 3 0
      api/server/handlers/gitinstallation/get_buildpack.go
  45. 8 16
      api/server/handlers/gitinstallation/get_porter_yaml.go
  46. 45 15
      api/server/handlers/gitinstallation/rerun_workflow.go
  47. 15 4
      api/server/handlers/helmrepo/get_chart.go
  48. 6 0
      api/server/handlers/infra/forms.go
  49. 44 5
      api/server/handlers/namespace/clone_env_group.go
  50. 51 3
      api/server/handlers/porter_app/analytics.go
  51. 156 0
      api/server/handlers/porter_app/app_metrics.go
  52. 110 4
      api/server/handlers/porter_app/apply.go
  53. 40 27
      api/server/handlers/porter_app/create.go
  54. 217 50
      api/server/handlers/porter_app/create_and_update_events.go
  55. 55 15
      api/server/handlers/porter_app/create_app.go
  56. 298 0
      api/server/handlers/porter_app/create_app_template.go
  57. 73 25
      api/server/handlers/porter_app/create_secret_and_open_pr.go
  58. 3 3
      api/server/handlers/porter_app/create_subdomain.go
  59. 4 1
      api/server/handlers/porter_app/current_app_revision.go
  60. 1 1
      api/server/handlers/porter_app/get.go
  61. 175 0
      api/server/handlers/porter_app/get_app_env.go
  62. 121 0
      api/server/handlers/porter_app/get_app_revision.go
  63. 132 0
      api/server/handlers/porter_app/get_app_template.go
  64. 156 0
      api/server/handlers/porter_app/get_build_env.go
  65. 133 0
      api/server/handlers/porter_app/helm_values_v2.go
  66. 132 0
      api/server/handlers/porter_app/job_status.go
  67. 131 0
      api/server/handlers/porter_app/latest_app_revisions.go
  68. 3 0
      api/server/handlers/porter_app/list_app_revisions.go
  69. 7 4
      api/server/handlers/porter_app/list_events.go
  70. 107 0
      api/server/handlers/porter_app/list_events_apply_v2.go
  71. 191 0
      api/server/handlers/porter_app/logs_apply_v2.go
  72. 60 25
      api/server/handlers/porter_app/parse.go
  73. 51 11
      api/server/handlers/porter_app/parse_yaml.go
  74. 127 0
      api/server/handlers/porter_app/pod_status.go
  75. 273 0
      api/server/handlers/porter_app/report_status.go
  76. 6 6
      api/server/handlers/porter_app/rollback.go
  77. 134 0
      api/server/handlers/porter_app/rollback_revision.go
  78. 71 0
      api/server/handlers/porter_app/status.go
  79. 152 0
      api/server/handlers/porter_app/stream_logs.go
  80. 489 0
      api/server/handlers/porter_app/update_app_environment_group.go
  81. 106 0
      api/server/handlers/porter_app/update_app_revision_status.go
  82. 97 0
      api/server/handlers/porter_app/update_image.go
  83. 86 24
      api/server/handlers/porter_app/validate.go
  84. 112 0
      api/server/handlers/porter_app/yaml_from_revision.go
  85. 1 1
      api/server/handlers/project/create.go
  86. 1 0
      api/server/handlers/project/create_test.go
  87. 2 2
      api/server/handlers/project/delete.go
  88. 1 1
      api/server/handlers/project/get.go
  89. 4 3
      api/server/handlers/project/get_test.go
  90. 107 0
      api/server/handlers/project/images.go
  91. 2 2
      api/server/handlers/project/list.go
  92. 4 4
      api/server/handlers/project/list_test.go
  93. 1 1
      api/server/handlers/project/rename.go
  94. 16 0
      api/server/handlers/project/update_onboarding_step.go
  95. 1 1
      api/server/handlers/project_integration/create_aws.go
  96. 1 1
      api/server/handlers/project_integration/create_gcp.go
  97. 3 0
      api/server/handlers/project_integration/get_gitlab_repo_buildpack.go
  98. 1 1
      api/server/handlers/project_integration/list_aws.go
  99. 2 3
      api/server/handlers/project_integration/preflight_check.go
  100. 0 61
      api/server/handlers/project_integration/preflight_check_aws_usage.go

+ 39 - 0
.github/actions/build-go/action.yml

@@ -0,0 +1,39 @@
+---
+name: 'build-go'
+description: builds the go binaries for the app
+
+runs:
+  using: "composite"
+  steps:
+    - name: Setup Go Cache
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/.cache/go-build
+          ~/go/pkg/mod
+        key: porter-go-${{ hashFiles('**/go.sum') }}
+        restore-keys: porter-go-
+    - name: Setup Go
+      uses: actions/setup-go@v4
+      with:
+        cache: false
+        go-version-file: go.mod
+    - name: Download Go Modules
+      shell: bash
+      run: go mod download
+    - name: Build Server Binary
+      shell: bash
+      run: go build -ldflags="-w -s -X 'main.Version=production'" -tags ee -o ./bin/app ./cmd/app
+    - name: Build Migration Binary
+      shell: bash
+      run: go build -ldflags '-w -s' -tags ee -o ./bin/migrate ./cmd/migrate
+    - name: Compress binaries
+      shell: bash
+      run: |
+        upx bin/* --best --lzma
+    - name: Store Binaries
+      uses: actions/upload-artifact@v3
+      with:
+        name: go-binaries
+        path: bin/
+        retention-days: 1

+ 27 - 0
.github/actions/build-npm/action.yml

@@ -0,0 +1,27 @@
+---
+name: 'build-npm'
+description: builds the static dashboard files for the app
+
+runs:
+  using: "composite"
+  steps:
+    - name: Setup Node
+      uses: actions/setup-node@v3
+      with:
+        node-version: 16
+    - name: Install NPM Dependencies
+      shell: bash
+      run: |
+        cd dashboard
+        npm i --legacy-peer-deps
+    - name: Run NPM Build
+      shell: bash
+      run: |
+        cd dashboard
+        npm run build
+    - name: Store NPM Static Files
+      uses: actions/upload-artifact@v3
+      with:
+        name: npm-static-files
+        path: dashboard/build/
+        retention-days: 1

+ 49 - 0
.github/actions/porter-deploy/action.yml

@@ -0,0 +1,49 @@
+---
+name: 'porter-deploy'
+description: deploys porter
+
+inputs:
+  app:
+    description: 'app to deploy'
+    required: true
+  cluster:
+    description: 'cluster to deploy to'
+    required: true
+  host:
+    description: 'host to deploy to'
+    required: true
+  project:
+    description: 'project to deploy to'
+    required: true
+  token:
+    description: 'porter deploy api token'
+    required: true
+
+runs:
+  using: "composite"
+  steps:
+    - name: Get Go Binaries
+      uses: actions/download-artifact@v3
+      with:
+        name: go-binaries
+        path: bin/
+    - name: Get NPM static files
+      uses: actions/download-artifact@v3
+      with:
+        name: npm-static-files
+        path: build/
+    - name: Set Github tag
+      shell: bash
+      id: vars
+      run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+    - name: Deploy stack
+      uses: porter-dev/porter-cli-action@v0.1.0
+      with:
+        command: apply
+      env:
+        PORTER_CLUSTER: "${{ inputs.cluster }}"
+        PORTER_HOST: "${{ inputs.host }}"
+        PORTER_PROJECT: "${{ inputs.project }}"
+        PORTER_STACK_NAME: "${{ inputs.app }}"
+        PORTER_TAG: ${{ steps.vars.outputs.sha_short }}
+        PORTER_TOKEN: "${{ inputs.token }}"

+ 13 - 70
.github/workflows/porter_stack_porter-ui.yml

@@ -9,86 +9,29 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Setup Go Cache
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: porter-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: porter-go-`
-      - name: Setup Go
-        uses: actions/setup-go@v4
-        with:
-          go-version-file: go.mod
-          cache: false
-          go-version: '1.20.5'
-      - name: Download Go Modules
-        run: go mod download
-      - name: Build Server Binary
-        run: go build -ldflags="-w -s -X 'main.Version=production'" -tags ee -o ./bin/app ./cmd/app
-      - name: Build Migration Binary
-        run: go build -ldflags '-w -s' -tags ee -o ./bin/migrate ./cmd/migrate
-      - name: Compress binaries
-        run: |
-          upx bin/* --best --lzma
-      - name: Store Binaries
-        uses: actions/upload-artifact@v3
-        with:
-          name: go-binaries
-          path: bin/
-          retention-days: 1
+      - name: build-go
+        uses: ./.github/actions/build-go
+
   build-npm:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Setup Node
-        uses: actions/setup-node@v3
-        with:
-          node-version: 16
-      - name: Install NPM Dependencies
-        run: |
-          cd dashboard
-          npm i --legacy-peer-deps
-      - name: Run NPM Build
-        run: |
-          cd dashboard
-          npm run build
-      - name: Store NPM Static Files
-        uses: actions/upload-artifact@v3
-        with:
-          name: npm-static-files
-          path: dashboard/build/
-          retention-days: 1
+      - name: build-npm
+        uses: ./.github/actions/build-npm
+
   porter-deploy:
     runs-on: ubuntu-latest
     needs: [build-go, build-npm]
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Get Go Binaries
-        uses: actions/download-artifact@v3
-        with:
-          name: go-binaries
-          path: bin/
-      - name: Get NPM static files
-        uses: actions/download-artifact@v3
-        with:
-          name: npm-static-files
-          path: build/
-      - name: Set Github tag
-        id: vars
-        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
-      - name: Deploy stack
+      - name: porter-deploy
         timeout-minutes: 30
-        uses: porter-dev/porter-cli-action@v0.1.0
+        uses: ./.github/actions/porter-deploy
         with:
-          command: apply
-        env:
-          PORTER_CLUSTER: "11"
-          PORTER_HOST: https://dashboard.internal-tools.porter.run
-          PORTER_PROJECT: "8"
-          PORTER_STACK_NAME: porter-ui
-          PORTER_TAG: ${{ steps.vars.outputs.sha_short }}
-          PORTER_TOKEN: ${{ secrets.PORTER_STACK_8_11 }}
+          app: porter-ui
+          cluster: "11"
+          host: https://dashboard.internal-tools.porter.run
+          project: "8"
+          token: ${{ secrets.PORTER_INTERNAL_TOOLS_DEPLOYMENT }}

+ 0 - 87
.github/workflows/pr_push_checks.yaml

@@ -1,87 +0,0 @@
-name: PR Checks
-
-on:
-  - pull_request
-
-jobs:
-  testing_matrix:
-    strategy:
-      matrix:
-        os: [ubuntu-latest]
-        folder: [cli, api, cmd, internal, provisioner]
-    name: Running Go Tests
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Setup Go Cache
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: porter-go-${{ hashFiles('**/go.sum') }}
-      - name: Download Go Modules
-        run: go mod download
-      - uses: actions/setup-go@v4
-        with:
-          go-version-file: go.mod
-          cache: false
-      - name: Run Go tests
-        run: go test ./${{ matrix.folder }}/...
-  linting:
-    name: Go Linter
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/setup-go@v4
-        with:
-          cache: false
-      - uses: actions/checkout@v3
-      - name: Setup Go Cache
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: porter-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: porter-go-`
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
-        with:
-          version: latest
-          args: -c .github/golangci-lint.yaml --verbose
-          skip-pkg-cache: true
-          only-new-issues: true # this is needed until the following is merged: https://github.com/golangci/golangci-lint-action/issues/820
-  build-npm:
-    name: Running smoke test npm build
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Setup Node
-        uses: actions/setup-node@v3
-        with:
-          node-version: 16
-      - name: Setup NPM
-        working-directory: dashboard
-        run: |
-          # installing updated npm
-
-          # Verify npm works before capturing and ensure its stderr is inspectable later
-          version="$(jq -r '.engines.npm' package.json)"
-          npm --version 2>&1 1>/dev/null
-
-          npm_version="$(npm --version)"
-          echo "Bootstrapping npm $version (replacing $npm_version)..."
-          npm install --unsafe-perm -g --quiet "npm@$version"
-
-          # Verify npm works before capturing and ensure its stderr is inspectable later
-          npm --version 2>&1 1>/dev/null
-          echo "npm $(npm --version) installed"
-      - name: Install NPM Dependencies
-        working-directory: dashboard
-        run: |
-          npm i --legacy-peer-deps
-      - name: Run NPM Build
-        working-directory: dashboard
-        run: |
-          npm run build

+ 111 - 0
.github/workflows/pr_push_checks_go.yaml

@@ -0,0 +1,111 @@
+name: Go PR Checks
+
+on:
+  - pull_request
+
+concurrency:
+  group: pr-go-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  cache:
+    name: Setup cache
+    runs-on: ubuntu-latest
+    outputs:
+      go-changes: ${{ steps.changed-files.outputs.any_changed }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Get changed go files
+        id: changed-files
+        uses: tj-actions/changed-files@v35
+        with:
+          files: |
+            *.go
+            *.mod
+            *.sum
+            **/*.go
+            **/*.mod
+            **/*.sum
+      - name: List all changed files
+        run: |
+          for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
+            echo "$file was changed"
+          done
+      - name: Setup Go Cache
+        uses: actions/cache@v3
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: porter-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: porter-go-
+      - uses: actions/setup-go@v4
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          cache: false
+          go-version-file: go.mod
+      - name: Download Go Modules
+        if: steps.changed-files.outputs.any_changed == 'true'
+        run: go mod download
+
+  testing_matrix:
+    name: Running Go Tests
+    runs-on: ${{ matrix.os }}
+    needs: cache
+    strategy:
+      matrix:
+        os: [ubuntu-latest]
+        folder: [cli, api, cmd, internal, provisioner]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup Go Cache
+        uses: actions/cache/restore@v3
+        if: needs.cache.outputs.go-changes == 'true'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: porter-go-${{ hashFiles('**/go.sum') }}
+      - uses: actions/setup-go@v4
+        if: needs.cache.outputs.go-changes == 'true'
+        with:
+          cache: false
+          go-version-file: go.mod
+      - name: Download Go Modules
+        if: needs.cache.outputs.go-changes == 'true'
+        run: go mod download
+      - name: Run Go tests
+        if: needs.cache.outputs.go-changes == 'true'
+        run: go test ./${{ matrix.folder }}/...
+
+  linting:
+    name: Go Linter
+    runs-on: ubuntu-latest
+    needs: cache
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup Go Cache
+        uses: actions/cache/restore@v3
+        if: needs.cache.outputs.go-changes == 'true'
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: porter-go-${{ hashFiles('**/go.sum') }}
+      - uses: actions/setup-go@v4
+        if: needs.cache.outputs.go-changes == 'true'
+        with:
+          cache: false
+          go-version-file: go.mod
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        if: needs.cache.outputs.go-changes == 'true'
+        with:
+          version: latest
+          args: -c .github/golangci-lint.yaml --verbose
+          skip-pkg-cache: true
+          only-new-issues: true # this is needed until the following is merged: https://github.com/golangci/golangci-lint-action/issues/820

+ 61 - 0
.github/workflows/pr_push_checks_node.yaml

@@ -0,0 +1,61 @@
+name: Node PR Checks
+
+on:
+  - pull_request
+
+concurrency:
+  group: pr-node-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build-npm:
+    name: Running smoke test npm build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Get changed dashboard files
+        id: changed-files
+        uses: tj-actions/changed-files@v35
+        with:
+          files: |
+            dashboard/**
+      - name: List all changed files
+        run: |
+          for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
+            echo "$file was changed"
+          done
+      - name: Setup Node
+        uses: actions/setup-node@v3
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          node-version: 16
+      - name: Setup NPM
+        if: steps.changed-files.outputs.any_changed == 'true'
+        working-directory: dashboard
+        run: |
+          # installing updated npm
+
+          # Verify npm works before capturing and ensure its stderr is inspectable later
+          version="$(jq -r '.engines.npm' package.json)"
+          npm --version 2>&1 1>/dev/null
+
+          npm_version="$(npm --version)"
+          echo "Bootstrapping npm $version (replacing $npm_version)..."
+          npm install --unsafe-perm -g --quiet "npm@$version"
+
+          # Verify npm works before capturing and ensure its stderr is inspectable later
+          npm --version 2>&1 1>/dev/null
+          echo "npm $(npm --version) installed"
+      - name: Install NPM Dependencies
+        if: steps.changed-files.outputs.any_changed == 'true'
+        working-directory: dashboard
+        run: |
+          npm i --legacy-peer-deps
+      - name: Run NPM Build
+        if: steps.changed-files.outputs.any_changed == 'true'
+        working-directory: dashboard
+        run: |
+          npm run build

+ 8 - 4
.github/workflows/prerelease.yaml

@@ -112,9 +112,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v3
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.20.5
+          cache: false
+          go-version: '1.20.5'
+          go-version-file: go.mod
       - name: Set up Node
         uses: actions/setup-node@v3
         with:
@@ -182,9 +184,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v3
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.20.5
+          cache: false
+          go-version: '1.20.5'
+          go-version-file: go.mod
       - name: Write Dashboard Environment Variables
         run: |
           cat >./dashboard/.env <<EOL

+ 101 - 50
.github/workflows/production.yml

@@ -4,64 +4,55 @@
       - production
 name: Deploy Porter to Production
 jobs:
+  deploy-start:
+    name: Mark deploy as started
+    runs-on: ubuntu-latest
+    outputs:
+      deploy-ts: ${{ steps.deploy.outputs.ts }}
+    steps:
+    - name: Slack Notification
+      uses: slackapi/slack-github-action@v1
+      id: deploy
+      continue-on-error: true
+      env:
+        SLACK_WEBHOOK_URL: ${{ secrets.PORTER_PROD_NOTIFICATIONS_SLACK_WEBHOOK }}
+      with:
+        payload: |
+          {
+            "text": "porter prod deploy started (In Progress)\n<${{ github.event.pull_request.html_url || github.event.head_commit.url }}|Link to commit>",
+            "attachments": [
+              {
+                "pretext": "Deployment started",
+                "color": "dbab09",
+                "fields": [
+                  {
+                    "title": "Status",
+                    "short": true,
+                    "value": "In Progress"
+                  }
+                ]
+              }
+            ]
+          }
   build-go:
     runs-on: ubuntu-latest
+    needs:
+      - deploy-start
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Setup Go Cache
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.cache/go-build
-            ~/go/pkg/mod
-          key: porter-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: porter-go-`
-      - name: Setup Go
-        uses: actions/setup-go@v4
-        with:
-          go-version-file: go.mod
-          cache: false
-          go-version: '1.20.5'
-      - name: Download Go Modules
-        run: go mod download
-      - name: Build Server Binary
-        run: go build -ldflags="-w -s -X 'main.Version=production'" -tags ee -o ./bin/app ./cmd/app
-      - name: Build Migration Binary
-        run: go build -ldflags '-w -s' -tags ee -o ./bin/migrate ./cmd/migrate
-      - name: Compress binaries
-        run: |
-          upx bin/* --best --lzma
-      - name: Store Binaries
-        uses: actions/upload-artifact@v3
-        with:
-          name: go-binaries
-          path: bin/
-          retention-days: 1
+      - name: build-go
+        uses: ./.github/actions/build-go
   build-npm:
     runs-on: ubuntu-latest
+    needs:
+      - deploy-start
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-      - name: Setup Node
-        uses: actions/setup-node@v3
-        with:
-          node-version: 16
-      - name: Install NPM Dependencies
-        run: |
-          cd dashboard
-          npm i --legacy-peer-deps
-      - name: Run NPM Build
-        run: |
-          cd dashboard
-          npm run build
-      - name: Store NPM Static Files
-        uses: actions/upload-artifact@v3
-        with:
-          name: npm-static-files
-          path: dashboard/build/
-          retention-days: 1
-  porter-deploy:
+      - name: build-npm
+        uses: ./.github/actions/build-npm
+  deploy-porter:
     runs-on: ubuntu-latest
     needs: [build-go, build-npm]
     steps:
@@ -90,7 +81,7 @@ jobs:
           namespace: default
           project: "5"
           tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_TOKEN_5 }}
+          token: ${{ secrets.PORTER_PRODUCTION_DEPLOYMENT }}
       - name: Update Porter Auth
         timeout-minutes: 20
         uses: porter-dev/porter-update-config-action@v0.1.0
@@ -101,4 +92,64 @@ jobs:
           namespace: default
           project: "5"
           tag: ${{ steps.vars.outputs.sha_short }}
-          token: ${{ secrets.PORTER_TOKEN_5 }}
+          token: ${{ secrets.PORTER_PRODUCTION_DEPLOYMENT }}
+
+  deploy-worker-pool:
+    runs-on: ubuntu-latest
+    needs: [build-go, build-npm] # don't run this step unless these finish successfully
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+      - name: Set Github tag
+        id: vars
+        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+      - name: Update Worker Pool (revision cull job)
+        timeout-minutes: 20
+        uses: porter-dev/porter-update-action@v0.1.0
+        with:
+          app: cull-helm-revisions
+          cluster: "9"
+          host: https://dashboard.internal-tools.porter.run
+          namespace: default
+          project: "5"
+          tag: ${{ steps.vars.outputs.sha_short }}
+          token: ${{ secrets.PORTER_PRODUCTION_DEPLOYMENT }}
+
+  deploy-end:
+    name: Mark deploy as ended
+    runs-on: ubuntu-latest
+    if: always()
+    needs:
+      - deploy-start
+      - deploy-porter
+      - deploy-worker-pool
+    steps:
+    - name: Slack Notification
+      uses: slackapi/slack-github-action@v1
+      continue-on-error: true
+      env:
+        SLACK_WEBHOOK_URL: ${{ secrets.PORTER_PROD_NOTIFICATIONS_SLACK_WEBHOOK }}
+      with:
+        update-ts: ${{ needs.deploy-start.outputs.deploy-ts }}
+        payload: |
+          {
+            "text": "porter prod deploy completed\n<${{ github.event.pull_request.html_url || github.event.head_commit.url }}|Link to commit>",
+            "attachments": [
+              {
+                "pretext": "Deployment completed",
+                "color": "8590ff",
+                "fields": [
+                  {
+                    "title": "Porter Result",
+                    "short": true,
+                    "value": "${{ needs.deploy-porter.result }}"
+                  },
+                  {
+                    "title": "Worker Pool Result",
+                    "short": true,
+                    "value": "${{ needs.deploy-worker-pool.result }}"
+                  }
+                ]
+              }
+            ]
+          }

+ 1 - 0
.gitignore

@@ -17,6 +17,7 @@ bin
 openapi.yaml
 .idea
 portercli
+local
 
 
 vendor

+ 3 - 8
README.md

@@ -1,6 +1,6 @@
 # Porter
 
-[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Go Report Card](https://goreportcard.com/badge/gojp/goreportcard)](https://goreportcard.com/report/github.com/porter-dev/porter) [![Discord](https://img.shields.io/discord/542888846271184896?color=7389D8&label=community&logo=discord&logoColor=ffffff)](https://discord.gg/mmGAw5nNjr)
+[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Go Report Card](https://goreportcard.com/badge/gojp/goreportcard)](https://goreportcard.com/report/github.com/porter-dev/porter)
 [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow)](https://twitter.com/porterdotrun)
 
 **Porter is a Kubernetes-powered PaaS that runs in your own cloud provider.** Porter brings the Heroku experience to your own AWS/GCP account, while upgrading your infrastructure to Kubernetes. Get started on Porter without the overhead of DevOps and customize your infrastructure later when you need to.
@@ -9,9 +9,7 @@
 
 ## Community and Updates
 
-For help, questions, or if you just want a place to hang out, [join our Discord community.](https://discord.gg/mmGAw5nNjr)
-
-To keep updated on our progress, please watch the repo for new releases (**Watch > Custom > Releases**) and [follow us on Twitter](https://twitter.com/getporterdev)!
+To keep updated on our progress, please watch the repo for new releases (**Watch > Custom > Releases**) and [follow us on Twitter](https://twitter.com/porterdotrun)!
 
 ## Why Porter?
 
@@ -30,6 +28,7 @@ Porter brings the simplicity of a traditional PaaS to your own cloud provider wh
 - One-click provisioning of a Kubernetes cluster in your own cloud console
   - ✅ AWS
   - ✅ GCP
+  - ✅ Azure
 - Simple deploy of any public or private Docker image
 - Auto CI/CD with [buildpacks](https://buildpacks.io) for non-Dockerized apps
 - Heroku-like GUI to monitor application status, logs, and history
@@ -61,7 +60,3 @@ Below are instructions for a quickstart. For full documentation, please visit ou
 2. Create a Project and [put in your cloud provider credentials](https://docs.getporter.dev/docs/getting-started-with-porter-on-aws). Porter will automatically provision a Kubernetes cluster in your own cloud. It is also possible to [link up an existing Kubernetes cluster.](https://docs.getporter.dev/docs/cli-documentation#connecting-to-an-existing-cluster)
 
 3. 🚀 Deploy your applications from a [git repository](https://docs.getporter.dev/docs/applications) or [Docker image registry](https://docs.getporter.dev/docs/cli-documentation#porter-docker-configure).
-
-## Want to Help?
-
-We welcome all contributions. If you're interested in contributing, please read our [contributing guide](https://github.com/porter-dev/porter/blob/master/CONTRIBUTING.md) and [join our Discord community](https://discord.gg/GJynMR3KXK).

+ 27 - 0
Taskfile.yaml

@@ -4,6 +4,10 @@ tasks:
   move-to-production:
     desc: Move the current branch to production
     cmds:
+    - cmd: git checkout master
+      silent: true
+    - cmd: git pull origin master
+      silent: true
     - cmd: git tag -d production
       ignore_error: false
       silent: true
@@ -16,6 +20,29 @@ tasks:
     - cmd: git push origin production
       ignore_error: false
       silent: true
+
+  cli-prerelease:
+    desc: Create prerelease of CLI at the provided semantic version. Call `task cli-prerelease -- v1.2.3` where v1.2.3 is the desired tag for releasing
+    cmds:
+    - task: semantic-check
+    - cmd: git fetch origin --tags 
+      silent: true
+    - cmd: git checkout master
+      silent: true
+    - cmd: git pull origin master
+      silent: true
+    - cmd: git tag {{.CLI_ARGS}}
+      silent: true
+      ignore_error: false
+    - cmd: git push origin {{.CLI_ARGS}}
+      silent: true
+      ignore_error: false
+    - cmd: echo "View your pre-release at https://github.com/porter-dev/porter/releases/tag/{{ .CLI_ARGS }}"
+
+  semantic-check:
+    preconditions:
+    - sh: version={{ .CLI_ARGS }}; semantic_version_regex='^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$'; if [[ $version =~ $semantic_version_regex ]]; then :; else exit 1; fi
+      msg: must use a semantic version such as v0.1.2
  
   lint:
     desc: Run all available linters. This mimics any checks performed in Pull Request pre-merge checks

+ 16 - 7
Tiltfile

@@ -2,15 +2,17 @@ load('ext://restart_process', 'docker_build_with_restart')
 
 secret_settings(disable_scrub=True)
 
-if not os.path.exists("vendor"):
-    local(command="go mod vendor")
-
 if config.tilt_subcommand == "up":
+    if not os.path.exists("vendor"):
+        local(command="go mod vendor")
+
     local(command="cd dashboard; npm i --legacy-peer-deps")
 
 if config.tilt_subcommand == "down":
-    local(command="rm -rf vendor")
-    local(command="rm -rf dashboard/node_modules")
+    if os.path.exists("vendor"):
+        local(command="rm -rf vendor")
+    if os.path.exists("dashboard/node_modules"):
+        local(command="rm -rf dashboard/node_modules")
 
 build_args = "GOOS=linux GOARCH=arm64"
 if os.getenv("PLATFORM") == "amd64":
@@ -36,6 +38,11 @@ else:
     local("echo 'Be careful that you aren't connected to a staging or prod cluster' && exit 1")
     exit()
 
+ngrok_url = os.getenv("NGROK_URL", "")
+if ngrok_url == "":
+    local("echo 'NGROK_URL env variable is required but not set' && exit 1")
+    exit()
+
 k8s_resource(
     workload='porter-server-web',
     port_forwards=["8080:8080"],
@@ -142,5 +149,7 @@ local_resource(
     resource_deps=["postgresql"],
     labels=["porter"]
 )
-# local_resource('public-url', serve_cmd='lt --subdomain "$(whoami)" --port 8080', resource_deps=["porter-dashboard"], labels=["porter"])
-# local_resource('public-url', serve_cmd='ngrok http 8081 --log=stdout', resource_deps=["porter-dashboard"], labels=["porter"])
+local_resource('public-url', 
+serve_cmd='''
+echo " \n\n****** NGROK URL ****** \n\n" && echo https://%s && echo "\n\n********\n\n" && ngrok http 8081 --log=stdout --domain=%s''' 
+% (ngrok_url, ngrok_url), resource_deps=["porter-dashboard"], labels=["porter"])

+ 42 - 7
api/client/api.go

@@ -80,9 +80,23 @@ func NewClientWithConfig(ctx context.Context, input NewClientInput) (Client, err
 // ErrNoAuthCredential returns an error when no auth credentials have been provided such as cookies or tokens
 var ErrNoAuthCredential = errors.New("unable to create an API session with cookie nor token")
 
-func (c *Client) getRequest(relPath string, data interface{}, response interface{}) error {
+// getRequestConfig defines configuration for a GET request
+type getRequestConfig struct {
+	retryCount uint
+}
+
+// withRetryCount is a convenience function for setting the retry count
+func withRetryCount(retryCount uint) func(*getRequestConfig) {
+	return func(o *getRequestConfig) {
+		o.retryCount = retryCount
+	}
+}
+
+// getRequest makes a GET request to the API
+func (c *Client) getRequest(relPath string, data interface{}, response interface{}, opts ...func(*getRequestConfig)) error {
 	vals := make(map[string][]string)
-	err := schema.NewEncoder().Encode(data, vals)
+	_ = schema.NewEncoder().Encode(data, vals)
+	var err error
 
 	urlVals := url.Values(vals)
 	encodedURLVals := urlVals.Encode()
@@ -106,15 +120,36 @@ func (c *Client) getRequest(relPath string, data interface{}, response interface
 		return err
 	}
 
-	if httpErr, err := c.sendRequest(req, response, true); httpErr != nil || err != nil {
-		if httpErr != nil {
-			return fmt.Errorf("%v", httpErr.Error)
+	config := &getRequestConfig{
+		retryCount: 1,
+	}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	var httpErr *types.ExternalError
+	for i := 0; i < int(config.retryCount); i++ {
+		httpErr, err = c.sendRequest(req, response, true)
+
+		if httpErr == nil && err == nil {
+			return nil
 		}
 
-		return err
+		if i != int(config.retryCount)-1 {
+			if httpErr != nil {
+				fmt.Fprintf(os.Stderr, "Error: %s (status code %d), retrying request...\n", httpErr.Error, httpErr.Code)
+			} else {
+				fmt.Fprintf(os.Stderr, "Error: %v, retrying request...\n", err)
+			}
+		}
 	}
 
-	return nil
+	if httpErr != nil {
+		return fmt.Errorf("%v", httpErr.Error)
+	}
+
+	return err
 }
 
 type postRequestOpts struct {

+ 34 - 0
api/client/deployment_target.go

@@ -0,0 +1,34 @@
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/porter-dev/porter/api/server/handlers/deployment_target"
+)
+
+// CreateDeploymentTarget creates a new deployment target for a given project and cluster with the provided name
+func (c *Client) CreateDeploymentTarget(
+	ctx context.Context,
+	projectID, clusterID uint,
+	selector string,
+	preview bool,
+) (*deployment_target.CreateDeploymentTargetResponse, error) {
+	resp := &deployment_target.CreateDeploymentTargetResponse{}
+
+	req := &deployment_target.CreateDeploymentTargetRequest{
+		Selector: selector,
+		Preview:  preview,
+	}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/deployment-targets",
+			projectID, clusterID,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}

+ 1 - 0
api/client/k8s.go

@@ -182,6 +182,7 @@ func (c *Client) GetRelease(
 		),
 		nil,
 		resp,
+		withRetryCount(3),
 	)
 
 	return resp, err

+ 270 - 34
api/client/porter_app.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/porter-dev/porter/api/server/handlers/porter_app"
+	"github.com/porter-dev/porter/internal/models"
 
 	"github.com/porter-dev/porter/api/types"
 )
@@ -154,11 +155,13 @@ func (c *Client) ParseYAML(
 	ctx context.Context,
 	projectID, clusterID uint,
 	b64Yaml string,
+	appName string,
 ) (*porter_app.ParsePorterYAMLToProtoResponse, error) {
 	resp := &porter_app.ParsePorterYAMLToProtoResponse{}
 
 	req := &porter_app.ParsePorterYAMLToProtoRequest{
 		B64Yaml: b64Yaml,
+		AppName: appName,
 	}
 
 	err := c.postRequest(
@@ -173,27 +176,37 @@ func (c *Client) ParseYAML(
 	return resp, err
 }
 
+// ValidatePorterAppInput is the input struct to ValidatePorterApp
+type ValidatePorterAppInput struct {
+	ProjectID          uint
+	ClusterID          uint
+	AppName            string
+	Base64AppProto     string
+	Base64AppOverrides string
+	DeploymentTarget   string
+	CommitSHA          string
+}
+
 // ValidatePorterApp takes in a base64 encoded app definition that is potentially partial and returns a complete definition
 // using any previous app revisions and defaults
 func (c *Client) ValidatePorterApp(
 	ctx context.Context,
-	projectID, clusterID uint,
-	base64AppProto string,
-	deploymentTarget string,
-	commitSHA string,
+	inp ValidatePorterAppInput,
 ) (*porter_app.ValidatePorterAppResponse, error) {
 	resp := &porter_app.ValidatePorterAppResponse{}
 
 	req := &porter_app.ValidatePorterAppRequest{
-		Base64AppProto:     base64AppProto,
-		DeploymentTargetId: deploymentTarget,
-		CommitSHA:          commitSHA,
+		AppName:            inp.AppName,
+		Base64AppProto:     inp.Base64AppProto,
+		Base64AppOverrides: inp.Base64AppOverrides,
+		DeploymentTargetId: inp.DeploymentTarget,
+		CommitSHA:          inp.CommitSHA,
 	}
 
 	err := c.postRequest(
 		fmt.Sprintf(
 			"/projects/%d/clusters/%d/apps/validate",
-			projectID, clusterID,
+			inp.ProjectID, inp.ClusterID,
 		),
 		req,
 		resp,
@@ -202,26 +215,40 @@ func (c *Client) ValidatePorterApp(
 	return resp, err
 }
 
+// ApplyPorterAppInput is the input struct to ApplyPorterApp
+type ApplyPorterAppInput struct {
+	ProjectID        uint
+	ClusterID        uint
+	Base64AppProto   string
+	DeploymentTarget string
+	AppRevisionID    string
+	ForceBuild       bool
+	Variables        map[string]string
+	Secrets          map[string]string
+	HardEnvUpdate    bool
+}
+
 // ApplyPorterApp takes in a base64 encoded app definition and applies it to the cluster
 func (c *Client) ApplyPorterApp(
 	ctx context.Context,
-	projectID, clusterID uint,
-	base64AppProto string,
-	deploymentTarget string,
-	appRevisionID string,
+	inp ApplyPorterAppInput,
 ) (*porter_app.ApplyPorterAppResponse, error) {
 	resp := &porter_app.ApplyPorterAppResponse{}
 
 	req := &porter_app.ApplyPorterAppRequest{
-		Base64AppProto:     base64AppProto,
-		DeploymentTargetId: deploymentTarget,
-		AppRevisionID:      appRevisionID,
+		Base64AppProto:     inp.Base64AppProto,
+		DeploymentTargetId: inp.DeploymentTarget,
+		AppRevisionID:      inp.AppRevisionID,
+		ForceBuild:         inp.ForceBuild,
+		Variables:          inp.Variables,
+		Secrets:            inp.Secrets,
+		HardEnvUpdate:      inp.HardEnvUpdate,
 	}
 
 	err := c.postRequest(
 		fmt.Sprintf(
 			"/projects/%d/clusters/%d/apps/apply",
-			projectID, clusterID,
+			inp.ProjectID, inp.ClusterID,
 		),
 		req,
 		resp,
@@ -277,14 +304,15 @@ func (c *Client) CurrentAppRevision(
 
 // CreatePorterAppDBEntryInput is the input struct to CreatePorterAppDBEntry
 type CreatePorterAppDBEntryInput struct {
-	AppName         string
-	GitRepoName     string
-	GitRepoID       uint
-	GitBranch       string
-	ImageRepository string
-	PorterYamlPath  string
-	ImageTag        string
-	Local           bool
+	AppName            string
+	GitRepoName        string
+	GitRepoID          uint
+	GitBranch          string
+	ImageRepository    string
+	PorterYamlPath     string
+	ImageTag           string
+	Local              bool
+	DeploymentTargetID string
 }
 
 // CreatePorterAppDBEntry creates an entry in the porter app
@@ -308,18 +336,16 @@ func (c *Client) CreatePorterAppDBEntry(
 			Tag:        inp.ImageTag,
 		}
 	}
-	if sourceType == "" {
-		return fmt.Errorf("cannot determine source type")
-	}
 
 	req := &porter_app.CreateAppRequest{
-		Name:           inp.AppName,
-		SourceType:     sourceType,
-		GitBranch:      inp.GitBranch,
-		GitRepoName:    inp.GitRepoName,
-		GitRepoID:      inp.GitRepoID,
-		PorterYamlPath: inp.PorterYamlPath,
-		Image:          image,
+		Name:               inp.AppName,
+		SourceType:         sourceType,
+		GitBranch:          inp.GitBranch,
+		GitRepoName:        inp.GitRepoName,
+		GitRepoID:          inp.GitRepoID,
+		PorterYamlPath:     inp.PorterYamlPath,
+		Image:              image,
+		DeploymentTargetID: inp.DeploymentTargetID,
 	}
 
 	err := c.postRequest(
@@ -381,3 +407,213 @@ func (c *Client) PredeployStatus(
 
 	return resp, err
 }
+
+// UpdateRevisionStatus updates the status of an app revision
+func (c *Client) UpdateRevisionStatus(
+	ctx context.Context,
+	projectID uint, clusterID uint,
+	appName string, appRevisionId string,
+	status models.AppRevisionStatus,
+) (*porter_app.UpdateAppRevisionStatusResponse, error) {
+	resp := &porter_app.UpdateAppRevisionStatusResponse{}
+
+	req := &porter_app.UpdateAppRevisionStatusRequest{
+		Status: status,
+	}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/revisions/%s",
+			projectID, clusterID, appName, appRevisionId,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// GetBuildEnv returns the build environment for a given app proto
+func (c *Client) GetBuildEnv(
+	ctx context.Context,
+	projectID uint, clusterID uint,
+	appName string, appRevisionId string,
+) (*porter_app.GetBuildEnvResponse, error) {
+	resp := &porter_app.GetBuildEnvResponse{}
+
+	err := c.getRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/revisions/%s/build-env",
+			projectID, clusterID, appName, appRevisionId,
+		),
+		nil,
+		resp,
+	)
+
+	return resp, err
+}
+
+// ReportRevisionStatusInput is the input struct to ReportRevisionStatus
+type ReportRevisionStatusInput struct {
+	ProjectID     uint
+	ClusterID     uint
+	AppName       string
+	AppRevisionID string
+	PRNumber      int
+	CommitSHA     string
+}
+
+// ReportRevisionStatus reports the status of an app revision to external services
+func (c *Client) ReportRevisionStatus(
+	ctx context.Context,
+	inp ReportRevisionStatusInput,
+) (*porter_app.ReportRevisionStatusResponse, error) {
+	resp := &porter_app.ReportRevisionStatusResponse{}
+
+	req := &porter_app.ReportRevisionStatusRequest{
+		PRNumber:  inp.PRNumber,
+		CommitSHA: inp.CommitSHA,
+	}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/revisions/%s/status",
+			inp.ProjectID, inp.ClusterID, inp.AppName, inp.AppRevisionID,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// CreateOrUpdateAppEnvironment updates the app environment group and creates it if it doesn't exist
+func (c *Client) CreateOrUpdateAppEnvironment(
+	ctx context.Context,
+	projectID uint, clusterID uint,
+	appName string,
+	deploymentTargetID string,
+	variables map[string]string,
+	secrets map[string]string,
+	Base64AppProto string,
+) (*porter_app.UpdateAppEnvironmentResponse, error) {
+	resp := &porter_app.UpdateAppEnvironmentResponse{}
+
+	req := &porter_app.UpdateAppEnvironmentRequest{
+		DeploymentTargetID: deploymentTargetID,
+		Variables:          variables,
+		Secrets:            secrets,
+		HardUpdate:         false,
+		Base64AppProto:     Base64AppProto,
+	}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/update-environment",
+			projectID, clusterID, appName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// PorterYamlV2Pods gets all pods for a given deployment target id and app name
+func (c *Client) PorterYamlV2Pods(
+	ctx context.Context,
+	projectID, clusterID uint,
+	porterAppName string,
+	req *types.PorterYamlV2PodsRequest,
+) (*types.GetReleaseAllPodsResponse, error) {
+	resp := &types.GetReleaseAllPodsResponse{}
+
+	err := c.getRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/pods",
+			projectID, clusterID,
+			porterAppName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// UpdateImage updates the image for a porter app (porter yaml v2 only)
+func (c *Client) UpdateImage(
+	ctx context.Context,
+	projectID, clusterID uint,
+	appName, deploymentTargetId, tag string,
+) (*porter_app.UpdateImageResponse, error) {
+	req := &porter_app.UpdateImageRequest{
+		Tag:                tag,
+		DeploymentTargetId: deploymentTargetId,
+	}
+
+	resp := &porter_app.UpdateImageResponse{}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/update-image",
+			projectID, clusterID, appName,
+		),
+		&req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// ListAppRevisions lists the last ten app revisions for a given app
+func (c *Client) ListAppRevisions(
+	ctx context.Context,
+	projectID, clusterID uint,
+	appName string,
+	deploymentTargetID string,
+) (*porter_app.ListAppRevisionsResponse, error) {
+	resp := &porter_app.ListAppRevisionsResponse{}
+
+	req := &porter_app.ListAppRevisionsRequest{
+		DeploymentTargetID: deploymentTargetID,
+	}
+
+	err := c.getRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/revisions",
+			projectID, clusterID,
+			appName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}
+
+// RollbackRevision reverts an app to a previous revision
+func (c *Client) RollbackRevision(
+	ctx context.Context,
+	projectID, clusterID uint,
+	appName string,
+	deploymentTargetID string,
+) (*porter_app.RollbackAppRevisionResponse, error) {
+	resp := &porter_app.RollbackAppRevisionResponse{}
+
+	req := &porter_app.RollbackAppRevisionRequest{
+		DeploymentTargetID: deploymentTargetID,
+	}
+
+	err := c.postRequest(
+		fmt.Sprintf(
+			"/projects/%d/clusters/%d/apps/%s/rollback",
+			projectID, clusterID,
+			appName,
+		),
+		req,
+		resp,
+	)
+
+	return resp, err
+}

+ 2 - 1
api/server/authn/handler.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/url"
 	"strings"
 
 	"github.com/gorilla/sessions"
@@ -131,7 +132,7 @@ func (authn *AuthN) handleForbiddenForSession(
 				return
 			}
 
-			http.Redirect(w, r, "/register?email="+invite.Email, http.StatusTemporaryRedirect)
+			http.Redirect(w, r, "/register?email="+url.QueryEscape(invite.Email), http.StatusTemporaryRedirect)
 			return
 		}
 

+ 11 - 5
api/server/authz/policy.go

@@ -2,7 +2,6 @@ package authz
 
 import (
 	"context"
-	"fmt"
 	"net/http"
 
 	"github.com/porter-dev/porter/api/server/authz/policy"
@@ -11,6 +10,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type PolicyMiddleware struct {
@@ -39,11 +39,15 @@ type PolicyHandler struct {
 }
 
 func (h *PolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-policy-handler")
+	defer span.End()
+
 	// get the full map of scopes to resource actions
 	reqScopes, reqErr := getRequestActionForEndpoint(r, h.endpointMeta)
 
 	if reqErr != nil {
-		apierrors.HandleAPIError(h.config.Logger, h.config.Alerter, w, r, reqErr, true)
+		err := telemetry.Error(ctx, span, reqErr, "unable to get request action for endpoint")
+		apierrors.HandleAPIError(h.config.Logger, h.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest), true)
 		return
 	}
 
@@ -70,7 +74,8 @@ func (h *PolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	policyDocs, reqErr := h.loader.LoadPolicyDocuments(policyLoaderOpts)
 
 	if reqErr != nil {
-		apierrors.HandleAPIError(h.config.Logger, h.config.Alerter, w, r, reqErr, true)
+		err := telemetry.Error(ctx, span, reqErr, "unable to load policy documents")
+		apierrors.HandleAPIError(h.config.Logger, h.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError), true)
 		return
 	}
 
@@ -78,12 +83,13 @@ func (h *PolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	hasAccess := policy.HasScopeAccess(policyDocs, reqScopes)
 
 	if !hasAccess {
+		err := telemetry.Error(ctx, span, nil, "insufficient permissions to perform action")
 		apierrors.HandleAPIError(
 			h.config.Logger,
 			h.config.Alerter,
 			w,
 			r,
-			apierrors.NewErrForbidden(fmt.Errorf("policy forbids action in project %d", policyLoaderOpts.ProjectID)),
+			apierrors.NewErrPassThroughToClient(err, http.StatusForbidden),
 			true,
 		)
 
@@ -91,7 +97,7 @@ func (h *PolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// add the set of resource ids to the request context
-	ctx := NewRequestScopeCtx(r.Context(), reqScopes)
+	ctx = NewRequestScopeCtx(ctx, reqScopes)
 	r = r.Clone(ctx)
 	h.next.ServeHTTP(w, r)
 }

+ 2 - 2
api/server/authz/policy_test.go

@@ -158,7 +158,7 @@ func TestPolicyMiddlewareInvalidPermissions(t *testing.T) {
 	handler.ServeHTTP(rr, req)
 
 	assert.False(t, next.WasCalled, "next handler should not have been called")
-	apitest.AssertResponseForbidden(t, rr)
+	apitest.AssertForbiddenError(t, rr)
 }
 
 func TestPolicyMiddlewareFailInvalidLoader(t *testing.T) {
@@ -296,5 +296,5 @@ func assertInternalError(t *testing.T, next *testHandler, rr *httptest.ResponseR
 	// first assert that that the next middleware was not called
 	assert.False(next.WasCalled, "next handler should not have been called")
 
-	apitest.AssertResponseInternalServerError(t, rr)
+	apitest.AssertInternalServerError(t, rr)
 }

+ 1 - 1
api/server/authz/preview_environment.go

@@ -38,7 +38,7 @@ func (p *PreviewEnvironmentScopedMiddleware) ServeHTTP(w http.ResponseWriter, r
 	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
 
-	if !project.PreviewEnvsEnabled {
+	if !project.GetFeatureFlag(models.PreviewEnvsEnabled, p.config.LaunchDarklyClient) {
 		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r,
 			apierrors.NewErrForbidden(errPreviewProjectDisabled), true)
 		return

+ 10 - 6
api/server/authz/release.go

@@ -11,6 +11,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"github.com/stefanmcshane/helm/pkg/release"
 )
 
@@ -35,22 +36,25 @@ type ReleaseScopedMiddleware struct {
 }
 
 func (p *ReleaseScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "middleware-release-scope")
+	defer span.End()
 
-	helmAgent, err := p.agentGetter.GetHelmAgent(r.Context(), r, cluster, "")
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	helmAgent, err := p.agentGetter.GetHelmAgent(ctx, r, cluster, "")
 	if err != nil {
-		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrInternal(err), true)
+		apierrors.HandleAPIError(p.config.Logger, p.config.Alerter, w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError), true)
 		return
 	}
 
 	// get the name of the application
-	reqScopes, _ := r.Context().Value(types.RequestScopeCtxKey).(map[types.PermissionScope]*types.RequestAction)
+	reqScopes, _ := ctx.Value(types.RequestScopeCtxKey).(map[types.PermissionScope]*types.RequestAction)
 	name := reqScopes[types.ReleaseScope].Resource.Name
 
 	// get the version for the application
 	version, _ := requestutils.GetURLParamUint(r, types.URLParamReleaseVersion)
 
-	release, err := helmAgent.GetRelease(context.Background(), name, int(version), false)
+	release, err := helmAgent.GetRelease(ctx, name, int(version), false)
 	if err != nil {
 		// ugly casing since at the time of this commit Helm doesn't have an errors package.
 		// so we rely on the Helm error containing "not found"
@@ -66,7 +70,7 @@ func (p *ReleaseScopedMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
-	ctx := NewReleaseContext(r.Context(), release)
+	ctx = NewReleaseContext(ctx, release)
 	r = r.Clone(ctx)
 	p.next.ServeHTTP(w, r)
 }

+ 1 - 1
api/server/handlers/api_token/create.go

@@ -35,7 +35,7 @@ func (p *APITokenCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	user, _ := r.Context().Value(types.UserScope).(*models.User)
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 
-	if !proj.APITokensEnabled {
+	if !proj.GetFeatureFlag(models.APITokensEnabled, p.Config().LaunchDarklyClient) {
 		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("api token endpoints are not enabled for this project")))
 		return
 	}

+ 1 - 1
api/server/handlers/api_token/get.go

@@ -33,7 +33,7 @@ func NewAPITokenGetHandler(
 func (p *APITokenGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 
-	if !proj.APITokensEnabled {
+	if !proj.GetFeatureFlag(models.APITokensEnabled, p.Config().LaunchDarklyClient) {
 		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("api token endpoints are not enabled for this project")))
 		return
 	}

+ 1 - 1
api/server/handlers/api_token/list.go

@@ -29,7 +29,7 @@ func NewAPITokenListHandler(
 func (p *APITokenListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 
-	if !proj.APITokensEnabled {
+	if !proj.GetFeatureFlag(models.APITokensEnabled, p.Config().LaunchDarklyClient) {
 		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("api token endpoints are not enabled for this project")))
 		return
 	}

+ 1 - 1
api/server/handlers/api_token/revoke.go

@@ -32,7 +32,7 @@ func NewAPITokenRevokeHandler(
 func (p *APITokenRevokeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 
-	if !proj.APITokensEnabled {
+	if !proj.GetFeatureFlag(models.APITokensEnabled, p.Config().LaunchDarklyClient) {
 		p.HandleAPIError(w, r, apierrors.NewErrForbidden(fmt.Errorf("api token endpoints are not enabled for this project")))
 		return
 	}

+ 16 - 6
api/server/handlers/cluster/cluster_status.go

@@ -13,6 +13,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type ClusterStatusHandler struct {
@@ -40,22 +41,25 @@ type ClusterStatusResponse struct {
 }
 
 func (c *ClusterStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-cluster-status")
+	defer span.End()
 
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 	req := connect.NewRequest(&porterv1.ClusterStatusRequest{
 		ProjectId: int64(cluster.ProjectID),
 		ClusterId: int64(cluster.ID),
 	})
 	status, err := c.Config().ClusterControlPlaneClient.ClusterStatus(ctx, req)
 	if err != nil {
-		e := fmt.Errorf("unable to retrieve status for cluster: %w", err)
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+		err := fmt.Errorf("unable to retrieve status for cluster: %w", err)
+		err = telemetry.Error(ctx, span, err, err.Error())
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 	if status.Msg == nil {
-		e := fmt.Errorf("unable to parse status for cluster: %w", err)
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(e))
+		err := fmt.Errorf("unable to parse status for cluster: %w", err)
+		err = telemetry.Error(ctx, span, err, err.Error())
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
 	statusResp := status.Msg
@@ -68,6 +72,12 @@ func (c *ClusterStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		IsControlPlaneReady:   statusResp.ControlPlaneStatus,
 	}
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "cluster-phase", Value: statusResp.Phase},
+		telemetry.AttributeKV{Key: "cluster-infra-status", Value: statusResp.InfrastructureStatus},
+		telemetry.AttributeKV{Key: "cluster-control-plane-status", Value: statusResp.ControlPlaneStatus},
+	)
+
 	c.WriteResult(w, r, resp)
 	w.WriteHeader(http.StatusOK)
 }

+ 4 - 2
api/server/handlers/cluster/create.go

@@ -11,6 +11,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/features"
 	"github.com/porter-dev/porter/internal/kubernetes/resolver"
 	"github.com/porter-dev/porter/internal/models"
 	"github.com/porter-dev/porter/internal/repository"
@@ -46,7 +47,7 @@ func (c *CreateClusterManualHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	cluster, err = c.Repo().Cluster().CreateCluster(cluster)
+	cluster, err = c.Repo().Cluster().CreateCluster(cluster, c.Config().LaunchDarklyClient)
 
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
@@ -117,6 +118,7 @@ func createClusterFromCandidate(
 	user *models.User,
 	candidate *models.ClusterCandidate,
 	clResolver *types.ClusterResolverAll,
+	launchDarklyClient *features.Client,
 ) (*models.Cluster, *models.ClusterCandidate, error) {
 	// we query the repo again to get the decrypted version of the cluster candidate
 	cc, err := repo.Cluster().ReadClusterCandidate(project.ID, candidate.ID)
@@ -137,7 +139,7 @@ func createClusterFromCandidate(
 		return nil, nil, err
 	}
 
-	cluster, err := cResolver.ResolveCluster(repo)
+	cluster, err := cResolver.ResolveCluster(repo, launchDarklyClient)
 	if err != nil {
 		return nil, nil, err
 	}

+ 1 - 1
api/server/handlers/cluster/create_candidate.go

@@ -67,7 +67,7 @@ func (c *CreateClusterCandidateHandler) ServeHTTP(w http.ResponseWriter, r *http
 		// automatically
 		if len(cc.Resolvers) == 0 {
 			var cluster *models.Cluster
-			cluster, cc, err = createClusterFromCandidate(c.Repo(), proj, user, cc, &types.ClusterResolverAll{})
+			cluster, cc, err = createClusterFromCandidate(c.Repo(), proj, user, cc, &types.ClusterResolverAll{}, c.Config().LaunchDarklyClient)
 
 			if err != nil {
 				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))

+ 2 - 2
api/server/handlers/cluster/get_pod_metrics.go

@@ -38,7 +38,7 @@ func (c *GetPodMetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
-	request := &types.GetPodMetricsRequest{}
+	request := &prometheus.GetPodMetricsRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
 		err := telemetry.Error(ctx, span, nil, "error decoding request")
@@ -61,7 +61,7 @@ func (c *GetPodMetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	rawQuery, err := prometheus.QueryPrometheus(agent.Clientset, promSvc, &request.QueryOpts)
+	rawQuery, err := prometheus.QueryPrometheus(ctx, agent.Clientset, promSvc, &request.QueryOpts)
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "error querying prometheus")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))

+ 1 - 1
api/server/handlers/cluster/list_nginx_ingresses.go

@@ -43,7 +43,7 @@ func (c *ListNGINXIngressesHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	var res types.ListNGINXIngressesResponse = ingresses
+	var res prometheus.ListNGINXIngressesResponse = ingresses
 
 	c.WriteResult(w, r, res)
 }

+ 1 - 1
api/server/handlers/cluster/rename.go

@@ -40,7 +40,7 @@ func (c *RenameClusterHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		cluster.VanityName = request.Name
 	}
 
-	cluster, err := c.Repo().Cluster().UpdateCluster(cluster)
+	cluster, err := c.Repo().Cluster().UpdateCluster(cluster, c.Config().LaunchDarklyClient)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 1 - 1
api/server/handlers/cluster/resolve_candidate.go

@@ -45,7 +45,7 @@ func (c *ResolveClusterCandidateHandler) ServeHTTP(w http.ResponseWriter, r *htt
 		return
 	}
 
-	cluster, cc, err := createClusterFromCandidate(c.Repo(), proj, user, cc, request)
+	cluster, cc, err := createClusterFromCandidate(c.Repo(), proj, user, cc, request, c.Config().LaunchDarklyClient)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 1 - 1
api/server/handlers/cluster/update.go

@@ -72,7 +72,7 @@ func (c *ClusterUpdateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		cluster.Name = request.Name
 	}
 
-	cluster, err := c.Repo().Cluster().UpdateCluster(cluster)
+	cluster, err := c.Repo().Cluster().UpdateCluster(cluster, c.Config().LaunchDarklyClient)
 	if err != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return

+ 102 - 0
api/server/handlers/datastore/status.go

@@ -0,0 +1,102 @@
+package datastore
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// StatusRequest describes an inbound datastore status request
+type StatusRequest struct {
+	Type string `json:"type"`
+	Name string `json:"name"`
+}
+
+// StatusResponse describes an outbound datastore status response
+type StatusResponse struct {
+	Status string `json:"status"`
+}
+
+// StatusHandler is a struct for handling datastore status requests
+type StatusHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewStatusHandler constructs a datastore StatusHandler
+func NewStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *StatusHandler {
+	return &StatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+func (h *StatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-datastore-status")
+	defer span.End()
+	// read the project from context
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &StatusRequest{}
+	if ok := h.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "datastore-name", Value: request.Name},
+		telemetry.AttributeKV{Key: "datastore-type", Value: request.Type},
+	)
+
+	var datastoreType porterv1.EnumDatastore
+	switch request.Type {
+	case "rds-postgresql":
+		datastoreType = porterv1.EnumDatastore_ENUM_DATASTORE_RDS_POSTGRESQL
+	case "rds-postgresql-aurora":
+		datastoreType = porterv1.EnumDatastore_ENUM_DATASTORE_RDS_AURORA_POSTGRESQL
+	default:
+		err := telemetry.Error(ctx, span, nil, "invalid datastore specified")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	req := connect.NewRequest(&porterv1.DatastoreStatusRequest{
+		ProjectId: int64(project.ID),
+		ClusterId: int64(cluster.ID),
+		Type:      datastoreType,
+		Name:      request.Name,
+	})
+
+	resp, err := h.Config().ClusterControlPlaneClient.DatastoreStatus(ctx, req)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error fetching datastore status from ccp")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if resp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "missing response message from ccp")
+		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "datastore-status", Value: resp.Msg.Status})
+	h.WriteResult(w, r, StatusResponse{
+		Status: resp.Msg.Status,
+	})
+}

+ 100 - 0
api/server/handlers/deployment_target/create.go

@@ -0,0 +1,100 @@
+package deployment_target
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CreateDeploymentTargetHandler is the handler for the /deployment-targets endpoint
+type CreateDeploymentTargetHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewCreateDeploymentTargetHandler handles POST requests to the endpoint /deployment-targets
+func NewCreateDeploymentTargetHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateDeploymentTargetHandler {
+	return &CreateDeploymentTargetHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
// CreateDeploymentTargetRequest is the request object for the /deployment-targets POST endpoint
type CreateDeploymentTargetRequest struct {
	// Selector is used by the handler as both the name and the namespace of the new deployment target
	Selector string `json:"selector"`
	// Preview marks the deployment target as a preview-environment target
	Preview bool `json:"preview"`
}

// CreateDeploymentTargetResponse is the response object for the /deployment-targets POST endpoint
type CreateDeploymentTargetResponse struct {
	// DeploymentTargetID is the id returned by the cluster control plane for the created target
	DeploymentTargetID string `json:"deployment_target_id"`
}
+
+// ServeHTTP handles POST requests to create a new deployment target
+func (c *CreateDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-deployment-target")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	request := &CreateDeploymentTargetRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if request.Selector == "" {
+		err := telemetry.Error(ctx, span, nil, "name is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	createReq := connect.NewRequest(&porterv1.CreateDeploymentTargetRequest{
+		ProjectId: int64(project.ID),
+		ClusterId: int64(cluster.ID),
+		Name:      request.Selector,
+		Namespace: request.Selector,
+		IsPreview: request.Preview,
+	})
+
+	ccpResp, err := c.Config().ClusterControlPlaneClient.CreateDeploymentTarget(ctx, createReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error creating deployment target")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg.DeploymentTargetId == "" {
+		err := telemetry.Error(ctx, span, err, "deployment target id is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &CreateDeploymentTargetResponse{
+		DeploymentTargetID: ccpResp.Msg.DeploymentTargetId,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 69 - 0
api/server/handlers/deployment_target/delete.go

@@ -0,0 +1,69 @@
+package deployment_target
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// DeleteDeploymentTargetHandler is the handler for DELETE /api/projects/{project_id}/clusters/{cluster_id}/deployment-targets/{deployment_target_id}
type DeleteDeploymentTargetHandler struct {
	handlers.PorterHandlerReadWriter
	// NOTE(review): KubernetesAgentGetter is not referenced by ServeHTTP in this file — confirm it is actually needed
	authz.KubernetesAgentGetter
}

// NewDeleteDeploymentTargetHandler creates a new DeleteDeploymentTargetHandler
func NewDeleteDeploymentTargetHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *DeleteDeploymentTargetHandler {
	return &DeleteDeploymentTargetHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}
+
+// ServeHTTP deletes the deployment target from the cluster
+func (c *DeleteDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "server-delete-deployment-target-by-id")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	deploymentTargetID, reqErr := requestutils.GetURLParamString(r, types.URLParamDeploymentTargetID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "error parsing deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if deploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "deployment target id cannot be empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deleteReq := connect.NewRequest(&porterv1.DeleteDeploymentTargetRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: deploymentTargetID,
+	})
+
+	_, err := c.Config().ClusterControlPlaneClient.DeleteDeploymentTarget(ctx, deleteReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error deleting deployment target")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	c.WriteResult(w, r, nil)
+}

+ 85 - 0
api/server/handlers/deployment_target/get.go

@@ -0,0 +1,85 @@
+package deployment_target
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// GetDeploymentTargetHandler is the handler for the /deployment-targets/{deployment_target_id} endpoint
+type GetDeploymentTargetHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewGetDeploymentTargetHandler handles GET requests to the endpoint /deployment-targets/{deployment_target_id}
+func NewGetDeploymentTargetHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *GetDeploymentTargetHandler {
+	return &GetDeploymentTargetHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
// GetDeploymentTargetRequest is the request object for the /deployment-targets/{deployment_target_id} GET endpoint
// NOTE(review): this struct is not referenced by the handler in this file — confirm whether it can be removed
type GetDeploymentTargetRequest struct {
	Preview bool `json:"preview"`
}

// GetDeploymentTargetResponse is the response object for the /deployment-targets/{deployment_target_id} GET endpoint
type GetDeploymentTargetResponse struct {
	// DeploymentTarget holds the details resolved via the cluster control plane
	DeploymentTarget deployment_target.DeploymentTarget `json:"deployment_target"`
}
+
+func (c *GetDeploymentTargetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-deployment-target")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	deploymentTargetID, reqErr := requestutils.GetURLParamString(r, types.URLParamDeploymentTargetID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "error parsing deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if deploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "deployment target id cannot be empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
+		ProjectID:          int64(project.ID),
+		ClusterID:          int64(cluster.ID),
+		DeploymentTargetID: deploymentTargetID,
+		CCPClient:          c.Config().ClusterControlPlaneClient,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &GetDeploymentTargetResponse{
+		DeploymentTarget: deploymentTarget,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 81 - 0
api/server/handlers/deployment_target/list.go

@@ -0,0 +1,81 @@
+package deployment_target
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ListDeploymentTargetsHandler is the handler for the /deployment-targets endpoint
+type ListDeploymentTargetsHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewListDeploymentTargetsHandler handles GET requests to the endpoint /deployment-targets
+func NewListDeploymentTargetsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *ListDeploymentTargetsHandler {
+	return &ListDeploymentTargetsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
// ListDeploymentTargetsRequest is the request object for the /deployment-targets GET endpoint
type ListDeploymentTargetsRequest struct {
	// Preview filters the listing to preview-environment deployment targets
	// NOTE(review): json-tagged but used on a GET endpoint — confirm the decoder also reads query params
	Preview bool `json:"preview"`
}

// ListDeploymentTargetsResponse is the response object for the /deployment-targets GET endpoint
type ListDeploymentTargetsResponse struct {
	// DeploymentTargets is the list of targets for the project and cluster in scope
	DeploymentTargets []types.DeploymentTarget `json:"deployment_targets"`
}
+
+func (c *ListDeploymentTargetsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-deployment-targets")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	request := &ListDeploymentTargetsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargets, err := c.Repo().DeploymentTarget().List(project.ID, cluster.ID, request.Preview)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error retrieving deployment targets")
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+
+	response := ListDeploymentTargetsResponse{
+		DeploymentTargets: make([]types.DeploymentTarget, 0),
+	}
+
+	for _, dt := range deploymentTargets {
+		if dt == nil {
+			continue
+		}
+
+		response.DeploymentTargets = append(response.DeploymentTargets, *dt.ToDeploymentTargetType())
+	}
+
+	c.WriteResult(w, r, response)
+}

+ 14 - 5
api/server/handlers/environment/common.go

@@ -45,11 +45,20 @@ func getGithubClientFromEnvironment(config *config.Config, env *models.Environme
 }
 
 func isSystemNamespace(namespace string) bool {
-	return namespace == "cert-manager" || namespace == "ingress-nginx" ||
-		namespace == "kube-node-lease" || namespace == "kube-public" ||
-		namespace == "kube-system" || namespace == "monitoring" ||
-		namespace == "porter-agent-system" || namespace == "default" ||
-		namespace == "ingress-nginx-private"
+	systemNamespaces := map[string]bool{
+		"ack-system":            true,
+		"cert-manager":          true,
+		"default":               true,
+		"ingress-nginx":         true,
+		"ingress-nginx-private": true,
+		"kube-node-lease":       true,
+		"kube-public":           true,
+		"kube-system":           true,
+		"monitoring":            true,
+		"porter-agent-system":   true,
+	}
+
+	return systemNamespaces[namespace]
 }
 
 func isGithubPRClosed(

+ 2 - 7
api/server/handlers/environment_groups/create.go

@@ -75,19 +75,14 @@ func (c *UpdateEnvironmentGroupHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	secrets := make(map[string][]byte)
-	for k, v := range request.SecretVariables {
-		secrets[k] = []byte(v)
-	}
-
 	envGroup := environment_groups.EnvironmentGroup{
 		Name:            request.Name,
 		Variables:       request.Variables,
-		SecretVariables: secrets,
+		SecretVariables: request.SecretVariables,
 		CreatedAtUTC:    time.Now().UTC(),
 	}
 
-	err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup)
+	err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup, nil)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "unable to create or update environment group")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))

+ 34 - 20
api/server/handlers/environment_groups/list.go

@@ -39,8 +39,8 @@ type ListEnvironmentGroupsResponse struct {
 type EnvironmentGroupListItem struct {
 	Name               string            `json:"name"`
 	LatestVersion      int               `json:"latest_version"`
-	Variables          map[string]string `json:"variables"`
-	SecretVariables    map[string]string `json:"secret_variables"`
+	Variables          map[string]string `json:"variables,omitempty"`
+	SecretVariables    map[string]string `json:"secret_variables,omitempty"`
 	CreatedAtUTC       time.Time         `json:"created_at"`
 	LinkedApplications []string          `json:"linked_applications,omitempty"`
 }
@@ -49,6 +49,7 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-env-groups")
 	defer span.End()
 
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	agent, err := c.GetAgent(r, cluster, "")
@@ -58,7 +59,7 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 		return
 	}
 
-	allEnvGroupVersions, err := environmentgroups.ListEnvironmentGroups(ctx, agent, environmentgroups.WithNamespace(environmentgroups.Namespace_EnvironmentGroups))
+	allEnvGroupVersions, err := environmentgroups.ListEnvironmentGroups(ctx, agent, environmentgroups.WithNamespace(environmentgroups.Namespace_EnvironmentGroups), environmentgroups.WithoutDefaultAppEnvironmentGroups())
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "unable to list all environment groups")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
@@ -84,26 +85,39 @@ func (c *ListEnvironmentGroupsHandler) ServeHTTP(w http.ResponseWriter, r *http.
 			return
 		}
 
-		applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name)
-		if err != nil {
-			err = telemetry.Error(ctx, span, err, "unable to get linked applications")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
-			return
-		}
+		var linkedApplications []string
+		if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+			applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name, true)
+			if err != nil {
+				err = telemetry.Error(ctx, span, err, "unable to get linked applications")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+				return
+			}
 
-		applicationSetForEnvGroup := make(map[string]struct{})
-		for _, app := range applications {
-			if app.Namespace == "" {
-				continue
+			applicationSetForEnvGroup := make(map[string]struct{})
+			for _, app := range applications {
+				if app.Namespace == "" {
+					continue
+				}
+				if _, ok := applicationSetForEnvGroup[app.Namespace]; !ok {
+					applicationSetForEnvGroup[app.Namespace] = struct{}{}
+				}
 			}
-			if _, ok := applicationSetForEnvGroup[app.Namespace]; !ok {
-				applicationSetForEnvGroup[app.Namespace] = struct{}{}
+			for appNamespace := range applicationSetForEnvGroup {
+				porterAppName := strings.TrimPrefix(appNamespace, "porter-stack-")
+				linkedApplications = append(linkedApplications, porterAppName)
+			}
+		} else {
+			applications, err := environmentgroups.LinkedApplications(ctx, agent, latestVersion.Name, false)
+			if err != nil {
+				err = telemetry.Error(ctx, span, err, "unable to get linked applications")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+				return
+			}
+
+			for _, app := range applications {
+				linkedApplications = append(linkedApplications, app.Name)
 			}
-		}
-		var linkedApplications []string
-		for appNamespace := range applicationSetForEnvGroup {
-			porterAppName := strings.TrimPrefix(appNamespace, "porter-stack-")
-			linkedApplications = append(linkedApplications, porterAppName)
 		}
 
 		secrets := make(map[string]string)

+ 82 - 0
api/server/handlers/environment_groups/update_linked_apps.go

@@ -0,0 +1,82 @@
+package environment_groups
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// UpdateLinkedAppsHandler is the handle for the /environment-group/update-linked-apps endpoint
type UpdateLinkedAppsHandler struct {
	handlers.PorterHandlerReadWriter
	// NOTE(review): KubernetesAgentGetter is not referenced by ServeHTTP in this file — confirm it is actually needed
	authz.KubernetesAgentGetter
}

// NewUpdateLinkedAppsHandler creates an instance of UpdateLinkedAppsHandler
func NewUpdateLinkedAppsHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *UpdateLinkedAppsHandler {
	return &UpdateLinkedAppsHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}
+
// UpdateLinkedAppsRequest is the request object for the /environment-group/update-linked-apps endpoint
type UpdateLinkedAppsRequest struct {
	// Name is the environment group whose linked apps should be updated
	Name string `json:"name"`
}

// UpdateLinkedAppsResponse is the response object for the /environment-group/update-linked-apps endpoint
// (intentionally empty: success is conveyed by the HTTP status alone)
type UpdateLinkedAppsResponse struct{}
+
+// ServeHTTP updates all apps linked to an environment group
+func (c *UpdateLinkedAppsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-apps-linked-to-env-group")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
+		return
+	}
+
+	request := &UpdateLinkedAppsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-name", Value: request.Name})
+
+	updateLinkedAppsReq := connect.NewRequest(&porterv1.UpdateAppsLinkedToEnvGroupRequest{
+		ProjectId:    int64(project.ID),
+		ClusterId:    int64(cluster.ID),
+		EnvGroupName: request.Name,
+	})
+	_, err := c.Config().ClusterControlPlaneClient.UpdateAppsLinkedToEnvGroup(ctx, updateLinkedAppsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling ccp update apps linked to env group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &UpdateLinkedAppsResponse{}
+
+	c.WriteResult(w, r, res)
+}

+ 3 - 0
api/server/handlers/gitinstallation/get_buildpack.go

@@ -22,12 +22,15 @@ func initBuilderInfo() map[string]*buildpacks.BuilderInfo {
 	builders[buildpacks.PaketoBuilder] = &buildpacks.BuilderInfo{
 		Name: "Paketo",
 		Builders: []string{
+			"paketobuildpacks/builder-jammy-full:latest",
 			"paketobuildpacks/builder:full",
 		},
 	}
 	builders[buildpacks.HerokuBuilder] = &buildpacks.BuilderInfo{
 		Name: "Heroku",
 		Builders: []string{
+			"heroku/builder:22",
+			"heroku/builder-classic:22",
 			"heroku/buildpacks:20",
 			"heroku/buildpacks:18",
 		},

+ 8 - 16
api/server/handlers/gitinstallation/get_porter_yaml.go

@@ -5,8 +5,6 @@ import (
 	b64 "encoding/base64"
 	"net/http"
 
-	"github.com/porter-dev/porter/api/server/handlers/porter_app"
-
 	"github.com/google/go-github/v41/github"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -16,6 +14,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
 	"github.com/porter-dev/porter/internal/telemetry"
 	"gopkg.in/yaml.v2"
 )
@@ -94,22 +93,16 @@ func (c *GithubGetPorterYamlHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	parsed := &porter_app.PorterStackYAML{}
-	err = yaml.Unmarshal([]byte(fileData), parsed)
+	version := &porter_app.YamlVersion{}
+	err = yaml.Unmarshal([]byte(fileData), version)
 	if err != nil {
-		err = telemetry.Error(ctx, span, err, "invalid porter yaml format")
+		err = telemetry.Error(ctx, span, err, "invalid porter yaml version")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
-	if project.ValidateApplyV2 {
-		if parsed.Version == nil {
-			err = telemetry.Error(ctx, span, nil, "v2 porter yaml is required")
-			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-			return
-		}
-
-		if *parsed.Version != "v2" {
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		if version.Version != "" && version.Version != "v2" {
 			err = telemetry.Error(ctx, span, nil, "porter YAML version is not supported")
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 			return
@@ -117,9 +110,8 @@ func (c *GithubGetPorterYamlHandler) ServeHTTP(w http.ResponseWriter, r *http.Re
 	}
 
 	// backwards compatibility so that old porter yamls are no longer valid
-	if !project.ValidateApplyV2 && parsed.Version != nil {
-		version := *parsed.Version
-		if version != "v1stack" {
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		if version.Version != "" && version.Version != "v1stack" {
 			err = telemetry.Error(ctx, span, nil, "porter YAML version is not supported")
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 			return

+ 45 - 15
api/server/handlers/gitinstallation/rerun_workflow.go

@@ -11,6 +11,9 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/commonutils"
 	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type RerunWorkflowHandler struct {
@@ -28,9 +31,20 @@ func NewRerunWorkflowHandler(
 }
 
 func (c *RerunWorkflowHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	owner, name, ok := commonutils.GetOwnerAndNameParams(c, w, r)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-rerun-github-workflow")
+	defer span.End()
 
-	if !ok {
+	owner, reqErr := requestutils.GetURLParamString(r, types.URLParamGitRepoOwner)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "repo owner not found in request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	name, reqErr := requestutils.GetURLParamString(r, types.URLParamGitRepoName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "repo name not found in request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -40,9 +54,17 @@ func (c *RerunWorkflowHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	// only the very latest of the workflow runs will be rerun
 	branch := r.URL.Query().Get("branch")
 	releaseName := r.URL.Query().Get("release_name")
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "repo-owner", Value: owner},
+		telemetry.AttributeKV{Key: "repo-name", Value: name},
+		telemetry.AttributeKV{Key: "branch", Value: branch},
+		telemetry.AttributeKV{Key: "release-name", Value: releaseName},
+	)
 
 	if filename == "" && releaseName == "" {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("filename and release name are both empty")))
+		err := telemetry.Error(ctx, span, nil, "filename and release name are both empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
@@ -59,45 +81,53 @@ func (c *RerunWorkflowHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		}
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "filename", Value: filename})
+
 	client, err := GetGithubAppClientFromRequest(c.Config(), r)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err := telemetry.Error(ctx, span, err, "error getting github app client from request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	latestWorkflowRun, err := commonutils.GetLatestWorkflowRun(client, owner, name, filename, branch)
-
 	if err != nil && errors.Is(err, commonutils.ErrNoWorkflowRuns) {
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, 400))
+		err = telemetry.Error(ctx, span, err, "no workflow runs found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	} else if err != nil && errors.Is(err, commonutils.ErrWorkflowNotFound) {
-		w.WriteHeader(http.StatusNotFound)
-		c.WriteResult(w, r, filename)
+		err = telemetry.Error(ctx, span, err, "workflow not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
 		return
 	} else if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting latest workflow run")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	if latestWorkflowRun.GetStatus() == "in_progress" || latestWorkflowRun.GetStatus() == "queued" {
-		w.WriteHeader(409)
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "existing-pending-workflow-status", Value: latestWorkflowRun.GetStatus()})
+		w.WriteHeader(http.StatusConflict)
 		c.WriteResult(w, r, latestWorkflowRun.GetHTMLURL())
 		return
 	}
 
 	_, err = client.Actions.RerunWorkflowByID(r.Context(), owner, name, latestWorkflowRun.GetID())
-
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error rerunning workflow")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	latestWorkflowRun, err = commonutils.GetLatestWorkflowRun(client, owner, name, filename, branch)
-
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting latest workflow run after rerunning workflow")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	c.WriteResult(w, r, latestWorkflowRun.GetHTMLURL())
+	newWorkflowRunUrl := latestWorkflowRun.GetHTMLURL()
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "new-workflow-run-url", Value: newWorkflowRunUrl})
+
+	c.WriteResult(w, r, newWorkflowRunUrl)
 }

+ 15 - 4
api/server/handlers/helmrepo/get_chart.go

@@ -12,6 +12,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"github.com/porter-dev/porter/internal/templater/parser"
 )
 
@@ -30,8 +31,11 @@ func NewChartGetHandler(
 }
 
 func (t *ChartGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	helmRepo, _ := r.Context().Value(types.HelmRepoScope).(*models.HelmRepo)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-chart")
+	defer span.End()
+
+	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	helmRepo, _ := ctx.Value(types.HelmRepoScope).(*models.HelmRepo)
 
 	name, _ := requestutils.GetURLParamString(r, types.URLParamTemplateName)
 	version, _ := requestutils.GetURLParamString(r, types.URLParamTemplateVersion)
@@ -41,14 +45,21 @@ func (t *ChartGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		version = ""
 	}
 
-	chart, err := release.LoadChart(t.Config(), &release.LoadAddonChartOpts{
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "helm-repo-url", Value: helmRepo.RepoURL},
+		telemetry.AttributeKV{Key: "template-name", Value: name},
+		telemetry.AttributeKV{Key: "template-version", Value: version},
+	)
+
+	chart, err := release.LoadChart(ctx, t.Config(), &release.LoadAddonChartOpts{
 		ProjectID:       proj.ID,
 		RepoURL:         helmRepo.RepoURL,
 		TemplateName:    name,
 		TemplateVersion: version,
 	})
 	if err != nil {
-		t.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err := telemetry.Error(ctx, span, nil, "error loading chart from helm")
+		t.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 6 - 0
api/server/handlers/infra/forms.go

@@ -743,6 +743,12 @@ tabs:
       placeholder: "ex: 10.99"
       settings:
         default: "10.99"
+    - type: string-input
+      label: "Add a different CIDR range prefix for cluster services (first two octets: for example 172.20 will configure EKS with CIDR range 172.20.0.0/16)."
+      variable: cluster_service_cidr_octets
+      placeholder: "ex: 172.20"
+      settings:
+        default: "172.20"
     - type: checkbox
       label: "Add additional private subnets to the cluster in each AZ."
       variable: additional_private_subnets

+ 44 - 5
api/server/handlers/namespace/clone_env_group.go

@@ -15,6 +15,9 @@ import (
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/kubernetes/envgroup"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 type CloneEnvGroupHandler struct {
@@ -34,6 +37,9 @@ func NewCloneEnvGroupHandler(
 }
 
 func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "clone-env-group-legacy")
+	defer span.End()
+
 	request := &types.CloneEnvGroupRequest{}
 
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
@@ -45,13 +51,15 @@ func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 	agent, err := c.GetAgent(r, cluster, "")
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting kubernetes agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	cm, _, err := agent.GetLatestVersionedConfigMap(request.SourceName, namespace)
 	if err != nil {
 		if errors.Is(err, kubernetes.IsNotFoundError) {
+			_ = telemetry.Error(ctx, span, err, "error finding latest config map")
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("error cloning env group: envgroup %s in namespace %s not found", request.SourceName, namespace), http.StatusNotFound,
 				"no config map found for envgroup",
@@ -59,13 +67,16 @@ func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			return
 		}
 
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting latest config map")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	secret, _, err := agent.GetLatestVersionedSecret(request.SourceName, namespace)
 	if err != nil {
 		if errors.Is(err, kubernetes.IsNotFoundError) {
+			_ = telemetry.Error(ctx, span, err, "error finding latest secret")
+
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(
 				fmt.Errorf("error cloning env group: envgroup %s in namespace %s not found", request.SourceName, namespace), http.StatusNotFound,
 				"no k8s secret found for envgroup",
@@ -73,7 +84,8 @@ func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 			return
 		}
 
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error getting secret")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
@@ -94,6 +106,30 @@ func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		secretVars[key] = string(val)
 	}
 
+	_, err = agent.Clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
+	if err != nil {
+		if !errors.Is(err, kubernetes.IsNotFoundError) {
+			err = telemetry.Error(ctx, span, err, "error getting namespace")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		_, err = agent.Clientset.CoreV1().Namespaces().Create(ctx, &v1.Namespace{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "Namespace",
+				APIVersion: "v1",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: namespace,
+			},
+		}, metav1.CreateOptions{})
+		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error creating namespace")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+	}
+
 	configMap, err := envgroup.CreateEnvGroup(agent, types.ConfigMapInput{
 		Name:            request.TargetName,
 		Namespace:       request.TargetNamespace,
@@ -101,13 +137,16 @@ func (c *CloneEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		SecretVariables: secretVars,
 	})
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		err = telemetry.Error(ctx, span, err, "error creating env group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
 	envGroup, err := envgroup.ToEnvGroup(configMap)
 	if err != nil {
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+
+		err = telemetry.Error(ctx, span, err, "error converting env group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 

+ 51 - 3
api/server/handlers/porter_app/analytics.go

@@ -1,6 +1,7 @@
 package porter_app
 
 import (
+	"context"
 	"net/http"
 
 	"github.com/porter-dev/porter/api/server/handlers"
@@ -9,6 +10,7 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/internal/analytics"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type PorterAppAnalyticsHandler struct {
@@ -35,6 +37,7 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 		return
 	}
 
+	validateApplyV2 := project.GetFeatureFlag(models.ValidateApplyV2, v.Config().LaunchDarklyClient)
 	if request.Step == "stack-launch-start" {
 		v.Config().AnalyticsClient.Track(analytics.StackLaunchStartTrack(&analytics.StackLaunchStartOpts{
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
@@ -42,6 +45,7 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
 	}
 
@@ -53,6 +57,7 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
 	}
 
@@ -64,6 +69,7 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
 	}
 
@@ -76,6 +82,7 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
 			ErrorMessage:           request.ErrorMessage,
+			ValidateApplyV2:        validateApplyV2,
 		}))
 	}
 
@@ -88,6 +95,21 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
 			DeleteWorkflowFile:     request.DeleteWorkflowFile,
+			ValidateApplyV2:        validateApplyV2,
+		}))
+	}
+
+	if request.Step == "porter-app-update-failure" {
+		v.Config().AnalyticsClient.Track(analytics.PorterAppUpdateFailureTrack(&analytics.PorterAppUpdateOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			StackName:              request.StackName,
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+			ErrorMessage:           request.ErrorMessage,
+			ErrorStackTrace:        request.ErrorStackTrace,
+			ValidateApplyV2:        validateApplyV2,
 		}))
 	}
 
@@ -95,45 +117,71 @@ func (v *PorterAppAnalyticsHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 }
 
 func TrackStackBuildStatus(
+	ctx context.Context,
 	config *config.Config,
 	user *models.User,
 	project *models.Project,
 	stackName string,
 	errorMessage string,
 	status types.PorterAppEventStatus,
+	validateApplyV2 bool,
+	b64BuildLogs string,
 ) error {
+	ctx, span := telemetry.NewSpan(ctx, "track-build-status")
+	defer span.End()
+
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "porter-app-build-status", Value: string(status)},
+		telemetry.AttributeKV{Key: "porter-app-name", Value: stackName},
+		telemetry.AttributeKV{Key: "porter-app-error-message", Value: errorMessage},
+	)
+
 	if status == types.PorterAppEventStatus_Progressing {
-		return config.AnalyticsClient.Track(analytics.StackBuildProgressingTrack(&analytics.StackBuildOpts{
+		err := config.AnalyticsClient.Track(analytics.StackBuildProgressingTrack(&analytics.StackBuildOpts{
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
 			StackName:              stackName,
 			Email:                  user.Email,
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
+		if err != nil {
+			return telemetry.Error(ctx, span, err, "Failed to track stack build progressing")
+		}
 	}
 
 	if status == types.PorterAppEventStatus_Success {
-		return config.AnalyticsClient.Track(analytics.StackBuildSuccessTrack(&analytics.StackBuildOpts{
+		err := config.AnalyticsClient.Track(analytics.StackBuildSuccessTrack(&analytics.StackBuildOpts{
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
 			StackName:              stackName,
 			Email:                  user.Email,
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
+		if err != nil {
+			return telemetry.Error(ctx, span, err, "Failed to track stack build success")
+		}
 	}
 
 	if status == types.PorterAppEventStatus_Failed {
-		return config.AnalyticsClient.Track(analytics.StackBuildFailureTrack(&analytics.StackBuildOpts{
+		er := config.AnalyticsClient.Track(analytics.StackBuildFailureTrack(&analytics.StackBuildOpts{
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
 			StackName:              stackName,
 			ErrorMessage:           errorMessage,
+			B64BuildLogs:           b64BuildLogs,
 			Email:                  user.Email,
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
+			ValidateApplyV2:        validateApplyV2,
 		}))
+		if er != nil {
+			return telemetry.Error(ctx, span, er, "Failed to track stack build failure")
+		}
 	}
 
 	return nil

+ 156 - 0
api/server/handlers/porter_app/app_metrics.go

@@ -0,0 +1,156 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/internal/kubernetes/prometheus"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// AppMetricsHandler handles the /apps/metrics endpoint
+type AppMetricsHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAppMetricsHandler returns a new AppMetricsHandler
+func NewAppMetricsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppMetricsHandler {
+	return &AppMetricsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// MetricsRequest is the expected request body for the /apps/metrics endpoint
+type MetricsRequest struct {
+	// Deployment target of the app to query for metrics
+	DeploymentTargetID string `schema:"deployment_target_id"`
+
+	// Below is just a copy of prometheus.QueryOpts, other than namespace
+	// the name of the metric being queried for
+	Metric    string   `schema:"metric"`
+	ShouldSum bool     `schema:"shouldsum"`
+	Kind      string   `schema:"kind"`
+	PodList   []string `schema:"pods"`
+	Name      string   `schema:"name"`
+	// start time (in unix timestamp) for prometheus results
+	StartRange uint `schema:"startrange"`
+	// end time time (in unix timestamp) for prometheus results
+	EndRange   uint    `schema:"endrange"`
+	Resolution string  `schema:"resolution"`
+	Percentile float64 `schema:"percentile"`
+}
+
+// ServeHTTP returns metrics for a given app in the provided deployment target
+func (c *AppMetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-metrics")
+	defer span.End()
+	r = r.Clone(ctx)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &MetricsRequest{}
+
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.DeploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetID,
+	})
+
+	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := deploymentTargetDetailsResp.Msg.Namespace
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting k8s agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// get prometheus service
+	promSvc, found, err := prometheus.GetPrometheusService(agent.Clientset)
+	if err != nil || !found {
+		err = telemetry.Error(ctx, span, err, "error getting prometheus service")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "metric", Value: request.Metric},
+		telemetry.AttributeKV{Key: "shouldsum", Value: request.ShouldSum},
+		telemetry.AttributeKV{Key: "kind", Value: request.Kind},
+		telemetry.AttributeKV{Key: "name", Value: request.Name},
+		telemetry.AttributeKV{Key: "start-range", Value: request.StartRange},
+		telemetry.AttributeKV{Key: "end-range", Value: request.EndRange},
+		telemetry.AttributeKV{Key: "resolution", Value: request.Resolution},
+		telemetry.AttributeKV{Key: "percentile", Value: request.Percentile},
+	)
+
+	queryOpts := &prometheus.QueryOpts{
+		Metric:     request.Metric,
+		ShouldSum:  request.ShouldSum,
+		Kind:       request.Kind,
+		Name:       request.Name,
+		Namespace:  namespace,
+		StartRange: request.StartRange,
+		EndRange:   request.EndRange,
+		Resolution: request.Resolution,
+		Percentile: request.Percentile,
+	}
+
+	rawQuery, err := prometheus.QueryPrometheus(ctx, agent.Clientset, promSvc, queryOpts)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error querying prometheus")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, rawQuery)
+}

+ 110 - 4
api/server/handlers/porter_app/apply.go

@@ -1,7 +1,10 @@
 package porter_app
 
 import (
+	"context"
 	"encoding/base64"
+	"errors"
+	"fmt"
 	"net/http"
 
 	"connectrpc.com/connect"
@@ -10,8 +13,12 @@ import (
 
 	"github.com/porter-dev/api-contracts/generated/go/helpers"
 
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
 	"github.com/porter-dev/porter/internal/telemetry"
 
+	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -23,6 +30,7 @@ import (
 // ApplyPorterAppHandler is the handler for the /apps/parse endpoint
 type ApplyPorterAppHandler struct {
 	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
 }
 
 // NewApplyPorterAppHandler handles POST requests to the endpoint /apps/apply
@@ -33,14 +41,21 @@ func NewApplyPorterAppHandler(
 ) *ApplyPorterAppHandler {
 	return &ApplyPorterAppHandler{
 		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
 	}
 }
 
 // ApplyPorterAppRequest is the request object for the /apps/apply endpoint
 type ApplyPorterAppRequest struct {
-	Base64AppProto     string `json:"b64_app_proto"`
-	DeploymentTargetId string `json:"deployment_target_id"`
-	AppRevisionID      string `json:"app_revision_id"`
+	Base64AppProto     string            `json:"b64_app_proto"`
+	DeploymentTargetId string            `json:"deployment_target_id"`
+	AppRevisionID      string            `json:"app_revision_id"`
+	ForceBuild         bool              `json:"force_build"`
+	Variables          map[string]string `json:"variables"`
+	Secrets            map[string]string `json:"secrets"`
+	// HardEnvUpdate is used to remove any variables that are not specified in the request.  If false, the request will only update the variables specified in the request,
+	// and leave all other variables untouched.
+	HardEnvUpdate bool `json:"hard_env_update"`
 }
 
 // ApplyPorterAppResponse is the response object for the /apps/apply endpoint
@@ -62,7 +77,7 @@ func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
 	)
 
-	if !project.ValidateApplyV2 {
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
 		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
 		return
@@ -104,6 +119,13 @@ func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 			return
 		}
 
+		app, err := v2.AppFromProto(appProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error converting app proto to app")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
 		if request.DeploymentTargetId == "" {
 			err := telemetry.Error(ctx, span, err, "deployment target id is empty")
 			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
@@ -115,6 +137,47 @@ func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 			telemetry.AttributeKV{Key: "app-name", Value: appProto.Name},
 			telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetId},
 		)
+
+		deploymentTargetDetails, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
+			ProjectID:          int64(project.ID),
+			ClusterID:          int64(cluster.ID),
+			DeploymentTargetID: deploymentTargetID,
+			CCPClient:          c.Config().ClusterControlPlaneClient,
+		})
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		agent, err := c.GetAgent(r, cluster, "")
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting kubernetes agent")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		subdomainCreateInput := porter_app.CreatePorterSubdomainInput{
+			AppName:             app.Name,
+			RootDomain:          c.Config().ServerConf.AppRootDomain,
+			DNSClient:           c.Config().DNSClient,
+			DNSRecordRepository: c.Repo().DNSRecord(),
+			KubernetesAgent:     agent,
+		}
+
+		appWithDomains, err := addPorterSubdomainsIfNecessary(ctx, app, deploymentTargetDetails, subdomainCreateInput)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error adding porter subdomains")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		appProto, _, err = v2.ProtoFromApp(ctx, appWithDomains)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error converting app to proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
 	}
 
 	applyReq := connect.NewRequest(&porterv1.ApplyPorterAppRequest{
@@ -122,6 +185,12 @@ func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		DeploymentTargetId:  deploymentTargetID,
 		App:                 appProto,
 		PorterAppRevisionId: appRevisionID,
+		ForceBuild:          request.ForceBuild,
+		AppEnv: &porterv1.EnvGroupVariables{
+			Normal: request.Variables,
+			Secret: request.Secrets,
+		},
+		IsHardEnvUpdate: request.HardEnvUpdate,
 	})
 	ccpResp, err := c.Config().ClusterControlPlaneClient.ApplyPorterApp(ctx, applyReq)
 	if err != nil {
@@ -164,3 +233,40 @@ func (c *ApplyPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 
 	c.WriteResult(w, r, response)
 }
+
+// addPorterSubdomainsIfNecessary adds porter subdomains to the app proto if a web service is changed to private and has no domains
+func addPorterSubdomainsIfNecessary(ctx context.Context, app v2.PorterApp, deploymentTarget deployment_target.DeploymentTarget, createSubdomainInput porter_app.CreatePorterSubdomainInput) (v2.PorterApp, error) {
+	ctx, span := telemetry.NewSpan(ctx, "add-porter-subdomains-if-necessary")
+	defer span.End()
+
+	services := make([]v2.Service, 0)
+
+	for _, service := range app.Services {
+		if service.Type == v2.ServiceType_Web {
+			if service.Private != nil && !*service.Private && service.Domains != nil && len(service.Domains) == 0 {
+				if deploymentTarget.Namespace != DeploymentTargetSelector_Default {
+					createSubdomainInput.AppName = fmt.Sprintf("%s-%s", createSubdomainInput.AppName, deploymentTarget.ID[:6])
+				}
+
+				subdomain, err := porter_app.CreatePorterSubdomain(ctx, createSubdomainInput)
+				if err != nil {
+					return app, fmt.Errorf("error creating subdomain: %w", err)
+				}
+
+				if subdomain == "" {
+					return app, errors.New("response subdomain is empty")
+				}
+
+				service.Domains = []v2.Domains{
+					{Name: subdomain},
+				}
+			}
+		}
+
+		services = append(services, service)
+	}
+
+	app.Services = services
+
+	return app, nil
+}

+ 40 - 27
api/server/handlers/porter_app/create.go

@@ -54,6 +54,12 @@ func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-porter-app")
 	defer span.End()
 
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "unable to update app: please upgrade the CLI and try again")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
+		return
+	}
+
 	request := &types.CreatePorterAppRequest{}
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
 		err := telemetry.Error(ctx, span, nil, "error decoding request")
@@ -106,30 +112,29 @@ func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 	var releaseValues map[string]interface{}
 	var releaseDependencies []*chart.Dependency
-	if shouldCreate || request.OverrideRelease {
-		releaseValues = nil
-		releaseDependencies = nil
-
-		// this is required because when the front-end sends an update request with overrideRelease=true, it is unable to
-		// get the image info from the release. unless it is explicitly provided in the request, we avoid overwriting it
-		// by attempting to get the image info from the release or the provided helm values
-		if helmRelease != nil && (imageInfo.Repository == "" || imageInfo.Tag == "") {
-			if request.FullHelmValues != "" {
-				imageInfo, err = attemptToGetImageInfoFromFullHelmValues(request.FullHelmValues)
-				if err != nil {
-					err = telemetry.Error(ctx, span, err, "error getting image info from full helm values")
-					telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-yaml-base64", Value: porterYamlBase64})
-					c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-					return
-				}
-			} else {
-				imageInfo = attemptToGetImageInfoFromRelease(helmRelease.Config)
+	// unless it is explicitly provided in the request, we avoid overwriting the image info
+	// by attempting to get it from the release or the provided helm values
+	if helmRelease != nil && (imageInfo.Repository == "" || imageInfo.Tag == "") {
+		if request.FullHelmValues != "" {
+			imageInfo, err = attemptToGetImageInfoFromFullHelmValues(request.FullHelmValues)
+			if err != nil {
+				err = telemetry.Error(ctx, span, err, "error getting image info from full helm values")
+				telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-yaml-base64", Value: porterYamlBase64})
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+				return
 			}
+		} else {
+			imageInfo = attemptToGetImageInfoFromRelease(helmRelease.Config)
 		}
+	}
+	if shouldCreate {
+		releaseValues = nil
+		releaseDependencies = nil
 	} else {
 		releaseValues = helmRelease.Config
 		releaseDependencies = helmRelease.Chart.Metadata.Dependencies
 	}
+
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "image-repo", Value: imageInfo.Repository}, telemetry.AttributeKV{Key: "image-tag", Value: imageInfo.Tag})
 
 	if request.Builder == "" {
@@ -182,16 +187,17 @@ func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 			ExistingHelmValues:        releaseValues,
 			ExistingChartDependencies: releaseDependencies,
 			SubdomainCreateOpts: SubdomainCreateOpts{
-				k8sAgent:       k8sAgent,
-				dnsRepo:        c.Repo().DNSRecord(),
-				powerDnsClient: c.Config().PowerDNSClient,
-				appRootDomain:  c.Config().ServerConf.AppRootDomain,
-				stackName:      appName,
+				k8sAgent:      k8sAgent,
+				dnsRepo:       c.Repo().DNSRecord(),
+				dnsClient:     c.Config().DNSClient,
+				appRootDomain: c.Config().ServerConf.AppRootDomain,
+				stackName:     appName,
 			},
 			InjectLauncherToStartCommand: injectLauncher,
 			ShouldValidateHelmValues:     shouldCreate,
 			FullHelmValues:               request.FullHelmValues,
 			AddCustomNodeSelector:        addCustomNodeSelector,
+			RemoveDeletedServices:        request.OverrideRelease,
 		},
 	)
 	if err != nil {
@@ -303,7 +309,7 @@ func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 		if features.AreAgentDeployEventsEnabled(k8sAgent) {
 			serviceDeploymentStatusMap := getServiceDeploymentMetadataFromValues(values, types.PorterAppEventStatus_Progressing)
-			_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, types.PorterAppEventStatus_Progressing, porterApp.ID, 1, imageInfo.Tag, c.Repo().PorterAppEvent())
+			_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, porterApp.ID, 1, imageInfo.Tag, c.Repo().PorterAppEvent())
 		} else {
 			_, err = createOldPorterAppDeployEvent(ctx, types.PorterAppEventStatus_Success, porterApp.ID, 1, imageInfo.Tag, c.Repo().PorterAppEvent())
 		}
@@ -492,7 +498,7 @@ func (c *CreatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 		if features.AreAgentDeployEventsEnabled(k8sAgent) {
 			serviceDeploymentStatusMap := getServiceDeploymentMetadataFromValues(values, types.PorterAppEventStatus_Progressing)
-			_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, types.PorterAppEventStatus_Progressing, updatedPorterApp.ID, helmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
+			_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, updatedPorterApp.ID, helmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
 		} else {
 			_, err = createOldPorterAppDeployEvent(ctx, types.PorterAppEventStatus_Success, updatedPorterApp.ID, helmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
 		}
@@ -542,7 +548,6 @@ func createOldPorterAppDeployEvent(ctx context.Context, status types.PorterAppEv
 func createNewPorterAppDeployEvent(
 	ctx context.Context,
 	serviceStatusMap map[string]types.ServiceDeploymentMetadata,
-	status types.PorterAppEventStatus,
 	appID uint,
 	revision int,
 	tag string,
@@ -554,9 +559,17 @@ func createNewPorterAppDeployEvent(
 	// mark all pending deployments from the deploy event of the previous revision as canceled
 	updatePreviousPorterAppDeployEvent(ctx, appID, revision, repo)
 
+	deployEventStatus := types.PorterAppEventStatus_Success
+	for _, metadata := range serviceStatusMap {
+		if metadata.Status != types.PorterAppEventStatus_Success {
+			deployEventStatus = types.PorterAppEventStatus_Progressing
+			break
+		}
+	}
+
 	event := models.PorterAppEvent{
 		ID:                 uuid.New(),
-		Status:             string(status),
+		Status:             string(deployEventStatus),
 		Type:               "DEPLOY",
 		TypeExternalSource: "KUBERNETES",
 		PorterAppID:        appID,

+ 217 - 50
api/server/handlers/porter_app/create_and_update_events.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
+	"time"
 
 	"github.com/google/uuid"
 	"github.com/porter-dev/porter/api/server/authz"
@@ -34,6 +35,10 @@ func NewCreateUpdatePorterAppEventHandler(
 	}
 }
 
+const (
+	crashLoopBackoffSubstring string = "stuck in a restart loop"
+)
+
 func (p *CreateUpdatePorterAppEventHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	ctx, span := telemetry.NewSpan(r.Context(), "serve-post-porter-app-event")
 	defer span.End()
@@ -58,17 +63,19 @@ func (p *CreateUpdatePorterAppEventHandler) ServeHTTP(w http.ResponseWriter, r *
 	telemetry.WithAttributes(span,
 		telemetry.AttributeKV{Key: "porter-app-name", Value: appName},
 		telemetry.AttributeKV{Key: "porter-app-event-type", Value: string(request.Type)},
-		telemetry.AttributeKV{Key: "porter-app-event-status", Value: request.Status},
+		telemetry.AttributeKV{Key: "porter-app-event-status", Value: string(request.Status)},
 		telemetry.AttributeKV{Key: "porter-app-event-external-source", Value: request.TypeExternalSource},
 		telemetry.AttributeKV{Key: "porter-app-event-id", Value: request.ID},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
 	)
 
 	if request.Type == types.PorterAppEventType_Build {
-		reportBuildStatus(ctx, request, p.Config(), user, project, appName)
+		validateApplyV2 := project.GetFeatureFlag(models.ValidateApplyV2, p.Config().LaunchDarklyClient)
+		reportBuildStatus(ctx, request, p.Config(), user, project, appName, validateApplyV2)
 	}
 
 	if request.ID == "" {
-		event, err := p.createNewAppEvent(ctx, *cluster, appName, request.Status, string(request.Type), request.TypeExternalSource, request.Metadata)
+		event, err := p.createNewAppEvent(ctx, *cluster, appName, request.DeploymentTargetID, request.Status, string(request.Type), request.TypeExternalSource, request.Metadata)
 		if err != nil {
 			e := telemetry.Error(ctx, span, err, "error creating new app event")
 			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
@@ -87,19 +94,24 @@ func (p *CreateUpdatePorterAppEventHandler) ServeHTTP(w http.ResponseWriter, r *
 	p.WriteResult(w, r, event)
 }
 
-func reportBuildStatus(ctx context.Context, request *types.CreateOrUpdatePorterAppEventRequest, config *config.Config, user *models.User, project *models.Project, stackName string) {
+func reportBuildStatus(ctx context.Context, request *types.CreateOrUpdatePorterAppEventRequest, config *config.Config, user *models.User, project *models.Project, stackName string, validateApplyV2 bool) {
 	ctx, span := telemetry.NewSpan(ctx, "report-build-status")
 	defer span.End()
 
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-build-status", Value: request.Status})
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-build-status", Value: string(request.Status)})
 
 	var errStr string
+	var buildLogs string
 	if errors, ok := request.Metadata["errors"]; ok {
 		if errs, ok := errors.(map[string]interface{}); ok {
 			errStringMap := make(map[string]string)
 			for k, v := range errs {
 				if valueStr, ok := v.(string); ok {
-					errStringMap[k] = valueStr
+					if k == "b64-build-logs" {
+						buildLogs = valueStr
+					} else {
+						errStringMap[k] = valueStr
+					}
 				}
 			}
 
@@ -112,11 +124,11 @@ func reportBuildStatus(ctx context.Context, request *types.CreateOrUpdatePorterA
 		}
 	}
 
-	_ = TrackStackBuildStatus(config, user, project, stackName, errStr, request.Status)
+	_ = TrackStackBuildStatus(ctx, config, user, project, stackName, errStr, request.Status, validateApplyV2, buildLogs)
 }
 
 // createNewAppEvent will create a new app event for the given porter app name. If the app event is an agent event, then it will be created only if there is no existing event which has the agent ID. In the case that an existing event is found, that will be returned instead
-func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Context, cluster models.Cluster, porterAppName string, status types.PorterAppEventStatus, eventType string, externalSource string, requestMetadata map[string]any) (types.PorterAppEvent, error) {
+func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Context, cluster models.Cluster, porterAppName string, deploymentTargetID string, status types.PorterAppEventStatus, eventType string, externalSource string, requestMetadata map[string]any) (types.PorterAppEvent, error) {
 	ctx, span := telemetry.NewSpan(ctx, "create-porter-app-event")
 	defer span.End()
 
@@ -138,13 +150,29 @@ func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Contex
 		// Agent has no way to know what the porter app event id is, so if we must dedup here
 		// TODO: create a filter to filter by only agent events. Not an issue now as app events are deduped per hour on the agent side
 		if agentEventID, ok := requestMetadata["agent_event_id"]; ok {
-			existingEvents, _, err := p.Repo().PorterAppEvent().ListEventsByPorterAppID(ctx, app.ID)
-			if err != nil {
-				return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error listing porter app events for event type")
+			var existingEvents []*models.PorterAppEvent
+			if deploymentTargetID == "" {
+				existingEvents, _, err = p.Repo().PorterAppEvent().ListEventsByPorterAppID(ctx, app.ID)
+				if err != nil {
+					return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error listing porter app events for event type")
+				}
+			} else {
+				deploymentTargetUUID, err := uuid.Parse(deploymentTargetID)
+				if err != nil {
+					return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error parsing deployment target id")
+				}
+				if deploymentTargetUUID == uuid.Nil {
+					return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "deployment target id cannot be nil")
+				}
+
+				existingEvents, _, err = p.Repo().PorterAppEvent().ListEventsByPorterAppIDAndDeploymentTargetID(ctx, app.ID, deploymentTargetUUID)
+				if err != nil {
+					return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error listing porter app events for event type with deployment target id")
+				}
 			}
 
 			for _, existingEvent := range existingEvents {
-				if existingEvent.Type == eventType {
+				if existingEvent != nil && existingEvent.Type == eventType {
 					existingAgentEventID, ok := existingEvent.Metadata["agent_event_id"]
 					if !ok {
 						continue
@@ -158,12 +186,29 @@ func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Contex
 				}
 			}
 		}
+
+		// before creating a new app_event type event, check the event for crashloop backoff
+		// if the event is a crashloop backoff, then update the service status of the deployment event associated it to FAILED, since the service's deployment will never succeed from crashloop backoff
+		// only applies to v2 apps (where the deployment target id is not empty)
+		if deploymentTargetID != "" {
+			updateMetadata, appEventFormattedCorrectly := appEventMatchesDetail(requestMetadata, crashLoopBackoffSubstring)
+			if appEventFormattedCorrectly {
+				_ = p.updateDeployEventMatchingAppEventDetails(
+					ctx,
+					porterAppName,
+					app.ID,
+					deploymentTargetID,
+					updateMetadata,
+					types.PorterAppEventStatus_Failed,
+				)
+			}
+		}
 	}
 
 	if eventType == string(types.PorterAppEventType_Deploy) {
 		// Agent has no way to know what the porter app event id is, so update the deploy event if it exists
 		if _, ok := requestMetadata["deploy_status"]; ok {
-			return p.updateDeployEvent(ctx, porterAppName, app.ID, requestMetadata), nil
+			return p.updateDeployEvent(ctx, porterAppName, app.ID, deploymentTargetID, requestMetadata), nil
 		}
 	}
 
@@ -176,6 +221,17 @@ func (p *CreateUpdatePorterAppEventHandler) createNewAppEvent(ctx context.Contex
 		Metadata:           make(map[string]any),
 	}
 
+	if deploymentTargetID != "" {
+		deploymentTargetUUID, err := uuid.Parse(deploymentTargetID)
+		if err != nil {
+			return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "error parsing deployment target id")
+		}
+		if deploymentTargetUUID == uuid.Nil {
+			return types.PorterAppEvent{}, telemetry.Error(ctx, span, err, "deployment target id cannot be nil")
+		}
+		event.DeploymentTargetID = deploymentTargetUUID
+	}
+
 	for k, v := range requestMetadata {
 		event.Metadata[k] = v
 	}
@@ -246,40 +302,86 @@ func (p *CreateUpdatePorterAppEventHandler) updateExistingAppEvent(ctx context.C
 // 4. the services specified in the updatedStatusMetadata match the services in the deploy event metadata
 // 5. some of the above services are still in the PROGRESSING state
 // if one of these conditions is not met, then an empty event is returned and no update is made; otherwise, the matched event is returned
-func (p *CreateUpdatePorterAppEventHandler) updateDeployEvent(ctx context.Context, appName string, appID uint, updatedStatusMetadata map[string]any) types.PorterAppEvent {
+func (p *CreateUpdatePorterAppEventHandler) updateDeployEvent(ctx context.Context, appName string, appID uint, deploymentTargetID string, updatedStatusMetadata map[string]any) types.PorterAppEvent {
 	ctx, span := telemetry.NewSpan(ctx, "update-deploy-event")
 	defer span.End()
 
-	revision, ok := updatedStatusMetadata["revision"]
-	if !ok {
-		_ = telemetry.Error(ctx, span, nil, "revision not found in request metadata")
-		return types.PorterAppEvent{}
-	}
-	revisionFloat64, ok := revision.(float64)
-	if !ok {
-		_ = telemetry.Error(ctx, span, nil, "revision not a float64")
-		return types.PorterAppEvent{}
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "revision", Value: revisionFloat64})
+	var serviceName string
+	var matchEvent models.PorterAppEvent
 
-	podName, ok := updatedStatusMetadata["pod_name"]
-	if !ok {
-		_ = telemetry.Error(ctx, span, nil, "pod name not found in request metadata")
-		return types.PorterAppEvent{}
-	}
-	podNameStr, ok := podName.(string)
-	if !ok {
-		_ = telemetry.Error(ctx, span, nil, "pod name not a string")
-		return types.PorterAppEvent{}
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "pod-name", Value: podNameStr})
+	if deploymentTargetID != "" {
+		appRevisionIDField, ok := updatedStatusMetadata["app_revision_id"]
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "app_revision_id not found in request metadata")
+			return types.PorterAppEvent{}
+		}
+		appRevisionID, ok := appRevisionIDField.(string)
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "appRevisionID is not a string")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: appRevisionID})
 
-	serviceName := getServiceNameFromPodName(podNameStr, appName)
-	if serviceName == "" {
-		_ = telemetry.Error(ctx, span, nil, "service name not found in pod name")
-		return types.PorterAppEvent{}
+		serviceNameField, ok := updatedStatusMetadata["service_name"]
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "service_name not found in request metadata")
+			return types.PorterAppEvent{}
+		}
+		serviceName, ok = serviceNameField.(string)
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "serviceName is not a string")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: serviceName})
+
+		var err error
+		matchEvent, err = p.Repo().PorterAppEvent().ReadDeployEventByAppRevisionID(ctx, appID, appRevisionID)
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error finding matching deploy event")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "updating-deployment-event", Value: false})
+	} else {
+		revision, ok := updatedStatusMetadata["revision"]
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "revision not found in request metadata")
+			return types.PorterAppEvent{}
+		}
+		revisionFloat64, ok := revision.(float64)
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "revision not a float64")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "revision", Value: revisionFloat64})
+
+		podName, ok := updatedStatusMetadata["pod_name"]
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "pod name not found in request metadata")
+			return types.PorterAppEvent{}
+		}
+		podNameStr, ok := podName.(string)
+		if !ok {
+			_ = telemetry.Error(ctx, span, nil, "pod name not a string")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "pod-name", Value: podNameStr})
+
+		serviceName = getServiceNameFromPodName(podNameStr, appName)
+		if serviceName == "" {
+			_ = telemetry.Error(ctx, span, nil, "service name not found in pod name")
+			return types.PorterAppEvent{}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: serviceName})
+
+		var err error
+		matchEvent, err = p.Repo().PorterAppEvent().ReadDeployEventByRevision(ctx, appID, revisionFloat64)
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error finding matching deploy event")
+			return types.PorterAppEvent{}
+		}
+
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "updating-deployment-event", Value: false})
 	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: serviceName})
 
 	newStatus, ok := updatedStatusMetadata["deploy_status"]
 	if !ok {
@@ -306,14 +408,6 @@ func (p *CreateUpdatePorterAppEventHandler) updateDeployEvent(ctx context.Contex
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "new-status", Value: string(porterAppEventStatus)})
 
-	matchEvent, err := p.Repo().PorterAppEvent().ReadDeployEventByRevision(ctx, appID, revisionFloat64)
-	if err != nil {
-		_ = telemetry.Error(ctx, span, err, "error finding matching deploy event")
-		return types.PorterAppEvent{}
-	}
-
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "updating-deployment-event", Value: false})
-
 	// first check to see if the event is empty, meaning there was no match found, or not progressing, meaning it has already been updated
 	if matchEvent.ID == uuid.Nil || matchEvent.Status != string(types.PorterAppEventStatus_Progressing) {
 		return types.PorterAppEvent{}
@@ -375,6 +469,7 @@ func (p *CreateUpdatePorterAppEventHandler) updateDeployEvent(ctx context.Contex
 			}
 		}
 		if allServicesDone {
+			matchEvent.Metadata["end_time"] = time.Now().UTC()
 			if anyServicesFailed {
 				matchEvent.Status = string(types.PorterAppEventStatus_Failed)
 			} else {
@@ -382,7 +477,7 @@ func (p *CreateUpdatePorterAppEventHandler) updateDeployEvent(ctx context.Contex
 			}
 		}
 
-		err = p.Repo().PorterAppEvent().UpdateEvent(ctx, &matchEvent)
+		err := p.Repo().PorterAppEvent().UpdateEvent(ctx, &matchEvent)
 		if err != nil {
 			_ = telemetry.Error(ctx, span, err, "error updating deploy event")
 			return matchEvent.ToPorterAppEvent()
@@ -424,3 +519,75 @@ func getServiceNameFromPodName(podName, porterAppName string) string {
 
 	return ""
 }
+
+// appEventMatchesDetail checks if the app event metadata matches the detail string, returning true if so
+// also returns the app event metadata as a defined struct, rather than a generic map
+func appEventMatchesDetail(eventMetadata map[string]any, detail string) (*types.PorterAppAppEventMetadata, bool) {
+	appEventMetadata := &types.PorterAppAppEventMetadata{}
+
+	by, err := json.Marshal(eventMetadata)
+	if err != nil {
+		return appEventMetadata, false
+	}
+
+	err = json.Unmarshal(by, appEventMetadata)
+	if err != nil {
+		return appEventMetadata, false
+	}
+
+	if appEventMetadata.AppRevisionID == "" {
+		return appEventMetadata, false
+	}
+	if appEventMetadata.ServiceName == "" {
+		return appEventMetadata, false
+	}
+	if !strings.Contains(appEventMetadata.Detail, detail) {
+		return appEventMetadata, false
+	}
+
+	return appEventMetadata, true
+}
+
+// updateDeployEventMatchingAppEventDetails updates the deploy event and service specified by the app event metadata, if it exists
+func (p *CreateUpdatePorterAppEventHandler) updateDeployEventMatchingAppEventDetails(
+	ctx context.Context,
+	porterAppName string,
+	porterAppId uint,
+	deploymentTargetID string,
+	updateMetadata *types.PorterAppAppEventMetadata,
+	status types.PorterAppEventStatus,
+) error {
+	ctx, span := telemetry.NewSpan(ctx, "update-deploy-event-matching-app-event-details")
+	defer span.End()
+
+	if updateMetadata == nil {
+		return telemetry.Error(ctx, span, nil, "update metadata is nil")
+	}
+
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "porter-app-name", Value: porterAppName},
+		telemetry.AttributeKV{Key: "porter-app-id", Value: porterAppId},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTargetID},
+		telemetry.AttributeKV{Key: "app-revision-id", Value: updateMetadata.AppRevisionID},
+		telemetry.AttributeKV{Key: "service-name", Value: updateMetadata.ServiceName},
+		telemetry.AttributeKV{Key: "detail", Value: updateMetadata.Detail},
+	)
+
+	// convert the metadata to a map[string]interface{} because that is the type updateDeployEvent expects
+	// TODO: refactor updateDeployEvent so we don't have to do this
+	updateMetadataBytes, err := json.Marshal(updateMetadata)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error marshaling update metadata")
+	}
+	updateMetadataMap := make(map[string]interface{})
+	err = json.Unmarshal(updateMetadataBytes, &updateMetadataMap)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error unmarshaling update metadata")
+	}
+
+	updateMetadataMap["deploy_status"] = string(status)
+	// we do not need the returned updated event
+	_ = p.updateDeployEvent(ctx, porterAppName, porterAppId, deploymentTargetID, updateMetadataMap)
+	return nil
+}

+ 55 - 15
api/server/handlers/porter_app/create_app.go

@@ -2,9 +2,14 @@ package porter_app
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -31,6 +36,9 @@ func NewCreateAppHandler(
 	}
 }
 
+// ErrMissingSourceType is returned when the source type is not specified
+var ErrMissingSourceType = errors.New("missing source type")
+
 // SourceType is a string type specifying the source type of an app. This is specified in the incoming request
 type SourceType string
 
@@ -51,13 +59,15 @@ type Image struct {
 
 // CreateAppRequest is the request object for the /apps/create endpoint
 type CreateAppRequest struct {
-	Name           string     `json:"name"`
-	SourceType     SourceType `json:"type"`
-	GitBranch      string     `json:"git_branch"`
-	GitRepoName    string     `json:"git_repo_name"`
-	GitRepoID      uint       `json:"git_repo_id"`
-	PorterYamlPath string     `json:"porter_yaml_path"`
-	Image          *Image     `json:"image,omitempty"`
+	Name                 string     `json:"name"`
+	SourceType           SourceType `json:"type"`
+	GitBranch            string     `json:"git_branch"`
+	GitRepoName          string     `json:"git_repo_name"`
+	GitRepoID            uint       `json:"git_repo_id"`
+	PorterYamlPath       string     `json:"porter_yaml_path"`
+	Image                *Image     `json:"image,omitempty"`
+	DeploymentTargetName string     `json:"deployment_target_name,omitempty"`
+	DeploymentTargetID   string     `json:"deployment_target_id,omitempty"`
 }
 
 // CreateGithubAppInput is the input for creating an app with a github source
@@ -97,7 +107,7 @@ func (c *CreateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
-	if !project.ValidateApplyV2 {
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
 		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
 		return
@@ -117,13 +127,6 @@ func (c *CreateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: request.Name})
 
-	if request.SourceType == "" {
-		err := telemetry.Error(ctx, span, nil, "source type is required")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "source-type", Value: request.SourceType})
-
 	porterAppDBEntries, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, request.Name)
 	if err != nil {
 		err := telemetry.Error(ctx, span, nil, "error reading porter apps by project id and name")
@@ -142,6 +145,13 @@ func (c *CreateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if request.SourceType == "" {
+		err := telemetry.Error(ctx, span, ErrMissingSourceType, "source type is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "source-type", Value: request.SourceType})
+
 	var porterApp *types.PorterApp
 	switch request.SourceType {
 	case SourceType_Github:
@@ -236,6 +246,36 @@ func (c *CreateAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-id", Value: porterApp.ID})
 
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-name", Value: request.DeploymentTargetName},
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID},
+	)
+
+	if request.DeploymentTargetName != "" || request.DeploymentTargetID != "" {
+		createAppInstanceReq := connect.NewRequest(&porterv1.CreateAppInstanceRequest{
+			ProjectId: int64(project.ID),
+			AppName:   request.Name,
+			DeploymentTargetIdentifier: &porterv1.DeploymentTargetIdentifier{
+				Id:   request.DeploymentTargetID,
+				Name: request.DeploymentTargetName,
+			},
+			PorterAppId: int64(porterApp.ID),
+		})
+
+		createAppInstanceResp, err := c.Config().ClusterControlPlaneClient.CreateAppInstance(ctx, createAppInstanceReq)
+		if err != nil {
+			// ignore error until app instances are fully supported: POR-2056
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "create-app-instance-error", Value: err.Error()})
+		}
+
+		if createAppInstanceResp == nil || createAppInstanceResp.Msg == nil {
+			// ignore error until app instances are fully supported: POR-2056
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "create-app-instance-nil", Value: true})
+		} else {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-instance-id", Value: createAppInstanceResp.Msg.AppInstanceId})
+		}
+	}
+
 	c.WriteResult(w, r, porterApp)
 }
 

+ 298 - 0
api/server/handlers/porter_app/create_app_template.go

@@ -0,0 +1,298 @@
+package porter_app
+
+import (
+	"context"
+	"encoding/base64"
+	"net/http"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// CreateAppTemplateHandler is the handler for the /app-template endpoint
+type CreateAppTemplateHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewCreateAppTemplateHandler handles POST requests to the endpoint /app-template
+func NewCreateAppTemplateHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *CreateAppTemplateHandler {
+	return &CreateAppTemplateHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// CreateAppTemplateRequest is the request object for the /app-template POST endpoint
+type CreateAppTemplateRequest struct {
+	B64AppProto            string            `json:"b64_app_proto"`
+	Variables              map[string]string `json:"variables"`
+	Secrets                map[string]string `json:"secrets"`
+	BaseDeploymentTargetID string            `json:"base_deployment_target_id"`
+}
+
+// CreateAppTemplateResponse is the response object for the /app-template POST endpoint
+type CreateAppTemplateResponse struct {
+	AppTemplateID string `json:"app_template_id"`
+}
+
+// ServeHTTP creates or updates an app template for a given porter app
+func (c *CreateAppTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-create-app-template")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing stack name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &CreateAppTemplateRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if request.B64AppProto == "" {
+		err := telemetry.Error(ctx, span, nil, "b64 app proto is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	baseDeploymentTarget, err := uuid.Parse(request.BaseDeploymentTargetID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error parsing base deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if baseDeploymentTarget == uuid.Nil {
+		err := telemetry.Error(ctx, span, err, "base deployment target id is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	porterApps, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, appName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting porter app from repo")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if len(porterApps) == 0 {
+		err := telemetry.Error(ctx, span, err, "no porter apps returned")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if len(porterApps) > 1 {
+		err := telemetry.Error(ctx, span, err, "multiple porter apps returned; unable to determine which one to use")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if porterApps[0].ID == 0 {
+		err := telemetry.Error(ctx, span, err, "porter app id is missing")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-id", Value: porterApps[0].ID})
+
+	var appTemplate *models.AppTemplate
+
+	existingAppTemplate, err := c.Repo().AppTemplate().AppTemplateByPorterAppID(
+		project.ID,
+		porterApps[0].ID,
+	)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error checking for existing app template")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if existingAppTemplate.ID != uuid.Nil {
+		appTemplate = existingAppTemplate
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "update-app-template", Value: true})
+	}
+	if appTemplate == nil {
+		appTemplate = &models.AppTemplate{
+			ProjectID:   int(project.ID),
+			PorterAppID: int(porterApps[0].ID),
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "update-app-template", Value: false})
+	}
+
+	protoWithoutDefaultAppEnvGroups, err := filterDefaultAppEnvGroups(ctx, request.B64AppProto, agent)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error filtering default app env groups")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	appTemplate.Base64App = protoWithoutDefaultAppEnvGroups
+	appTemplate.BaseDeploymentTargetID = baseDeploymentTarget
+
+	updatedAppTemplate, err := c.Repo().AppTemplate().CreateAppTemplate(appTemplate)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error creating app template")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if updatedAppTemplate == nil {
+		err := telemetry.Error(ctx, span, err, "updated app template is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if updatedAppTemplate.ID == uuid.Nil {
+		err := telemetry.Error(ctx, span, err, "updated app template id is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	previewTemplateEnvName, err := porter_app.AppTemplateEnvGroupName(ctx, appName, cluster.ID, c.Repo().PorterApp())
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to get app template env group name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	envGroup, err := environment_groups.LatestBaseEnvironmentGroup(ctx, agent, previewTemplateEnvName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to get latest base environment group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if envGroup.Name == "" {
+		envGroup = environment_groups.EnvironmentGroup{
+			Name:         previewTemplateEnvName,
+			CreatedAtUTC: time.Now().UTC(),
+		}
+	}
+	envGroup.Variables = request.Variables
+	envGroup.SecretVariables = request.Secrets
+
+	additionalEnvGroupLabels := map[string]string{
+		LabelKey_AppName: appName,
+		environment_groups.LabelKey_DefaultAppEnvironment: "true",
+		LabelKey_PorterManaged:                            "true",
+	}
+
+	err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup, additionalEnvGroupLabels)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to create or update base environment group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	err = porter_app.CreateAppWebhook(ctx, porter_app.CreateAppWebhookInput{
+		PorterAppName:           appName,
+		ProjectID:               project.ID,
+		ClusterID:               cluster.ID,
+		GithubAppSecret:         c.Config().ServerConf.GithubAppSecret,
+		GithubAppID:             c.Config().ServerConf.GithubAppID,
+		GithubWebhookSecret:     c.Config().ServerConf.GithubIncomingWebhookSecret,
+		ServerURL:               c.Config().ServerConf.ServerURL,
+		PorterAppRepository:     c.Repo().PorterApp(),
+		GithubWebhookRepository: c.Repo().GithubWebhook(),
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to set repo webhook")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &CreateAppTemplateResponse{
+		AppTemplateID: updatedAppTemplate.ID.String(),
+	}
+
+	c.WriteResult(w, r, res)
+}
+
+// filterDefaultAppEnvGroups filters out any default app env groups found when creating an app template
+// app templates are based on the latest version of a given app, so it is possible for this env group to be included
+// however, the app template will get its own default env group when used to deploy to a preview environment
+func filterDefaultAppEnvGroups(ctx context.Context, b64AppProto string, agent *kubernetes.Agent) (string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "filter-default-app-env-groups")
+	defer span.End()
+
+	var finalAppProto string
+
+	if b64AppProto == "" {
+		return finalAppProto, telemetry.Error(ctx, span, nil, "b64 app proto is empty")
+	}
+	if agent == nil {
+		return finalAppProto, telemetry.Error(ctx, span, nil, "agent is nil")
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(b64AppProto)
+	if err != nil {
+		return finalAppProto, telemetry.Error(ctx, span, err, "error decoding base app")
+	}
+
+	appProto := &porterv1.PorterApp{}
+	err = helpers.UnmarshalContractObject(decoded, appProto)
+	if err != nil {
+		return finalAppProto, telemetry.Error(ctx, span, err, "error unmarshalling app proto")
+	}
+
+	filteredEnvGroups := []*porterv1.EnvGroup{}
+	for _, envGroup := range appProto.EnvGroups {
+		baseEnvGroup, err := environment_groups.LatestBaseEnvironmentGroup(ctx, agent, envGroup.Name)
+		if err != nil {
+			return finalAppProto, telemetry.Error(ctx, span, err, "unable to get latest base environment group")
+		}
+		if baseEnvGroup.DefaultAppEnvironment {
+			continue
+		}
+
+		filteredEnvGroups = append(filteredEnvGroups, envGroup)
+	}
+
+	appProto.EnvGroups = filteredEnvGroups
+
+	encoded, err := helpers.MarshalContractObject(ctx, appProto)
+	if err != nil {
+		return finalAppProto, telemetry.Error(ctx, span, err, "error marshalling app proto")
+	}
+
+	finalAppProto = base64.StdEncoding.EncodeToString(encoded)
+
+	return finalAppProto, nil
+}

+ 73 - 25
api/server/handlers/porter_app/create_secret_and_open_pr.go

@@ -17,6 +17,7 @@ import (
 	"github.com/porter-dev/porter/internal/auth/token"
 	"github.com/porter-dev/porter/internal/integrations/ci/actions"
 	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
 )
 
 type OpenStackPRHandler struct {
@@ -34,9 +35,12 @@ func NewOpenStackPRHandler(
 }
 
 func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	user, _ := r.Context().Value(types.UserScope).(*models.User)
-	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-open-stack-pr")
+	defer span.End()
+
+	user, _ := ctx.Value(types.UserScope).(*models.User)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
 	if reqErr != nil {
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(reqErr, http.StatusBadRequest))
@@ -45,11 +49,25 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	request := &types.CreateSecretAndOpenGHPRRequest{}
 	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.Branch == "" {
+		err := telemetry.Error(ctx, span, nil, "branch cannot be empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if request.PreviewsWorkflowFilename != "" && request.DeleteWorkflowFilename != "" {
+		err := telemetry.Error(ctx, span, nil, "both preview and delete workflow filenames cannot be set")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
 
 	client, err := getGithubClient(c.Config(), request.GithubAppInstallationID)
 	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error creating github client")
 		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 		return
 	}
@@ -59,12 +77,16 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		// generate porter jwt token
 		jwt, err := token.GetTokenForAPI(user.ID, project.ID)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting token for API: %w", err)))
+			err = fmt.Errorf("error getting token for API: %w", err)
+			err := telemetry.Error(ctx, span, err, err.Error())
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
 		encoded, err := jwt.EncodeToken(c.Config().TokenConf)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error encoding API token: %w", err)))
+			err = fmt.Errorf("error encoding API token: %w", err)
+			err := telemetry.Error(ctx, span, err, err.Error())
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
 
@@ -78,7 +100,9 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			request.GithubRepoName,
 		)
 		if err != nil {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error generating secret: %w", err)))
+			err = fmt.Errorf("error generating secret: %w", err)
+			err := telemetry.Error(ctx, span, err, err.Error())
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
 	}
@@ -87,24 +111,40 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	var prRequestBody string
 	if request.DeleteWorkflowFilename == "" {
 		prRequestBody = "Hello 👋 from Porter! Please merge this PR to finish setting up your application."
-	} else {
+	} else if request.PreviewsWorkflowFilename == "" {
 		prRequestBody = "Please merge this PR to delete the workflow file associated with your application."
+	} else {
+		prRequestBody = "Hello 👋 from Porter! Please merge this PR to enable preview environments for your application."
 	}
+
 	if request.OpenPr || request.DeleteWorkflowFilename != "" {
-		pr, err = actions.OpenGithubPR(&actions.GithubPROpts{
-			Client:                 client,
-			GitRepoOwner:           request.GithubRepoOwner,
-			GitRepoName:            request.GithubRepoName,
-			StackName:              appName,
-			ProjectID:              project.ID,
-			ClusterID:              cluster.ID,
-			ServerURL:              c.Config().ServerConf.ServerURL,
-			DefaultBranch:          request.Branch,
-			SecretName:             secretName,
-			PorterYamlPath:         request.PorterYamlPath,
-			Body:                   prRequestBody,
-			DeleteWorkflowFilename: request.DeleteWorkflowFilename,
-		})
+		openPRInput := &actions.GithubPROpts{
+			PRAction:       actions.GithubPRAction_NewAppWorkflow,
+			Client:         client,
+			GitRepoOwner:   request.GithubRepoOwner,
+			GitRepoName:    request.GithubRepoName,
+			StackName:      appName,
+			ProjectID:      project.ID,
+			ClusterID:      cluster.ID,
+			ServerURL:      c.Config().ServerConf.ServerURL,
+			DefaultBranch:  request.Branch,
+			SecretName:     secretName,
+			PorterYamlPath: request.PorterYamlPath,
+			Body:           prRequestBody,
+			PRBranch:       "porter-stack",
+		}
+		if request.DeleteWorkflowFilename != "" {
+			openPRInput.PRAction = actions.GithubPRAction_DeleteAppWorkflow
+			openPRInput.WorkflowFileName = request.DeleteWorkflowFilename
+			openPRInput.PRBranch = "porter-stack-delete"
+		}
+		if request.PreviewsWorkflowFilename != "" {
+			openPRInput.PRAction = actions.GithubPRAction_PreviewAppWorkflow
+			openPRInput.WorkflowFileName = request.PreviewsWorkflowFilename
+			openPRInput.PRBranch = "porter-stack-preview"
+		}
+
+		pr, err = actions.OpenGithubPR(openPRInput)
 	}
 
 	if err != nil {
@@ -113,12 +153,16 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		if unwrappedErr != nil {
 			if errors.Is(unwrappedErr, actions.ErrProtectedBranch) {
 				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusConflict))
+				return
 			} else if errors.Is(unwrappedErr, actions.ErrCreatePRForProtectedBranch) {
 				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusPreconditionFailed))
+				return
 			}
 		} else {
-			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error setting up application in the github "+
-				"repo: %w", err)))
+			err = fmt.Errorf("error setting up application in the github "+
+				"repo: %w", err)
+			err := telemetry.Error(ctx, span, err, err.Error())
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 			return
 		}
 	}
@@ -133,7 +177,9 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			// update DB with the PR url
 			porterApp, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, appName)
 			if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("unable to get porter app db: %w", err)))
+				err = fmt.Errorf("unable to get porter app db: %w", err)
+				err := telemetry.Error(ctx, span, err, err.Error())
+				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 				return
 			}
 
@@ -141,7 +187,9 @@ func (c *OpenStackPRHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 			_, err = c.Repo().PorterApp().UpdatePorterApp(porterApp)
 			if err != nil {
-				c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("unable to write pr url to porter app db: %w", err)))
+				err = fmt.Errorf("unable to write pr url to porter app db: %w", err)
+				err := telemetry.Error(ctx, span, err, err.Error())
+				c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
 				return
 			}
 		}

+ 3 - 3
api/server/handlers/porter_app/create_subdomain.go

@@ -132,13 +132,13 @@ func (c *CreateSubdomainHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 	_record := domain.DNSRecord(*record)
 
-	if c.Config().PowerDNSClient == nil {
-		err := telemetry.Error(ctx, span, nil, "powerdns client is nil")
+	if c.Config().DNSClient == nil {
+		err := telemetry.Error(ctx, span, nil, "dns client is nil")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	err = _record.CreateDomain(c.Config().PowerDNSClient)
+	err = _record.CreateDomain(c.Config().DNSClient)
 	if err != nil {
 		err := telemetry.Error(ctx, span, nil, "error creating domain")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))

+ 4 - 1
api/server/handlers/porter_app/current_app_revision.go

@@ -3,6 +3,7 @@ package porter_app
 import (
 	"net/http"
 
+	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/shared/requestutils"
 
 	"connectrpc.com/connect"
@@ -25,6 +26,7 @@ import (
 // LatestAppRevisionHandler handles requests to the /apps/{porter_app_name}/latest endpoint
 type LatestAppRevisionHandler struct {
 	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
 }
 
 // NewLatestAppRevisionHandler returns a new LatestAppRevisionHandler
@@ -35,6 +37,7 @@ func NewLatestAppRevisionHandler(
 ) *LatestAppRevisionHandler {
 	return &LatestAppRevisionHandler{
 		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
 	}
 }
 
@@ -105,7 +108,7 @@ func (c *LatestAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 	}
 
 	if porterApps[0].ID == 0 {
-		err := telemetry.Error(ctx, span, err, "porter app id is missiong")
+		err := telemetry.Error(ctx, span, err, "porter app id is missing")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}

+ 1 - 1
api/server/handlers/porter_app/get.go

@@ -60,7 +60,7 @@ func (c *GetPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 	// this is a temporary fix until we figure out how to reconcile the new revisions table
 	// with dependencies on helm releases throuhg the api
-	if project.ValidateApplyV2 {
+	if project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		c.WriteResult(w, r, app.ToPorterAppType())
 		return
 	}

+ 175 - 0
api/server/handlers/porter_app/get_app_env.go

@@ -0,0 +1,175 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// GetAppEnvHandler is the handler for the /apps/{porter_app_name}/revisions/{app_revision_id}/env endpoint
type GetAppEnvHandler struct {
	handlers.PorterHandlerReadWriter
	// KubernetesAgentGetter supplies the k8s agent used to read env group values from the cluster
	authz.KubernetesAgentGetter
}

// NewGetAppEnvHandler handles GET requests to the /apps/{porter_app_name}/revisions/{app_revision_id}/env endpoint
func NewGetAppEnvHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetAppEnvHandler {
	return &GetAppEnvHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}

// GetAppEnvRequest is the request object for the /apps/{porter_app_name}/revisions/{app_revision_id}/env endpoint
type GetAppEnvRequest struct {
	// EnvGroups is a list of environment group names to query. If empty, all environment groups will be queried
	EnvGroups []string `json:"env_groups"`
}

// GetAppEnvResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id}/env endpoint
type GetAppEnvResponse struct {
	// EnvGroups are the environment groups resolved for the requested app revision
	EnvGroups []environment_groups.EnvironmentGroup `json:"env_groups"`
	// AppEnv is the revision's own environment, as attached by AttachEnvToRevision
	AppEnv environment_groups.EnvironmentGroup `json:"app_env"`
}
+
// ServeHTTP translates the request into a GetAppEnvRequest request, uses the revision proto to query the cluster for the requested env groups, and returns the response
func (c *GetAppEnvHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-app-env")
	defer span.End()

	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)

	// this endpoint is gated behind the validate-apply-v2 feature flag
	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
		return
	}

	revisionID, reqErr := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
	if reqErr != nil {
		err := telemetry.Error(ctx, span, nil, "error parsing app revision id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	appRevisionUuid, err := uuid.Parse(revisionID)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error parsing app revision id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	if appRevisionUuid == uuid.Nil {
		err := telemetry.Error(ctx, span, nil, "app revision id is nil")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: appRevisionUuid.String()})

	request := &GetAppEnvRequest{}
	if ok := c.DecodeAndValidate(w, r, request); !ok {
		err := telemetry.Error(ctx, span, nil, "error decoding request")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	// fetch the revision from CCP; its base64-encoded app proto is decoded below
	revision, err := porter_app.GetAppRevision(ctx, porter_app.GetAppRevisionInput{
		AppRevisionID: appRevisionUuid,
		ProjectID:     project.ID,
		CCPClient:     c.Config().ClusterControlPlaneClient,
	})
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting app revision")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	decoded, err := base64.StdEncoding.DecodeString(revision.B64AppProto)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error decoding base proto")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	appProto := &porterv1.PorterApp{}
	err = helpers.UnmarshalContractObject(decoded, appProto)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	// the k8s agent is needed to read env group contents from the cluster
	agent, err := c.GetAgent(r, cluster, "")
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting agent")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
		ProjectID:          int64(project.ID),
		ClusterID:          int64(cluster.ID),
		DeploymentTargetID: revision.DeploymentTargetID,
		CCPClient:          c.Config().ClusterControlPlaneClient,
	})
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	envFromProtoInp := porter_app.AppEnvironmentFromProtoInput{
		ProjectID:        project.ID,
		ClusterID:        int(cluster.ID),
		DeploymentTarget: deploymentTarget,
		App:              appProto,
		K8SAgent:         agent,
	}

	// options: restrict to the requested group names, include secret values, and skip the
	// app's default env group (the default env is returned separately as AppEnv below)
	envGroups, err := porter_app.AppEnvironmentFromProto(ctx, envFromProtoInp, porter_app.WithEnvGroupFilter(request.EnvGroups), porter_app.WithSecrets(), porter_app.WithoutDefaultAppEnvGroups())
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting app environment from revision")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	revisionWithEnv, err := porter_app.AttachEnvToRevision(ctx, porter_app.AttachEnvToRevisionInput{
		ProjectID:           project.ID,
		ClusterID:           int(cluster.ID),
		Revision:            revision,
		DeploymentTarget:    deploymentTarget,
		K8SAgent:            agent,
		PorterAppRepository: c.Repo().PorterApp(),
	})
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error attaching env to revision")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	res := &GetAppEnvResponse{
		EnvGroups: envGroups,
		AppEnv:    revisionWithEnv.Env,
	}

	c.WriteResult(w, r, res)
}

+ 121 - 0
api/server/handlers/porter_app/get_app_revision.go

@@ -0,0 +1,121 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// GetAppRevisionHandler handles requests to the /apps/{porter_app_name}/revisions/{app_revision_id} endpoint
type GetAppRevisionHandler struct {
	handlers.PorterHandlerReadWriter
	// KubernetesAgentGetter supplies the k8s agent used when attaching env data to the revision
	authz.KubernetesAgentGetter
}

// NewGetAppRevisionHandler returns a new GetAppRevisionHandler
func NewGetAppRevisionHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetAppRevisionHandler {
	return &GetAppRevisionHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}

// GetAppRevisionResponse represents the response from the /apps/{porter_app_name}/revisions/{app_revision_id} endpoint
type GetAppRevisionResponse struct {
	// AppRevision is the requested revision, with its environment attached
	AppRevision porter_app.Revision `json:"app_revision"`
}
+
+// GetAppRevisionHandler returns a single app revision
+func (c *GetAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-app-revision")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	appRevisionID, reqErr := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing app revision id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	getRevisionReq := connect.NewRequest(&porterv1.GetAppRevisionRequest{
+		ProjectId:     int64(project.ID),
+		AppRevisionId: appRevisionID,
+	})
+	ccpResp, err := c.Config().ClusterControlPlaneClient.GetAppRevision(ctx, getRevisionReq)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting app revision")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "get app revision response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	encodedRevision, err := porter_app.EncodedRevisionFromProto(ctx, ccpResp.Msg.AppRevision)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting encoded revision from proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
+		ProjectID:          int64(project.ID),
+		ClusterID:          int64(cluster.ID),
+		DeploymentTargetID: ccpResp.Msg.AppRevision.DeploymentTargetId,
+		CCPClient:          c.Config().ClusterControlPlaneClient,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	revisionWithEnv, err := porter_app.AttachEnvToRevision(ctx, porter_app.AttachEnvToRevisionInput{
+		ProjectID:           project.ID,
+		ClusterID:           int(cluster.ID),
+		Revision:            encodedRevision,
+		DeploymentTarget:    deploymentTarget,
+		K8SAgent:            agent,
+		PorterAppRepository: c.Repo().PorterApp(),
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error attaching env to revision")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &GetAppRevisionResponse{
+		AppRevision: revisionWithEnv,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 132 - 0
api/server/handlers/porter_app/get_app_template.go

@@ -0,0 +1,132 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// GetAppTemplateHandler is the handler for the /apps/{porter_app_name}/templates endpoint
type GetAppTemplateHandler struct {
	handlers.PorterHandlerReadWriter
}

// NewGetAppTemplateHandler handles GET requests to the endpoint /apps/{porter_app_name}/templates
func NewGetAppTemplateHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetAppTemplateHandler {
	return &GetAppTemplateHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
	}
}

// GetAppTemplateResponse is the response object for the /apps/{porter_app_name}/templates GET endpoint
type GetAppTemplateResponse struct {
	// Template is the set of app overrides explicitly set by the user to be used in subsequent preview deploys
	TemplateB64AppProto string `json:"template_b64_app_proto"`
	// AppEnv is the base set of environment variables that will be used in subsequent preview deploys
	AppEnv environment_groups.EnvironmentGroup `json:"app_env"`
}
+
+// ServeHTTP creates or updates an app template for a given porter app
+func (c *GetAppTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-app-template")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing stack name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	porterApps, err := c.Repo().PorterApp().ReadPorterAppsByProjectIDAndName(project.ID, appName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting porter app from repo")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if len(porterApps) == 0 {
+		err := telemetry.Error(ctx, span, err, "no porter apps returned")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if len(porterApps) > 1 {
+		err := telemetry.Error(ctx, span, err, "multiple porter apps returned; unable to determine which one to use")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if porterApps[0].ID == 0 {
+		err := telemetry.Error(ctx, span, err, "porter app id is missing")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	app := porterApps[0]
+
+	templateReq := connect.NewRequest(&porterv1.AppTemplateRequest{
+		ProjectId: int64(project.ID),
+		AppId:     int64(app.ID),
+	})
+
+	ccpResp, err := c.Config().ClusterControlPlaneClient.AppTemplate(ctx, templateReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting app template")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "app template resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	appTemplate := ccpResp.Msg.AppTemplate
+
+	by, err := helpers.MarshalContractObject(ctx, appTemplate)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error marshaling app template")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	encoded := base64.StdEncoding.EncodeToString(by)
+
+	appEnv := environment_groups.EnvironmentGroup{
+		Variables:       ccpResp.Msg.AppEnv.Normal,
+		SecretVariables: ccpResp.Msg.AppEnv.Secret,
+	}
+
+	res := &GetAppTemplateResponse{
+		TemplateB64AppProto: encoded,
+		AppEnv:              appEnv,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 156 - 0
api/server/handlers/porter_app/get_build_env.go

@@ -0,0 +1,156 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// GetBuildEnvHandler is the handler for the /apps/{porter_app_name}/revisions/{app_revision_id}/build-env endpoint
type GetBuildEnvHandler struct {
	handlers.PorterHandlerReadWriter
	// KubernetesAgentGetter supplies the k8s agent used to read env group values from the cluster
	authz.KubernetesAgentGetter
}

// NewGetBuildEnvHandler handles GET requests to the /apps/{porter_app_name}/revisions/{app_revision_id}/build-env endpoint
func NewGetBuildEnvHandler(
	config *config.Config,
	decoderValidator shared.RequestDecoderValidator,
	writer shared.ResultWriter,
) *GetBuildEnvHandler {
	return &GetBuildEnvHandler{
		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
	}
}

// GetBuildEnvResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id}/build-env endpoint
type GetBuildEnvResponse struct {
	// BuildEnvVariables is the flattened set of env variables from all of the revision's env groups
	BuildEnvVariables map[string]string `json:"build_env_variables"`
}
+
// ServeHTTP translates the request into a GetBuildEnvRequest request, uses the proto to query the cluster for the build env, and returns the response
func (c *GetBuildEnvHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-build-env")
	defer span.End()

	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)

	telemetry.WithAttributes(span,
		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
	)

	// this endpoint is gated behind the validate-apply-v2 feature flag
	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
		return
	}

	revisionID, reqErr := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
	if reqErr != nil {
		err := telemetry.Error(ctx, span, nil, "error parsing app revision id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	appRevisionUuid, err := uuid.Parse(revisionID)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error parsing app revision id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	if appRevisionUuid == uuid.Nil {
		err := telemetry.Error(ctx, span, nil, "app revision id is nil")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: appRevisionUuid.String()})

	// fetch the revision from CCP; its base64-encoded app proto is decoded below
	revision, err := porter_app.GetAppRevision(ctx, porter_app.GetAppRevisionInput{
		AppRevisionID: appRevisionUuid,
		ProjectID:     project.ID,
		CCPClient:     c.Config().ClusterControlPlaneClient,
	})
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting app revision")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	decoded, err := base64.StdEncoding.DecodeString(revision.B64AppProto)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error decoding base proto")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	appProto := &porterv1.PorterApp{}
	err = helpers.UnmarshalContractObject(decoded, appProto)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	// the k8s agent is needed to read env group contents from the cluster
	agent, err := c.GetAgent(r, cluster, "")
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting agent")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
		ProjectID:          int64(project.ID),
		ClusterID:          int64(cluster.ID),
		DeploymentTargetID: revision.DeploymentTargetID,
		CCPClient:          c.Config().ClusterControlPlaneClient,
	})
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	envFromProtoInp := porter_app.AppEnvironmentFromProtoInput{
		ProjectID:        project.ID,
		ClusterID:        int(cluster.ID),
		DeploymentTarget: deploymentTarget,
		App:              appProto,
		K8SAgent:         agent,
	}
	envGroups, err := porter_app.AppEnvironmentFromProto(ctx, envFromProtoInp)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting app environment from revision")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// flatten all env groups into a single map; for duplicate keys,
	// later env groups in the slice overwrite earlier ones
	buildEnvVariables := make(map[string]string)
	for _, envGroup := range envGroups {
		for key, val := range envGroup.Variables {
			buildEnvVariables[key] = val
		}
	}

	res := &GetBuildEnvResponse{
		BuildEnvVariables: buildEnvVariables,
	}

	c.WriteResult(w, r, res)
}

+ 133 - 0
api/server/handlers/porter_app/helm_values_v2.go

@@ -0,0 +1,133 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/google/uuid"
+
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// AppHelmValuesHandler handles requests to the /apps/{porter_app_name}/helm-values endpoint
+type AppHelmValuesHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAppHelmValuesHandler returns a new AppHelmValuesHandler
+func NewAppHelmValuesHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppHelmValuesHandler {
+	return &AppHelmValuesHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// AppHelmValuesRequest is the request object for the /apps/{porter_app_name}/helm-values endpoint
+type AppHelmValuesRequest struct {
+	AppID              uint   `schema:"app_id"`
+	DeploymentTargetID string `schema:"deployment_target_id"`
+	WithDefaults       bool   `schema:"with_defaults"`
+}
+
+// AppHelmValuesResponse is the response object for the /apps/{porter_app_name}/helm-values endpoint
+type AppHelmValuesResponse struct {
+	// AppRevision is the latest revision for the app
+	HelmValues string `json:"helm_values"`
+}
+
+// ServeHTTP translates the request into a helmValues grpc request, forwards to the cluster control plane, and returns the response.
+func (c *AppHelmValuesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-helm-values")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: project.ID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, reqErr, "error parsing app name from url")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &AppHelmValuesRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	_, err := uuid.Parse(request.DeploymentTargetID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	if request.AppID == 0 {
+		err := telemetry.Error(ctx, span, nil, "app id is required")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-id", Value: request.AppID})
+
+	helmValuesReq := connect.NewRequest(&porterv1.AppHelmValuesRequest{
+		ProjectId:          int64(project.ID),
+		AppId:              int64(request.AppID),
+		DeploymentTargetId: request.DeploymentTargetID,
+		WithDefaults:       request.WithDefaults,
+	})
+
+	helmValuesResp, err := c.Config().ClusterControlPlaneClient.AppHelmValues(ctx, helmValuesReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting app helm values from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if helmValuesResp == nil || helmValuesResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "app helm values resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	decodedValues, err := base64.StdEncoding.DecodeString(helmValuesResp.Msg.B64Values)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error decoding helm values")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	response := AppHelmValuesResponse{
+		HelmValues: string(decodedValues),
+	}
+
+	c.WriteResult(w, r, response)
+}

+ 132 - 0
api/server/handlers/porter_app/job_status.go

@@ -0,0 +1,132 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// JobStatusHandler is the handler for GET /apps/jobs
+type JobStatusHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewJobStatusHandler returns a new JobStatusHandler
+func NewJobStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *JobStatusHandler {
+	return &JobStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// JobStatusRequest is the expected format for a request body on GET /apps/jobs
+type JobStatusRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+	JobName            string `schema:"job_name"`
+}
+
+func (c *JobStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-job-status")
+	defer span.End()
+
+	request := &JobStatusRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	name, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "invalid porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: name})
+
+	if request.DeploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetID,
+	})
+
+	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := deploymentTargetDetailsResp.Msg.Namespace
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "unable to get agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	labels := []kubernetes.Label{
+		{
+			Key: "porter.run/deployment-target-id",
+			Val: request.DeploymentTargetID,
+		},
+		{
+			Key: "porter.run/app-name",
+			Val: name,
+		},
+	}
+	if request.JobName != "" {
+		labels = append(labels, kubernetes.Label{
+			Key: "porter.run/service-name",
+			Val: request.JobName,
+		})
+	}
+	jobs, err := agent.ListJobsByLabel(namespace, labels...)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error listing jobs")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, jobs)
+}

+ 131 - 0
api/server/handlers/porter_app/latest_app_revisions.go

@@ -0,0 +1,131 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// LatestAppRevisionsHandler handles requests to the /apps/revisions endpoint
+type LatestAppRevisionsHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewLatestAppRevisionsHandler returns a new LatestAppRevisionsHandler
+func NewLatestAppRevisionsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *LatestAppRevisionsHandler {
+	return &LatestAppRevisionsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// LatestAppRevisionsRequest represents the request for the /apps/revisions endpoint
+type LatestAppRevisionsRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+}
+
+// LatestRevisionWithSource is an app revision and its source porter app
+type LatestRevisionWithSource struct {
+	AppRevision porter_app.Revision `json:"app_revision"`
+	Source      types.PorterApp     `json:"source"`
+}
+
+// LatestAppRevisionsResponse represents the response from the /apps/revisions endpoint
+type LatestAppRevisionsResponse struct {
+	AppRevisions []LatestRevisionWithSource `json:"app_revisions"`
+}
+
+func (c *LatestAppRevisionsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-app-revisions")
+	defer span.End()
+
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+
+	request := &LatestAppRevisionsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargetID, err := uuid.Parse(request.DeploymentTargetID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if deploymentTargetID == uuid.Nil {
+		err := telemetry.Error(ctx, span, err, "deployment target id is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	listAppRevisionsReq := connect.NewRequest(&porterv1.LatestAppRevisionsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: deploymentTargetID.String(),
+	})
+
+	latestAppRevisionsResp, err := c.Config().ClusterControlPlaneClient.LatestAppRevisions(ctx, listAppRevisionsReq)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting latest app revisions")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if latestAppRevisionsResp == nil || latestAppRevisionsResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "latest app revisions response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	appRevisions := latestAppRevisionsResp.Msg.AppRevisions
+	if appRevisions == nil {
+		appRevisions = []*porterv1.AppRevision{}
+	}
+
+	res := &LatestAppRevisionsResponse{
+		AppRevisions: make([]LatestRevisionWithSource, 0),
+	}
+
+	for _, revision := range appRevisions {
+		encodedRevision, err := porter_app.EncodedRevisionFromProto(ctx, revision)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error getting encoded revision from proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		porterApp, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, revision.App.Name)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error reading porter app")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		if porterApp == nil {
+			err := telemetry.Error(ctx, span, err, "porter app is nil")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+
+		res.AppRevisions = append(res.AppRevisions, LatestRevisionWithSource{
+			AppRevision: encodedRevision,
+			Source:      *porterApp.ToPorterAppType(),
+		})
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 3 - 0
api/server/handlers/porter_app/list_app_revisions.go

@@ -6,6 +6,7 @@ import (
 	"connectrpc.com/connect"
 	"github.com/google/uuid"
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
@@ -20,6 +21,7 @@ import (
 // ListAppRevisionsHandler handles requests to the /apps/{porter_app_name}/revisions endpoint
 type ListAppRevisionsHandler struct {
 	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
 }
 
 // NewListAppRevisionsHandler returns a new ListAppRevisionsHandler
@@ -30,6 +32,7 @@ func NewListAppRevisionsHandler(
 ) *ListAppRevisionsHandler {
 	return &ListAppRevisionsHandler{
 		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
 	}
 }
 

+ 7 - 4
api/server/handlers/porter_app/list_events.go

@@ -67,7 +67,8 @@ func (p *PorterAppEventListHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
 		return
 	}
 
-	porterAppEvents, paginatedResult, err := p.Repo().PorterAppEvent().ListEventsByPorterAppID(ctx, app.ID, helpers.WithPageSize(20), helpers.WithPage(int(pr.Page)))
+	// legacy app events will have a nil deployment target id
+	porterAppEvents, paginatedResult, err := p.Repo().PorterAppEvent().ListEventsByPorterAppIDAndDeploymentTargetID(ctx, app.ID, uuid.Nil, helpers.WithPageSize(20), helpers.WithPage(int(pr.Page)))
 	if err != nil {
 		if !errors.Is(err, gorm.ErrRecordNotFound) {
 			e := telemetry.Error(ctx, span, nil, "error listing porter app events by porter app id")
@@ -132,7 +133,8 @@ func (p *PorterAppEventListHandler) updateExistingAppEvent(
 
 	// TODO: get rid of this block and related methods if still here after 08-04-2023
 	if appEvent.Type == string(types.PorterAppEventType_Build) && appEvent.TypeExternalSource == "GITHUB" {
-		err = p.updateBuildEvent_Github(ctx, &event, user, project, stackName)
+		validateApplyV2 := project.GetFeatureFlag(models.ValidateApplyV2, p.Config().LaunchDarklyClient)
+		err = p.updateBuildEvent_Github(ctx, &event, user, project, stackName, validateApplyV2)
 		if err != nil {
 			return appEvent, telemetry.Error(ctx, span, err, "error updating porter app event for github build")
 		}
@@ -158,6 +160,7 @@ func (p *PorterAppEventListHandler) updateBuildEvent_Github(
 	user *models.User,
 	project *models.Project,
 	stackName string,
+	validateApplyV2 bool,
 ) error {
 	ctx, span := telemetry.NewSpan(ctx, "update-porter-app-build-event")
 	defer span.End()
@@ -220,10 +223,10 @@ func (p *PorterAppEventListHandler) updateBuildEvent_Github(
 	if *actionRun.Status == "completed" {
 		if *actionRun.Conclusion == "success" {
 			event.Status = string(types.PorterAppEventStatus_Success)
-			_ = TrackStackBuildStatus(p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Success)
+			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Success, validateApplyV2, "")
 		} else {
 			event.Status = string(types.PorterAppEventStatus_Failed)
-			_ = TrackStackBuildStatus(p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Failed)
+			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Failed, validateApplyV2, "")
 		}
 		event.Metadata["end_time"] = actionRun.GetUpdatedAt().Time
 	}

+ 107 - 0
api/server/handlers/porter_app/list_events_apply_v2.go

@@ -0,0 +1,107 @@
+package porter_app
+
+import (
+	"errors"
+	"net/http"
+
+	"github.com/google/uuid"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository/gorm/helpers"
+	"github.com/porter-dev/porter/internal/telemetry"
+	"gorm.io/gorm"
+)
+
+// PorterAppV2EventListHandler handles the /apps/{app_name}/events endpoint (used for validate_apply v2)
+type PorterAppV2EventListHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// ListPorterAppEventsRequest represents the accepted fields on a request to the /apps/{app_name}/events endpoint
+type ListPorterAppEventsRequest struct {
+	DeploymentTargetId string `schema:"deployment_target_id"`
+	Page               int64  `schema:"page"`
+}
+
+// NewPorterAppV2EventListHandler returns a new PorterAppV2EventListHandler
+func NewPorterAppV2EventListHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *PorterAppV2EventListHandler {
+	return &PorterAppV2EventListHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+func (p *PorterAppV2EventListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-list-porter-app-v2-events")
+	defer span.End()
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		e := telemetry.Error(ctx, span, nil, "error parsing porter app name from url")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "porter-app-name", Value: appName},
+	)
+
+	request := &ListPorterAppEventsRequest{}
+	if ok := p.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetId},
+	)
+	uid, err := uuid.Parse(request.DeploymentTargetId)
+	if err != nil {
+		e := telemetry.Error(ctx, span, nil, "error parsing deployment target id")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+		return
+	}
+
+	app, err := p.Repo().PorterApp().ReadPorterAppByName(cluster.ID, appName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error retrieving porter app by name")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	porterAppEvents, paginatedResult, err := p.Repo().PorterAppEvent().ListEventsByPorterAppIDAndDeploymentTargetID(ctx, app.ID, uid, helpers.WithPageSize(20), helpers.WithPage(int(request.Page)))
+	if err != nil {
+		if !errors.Is(err, gorm.ErrRecordNotFound) {
+			e := telemetry.Error(ctx, span, nil, "error listing porter app events by porter app id")
+			p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusBadRequest))
+			return
+		}
+	}
+
+	res := struct {
+		Events []types.PorterAppEvent `json:"events"`
+		types.PaginationResponse
+	}{
+		PaginationResponse: types.PaginationResponse(paginatedResult),
+	}
+	res.Events = make([]types.PorterAppEvent, 0)
+
+	for _, porterApp := range porterAppEvents {
+		if porterApp == nil {
+			continue
+		}
+		pa := porterApp.ToPorterAppEvent()
+		res.Events = append(res.Events, pa)
+	}
+	p.WriteResult(w, r, res)
+}

+ 191 - 0
api/server/handlers/porter_app/logs_apply_v2.go

@@ -0,0 +1,191 @@
+package porter_app
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	porter_agent "github.com/porter-dev/porter/internal/kubernetes/porter_agent/v2"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// AppLogsHandler handles the /apps/logs endpoint
+type AppLogsHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewAppLogsHandler returns a new AppLogsHandler
+func NewAppLogsHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppLogsHandler {
+	return &AppLogsHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// AppLogsRequest represents the accepted fields on a request to the /apps/logs endpoint
+type AppLogsRequest struct {
+	DeploymentTargetID string    `schema:"deployment_target_id"`
+	ServiceName        string    `schema:"service_name"`
+	AppID              uint      `schema:"app_id"`
+	Limit              uint      `schema:"limit"`
+	StartRange         time.Time `schema:"start_range,omitempty"`
+	EndRange           time.Time `schema:"end_range,omitempty"`
+	SearchParam        string    `schema:"search_param"`
+	Direction          string    `schema:"direction"`
+	AppRevisionID      string    `schema:"app_revision_id"`
+}
+
+const (
+	lokiLabel_PorterAppName       = "porter_run_app_name"
+	lokiLabel_PorterAppID         = "porter_run_app_id"
+	lokiLabel_PorterServiceName   = "porter_run_service_name"
+	lokiLabel_PorterAppRevisionID = "porter_run_app_revision_id"
+	lokiLabel_DeploymentTargetId  = "porter_run_deployment_target_id"
+	lokiLabel_Namespace           = "namespace"
+)
+
+// ServeHTTP gets logs for a given app, service, and deployment target
+func (c *AppLogsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-logs")
+	defer span.End()
+	r = r.Clone(ctx)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	request := &AppLogsRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "porter app name not found in request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	if request.AppID == 0 {
+		err := telemetry.Error(ctx, span, nil, "must provide app id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.ServiceName == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide service name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})
+
+	if request.DeploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetID,
+	})
+
+	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := deploymentTargetDetailsResp.Msg.Namespace
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+
+	if request.StartRange.IsZero() || request.EndRange.IsZero() {
+		err := telemetry.Error(ctx, span, nil, "must provide start and end range")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "start-range", Value: request.StartRange.String()},
+		telemetry.AttributeKV{Key: "end-range", Value: request.EndRange.String()},
+	)
+
+	k8sAgent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get agent"), http.StatusInternalServerError))
+		return
+	}
+
+	agentSvc, err := porter_agent.GetAgentService(k8sAgent.Clientset)
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get agent service")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get agent service"), http.StatusInternalServerError))
+		return
+	}
+
+	matchLabels := map[string]string{
+		lokiLabel_Namespace:     namespace,
+		lokiLabel_PorterAppName: appName,
+		lokiLabel_PorterAppID:   fmt.Sprintf("%d", request.AppID),
+	}
+
+	if request.ServiceName != "all" {
+		matchLabels[lokiLabel_PorterServiceName] = request.ServiceName
+	}
+
+	if request.AppRevisionID != "" {
+		matchLabels[lokiLabel_PorterAppRevisionID] = request.AppRevisionID
+	}
+
+	matchLabels[lokiLabel_DeploymentTargetId] = request.DeploymentTargetID
+
+	logRequest := &types.LogRequest{
+		Limit:       request.Limit,
+		StartRange:  &request.StartRange,
+		EndRange:    &request.EndRange,
+		MatchLabels: matchLabels,
+		Direction:   request.Direction,
+		SearchParam: request.SearchParam,
+	}
+
+	logs, err := porter_agent.Logs(k8sAgent.Clientset, agentSvc, logRequest)
+	if err != nil {
+		_ = telemetry.Error(ctx, span, err, "unable to get logs")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(fmt.Errorf("unable to get logs"), http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, logs)
+}

+ 60 - 25
api/server/handlers/porter_app/parse.go

@@ -10,7 +10,7 @@ import (
 	"github.com/porter-dev/porter/api/types"
 	porterAppUtils "github.com/porter-dev/porter/api/utils/porter_app"
 	"github.com/porter-dev/porter/internal/helm/loader"
-	"github.com/porter-dev/porter/internal/integrations/powerdns"
+	"github.com/porter-dev/porter/internal/integrations/dns"
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/kubernetes/domain"
 	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
@@ -70,11 +70,11 @@ type SyncedEnvSectionKey struct {
 }
 
 type SubdomainCreateOpts struct {
-	k8sAgent       *kubernetes.Agent
-	dnsRepo        repository.DNSRecordRepository
-	powerDnsClient *powerdns.Client
-	appRootDomain  string
-	stackName      string
+	k8sAgent      *kubernetes.Agent
+	dnsRepo       repository.DNSRecordRepository
+	dnsClient     *dns.Client
+	appRootDomain string
+	stackName     string
 }
 
 type ParseConf struct {
@@ -111,6 +111,8 @@ type ParseConf struct {
 	FullHelmValues string
 	// AddCustomNodeSelector is a flag to determine whether to add porter.run/workload-kind: application to the nodeselector attribute of the helm values
 	AddCustomNodeSelector bool
+	// RemoveDeletedServices is a flag to determine whether to remove values and dependencies for services that are not defined in the porter.yaml
+	RemoveDeletedServices bool
 }
 
 func parse(ctx context.Context, conf ParseConf) (*chart.Chart, map[string]interface{}, map[string]interface{}, error) {
@@ -199,7 +201,7 @@ func parse(ctx context.Context, conf ParseConf) (*chart.Chart, map[string]interf
 		Release:  parsed.Release,
 	}
 
-	values, err := buildUmbrellaChartValues(ctx, application, synced_env, conf.ImageInfo, conf.ExistingHelmValues, conf.SubdomainCreateOpts, conf.InjectLauncherToStartCommand, conf.ShouldValidateHelmValues, conf.UserUpdate, conf.Namespace, conf.AddCustomNodeSelector)
+	values, err := buildUmbrellaChartValues(ctx, application, synced_env, conf.ImageInfo, conf.ExistingHelmValues, conf.SubdomainCreateOpts, conf.InjectLauncherToStartCommand, conf.ShouldValidateHelmValues, conf.UserUpdate, conf.Namespace, conf.AddCustomNodeSelector, conf.RemoveDeletedServices)
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "error building values")
 		return nil, nil, nil, err
@@ -210,7 +212,7 @@ func parse(ctx context.Context, conf ParseConf) (*chart.Chart, map[string]interf
 		return nil, nil, nil, err
 	}
 
-	umbrellaChart, err := buildUmbrellaChart(application, conf.ServerConfig, conf.ProjectID, conf.ExistingChartDependencies)
+	umbrellaChart, err := buildUmbrellaChart(application, conf.ServerConfig, conf.ProjectID, conf.ExistingChartDependencies, conf.RemoveDeletedServices)
 	if err != nil {
 		err = telemetry.Error(ctx, span, err, "error building umbrella chart")
 		return nil, nil, nil, err
@@ -238,6 +240,7 @@ func buildUmbrellaChartValues(
 	userUpdate bool,
 	namespace string,
 	addCustomNodeSelector bool,
+	removeDeletedValues bool,
 ) (map[string]interface{}, error) {
 	values := make(map[string]interface{})
 
@@ -259,6 +262,23 @@ func buildUmbrellaChartValues(
 		if existingValues != nil {
 			if existingValues[helmName] != nil {
 				existingValuesMap := existingValues[helmName].(map[string]interface{})
+				if removeDeletedValues {
+					// strip the env variables before coalescing
+					if existingValuesMap["container"] != nil {
+						containerMap := existingValuesMap["container"].(map[string]interface{})
+						if containerMap["env"] != nil {
+							envMap := containerMap["env"].(map[string]interface{})
+							if envMap["normal"] != nil {
+								envMap["normal"] = make(map[string]interface{})
+							}
+							if envMap["synced"] != nil {
+								envMap["synced"] = make([]map[string]interface{}, 0)
+							}
+							containerMap["env"] = envMap
+						}
+						existingValuesMap["container"] = containerMap
+					}
+				}
 				helm_values = utils.DeepCoalesceValues(existingValuesMap, helm_values)
 			}
 		}
@@ -290,10 +310,12 @@ func buildUmbrellaChartValues(
 		values[helmName] = helm_values
 	}
 
-	// add back in the existing services that were not overwritten
-	for k, v := range existingValues {
-		if values[k] == nil {
-			values[k] = v
+	if !removeDeletedValues {
+		// add back in the existing services that were not overwritten
+		for k, v := range existingValues {
+			if values[k] == nil {
+				values[k] = v
+			}
 		}
 	}
 
@@ -338,13 +360,16 @@ func syncEnvironmentGroupToNamespaceIfLabelsExist(ctx context.Context, agent *ku
 		}
 	}
 
+	service.Config["configMapRefs"] = []string{}
+	service.Config["secretRefs"] = []string{}
+
 	for _, linkedGroupName := range strings.Split(linkedGroupNames, ".") {
 		inp := environment_groups.SyncLatestVersionToNamespaceInput{
 			BaseEnvironmentGroupName: linkedGroupName,
 			TargetNamespace:          targetNamespace,
 		}
 
-		syncedEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, agent, inp)
+		syncedEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, agent, inp, nil)
 		if err != nil {
 			return fmt.Errorf("error syncing environment group: %w", err)
 		}
@@ -363,7 +388,7 @@ func syncEnvironmentGroupToNamespaceIfLabelsExist(ctx context.Context, agent *ku
 				service.Config["configMapRefs"] = append(service.Config["configMapRefs"].([]any), syncedEnvironment.EnvironmentGroupVersionedName)
 			}
 
-			switch service.Config["configMapRefs"].(type) {
+			switch service.Config["secretRefs"].(type) {
 			case []string:
 				service.Config["secretRefs"] = append(service.Config["secretRefs"].([]string), syncedEnvironment.EnvironmentGroupVersionedName)
 			case []any:
@@ -505,7 +530,7 @@ func deconstructSyncedEnvs(synced_env []*SyncedEnvSection, env map[string]string
 	return synced
 }
 
-func buildUmbrellaChart(application *Application, config *config.Config, projectID uint, existingDependencies []*chart.Dependency) (*chart.Chart, error) {
+func buildUmbrellaChart(application *Application, config *config.Config, projectID uint, existingDependencies []*chart.Dependency, removeDeletedDependencies bool) (*chart.Chart, error) {
 	deps := make([]*chart.Dependency, 0)
 	for alias, service := range application.Services {
 		var serviceType string
@@ -540,14 +565,24 @@ func buildUmbrellaChart(application *Application, config *config.Config, project
 		})
 	}
 
-	// add in the existing dependencies that were not overwritten
-	for _, dep := range existingDependencies {
-		if !dependencyExists(deps, dep) {
-			// have to repair the dependency name because of https://github.com/helm/helm/issues/9214
-			if strings.HasSuffix(dep.Name, "-web") || strings.HasSuffix(dep.Name, "-wkr") || strings.HasSuffix(dep.Name, "-job") {
-				dep.Name = getChartTypeFromHelmName(dep.Name)
+	if !removeDeletedDependencies {
+		// add in the existing dependencies that were not overwritten
+		for _, dep := range existingDependencies {
+			if !dependencyExists(deps, dep) {
+				// have to repair the dependency name because of https://github.com/helm/helm/issues/9214
+				if strings.HasSuffix(dep.Name, "-web") || strings.HasSuffix(dep.Name, "-wkr") || strings.HasSuffix(dep.Name, "-job") {
+					dep.Name = getChartTypeFromHelmName(dep.Name)
+					if dep.Name == "" {
+						return nil, fmt.Errorf("unable to determine type of existing dependency")
+					}
+					version, err := getLatestTemplateVersion(dep.Name, config, projectID)
+					if err != nil {
+						return nil, err
+					}
+					dep.Version = version
+				}
+				deps = append(deps, dep)
 			}
-			deps = append(deps, dep)
 		}
 	}
 
@@ -713,8 +748,8 @@ func createSubdomainIfRequired(
 }
 
 func createDNSRecord(opts SubdomainCreateOpts) (*types.DNSRecord, error) {
-	if opts.powerDnsClient == nil {
-		return nil, fmt.Errorf("cannot create subdomain because powerdns client is nil")
+	if opts.dnsClient == nil {
+		return nil, fmt.Errorf("cannot create subdomain because dns client is nil")
 	}
 
 	endpoint, found, err := domain.GetNGINXIngressServiceIP(opts.k8sAgent.Clientset)
@@ -741,7 +776,7 @@ func createDNSRecord(opts SubdomainCreateOpts) (*types.DNSRecord, error) {
 
 	_record := domain.DNSRecord(*record)
 
-	err = _record.CreateDomain(opts.powerDnsClient)
+	err = _record.CreateDomain(opts.dnsClient)
 
 	if err != nil {
 		return nil, err

+ 51 - 11
api/server/handlers/porter_app/parse_yaml.go

@@ -1,10 +1,12 @@
 package porter_app
 
 import (
+	"context"
 	"encoding/base64"
 	"net/http"
 
 	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 
 	"github.com/porter-dev/porter/internal/porter_app"
 
@@ -37,11 +39,21 @@ func NewParsePorterYAMLToProtoHandler(
 // ParsePorterYAMLToProtoRequest is the request object for the /apps/parse endpoint
 type ParsePorterYAMLToProtoRequest struct {
 	B64Yaml string `json:"b64_yaml"`
+	AppName string `json:"app_name"`
+}
+
+// EncodedAppWithEnv is a struct that contains a base64-encoded app proto object and a map of env variables
+type EncodedAppWithEnv struct {
+	B64AppProto  string            `json:"b64_app_proto"`
+	EnvVariables map[string]string `json:"env_variables"`
+	EnvSecrets   map[string]string `json:"env_secrets"`
 }
 
 // ParsePorterYAMLToProtoResponse is the response object for the /apps/parse endpoint
 type ParsePorterYAMLToProtoResponse struct {
-	B64AppProto string `json:"b64_app_proto"`
+	EncodedAppWithEnv
+	// PreviewApp contains preview environment specific overrides, if they exist
+	PreviewApp *EncodedAppWithEnv `json:"preview_app,omitempty"`
 }
 
 // ServeHTTP receives a base64-encoded porter.yaml, parses the version, and then translates it into a base64-encoded app proto object
@@ -51,9 +63,9 @@ func (c *ParsePorterYAMLToProtoHandler) ServeHTTP(w http.ResponseWriter, r *http
 
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
-	if !project.ValidateApplyV2 {
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have apply v2 enabled")
-		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
 		return
 	}
 
@@ -82,30 +94,58 @@ func (c *ParsePorterYAMLToProtoHandler) ServeHTTP(w http.ResponseWriter, r *http
 		return
 	}
 
-	appProto, err := porter_app.ParseYAML(ctx, yaml)
+	appDefinition, err := porter_app.ParseYAML(ctx, yaml, request.AppName)
 	if err != nil {
 		err := telemetry.Error(ctx, span, err, "error parsing yaml")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
-	if appProto == nil {
+	if appDefinition.AppProto == nil {
 		err := telemetry.Error(ctx, span, nil, "app proto is nil")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
 
-	by, err := helpers.MarshalContractObject(ctx, appProto)
+	response := &ParsePorterYAMLToProtoResponse{}
+
+	encodedApp, err := encodeAppProto(ctx, appDefinition.AppProto)
 	if err != nil {
-		err := telemetry.Error(ctx, span, nil, "error marshalling app proto")
+		err := telemetry.Error(ctx, span, err, "error encoding app proto")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
 		return
 	}
+	response.B64AppProto = encodedApp
+	response.EnvVariables = appDefinition.EnvVariables
+
+	if appDefinition.PreviewApp != nil {
+		encodedPreviewApp, err := encodeAppProto(ctx, appDefinition.PreviewApp.AppProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error encoding preview app proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+		response.PreviewApp = &EncodedAppWithEnv{
+			B64AppProto:  encodedPreviewApp,
+			EnvVariables: appDefinition.PreviewApp.EnvVariables,
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "includes-preview-app", Value: true})
+	}
+
+	c.WriteResult(w, r, response)
+}
+
+func encodeAppProto(ctx context.Context, app *porterv1.PorterApp) (string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "encode-app-proto")
+	defer span.End()
 
-	b64 := base64.StdEncoding.EncodeToString(by)
+	var encodedApp string
 
-	response := &ParsePorterYAMLToProtoResponse{
-		B64AppProto: b64,
+	by, err := helpers.MarshalContractObject(ctx, app)
+	if err != nil {
+		return encodedApp, err
 	}
 
-	c.WriteResult(w, r, response)
+	encodedApp = base64.StdEncoding.EncodeToString(by)
+
+	return encodedApp, nil
 }

+ 127 - 0
api/server/handlers/porter_app/pod_status.go

@@ -0,0 +1,127 @@
+package porter_app
+
+import (
+	"fmt"
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+	v1 "k8s.io/api/core/v1"
+)
+
+// PodStatusHandler is the handler for GET /apps/pods
+type PodStatusHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewPodStatusHandler returns a new PodStatusHandler
+func NewPodStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *PodStatusHandler {
+	return &PodStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// PodStatusRequest is the expected format for a request body on GET /apps/pods
+type PodStatusRequest struct {
+	DeploymentTargetID string `schema:"deployment_target_id"`
+	ServiceName        string `schema:"service"`
+}
+
+func (c *PodStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-pod-status")
+	defer span.End()
+
+	request := &PodStatusRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, reqErr, "porter app name not found in request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
+	project, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName}, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	if request.DeploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetID,
+	})
+
+	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := deploymentTargetDetailsResp.Msg.Namespace
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "unable to get agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	pods := []v1.Pod{}
+
+	var selectors string
+	if request.ServiceName == "" {
+		selectors = fmt.Sprintf("porter.run/deployment-target-id=%s,porter.run/app-name=%s", request.DeploymentTargetID, appName)
+	} else {
+		selectors = fmt.Sprintf("porter.run/service-name=%s,porter.run/deployment-target-id=%s,porter.run/app-name=%s", request.ServiceName, request.DeploymentTargetID, appName)
+	}
+	podsList, err := agent.GetPodsByLabel(selectors, namespace)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "unable to get pods by label")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	pods = append(pods, podsList.Items...)
+
+	c.WriteResult(w, r, pods)
+}

+ 273 - 0
api/server/handlers/porter_app/report_status.go

@@ -0,0 +1,273 @@
+package porter_app
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/google/go-github/v39/github"
+	"github.com/google/uuid"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/deployment_target"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/porter_app"
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
+	"github.com/porter-dev/porter/internal/telemetry"
+	"k8s.io/utils/pointer"
+)
+
+// ReportRevisionStatusHandler is the handler for the /apps/{porter_app_name}/revisions/{app_revision_id}/status endpoint
+type ReportRevisionStatusHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+// NewReportRevisionStatusHandler handles POST requests to the endpoint /apps/{porter_app_name}/revisions/{app_revision_id}/status
+func NewReportRevisionStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *ReportRevisionStatusHandler {
+	return &ReportRevisionStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
+// ReportRevisionStatusRequest is the request object for the /apps/{porter_app_name}/revisions/{app_revision_id}/status endpoint
+type ReportRevisionStatusRequest struct {
+	PRNumber  int    `json:"pr_number"`
+	CommitSHA string `json:"commit_sha"`
+}
+
+// ReportRevisionStatusResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id}/status endpoint
+type ReportRevisionStatusResponse struct{}
+
+// ServeHTTP reports the status of a revision to Github and other integrations, depending on the status and the deployment target
+func (c *ReportRevisionStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-report-revision-status")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-name", Value: appName})
+
+	revisionID, reqErr := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing app revision id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: revisionID})
+
+	appRevisionUuid, err := uuid.Parse(revisionID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error parsing app revision id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if appRevisionUuid == uuid.Nil {
+		err := telemetry.Error(ctx, span, nil, "app revision id is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-revision-id", Value: appRevisionUuid.String()})
+
+	porterApp, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, appName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error reading porter app by name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if porterApp.ID == 0 {
+		err := telemetry.Error(ctx, span, nil, "porter app not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-id", Value: porterApp.ID})
+
+	request := &ReportRevisionStatusRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	revision, err := porter_app.GetAppRevision(ctx, porter_app.GetAppRevisionInput{
+		AppRevisionID: appRevisionUuid,
+		ProjectID:     project.ID,
+		CCPClient:     c.Config().ClusterControlPlaneClient,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting app revision")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	deploymentTarget, err := deployment_target.DeploymentTargetDetails(ctx, deployment_target.DeploymentTargetDetailsInput{
+		ProjectID:          int64(project.ID),
+		ClusterID:          int64(cluster.ID),
+		DeploymentTargetID: revision.DeploymentTargetID,
+		CCPClient:          c.Config().ClusterControlPlaneClient,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "deployment-target-id", Value: deploymentTarget.ID},
+		telemetry.AttributeKV{Key: "pr-number", Value: request.PRNumber},
+		telemetry.AttributeKV{Key: "commit-sha", Value: request.CommitSHA},
+		telemetry.AttributeKV{Key: "preview", Value: deploymentTarget.Preview},
+		telemetry.AttributeKV{Key: "revision-number", Value: revision.RevisionNumber},
+	)
+
+	resp := &ReportRevisionStatusResponse{}
+
+	if !deploymentTarget.Preview || request.PRNumber == 0 || revision.RevisionNumber > 1 {
+		c.WriteResult(w, r, resp)
+		return
+	}
+
+	err = writePRComment(ctx, writePRCommentInput{
+		revision:        revision,
+		porterApp:       porterApp,
+		prNumber:        request.PRNumber,
+		commitSha:       request.CommitSHA,
+		serverURL:       c.Config().ServerConf.ServerURL,
+		githubAppSecret: c.Config().ServerConf.GithubAppSecret,
+		githubAppID:     c.Config().ServerConf.GithubAppID,
+	})
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error writing pr comment")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, resp)
+}
+
+type writePRCommentInput struct {
+	revision  porter_app.Revision
+	porterApp *models.PorterApp
+	prNumber  int
+	commitSha string
+	serverURL string
+
+	githubAppSecret []byte
+	githubAppID     string
+}
+
+func writePRComment(ctx context.Context, inp writePRCommentInput) error {
+	ctx, span := telemetry.NewSpan(ctx, "write-pr-comment")
+	defer span.End()
+
+	if inp.porterApp == nil {
+		return telemetry.Error(ctx, span, nil, "porter app is nil")
+	}
+	if inp.prNumber == 0 {
+		return telemetry.Error(ctx, span, nil, "pr number is empty")
+	}
+	if inp.commitSha == "" {
+		return telemetry.Error(ctx, span, nil, "commit sha is empty")
+	}
+	if inp.githubAppSecret == nil {
+		return telemetry.Error(ctx, span, nil, "github app secret is empty")
+	}
+	if inp.githubAppID == "" {
+		return telemetry.Error(ctx, span, nil, "github app id is empty")
+	}
+	if inp.serverURL == "" {
+		return telemetry.Error(ctx, span, nil, "server url is empty")
+	}
+
+	client, err := porter_app.GetGithubClientByRepoID(ctx, inp.porterApp.GitRepoID, inp.githubAppSecret, inp.githubAppID)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error getting github client")
+	}
+
+	repoDetails := strings.Split(inp.porterApp.RepoName, "/")
+	if len(repoDetails) != 2 {
+		return telemetry.Error(ctx, span, nil, "repo name is not in the format <org>/<repo>")
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "repo-owner", Value: repoDetails[0]},
+		telemetry.AttributeKV{Key: "repo-name", Value: repoDetails[1]},
+		telemetry.AttributeKV{Key: "pr-number", Value: inp.prNumber},
+		telemetry.AttributeKV{Key: "commit-sha", Value: inp.commitSha},
+	)
+
+	decoded, err := base64.StdEncoding.DecodeString(inp.revision.B64AppProto)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error decoding base proto")
+	}
+
+	appProto := &porterv1.PorterApp{}
+	err = helpers.UnmarshalContractObject(decoded, appProto)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error unmarshalling app proto")
+	}
+
+	app, err := v2.AppFromProto(appProto)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error converting app proto to app")
+	}
+
+	body := "## Porter Preview Environments\n"
+	porterURL := fmt.Sprintf("%s/preview-environments/apps/%s?target=%s", inp.serverURL, inp.porterApp.Name, inp.revision.DeploymentTargetID)
+
+	switch inp.revision.Status {
+	case models.AppRevisionStatus_BuildFailed:
+		body = fmt.Sprintf("%s❌ The latest deploy failed to build. Check the [Porter Dashboard](%s) or [action logs](https://github.com/%s/actions/runs/) for more information.", body, porterURL, inp.porterApp.RepoName)
+	case models.AppRevisionStatus_DeployFailed:
+		body = fmt.Sprintf("%s❌ The latest SHA ([`%s`](https://github.com/%s/%s/commit/%s)) failed to deploy.\nCheck the [Porter Dashboard](%s) or [action logs](https://github.com/%s/actions/runs/) for more information.\nContact Porter Support if the errors persists", body, inp.commitSha, repoDetails[0], repoDetails[1], inp.commitSha, porterURL, inp.porterApp.RepoName)
+	case models.AppRevisionStatus_Deployed:
+		body = fmt.Sprintf("%s✅ The latest SHA ([`%s`](https://github.com/%s/%s/commit/%s)) has been successfully deployed.\nApp details available in the [Porter Dashboard](%s)", body, inp.commitSha, repoDetails[0], repoDetails[1], inp.commitSha, porterURL)
+	default:
+		return nil
+	}
+
+	for _, service := range app.Services {
+		if service.Domains != nil && len(service.Domains) > 0 {
+			body = fmt.Sprintf("%s\n\n**Preview URL**: https://%s", body, service.Domains[0].Name)
+		}
+	}
+
+	_, _, err = client.Issues.CreateComment(
+		ctx,
+		repoDetails[0],
+		repoDetails[1],
+		inp.prNumber,
+		&github.IssueComment{
+			Body: pointer.String(body),
+		},
+	)
+	if err != nil {
+		return telemetry.Error(ctx, span, err, "error creating github comment")
+	}
+
+	return nil
+}

+ 6 - 6
api/server/handlers/porter_app/rollback.go

@@ -121,11 +121,11 @@ func (c *RollbackPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 			ProjectID:     cluster.ProjectID,
 			Namespace:     namespace,
 			SubdomainCreateOpts: SubdomainCreateOpts{
-				k8sAgent:       k8sAgent,
-				dnsRepo:        c.Repo().DNSRecord(),
-				powerDnsClient: c.Config().PowerDNSClient,
-				appRootDomain:  c.Config().ServerConf.AppRootDomain,
-				stackName:      appName,
+				k8sAgent:      k8sAgent,
+				dnsRepo:       c.Repo().DNSRecord(),
+				dnsClient:     c.Config().DNSClient,
+				appRootDomain: c.Config().ServerConf.AppRootDomain,
+				stackName:     appName,
 			},
 			InjectLauncherToStartCommand: injectLauncher,
 			FullHelmValues:               string(valuesYaml),
@@ -155,7 +155,7 @@ func (c *RollbackPorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 
 	if features.AreAgentDeployEventsEnabled(k8sAgent) {
 		serviceDeploymentStatusMap := getServiceDeploymentMetadataFromValues(values, types.PorterAppEventStatus_Progressing)
-		_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, types.PorterAppEventStatus_Progressing, porterApp.ID, latestHelmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
+		_, err = createNewPorterAppDeployEvent(ctx, serviceDeploymentStatusMap, porterApp.ID, latestHelmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
 	} else {
 		_, err = createOldPorterAppDeployEvent(ctx, types.PorterAppEventStatus_Success, porterApp.ID, latestHelmRelease.Version+1, imageInfo.Tag, c.Repo().PorterAppEvent())
 	}

+ 134 - 0
api/server/handlers/porter_app/rollback_revision.go

@@ -0,0 +1,134 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	"github.com/google/uuid"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// RollbackAppRevisionHandler rolls back an app revision to the last deployed revision
+type RollbackAppRevisionHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewRollbackAppRevisionHandler returns a new RollbackAppRevisionHandler
+func NewRollbackAppRevisionHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *RollbackAppRevisionHandler {
+	return &RollbackAppRevisionHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// RollbackAppRevisionRequest is the request body for the /apps/{porter_app_name}/rollback endpoint
+type RollbackAppRevisionRequest struct {
+	DeploymentTargetID string `json:"deployment_target_id"`
+	AppRevisionID      string `json:"app_revision_id"`
+}
+
+// RollbackAppRevisionResponse is the response body for the /apps/{porter_app_name}/rollback endpoint
+type RollbackAppRevisionResponse struct {
+	TargetRevisionNumber int `json:"target_revision_number"`
+}
+
+// ServeHTTP handles the request and rolls back the app revision
+func (c *RollbackAppRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-rollback-app-revision")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
+		return
+	}
+
+	request := &RollbackAppRevisionRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargetID, err := uuid.Parse(request.DeploymentTargetID)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error parsing deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if deploymentTargetID == uuid.Nil {
+		err := telemetry.Error(ctx, span, nil, "deployment target id is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "application-name", Value: appName})
+
+	app, err := c.Repo().PorterApp().ReadPorterAppByName(cluster.ID, appName)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error reading porter app by name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if app.ID == 0 {
+		err = telemetry.Error(ctx, span, nil, "app with name does not exist in project")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	rollbackReq := connect.NewRequest(&porterv1.RollbackRevisionRequest{
+		ProjectId:          int64(project.ID),
+		AppId:              int64(app.ID),
+		DeploymentTargetId: deploymentTargetID.String(),
+		AppRevisionId:      request.AppRevisionID,
+	})
+	ccpResp, err := c.Config().ClusterControlPlaneClient.RollbackRevision(ctx, rollbackReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling ccp rollback porter app")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "ccp resp msg is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg.TargetRevisionNumber == 0 {
+		err := telemetry.Error(ctx, span, err, "ccp resp target revision number is 0")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	c.WriteResult(w, r, &RollbackAppRevisionResponse{
+		TargetRevisionNumber: int(ccpResp.Msg.TargetRevisionNumber),
+	})
+}

+ 71 - 0
api/server/handlers/porter_app/status.go

@@ -0,0 +1,71 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/server/shared/websocket"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// AppStatusHandler handles the /apps/{kind}/status endpoint.
// It upgrades the request to a websocket (via middleware upstream) and streams
// controller status updates for a given Kubernetes resource kind.
type AppStatusHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}
+
+// NewAppStatusHandler returns a new AppStatusHandler
+func NewAppStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *AppStatusHandler {
+	return &AppStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
// AppStatusRequest represents the accepted fields on a request to the /apps/{kind}/status endpoint
type AppStatusRequest struct {
	// Selectors is a label-selector string passed through to the controller
	// status stream to filter which controllers are watched.
	Selectors string `schema:"selectors"`
}
+
+func (c *AppStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-app-logs")
+	defer span.End()
+
+	safeRW := ctx.Value(types.RequestCtxWebsocketKey).(*websocket.WebsocketSafeReadWriter)
+	request := &AppStatusRequest{}
+
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "unable to get agent")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	kind, _ := requestutils.GetURLParamString(r, types.URLParamKind)
+
+	err = agent.StreamControllerStatus(kind, request.Selectors, safeRW)
+
+	if err != nil {
+		c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
+		return
+	}
+}

+ 152 - 0
api/server/handlers/porter_app/stream_logs.go

@@ -0,0 +1,152 @@
+package porter_app
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"connectrpc.com/connect"
+
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/server/shared/websocket"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
// StreamLogsLokiHandler handles the /apps/logs/loki endpoint.
// It streams live Loki logs for an app over a websocket, scoped to a
// deployment target's namespace.
type StreamLogsLokiHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}
+
+// NewStreamLogsLokiHandler returns a new StreamLogsLokiHandler
+func NewStreamLogsLokiHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *StreamLogsLokiHandler {
+	return &StreamLogsLokiHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
// ServeHTTP streams live logs for a given app, service, and deployment target
// over the request's websocket. It resolves the deployment target's namespace
// through the cluster control plane, builds Loki label selectors, and streams
// until the client disconnects or the stream errors.
func (c *StreamLogsLokiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx, span := telemetry.NewSpan(r.Context(), "serve-stream-app-logs")
	defer span.End()
	r = r.Clone(ctx)
	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)

	// AppLogsRequest is declared elsewhere in this package; only the fields
	// referenced below are required here.
	request := &AppLogsRequest{}
	if ok := c.DecodeAndValidate(w, r, request); !ok {
		err := telemetry.Error(ctx, span, nil, "invalid request")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
	if reqErr != nil {
		err := telemetry.Error(ctx, span, reqErr, "porter app name not found in request")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})

	// service name is mandatory; the sentinel value "all" means "do not filter
	// by service" (see label construction below)
	if request.ServiceName == "" {
		err := telemetry.Error(ctx, span, nil, "must provide service name")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "service-name", Value: request.ServiceName})

	if request.DeploymentTargetID == "" {
		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})

	// resolve the deployment target through the cluster control plane to find
	// the namespace the app's workloads run in
	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
		ProjectId:          int64(project.ID),
		DeploymentTargetId: request.DeploymentTargetID,
	})

	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
		// NOTE(review): err is nil on this path; telemetry.Error still records
		// the message on the span
		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// refuse to stream from a deployment target that belongs to a different
	// cluster than the one in the request scope
	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	namespace := deploymentTargetDetailsResp.Msg.Namespace
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})

	// default to streaming from the last 24 hours when no start time was given
	if request.StartRange.IsZero() {
		dayAgo := time.Now().Add(-24 * time.Hour)
		request.StartRange = dayAgo
	}

	startTime, err := request.StartRange.MarshalText()
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error marshaling start time")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "start-time", Value: string(startTime)})

	// websocket writer is placed on the context by upstream middleware
	safeRW := r.Context().Value(types.RequestCtxWebsocketKey).(*websocket.WebsocketSafeReadWriter)

	agent, err := c.GetAgent(r, cluster, "")
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error getting agent")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// Loki label selectors; the lokiLabel_* constants are defined elsewhere in
	// this package
	labels := []string{
		fmt.Sprintf("%s=%s", lokiLabel_Namespace, namespace),
		fmt.Sprintf("%s=%s", lokiLabel_PorterAppName, appName),
		fmt.Sprintf("%s=%s", lokiLabel_DeploymentTargetId, request.DeploymentTargetID),
		fmt.Sprintf("%s=%s", lokiLabel_PorterAppID, fmt.Sprintf("%d", request.AppID)),
	}

	// "all" streams logs across every service of the app
	if request.ServiceName != "all" {
		labels = append(labels, fmt.Sprintf("%s=%s", lokiLabel_PorterServiceName, request.ServiceName))
	}

	if request.AppRevisionID != "" {
		labels = append(labels, fmt.Sprintf("%s=%s", lokiLabel_PorterAppRevisionID, request.AppRevisionID))
	}

	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "labels", Value: strings.Join(labels, ",")})

	// blocks until the stream ends; the 0 argument presumably disables a line
	// limit — TODO confirm against the agent implementation
	err = agent.StreamPorterAgentLokiLog(labels, string(startTime), request.SearchParam, 0, safeRW)
	if err != nil {
		err := telemetry.Error(ctx, span, err, "error streaming logs")
		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}
}

+ 489 - 0
api/server/handlers/porter_app/update_app_environment_group.go

@@ -0,0 +1,489 @@
+package porter_app
+
+import (
+	"context"
+	"encoding/base64"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/porter_app"
+
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/internal/kubernetes/environment_groups"
+
+	"connectrpc.com/connect"
+
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// UpdateAppEnvironmentHandler handles the /apps/{porter_app_name}/update-environment endpoint.
// It creates or updates the app's default environment group and syncs the
// latest env group versions into the deployment target's namespace.
type UpdateAppEnvironmentHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}
+
+// NewUpdateAppEnvironmentHandler returns a new UpdateAppEnvironmentHandler
+func NewUpdateAppEnvironmentHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *UpdateAppEnvironmentHandler {
+	return &UpdateAppEnvironmentHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
// Label keys applied to environment groups managed by Porter so they can be
// traced back to the owning app and deployment target.
const (
	// LabelKey_AppName is the label key for the app name
	LabelKey_AppName = "porter.run/app-name"
	// LabelKey_DeploymentTargetID is the label key for the deployment target id
	LabelKey_DeploymentTargetID = "porter.run/deployment-target-id"
	// LabelKey_PorterManaged is the label key signifying the resource is managed by porter
	LabelKey_PorterManaged = "porter.run/managed"
)
+
// UpdateAppEnvironmentRequest represents the accepted fields on a request to the /apps/{porter_app_name}/environment-group endpoint
type UpdateAppEnvironmentRequest struct {
	// Base64AppProto is an optional base64-encoded PorterApp proto; when empty,
	// a proto is built from the app name in the URL.
	Base64AppProto string `json:"b64_app_proto"`
	// DeploymentTargetID identifies the deployment target whose namespace the
	// env groups will be synced into. Required.
	DeploymentTargetID string `json:"deployment_target_id"`
	// Variables are the plain-text environment variables to set.
	Variables map[string]string `json:"variables"`
	// Secrets are the secret environment variables to set.
	Secrets map[string]string `json:"secrets"`
	// HardUpdate is used to remove any variables that are not specified in the request.  If false, the request will only update the variables specified in the request,
	// and leave all other variables untouched.
	// NOTE(review): the JSON tag is "remove_missing", not "hard_update" —
	// clients must send the tag name; confirm before renaming either side.
	HardUpdate bool `json:"remove_missing"`
}

// UpdateAppEnvironmentResponse represents the fields on the response object from the /apps/{porter_app_name}/environment-group endpoint
type UpdateAppEnvironmentResponse struct {
	// Base64AppProto is the updated app proto with its env group references
	// pointed at the latest synced versions.
	Base64AppProto string                                `json:"b64_app_proto"`
	// EnvGroups lists the env groups (name + version) now referenced by the app.
	EnvGroups      []environment_groups.EnvironmentGroup `json:"env_groups"`
}
+
+// ServeHTTP updates or creates the environment group for an app
+func (c *UpdateAppEnvironmentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-app-env-group")
+	defer span.End()
+	r = r.Clone(ctx)
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		err := telemetry.Error(ctx, span, nil, "error parsing porter app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &UpdateAppEnvironmentRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "invalid request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	porterApp, err := c.Config().Repo.PorterApp().ReadPorterAppByName(cluster.ID, appName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, nil, "error getting porter app by name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	if porterApp.ID == 0 {
+		err := telemetry.Error(ctx, span, nil, "porter app not found")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusNotFound))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-id", Value: porterApp.ID})
+
+	if request.DeploymentTargetID == "" {
+		err := telemetry.Error(ctx, span, nil, "must provide deployment target id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "deployment-target-id", Value: request.DeploymentTargetID})
+
+	appProto := &porterv1.PorterApp{}
+
+	if request.Base64AppProto == "" {
+		if appName == "" {
+			err := telemetry.Error(ctx, span, nil, "app name is empty and no base64 proto provided")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		appProto.Name = appName
+	} else {
+		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error decoding base yaml")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		err = helpers.UnmarshalContractObject(decoded, appProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+	}
+
+	if appProto.Name == "" {
+		err := telemetry.Error(ctx, span, nil, "app proto name is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	deploymentTargetDetailsReq := connect.NewRequest(&porterv1.DeploymentTargetDetailsRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetID,
+	})
+
+	deploymentTargetDetailsResp, err := c.Config().ClusterControlPlaneClient.DeploymentTargetDetails(ctx, deploymentTargetDetailsReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting deployment target details from cluster control plane client")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if deploymentTargetDetailsResp == nil || deploymentTargetDetailsResp.Msg == nil {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if deploymentTargetDetailsResp.Msg.ClusterId != int64(cluster.ID) {
+		err := telemetry.Error(ctx, span, err, "deployment target details resp cluster id does not match cluster id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	namespace := deploymentTargetDetailsResp.Msg.Namespace
+	isPreview := deploymentTargetDetailsResp.Msg.IsPreview
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "is-preview", Value: isPreview})
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace", Value: namespace})
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "hard-update", Value: request.HardUpdate})
+
+	appEnvGroupName, err := porter_app.AppEnvGroupName(ctx, appName, request.DeploymentTargetID, cluster.ID, c.Repo().PorterApp())
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting app env group name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	agent, err := c.GetAgent(r, cluster, "")
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to connect to kubernetes cluster")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	latestEnvironmentGroup, err := environment_groups.LatestBaseEnvironmentGroup(ctx, agent, appEnvGroupName)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to get latest base environment group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-exists", Value: latestEnvironmentGroup.Name != ""})
+
+	previewTemplateEnvName, err := porter_app.AppTemplateEnvGroupName(ctx, appName, cluster.ID, c.Repo().PorterApp())
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error getting preview template env name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// filter out preview template and app env groups
+	filteredEnvGroups := []*porterv1.EnvGroup{}
+	for _, envGroup := range appProto.EnvGroups {
+		if envGroup.GetName() != previewTemplateEnvName && envGroup.GetName() != appEnvGroupName {
+			filteredEnvGroups = append(filteredEnvGroups, envGroup)
+		}
+	}
+
+	if latestEnvironmentGroup.Name != "" {
+		sameEnvGroup := true
+		for key, newValue := range request.Variables {
+			if existingValue, ok := latestEnvironmentGroup.Variables[key]; !ok || existingValue != newValue {
+				sameEnvGroup = false
+			}
+		}
+		for key, newValue := range request.Secrets {
+			// We cannot check if the values are the same because the existing secrets are substituted with dummy values. However, if the new value is a dummy value, then it is unchanged.
+			if _, ok := latestEnvironmentGroup.SecretVariables[key]; !ok || newValue != environment_groups.EnvGroupSecretDummyValue {
+				sameEnvGroup = false
+			}
+		}
+		if request.HardUpdate {
+			for key := range latestEnvironmentGroup.Variables {
+				if _, ok := request.Variables[key]; !ok {
+					sameEnvGroup = false
+				}
+			}
+			for key := range latestEnvironmentGroup.SecretVariables {
+				if _, ok := request.Secrets[key]; !ok {
+					sameEnvGroup = false
+				}
+			}
+		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "same-env-group", Value: sameEnvGroup})
+
+		if sameEnvGroup {
+			// even if the env group is the same, we still need to sync the latest versions of the other env groups
+			syncInp := syncLatestEnvGroupVersionsInput{
+				envGroups:          filteredEnvGroups,
+				appName:            appName,
+				namespace:          namespace,
+				deploymentTargetID: request.DeploymentTargetID,
+				k8sAgent:           agent,
+			}
+			latestEnvGroups, err := syncLatestEnvGroupVersions(ctx, syncInp)
+			if err != nil {
+				err := telemetry.Error(ctx, span, err, "error syncing latest env group versions")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+				return
+			}
+
+			latestEnvGroups = append(latestEnvGroups, environment_groups.EnvironmentGroup{
+				Name:    latestEnvironmentGroup.Name,
+				Version: latestEnvironmentGroup.Version,
+			})
+
+			var protoEnvGroups []*porterv1.EnvGroup
+			for _, envGroup := range latestEnvGroups {
+				protoEnvGroups = append(protoEnvGroups, &porterv1.EnvGroup{
+					Name:    envGroup.Name,
+					Version: int64(envGroup.Version),
+				})
+			}
+			appProto.EnvGroups = protoEnvGroups
+
+			encodedApp, err := encodeAppProto(ctx, appProto)
+			if err != nil {
+				err := telemetry.Error(ctx, span, err, "error encoding app proto")
+				c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+				return
+			}
+
+			res := &UpdateAppEnvironmentResponse{
+				EnvGroups:      latestEnvGroups,
+				Base64AppProto: encodedApp,
+			}
+
+			c.WriteResult(w, r, res)
+			return
+		}
+	}
+
+	// if this app does not have a default env group for this deployment target and is a preview
+	// then use the preview template env group as the default
+	// this should only run when the app is first deployed to a given deployment target
+	if latestEnvironmentGroup.Name == "" && isPreview {
+		latestEnvironmentGroup, err = environment_groups.LatestBaseEnvironmentGroup(ctx, agent, previewTemplateEnvName)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "unable to get latest base environment group")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+			return
+		}
+	}
+
+	variables := make(map[string]string)
+	secrets := make(map[string]string)
+
+	if !request.HardUpdate {
+		for key, value := range latestEnvironmentGroup.Variables {
+			variables[key] = value
+		}
+		for key, value := range latestEnvironmentGroup.SecretVariables {
+			secrets[key] = value
+		}
+	}
+
+	for key, value := range request.Variables {
+		if len(key) > 0 && len(value) > 0 {
+			variables[key] = value
+		}
+	}
+	for key, value := range request.Secrets {
+		if len(key) > 0 && len(value) > 0 {
+			secrets[key] = value
+		}
+	}
+
+	envGroup := environment_groups.EnvironmentGroup{
+		Name:            appEnvGroupName,
+		Variables:       variables,
+		SecretVariables: secrets,
+		CreatedAtUTC:    time.Now().UTC(),
+	}
+
+	additionalEnvGroupLabels := map[string]string{
+		LabelKey_AppName:                                  appName,
+		LabelKey_DeploymentTargetID:                       request.DeploymentTargetID,
+		environment_groups.LabelKey_DefaultAppEnvironment: "true",
+		LabelKey_PorterManaged:                            "true",
+	}
+
+	err = environment_groups.CreateOrUpdateBaseEnvironmentGroup(ctx, agent, envGroup, additionalEnvGroupLabels)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to create or update base environment group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	inp := environment_groups.SyncLatestVersionToNamespaceInput{
+		BaseEnvironmentGroupName: appEnvGroupName,
+		TargetNamespace:          namespace,
+	}
+
+	syncedAppEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, agent, inp, additionalEnvGroupLabels)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "unable to create or update synced environment group")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-versioned-name", Value: syncedAppEnvironment.EnvironmentGroupVersionedName})
+
+	syncInp := syncLatestEnvGroupVersionsInput{
+		envGroups:          filteredEnvGroups,
+		appName:            appName,
+		namespace:          namespace,
+		deploymentTargetID: request.DeploymentTargetID,
+		k8sAgent:           agent,
+	}
+	latestEnvGroups, err := syncLatestEnvGroupVersions(ctx, syncInp)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error syncing latest env group versions")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	split := strings.Split(syncedAppEnvironment.EnvironmentGroupVersionedName, ".")
+	if len(split) != 2 {
+		err := telemetry.Error(ctx, span, err, "unexpected environment group versioned name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	version, err := strconv.Atoi(split[1])
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error converting environment group version to int")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	latestEnvGroups = append(latestEnvGroups, environment_groups.EnvironmentGroup{
+		Name:    split[0],
+		Version: version,
+	})
+
+	var protoEnvGroups []*porterv1.EnvGroup
+	for _, envGroup := range latestEnvGroups {
+		protoEnvGroups = append(protoEnvGroups, &porterv1.EnvGroup{
+			Name:    envGroup.Name,
+			Version: int64(envGroup.Version),
+		})
+	}
+	appProto.EnvGroups = protoEnvGroups
+
+	encodedApp, err := encodeAppProto(ctx, appProto)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error encoding app proto")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &UpdateAppEnvironmentResponse{
+		EnvGroups:      latestEnvGroups,
+		Base64AppProto: encodedApp,
+	}
+
+	c.WriteResult(w, r, res)
+}
+
// syncLatestEnvGroupVersionsInput bundles the arguments for
// syncLatestEnvGroupVersions; every field is required.
type syncLatestEnvGroupVersionsInput struct {
	// envGroups is the list of env groups to sync. We only need the names and will get the latest version of each from the porter-env-group ns
	envGroups []*porterv1.EnvGroup
	// appName is the name of the app
	appName string
	// namespace is the namespace to sync the latest versions to
	namespace string
	// deploymentTargetID is the id of the deployment target
	deploymentTargetID string
	// k8sAgent is the kubernetes agent
	k8sAgent *kubernetes.Agent
}
+
+// syncLatestEnvGroupVersions syncs the latest versions of the env groups to the namespace where an app is deployed
+func syncLatestEnvGroupVersions(ctx context.Context, inp syncLatestEnvGroupVersionsInput) ([]environment_groups.EnvironmentGroup, error) {
+	ctx, span := telemetry.NewSpan(ctx, "sync-latest-env-group-versions")
+	defer span.End()
+
+	var envGroups []environment_groups.EnvironmentGroup
+
+	if inp.deploymentTargetID == "" {
+		return envGroups, telemetry.Error(ctx, span, nil, "deployment target id is empty")
+	}
+	if inp.appName == "" {
+		return envGroups, telemetry.Error(ctx, span, nil, "app name is empty")
+	}
+	if inp.namespace == "" {
+		return envGroups, telemetry.Error(ctx, span, nil, "namespace is empty")
+	}
+	if inp.k8sAgent == nil {
+		return envGroups, telemetry.Error(ctx, span, nil, "k8s agent is nil")
+	}
+
+	for _, envGroup := range inp.envGroups {
+		if envGroup == nil {
+			continue
+		}
+
+		additionalEnvGroupLabels := map[string]string{
+			LabelKey_AppName:            inp.appName,
+			LabelKey_DeploymentTargetID: inp.deploymentTargetID,
+			LabelKey_PorterManaged:      "true",
+		}
+
+		syncedEnvironment, err := environment_groups.SyncLatestVersionToNamespace(ctx, inp.k8sAgent, environment_groups.SyncLatestVersionToNamespaceInput{
+			TargetNamespace:          inp.namespace,
+			BaseEnvironmentGroupName: envGroup.GetName(),
+		}, additionalEnvGroupLabels)
+		if err != nil {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "env-group-name", Value: envGroup.GetName()})
+			return envGroups, telemetry.Error(ctx, span, err, "error syncing latest version to namespace")
+		}
+
+		split := strings.Split(syncedEnvironment.EnvironmentGroupVersionedName, ".")
+		if len(split) != 2 {
+			return envGroups, telemetry.Error(ctx, span, err, "unexpected environment group versioned name")
+		}
+
+		version, err := strconv.Atoi(split[1])
+		if err != nil {
+			return envGroups, telemetry.Error(ctx, span, err, "error converting environment group version to int")
+		}
+
+		envGroups = append(envGroups, environment_groups.EnvironmentGroup{
+			Name:    split[0],
+			Version: version,
+		})
+	}
+
+	return envGroups, nil
+}

+ 106 - 0
api/server/handlers/porter_app/update_app_revision_status.go

@@ -0,0 +1,106 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
// UpdateAppRevisionStatusHandler handles requests to the /apps/{porter_app_name}/revisions/{app_revision_id} endpoint.
// It forwards revision status changes to the cluster control plane.
type UpdateAppRevisionStatusHandler struct {
	handlers.PorterHandlerReadWriter
}
+
+// NewUpdateAppRevisionStatusHandler returns a new UpdateAppRevisionStatusHandler
+func NewUpdateAppRevisionStatusHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *UpdateAppRevisionStatusHandler {
+	return &UpdateAppRevisionStatusHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+	}
+}
+
// UpdateAppRevisionStatusRequest is the request object for the /apps/{porter_app_name}/revisions/{app_revision_id} endpoint
type UpdateAppRevisionStatusRequest struct {
	// Status is the new status to set for the app revision
	// (only the *_Failed statuses are accepted — see the handler's switch).
	Status models.AppRevisionStatus `json:"status"`
}

// UpdateAppRevisionStatusResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id} endpoint.
// It is intentionally empty: success is conveyed by the HTTP status alone.
type UpdateAppRevisionStatusResponse struct{}
+
+// UpdateAppRevisionStatus updates the status of an app revision
+func (c *UpdateAppRevisionStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-app-revision-status")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	// read the request object from the decoder
+	request := &UpdateAppRevisionStatusRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if request.Status == "" {
+		err := telemetry.Error(ctx, span, nil, "status cannot be empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	appRevisionId, _ := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
+	if appRevisionId == "" {
+		err := telemetry.Error(ctx, span, nil, "app revision id is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	if appRevisionId == "" {
+		err := telemetry.Error(ctx, span, nil, "app revision id is empty")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	var statusProto porterv1.EnumRevisionStatus
+	switch request.Status {
+	case models.AppRevisionStatus_BuildFailed:
+		statusProto = porterv1.EnumRevisionStatus_ENUM_REVISION_STATUS_BUILD_FAILED
+	case models.AppRevisionStatus_DeployFailed:
+		statusProto = porterv1.EnumRevisionStatus_ENUM_REVISION_STATUS_DEPLOY_FAILED
+	case models.AppRevisionStatus_PredeployFailed:
+		statusProto = porterv1.EnumRevisionStatus_ENUM_REVISION_STATUS_PREDEPLOY_FAILED
+	default:
+		err := telemetry.Error(ctx, span, nil, "invalid status")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	updateStatusReq := connect.NewRequest(&porterv1.UpdateRevisionStatusRequest{
+		ProjectId:      int64(project.ID),
+		AppRevisionId:  appRevisionId,
+		RevisionStatus: statusProto,
+	})
+
+	_, err := c.Config().ClusterControlPlaneClient.UpdateRevisionStatus(ctx, updateStatusReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error updating revision status")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &UpdateAppRevisionStatusResponse{}
+	c.WriteResult(w, r, res)
+}

+ 97 - 0
api/server/handlers/porter_app/update_image.go

@@ -0,0 +1,97 @@
+package porter_app
+
+import (
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// UpdateImageHandler is the handler for the /apps/{porter_app_name}/update-image endpoint
+type UpdateImageHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewUpdateImageHandler handles POST requests to the /apps/{porter_app_name}/update-image endpoint
+func NewUpdateImageHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *UpdateImageHandler {
+	return &UpdateImageHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// UpdateImageRequest is the request object for the /apps/{porter_app_name}/update-image endpoint
+type UpdateImageRequest struct {
+	DeploymentTargetId string `json:"deployment_target_id"`
+	Repository         string `json:"repository"`
+	Tag                string `json:"tag"`
+}
+
+// UpdateImageResponse is the response object for the /apps/{porter_app_name}/update-image endpoint
+type UpdateImageResponse struct {
+	Repository string `json:"repository"`
+	Tag        string `json:"tag"`
+}
+
+// ServeHTTP updates the image (repository + tag) of a porter app on a deployment target via CCP.
+func (c *UpdateImageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-image")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
+		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusForbidden))
+		return
+	}
+
+	appName, reqErr := requestutils.GetURLParamString(r, types.URLParamPorterAppName)
+	if reqErr != nil {
+		// wrap the underlying request error so the root cause is recorded on the span
+		err := telemetry.Error(ctx, span, reqErr, "error parsing app name")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "app-name", Value: appName})
+
+	request := &UpdateImageRequest{}
+	if ok := c.DecodeAndValidate(w, r, request); !ok {
+		err := telemetry.Error(ctx, span, nil, "error decoding request")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	updateImageReq := connect.NewRequest(&porterv1.UpdateAppImageRequest{
+		ProjectId:          int64(project.ID),
+		DeploymentTargetId: request.DeploymentTargetId,
+		RepositoryUrl:      request.Repository,
+		Tag:                request.Tag,
+		AppName:            appName,
+	})
+	ccpResp, err := c.Config().ClusterControlPlaneClient.UpdateAppImage(ctx, updateImageReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling ccp update porter app image")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	res := &UpdateImageResponse{
+		Repository: ccpResp.Msg.RepositoryUrl,
+		Tag:        ccpResp.Msg.Tag,
+	}
+
+	c.WriteResult(w, r, res)
+}

+ 86 - 24
api/server/handlers/porter_app/validate.go

@@ -36,11 +36,29 @@ func NewValidatePorterAppHandler(
 	}
 }
 
+// ServiceDeletions are deletions to apply to a specific service
+type ServiceDeletions struct {
+	DomainNames           []string `json:"domain_names"`
+	IngressAnnotationKeys []string `json:"ingress_annotation_keys"`
+}
+
+// Deletions are the names of services and env variables to delete
+type Deletions struct {
+	ServiceNames     []string                    `json:"service_names"`
+	Predeploy        []string                    `json:"predeploy"`
+	EnvVariableNames []string                    `json:"env_variable_names"`
+	EnvGroupNames    []string                    `json:"env_group_names"`
+	ServiceDeletions map[string]ServiceDeletions `json:"service_deletions"`
+}
+
 // ValidatePorterAppRequest is the request object for the /apps/validate endpoint
 type ValidatePorterAppRequest struct {
-	Base64AppProto     string `json:"b64_app_proto"`
-	DeploymentTargetId string `json:"deployment_target_id"`
-	CommitSHA          string `json:"commit_sha"`
+	AppName            string    `json:"app_name"`
+	Base64AppProto     string    `json:"b64_app_proto"`
+	Base64AppOverrides string    `json:"b64_app_overrides"`
+	DeploymentTargetId string    `json:"deployment_target_id"`
+	CommitSHA          string    `json:"commit_sha"`
+	Deletions          Deletions `json:"deletions"`
 }
 
 // ValidatePorterAppResponse is the response object for the /apps/validate endpoint
@@ -61,7 +79,7 @@ func (c *ValidatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
 	)
 
-	if !project.ValidateApplyV2 {
+	if !project.GetFeatureFlag(models.ValidateApplyV2, c.Config().LaunchDarklyClient) {
 		err := telemetry.Error(ctx, span, nil, "project does not have validate apply v2 enabled")
 		c.HandleAPIError(w, r, apierrors.NewErrForbidden(err))
 		return
@@ -74,29 +92,34 @@ func (c *ValidatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	if request.Base64AppProto == "" {
-		err := telemetry.Error(ctx, span, nil, "b64 yaml is empty")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
-	decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error decoding base  yaml")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
-	}
-
 	appProto := &porterv1.PorterApp{}
-	err = helpers.UnmarshalContractObject(decoded, appProto)
-	if err != nil {
-		err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
-		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
-		return
+
+	if request.Base64AppProto == "" {
+		if request.AppName == "" {
+			err := telemetry.Error(ctx, span, nil, "app name is empty and no base64 proto provided")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		appProto.Name = request.AppName
+	} else {
+		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error decoding base yaml")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		err = helpers.UnmarshalContractObject(decoded, appProto)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
 	}
 
 	if appProto.Name == "" {
-		err := telemetry.Error(ctx, span, err, "app proto name is empty")
+		err := telemetry.Error(ctx, span, nil, "app proto name is empty")
 		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
 		return
 	}
@@ -107,11 +130,51 @@ func (c *ValidatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		telemetry.AttributeKV{Key: "commit-sha", Value: request.CommitSHA},
 	)
 
+	var overrides *porterv1.PorterApp
+
+	if request.Base64AppOverrides != "" {
+		decoded, err := base64.StdEncoding.DecodeString(request.Base64AppOverrides)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error decoding base  yaml")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		overrides = &porterv1.PorterApp{}
+		err = helpers.UnmarshalContractObject(decoded, overrides)
+		if err != nil {
+			err := telemetry.Error(ctx, span, err, "error unmarshalling app proto")
+			c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+			return
+		}
+
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "validated-with-overrides", Value: true})
+	}
+
+	var serviceDeletions map[string]*porterv1.ServiceDeletions
+	if request.Deletions.ServiceDeletions != nil {
+		serviceDeletions = make(map[string]*porterv1.ServiceDeletions)
+		for k, v := range request.Deletions.ServiceDeletions {
+			serviceDeletions[k] = &porterv1.ServiceDeletions{
+				DomainNames:        v.DomainNames,
+				IngressAnnotations: v.IngressAnnotationKeys,
+			}
+		}
+	}
+
 	validateReq := connect.NewRequest(&porterv1.ValidatePorterAppRequest{
 		ProjectId:          int64(project.ID),
 		DeploymentTargetId: request.DeploymentTargetId,
 		CommitSha:          request.CommitSHA,
 		App:                appProto,
+		AppOverrides:       overrides,
+		Deletions: &porterv1.Deletions{
+			ServiceNames:     request.Deletions.ServiceNames,
+			PredeployNames:   request.Deletions.Predeploy,
+			EnvVariableNames: request.Deletions.EnvVariableNames,
+			EnvGroupNames:    request.Deletions.EnvGroupNames,
+			ServiceDeletions: serviceDeletions,
+		},
 	})
 	ccpResp, err := c.Config().ClusterControlPlaneClient.ValidatePorterApp(ctx, validateReq)
 	if err != nil {
@@ -145,7 +208,6 @@ func (c *ValidatePorterAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 	}
 
 	b64 := base64.StdEncoding.EncodeToString(encoded)
-
 	response := &ValidatePorterAppResponse{
 		ValidatedBase64AppProto: b64,
 	}

+ 112 - 0
api/server/handlers/porter_app/yaml_from_revision.go

@@ -0,0 +1,112 @@
+package porter_app
+
+import (
+	"encoding/base64"
+	"net/http"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"gopkg.in/yaml.v2"
+
+	v2 "github.com/porter-dev/porter/internal/porter_app/v2"
+	"github.com/porter-dev/porter/internal/telemetry"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/server/shared/requestutils"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+// PorterYAMLFromRevisionHandler is the handler for the /apps/{porter_app_name}/revisions/{app_revision_id}/yaml endpoint
+type PorterYAMLFromRevisionHandler struct {
+	handlers.PorterHandlerReadWriter
+	authz.KubernetesAgentGetter
+}
+
+// NewPorterYAMLFromRevisionHandler returns a new PorterYAMLFromRevisionHandler
+func NewPorterYAMLFromRevisionHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) *PorterYAMLFromRevisionHandler {
+	return &PorterYAMLFromRevisionHandler{
+		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
+		KubernetesAgentGetter:   authz.NewOutOfClusterAgentGetter(config),
+	}
+}
+
+// PorterYAMLFromRevisionResponse is the response object for the /apps/{porter_app_name}/revisions/{app_revision_id}/yaml endpoint
+type PorterYAMLFromRevisionResponse struct {
+	B64PorterYAML string `json:"b64_porter_yaml"`
+}
+
+// ServeHTTP takes a porter app revision and returns the porter yaml for it
+func (c *PorterYAMLFromRevisionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-porter-yaml-from-revision")
+	defer span.End()
+
+	// use the span-carrying ctx for scope lookups, consistent with sibling handlers
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	appRevisionID, reqErr := requestutils.GetURLParamString(r, types.URLParamAppRevisionID)
+	if reqErr != nil {
+		// wrap the underlying request error so the root cause is recorded on the span
+		err := telemetry.Error(ctx, span, reqErr, "error parsing app revision id")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	getRevisionReq := connect.NewRequest(&porterv1.GetAppRevisionRequest{
+		ProjectId:     int64(project.ID),
+		AppRevisionId: appRevisionID,
+	})
+	ccpResp, err := c.Config().ClusterControlPlaneClient.GetAppRevision(ctx, getRevisionReq)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error getting app revision")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp == nil || ccpResp.Msg == nil {
+		err = telemetry.Error(ctx, span, nil, "get app revision response is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	if ccpResp.Msg.AppRevision == nil {
+		err = telemetry.Error(ctx, span, nil, "app revision is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	appProto := ccpResp.Msg.AppRevision.App
+	if appProto == nil {
+		err = telemetry.Error(ctx, span, nil, "app proto is nil")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	app, err := v2.AppFromProto(appProto)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error converting app proto to porter yaml")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	porterYAMLString, err := yaml.Marshal(app)
+	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error marshaling porter yaml")
+		c.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	b64String := base64.StdEncoding.EncodeToString(porterYAMLString)
+
+	response := &PorterYAMLFromRevisionResponse{
+		B64PorterYAML: b64String,
+	}
+
+	c.WriteResult(w, r, response)
+}

+ 1 - 1
api/server/handlers/project/create.go

@@ -81,7 +81,7 @@ func (p *ProjectCreateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	p.WriteResult(w, r, proj.ToProjectType())
+	p.WriteResult(w, r, proj.ToProjectType(p.Config().LaunchDarklyClient))
 
 	// add project to billing team
 	_, err = p.Config().BillingManager.CreateTeam(user, proj)

+ 1 - 0
api/server/handlers/project/create_test.go

@@ -47,6 +47,7 @@ func TestCreateProjectSuccessful(t *testing.T) {
 		HelmValuesEnabled:      false,
 		MultiCluster:           false,
 		EnableReprovision:      false,
+		ValidateApplyV2:        true,
 	}
 
 	gotProject := &types.CreateProjectResponse{}

+ 2 - 2
api/server/handlers/project/delete.go

@@ -33,7 +33,7 @@ func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	user, _ := ctx.Value(types.UserScope).(*models.User)
 	proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
-	if proj.CapiProvisionerEnabled {
+	if proj.GetFeatureFlag(models.CapiProvisionerEnabled, p.Config().LaunchDarklyClient) {
 		clusters, err := p.Config().Repo.Cluster().ListClustersByProjectID(proj.ID)
 		if err != nil {
 			p.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error finding clusters for project: %w", err)))
@@ -87,7 +87,7 @@ func (p *ProjectDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	p.WriteResult(w, r, deletedProject.ToProjectType())
+	p.WriteResult(w, r, deletedProject.ToProjectType(p.Config().LaunchDarklyClient))
 
 	// delete the billing team
 	if err := p.Config().BillingManager.DeleteTeam(user, proj); err != nil {

+ 1 - 1
api/server/handlers/project/get.go

@@ -26,5 +26,5 @@ func NewProjectGetHandler(
 func (p *ProjectGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
 
-	p.WriteResult(w, r, proj.ToProjectType())
+	p.WriteResult(w, r, proj.ToProjectType(p.Config().LaunchDarklyClient))
 }

+ 4 - 3
api/server/handlers/project/get_test.go

@@ -7,6 +7,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apitest"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/features"
 	"github.com/porter-dev/porter/internal/models"
 )
 
@@ -33,8 +34,8 @@ func TestGetProjectSuccessful(t *testing.T) {
 
 	handler.ServeHTTP(rr, req)
 
-	expProject := proj.ToProjectType()
-	gotProject := &types.Project{}
+	expProject := proj.ToProjectType(&features.Client{})
+	gotProject := types.Project{}
 
-	apitest.AssertResponseExpected(t, rr, expProject, gotProject)
+	apitest.AssertResponseExpected(t, rr, &expProject, &gotProject)
 }

+ 107 - 0
api/server/handlers/project/images.go

@@ -0,0 +1,107 @@
+package project
+
+import (
+	"net/http"
+	"time"
+
+	"connectrpc.com/connect"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/apierrors"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/telemetry"
+)
+
+// ImagesHandler serves the /images endpoint
+type ImagesHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+// NewImagesHandler returns a new ImagesHandler
+func NewImagesHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) *ImagesHandler {
+	return &ImagesHandler{
+		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
+	}
+}
+
+// ImageArtifact is an instance of an image in an image repository
+type ImageArtifact struct {
+	Tag       string    `json:"tag"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+// Image is a representation of a docker image
+// To pull one of the image instances, you must use a string of format <image.uri>:<image.artifact.tag>
+type Image struct {
+	Uri       string          `json:"uri"`
+	Artifacts []ImageArtifact `json:"artifacts"`
+}
+
+// ImagesResponse is the response payload for the /images endpoint
+type ImagesResponse struct {
+	Images []Image `json:"images"`
+}
+
+// ServeHTTP handles the GET request to retrieve a list of images for a given project
+func (p *ImagesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx, span := telemetry.NewSpan(r.Context(), "serve-get-images")
+	defer span.End()
+
+	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
+
+	if project.ID == 0 {
+		err := telemetry.Error(ctx, span, nil, "project id is 0")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
+		return
+	}
+
+	var resp ImagesResponse
+
+	imagesReq := connect.NewRequest(&porterv1.ImagesRequest{
+		ProjectId: int64(project.ID),
+	})
+	ccpResp, err := p.Config().ClusterControlPlaneClient.Images(ctx, imagesReq)
+	if err != nil {
+		err := telemetry.Error(ctx, span, err, "error calling ccp images")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	// err is nil past this point; the remaining failures are shape checks on the response
+	if ccpResp == nil {
+		err := telemetry.Error(ctx, span, nil, "ccp resp is nil")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg == nil {
+		err := telemetry.Error(ctx, span, nil, "ccp resp msg is nil")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+	if ccpResp.Msg.Images == nil {
+		err := telemetry.Error(ctx, span, nil, "ccp resp msg images is nil")
+		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
+		return
+	}
+
+	for _, image := range ccpResp.Msg.Images {
+		var artifacts []ImageArtifact
+		for _, artifact := range image.Artifacts {
+			artifacts = append(artifacts, ImageArtifact{
+				Tag:       artifact.Tag,
+				UpdatedAt: artifact.UpdatedAt.AsTime().UTC(),
+			})
+		}
+		resp.Images = append(resp.Images, Image{
+			Uri:       image.Uri,
+			Artifacts: artifacts,
+		})
+	}
+
+	p.WriteResult(w, r, resp)
+}

+ 2 - 2
api/server/handlers/project/list.go

@@ -35,10 +35,10 @@ func (p *ProjectListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	res := make([]*types.Project, len(projects))
+	res := make([]*types.ProjectList, len(projects))
 
 	for i, proj := range projects {
-		res[i] = proj.ToProjectType()
+		res[i] = proj.ToProjectListType()
 	}
 
 	p.WriteResult(w, r, res)

+ 4 - 4
api/server/handlers/project/list_test.go

@@ -40,11 +40,11 @@ func TestListProjectsSuccessful(t *testing.T) {
 
 	handler.ServeHTTP(rr, req)
 
-	expProjects := make([]*types.Project, 0)
+	expProjects := make([]*types.ProjectList, 0)
 
-	expProjects = append(expProjects, proj1.ToProjectType())
-	expProjects = append(expProjects, proj2.ToProjectType())
-	gotProjects := []*types.Project{}
+	expProjects = append(expProjects, proj1.ToProjectListType())
+	expProjects = append(expProjects, proj2.ToProjectListType())
+	gotProjects := []*types.ProjectList{}
 
 	apitest.AssertResponseExpected(t, rr, &expProjects, &gotProjects)
 }

+ 1 - 1
api/server/handlers/project/rename.go

@@ -47,5 +47,5 @@ func (c *RenameProjectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	c.WriteResult(w, r, project.ToProjectType())
+	c.WriteResult(w, r, project.ToProjectType(c.Config().LaunchDarklyClient))
 }

+ 16 - 0
api/server/handlers/project/update_onboarding_step.go

@@ -191,6 +191,21 @@ func (v *UpdateOnboardingStepHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 		}
 	}
 
+	if request.Step == "requested-quota-increase" {
+		err := v.Config().AnalyticsClient.Track(analytics.QuotaIncreaseAttemptTrack(&analytics.ProvisioningAttemptTrackOpts{
+			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
+			Email:                  user.Email,
+			FirstName:              user.FirstName,
+			LastName:               user.LastName,
+			CompanyName:            user.CompanyName,
+			Region:                 request.Region,
+			Provider:               request.Provider,
+		}))
+		if err != nil {
+			_ = telemetry.Error(ctx, span, err, "error tracking quota increase")
+		}
+	}
+
 	if request.Step == "provisioning-started" {
 		err := v.Config().AnalyticsClient.Track(analytics.ProvisioningAttemptTrack(&analytics.ProvisioningAttemptTrackOpts{
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
@@ -199,6 +214,7 @@ func (v *UpdateOnboardingStepHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 			LastName:               user.LastName,
 			CompanyName:            user.CompanyName,
 			Region:                 request.Region,
+			Provider:               request.Provider,
 		}))
 		if err != nil {
 			_ = telemetry.Error(ctx, span, err, "error tracking provisioning started")

+ 1 - 1
api/server/handlers/project_integration/create_aws.go

@@ -56,7 +56,7 @@ func (p *CreateAWSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		AWSIntegration: aws.ToAWSIntegrationType(),
 	}
 
-	if project.CapiProvisionerEnabled && p.Config().EnableCAPIProvisioner {
+	if project.GetFeatureFlag(models.CapiProvisionerEnabled, p.Config().LaunchDarklyClient) && p.Config().EnableCAPIProvisioner {
 		credReq := porterv1.CreateAssumeRoleChainRequest{
 			ProjectId:       int64(project.ID),
 			SourceArn:       "arn:aws:iam::108458755588:role/CAPIManagement", // hard coded as this is the final hop for a CAPI cluster

+ 1 - 1
api/server/handlers/project_integration/create_gcp.go

@@ -43,7 +43,7 @@ func (p *CreateGCPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if project.CapiProvisionerEnabled {
+	if project.GetFeatureFlag(models.CapiProvisionerEnabled, p.Config().LaunchDarklyClient) {
 		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "capi-provisioner-enabled", Value: true})
 
 		b64Key := base64.StdEncoding.EncodeToString([]byte(request.GCPKeyData))

+ 3 - 0
api/server/handlers/project_integration/get_gitlab_repo_buildpack.go

@@ -130,12 +130,15 @@ func initBuilderInfo() map[string]*buildpacks.BuilderInfo {
 	builders[buildpacks.PaketoBuilder] = &buildpacks.BuilderInfo{
 		Name: "Paketo",
 		Builders: []string{
+			"paketobuildpacks/builder-jammy-full:latest",
 			"paketobuildpacks/builder:full",
 		},
 	}
 	builders[buildpacks.HerokuBuilder] = &buildpacks.BuilderInfo{
 		Name: "Heroku",
 		Builders: []string{
+			"heroku/builder:22",
+			"heroku/builder-classic:22",
 			"heroku/buildpacks:20",
 			"heroku/buildpacks:18",
 		},

+ 1 - 1
api/server/handlers/project_integration/list_aws.go

@@ -40,7 +40,7 @@ func (p *ListAWSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
 
-	if project.CapiProvisionerEnabled {
+	if project.GetFeatureFlag(models.CapiProvisionerEnabled, p.Config().LaunchDarklyClient) {
 		dblinks, err := p.Repo().AWSAssumeRoleChainer().List(ctx, project.ID)
 		if err != nil {
 			e := fmt.Errorf("unable to find assume role chain links: %w", err)

+ 2 - 3
api/server/handlers/project_integration/preflight_check.go

@@ -4,9 +4,8 @@ import (
 	"fmt"
 	"net/http"
 
-	"github.com/porter-dev/api-contracts/generated/go/helpers"
-
 	"connectrpc.com/connect"
+	"github.com/porter-dev/api-contracts/generated/go/helpers"
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -53,7 +52,7 @@ func (p *CreatePreflightCheckHandler) ServeHTTP(w http.ResponseWriter, r *http.R
 	}
 
 	if cloudValues.PreflightValues != nil {
-		if cloudValues.CloudProvider == porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_GCP {
+		if cloudValues.CloudProvider == porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_GCP || cloudValues.CloudProvider == porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS {
 			input.PreflightValues = cloudValues.PreflightValues
 		}
 	}

+ 0 - 61
api/server/handlers/project_integration/preflight_check_aws_usage.go

@@ -1,61 +0,0 @@
-package project_integration
-
-import (
-	"fmt"
-	"net/http"
-
-	"connectrpc.com/connect"
-	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
-	"github.com/porter-dev/porter/api/server/handlers"
-	"github.com/porter-dev/porter/api/server/shared"
-	"github.com/porter-dev/porter/api/server/shared/apierrors"
-	"github.com/porter-dev/porter/api/server/shared/config"
-	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/models"
-)
-
-type CreatePreflightCheckAWSUsageHandler struct {
-	handlers.PorterHandlerReadWriter
-}
-
-func NewCreatePreflightCheckAWSUsageHandler(
-	config *config.Config,
-	decoderValidator shared.RequestDecoderValidator,
-	writer shared.ResultWriter,
-) *CreatePreflightCheckAWSUsageHandler {
-	return &CreatePreflightCheckAWSUsageHandler{
-		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
-	}
-}
-
-func (p *CreatePreflightCheckAWSUsageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
-
-	if !p.Config().EnableCAPIProvisioner {
-		message := "Trying to run preflight checks but CAPI Provisioner is disabled. If you want to provision through CAPI, make sure that the environment variable ENABLE_CAPI_PROVISIONER is set to true"
-		e := fmt.Errorf(message)
-		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusServiceUnavailable, message))
-		return
-	}
-
-	request := &types.QuotaPreflightCheckRequest{}
-	if ok := p.DecodeAndValidate(w, r, request); !ok {
-		return
-	}
-
-	checkReq := porterv1.QuotaPreflightCheckRequest{
-		ProjectId: int64(project.ID),
-		TargetArn: request.TargetARN,
-		Region:    request.Region,
-	}
-
-	checkResp, err := p.Config().ClusterControlPlaneClient.QuotaPreflightCheck(ctx, connect.NewRequest(&checkReq))
-	if err != nil {
-		e := fmt.Errorf("Pre-provision check failed: %w", err)
-		p.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(e, http.StatusPreconditionFailed, err.Error()))
-		return
-	}
-
-	p.WriteResult(w, r, checkResp)
-}

Nem az összes módosított fájl került megjelenítésre, mert túl sok fájl változott