Jelajahi Sumber

get database flow working with new provisioner

Alexander Belanger 4 tahun lalu
induk
melakukan
3a54855b57
58 mengubah file dengan 2842 tambahan dan 1032 penghapusan
  1. 2 2
      .github/workflows/prerelease.yaml
  2. 5 1
      api/server/handlers/infra/stream_state.go
  3. 0 2
      api/server/handlers/namespace/add_env_group_app.go
  4. 4 4
      api/server/handlers/namespace/create_env_group.go
  5. 0 2
      api/server/handlers/namespace/remove_env_group_app.go
  6. 0 30
      api/server/router/namespace.go
  7. 12 4
      api/server/shared/websocket/response_writer.go
  8. 4 1
      api/server/shared/websocket/upgrader.go
  9. 1 0
      api/types/database.go
  10. 0 122
      api/types/provision.go
  11. 0 33
      api/types/provision_test.go
  12. 1 1
      build/Dockerfile.osx
  13. 1 1
      build/Dockerfile.win
  14. 1 1
      cli/cmd/login/server.go
  15. 84 38
      dashboard/src/components/DocsHelper.tsx
  16. 1 0
      dashboard/src/components/porter-form/PorterFormContextProvider.tsx
  17. 59 16
      dashboard/src/components/porter-form/field-components/KeyValueArray.tsx
  18. 3 0
      dashboard/src/components/porter-form/types.ts
  19. 2 2
      dashboard/src/main/home/ModalHandler.tsx
  20. 0 302
      dashboard/src/main/home/cluster-dashboard/databases/CreateDatabaseForm.tsx
  21. 30 4
      dashboard/src/main/home/cluster-dashboard/databases/DatabasesHome.tsx
  22. 122 11
      dashboard/src/main/home/cluster-dashboard/databases/DatabasesList.tsx
  23. 9 3
      dashboard/src/main/home/cluster-dashboard/databases/mock_data.ts
  24. 2 6
      dashboard/src/main/home/cluster-dashboard/databases/routes.tsx
  25. 62 29
      dashboard/src/main/home/cluster-dashboard/env-groups/ExpandedEnvGroup.tsx
  26. 26 0
      dashboard/src/main/home/cluster-dashboard/expanded-chart/ExpandedChart.tsx
  27. 27 4
      dashboard/src/main/home/cluster-dashboard/expanded-chart/ExpandedChartWrapper.tsx
  28. 3 0
      dashboard/src/main/home/infrastructure/InfrastructureRouter.tsx
  29. 101 40
      dashboard/src/main/home/infrastructure/components/ProvisionInfra.tsx
  30. 45 1
      dashboard/src/main/home/modals/ConnectToDatabaseInstructionsModal.tsx
  31. 51 22
      dashboard/src/main/home/modals/LoadEnvGroupModal.tsx
  32. 1 1
      dashboard/src/main/home/modals/Modal.tsx
  33. 6 1
      dashboard/src/main/home/navbar/Help.tsx
  34. 1 3
      dashboard/src/shared/api.tsx
  35. 1 1
      docker/Dockerfile
  36. 1 1
      docker/cli.Dockerfile
  37. 1 1
      docker/dev.Dockerfile
  38. 1 1
      ee/docker/ee.Dockerfile
  39. 207 32
      go.mod
  40. 175 135
      go.sum
  41. 22 27
      internal/helm/agent.go
  42. 304 7
      internal/helm/postrenderer.go
  43. 241 138
      internal/kubernetes/agent.go
  44. 2 0
      internal/models/database.go
  45. 584 0
      internal/redis_stream/global_stream.go
  46. 69 0
      internal/redis_stream/resource_stream.go
  47. 1 0
      internal/repository/database.go
  48. 8 0
      internal/repository/gorm/database.go
  49. 4 0
      internal/repository/test/database.go
  50. 102 0
      provisioner/integrations/state/s3/s3.go
  51. 118 0
      provisioner/server/handlers/desired.go
  52. 28 0
      provisioner/server/handlers/init.go
  53. 144 0
      provisioner/server/handlers/log.go
  54. 100 0
      provisioner/server/handlers/tfstate.go
  55. 41 0
      provisioner/test_client/client.go
  56. 3 0
      provisioner/types/transformers.go
  57. 18 1
      services/job_sidecar_container/job_killer.sh
  58. 1 1
      services/porter_cli_container/dev.Dockerfile

+ 2 - 2
.github/workflows/prerelease.yaml

@@ -52,7 +52,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.17
       - name: Write Dashboard Environment Variables
         run: |
           cat >./dashboard/.env <<EOL
@@ -119,7 +119,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.16
+          go-version: 1.17
       - name: Write Dashboard Environment Variables
         run: |
           cat >./dashboard/.env <<EOL

+ 5 - 1
api/server/handlers/infra/stream_state.go

@@ -106,7 +106,11 @@ func (c *InfraStreamStateHandler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 				return
 			}
 
-			safeRW.WriteJSONWithChannel(stateUpdate, errorchan)
+			err = safeRW.WriteJSON(stateUpdate)
+
+			if err != nil {
+				errorchan <- err
+			}
 		}
 	}()
 

+ 0 - 2
api/server/handlers/namespace/add_env_group_app.go

@@ -63,8 +63,6 @@ func (c *AddEnvGroupAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	// TODO: verify that application exists
-
 	cm, err = agent.AddApplicationToVersionedConfigMap(cm, request.ApplicationName)
 
 	if err != nil {

+ 4 - 4
api/server/handlers/namespace/create_env_group.go

@@ -100,7 +100,9 @@ func (c *CreateEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 		return
 	}
 
-	// trigger rollout of new applications
+	c.WriteResult(w, r, envGroup)
+
+	// trigger rollout of new applications after writing the result
 	errors := rolloutApplications(c.Config(), cluster, helmAgent, envGroup, configMap, releases)
 
 	if len(errors) > 0 {
@@ -110,11 +112,9 @@ func (c *CreateEnvGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request
 			errStrArr = append(errStrArr, err.Error())
 		}
 
-		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf(strings.Join(errStrArr, ","))))
+		c.HandleAPIErrorNoWrite(w, r, apierrors.NewErrInternal(fmt.Errorf(strings.Join(errStrArr, ","))))
 		return
 	}
-
-	c.WriteResult(w, r, envGroup)
 }
 
 func rolloutApplications(

+ 0 - 2
api/server/handlers/namespace/remove_env_group_app.go

@@ -63,8 +63,6 @@ func (c *RemoveEnvGroupAppHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	// TODO: verify that application exists
-
 	cm, err = agent.RemoveApplicationFromVersionedConfigMap(cm, request.ApplicationName)
 
 	if err != nil && errors.Is(err, kubernetes.IsNotFoundError) {

+ 0 - 30
api/server/router/namespace.go

@@ -56,36 +56,6 @@ func getNamespaceRoutes(
 
 	routes := make([]*Route, 0)
 
-	// POST /api/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/provision/rds/ -> provision.NewProvisionRDSHandler
-	// provisionRDSEndpoint := factory.NewAPIEndpoint(
-	// 	&types.APIRequestMetadata{
-	// 		Verb:   types.APIVerbCreate,
-	// 		Method: types.HTTPVerbPost,
-	// 		Path: &types.Path{
-	// 			Parent:       basePath,
-	// 			RelativePath: relPath + "/provision/rds",
-	// 		},
-	// 		Scopes: []types.PermissionScope{
-	// 			types.UserScope,
-	// 			types.ProjectScope,
-	// 			types.ClusterScope,
-	// 			types.NamespaceScope,
-	// 		},
-	// 	},
-	// )
-
-	// provisionRDSHandler := provision.NewProvisionRDSHandler(
-	// 	config,
-	// 	factory.GetDecoderValidator(),
-	// 	factory.GetResultWriter(),
-	// )
-
-	// routes = append(routes, &Route{
-	// 	Endpoint: provisionRDSEndpoint,
-	// 	Handler:  provisionRDSHandler,
-	// 	Router:   r,
-	// })
-
 	// GET /api/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}/envgroups/list -> namespace.NewListEnvGroupsHandler
 	listEnvGroupsEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{

+ 12 - 4
api/server/shared/websocket/response_writer.go

@@ -3,6 +3,7 @@ package websocket
 import (
 	"errors"
 	"net/http"
+	"sync"
 	"syscall"
 
 	"github.com/gorilla/websocket"
@@ -10,23 +11,30 @@ import (
 
 type WebsocketSafeReadWriter struct {
 	conn *websocket.Conn
+	mu   sync.Mutex
 }
 
-func (w *WebsocketSafeReadWriter) WriteJSONWithChannel(v interface{}, errorChan chan<- error) {
+func (w *WebsocketSafeReadWriter) WriteJSON(v interface{}) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	err := w.conn.WriteJSON(v)
 
 	if err != nil {
 		if errOr(err, websocket.ErrCloseSent, syscall.EPIPE, syscall.ECONNRESET) {
 			// if close has been sent, or error is broken pipe error or connection reset, we want to
 			// send a message to the error channel to ensure closure but we ignore the error
-			errorChan <- nil
-		} else if err != nil {
-			errorChan <- err
+			return nil
 		}
+
+		return err
 	}
+
+	return nil
 }
 
 func (w *WebsocketSafeReadWriter) Write(data []byte) (int, error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	err := w.conn.WriteMessage(websocket.TextMessage, data)
 
 	if err != nil {

+ 4 - 1
api/server/shared/websocket/upgrader.go

@@ -27,7 +27,10 @@ func (u *Upgrader) Upgrade(
 
 	conn, err := u.WSUpgrader.Upgrade(w, r, responseHeader)
 
-	safeWriter := &WebsocketSafeReadWriter{conn}
+	safeWriter := &WebsocketSafeReadWriter{
+		conn: conn,
+	}
+
 	rw := &WebsocketResponseWriter{conn, safeWriter}
 
 	return conn, rw, safeWriter, err

+ 1 - 0
api/types/database.go

@@ -17,6 +17,7 @@ type Database struct {
 	InstanceStatus    string `json:"instance_status"`
 	InstanceDBFamily  string `json:"instance_db_family"`
 	InstanceDBVersion string `json:"instance_db_version"`
+	Status            string `json:"status"`
 }
 
 type ListDatabaseResponse []*Database

+ 0 - 122
api/types/provision.go

@@ -1,8 +1,6 @@
 package types
 
 type CreateRDSInfraRequest struct {
-	Namespace string `json:"namespace"`
-
 	// version of the postgres engine
 	DBEngineVersion string `json:"db_engine_version"`
 
@@ -19,123 +17,3 @@ type CreateRDSInfraRequest struct {
 	DBMaxStorage string `json:"db_max_allocated_storage"`
 	DBEncryption bool   `json:"db_storage_encrypted"`
 }
-
-// type Family string
-
-// type EngineVersion string
-
-// func (e EngineVersion) MajorVersion() string {
-// 	semver := strings.Split(string(e), ".")
-
-// 	return strings.Join(semver[:len(semver)-1], ".")
-// }
-
-// type EngineVersions []EngineVersion
-
-// func (e EngineVersions) VersionExists(version EngineVersion) bool {
-// 	for _, v := range e {
-// 		if version == v {
-// 			return true
-// 		}
-// 	}
-
-// 	return false
-// }
-
-// const (
-// 	FamilyPG9   Family = "postgres9"
-// 	FamilyPG10  Family = "postgres10"
-// 	FamilyPG11  Family = "postgres11"
-// 	FamilyPG12  Family = "postgres12"
-// 	FamilyPG13  Family = "postgres13"
-// 	FamilyMysql Family = "mysql"
-// )
-
-// var availablePG9Versions EngineVersions = EngineVersions{
-// 	"9.6.1",
-// 	"9.6.2",
-// 	"9.6.3",
-// 	"9.6.4",
-// 	"9.6.5",
-// 	"9.6.6",
-// 	"9.6.7",
-// 	"9.6.8",
-// 	"9.6.9",
-// 	"9.6.10",
-// 	"9.6.11",
-// 	"9.6.12",
-// 	"9.6.13",
-// 	"9.6.14",
-// 	"9.6.15",
-// 	"9.6.16",
-// 	"9.6.17",
-// 	"9.6.18",
-// 	"9.6.19",
-// 	"9.6.20",
-// 	"9.6.21",
-// 	"9.6.22",
-// 	"9.6.23",
-// }
-
-// var availablePG10Versions EngineVersions = EngineVersions{
-// 	"10.1",
-// 	"10.2",
-// 	"10.3",
-// 	"10.4",
-// 	"10.5",
-// 	"10.6",
-// 	"10.7",
-// 	"10.8",
-// 	"10.9",
-// 	"10.10",
-// 	"10.11",
-// 	"10.12",
-// 	"10.13",
-// 	"10.14",
-// 	"10.15",
-// 	"10.16",
-// 	"10.17",
-// 	"10.18",
-// }
-
-// var availablePG11Versions EngineVersions = EngineVersions{
-// 	"11.1",
-// 	"11.2",
-// 	"11.3",
-// 	"11.4",
-// 	"11.5",
-// 	"11.6",
-// 	"11.7",
-// 	"11.8",
-// 	"11.9",
-// 	"11.10",
-// 	"11.11",
-// 	"11.12",
-// 	"11.13",
-// }
-
-// var availablePG12Versions EngineVersions = EngineVersions{
-// 	"12.2",
-// 	"12.3",
-// 	"12.4",
-// 	"12.5",
-// 	"12.6",
-// 	"12.7",
-// 	"12.8",
-// }
-
-// var availablePG13Versions EngineVersions = EngineVersions{
-// 	"13.1",
-// 	"13.2",
-// 	"13.3",
-// 	"13.4",
-// }
-
-// var DBVersionMapping = map[Family]EngineVersions{
-// 	FamilyPG9:   availablePG9Versions,
-// 	FamilyPG10:  availablePG10Versions,
-// 	FamilyPG11:  availablePG11Versions,
-// 	FamilyPG12:  availablePG12Versions,
-// 	FamilyPG13:  availablePG13Versions,
-// 	FamilyMysql: {},
-// }

+ 0 - 33
api/types/provision_test.go

@@ -1,33 +0,0 @@
-package types
-
-import (
-	"testing"
-)
-
-func TestAvailableVersion(t *testing.T) {
-	if _, ok := DBVersionMapping[Family("mongo")]; ok {
-		t.Fatalf("mong engine availability should fail")
-	}
-
-	v, ok := DBVersionMapping[Family(FamilyPG10)]
-	if !ok {
-		t.Fatalf("postgres engine not available in engine mapping")
-	}
-
-	// test for a particular version
-	if !v.VersionExists(EngineVersion("9.6.23")) {
-		t.Errorf("postgres 9.6.23 not available")
-	}
-
-	if v.VersionExists(EngineVersion("10.6.23")) {
-		t.Errorf("postgres 10.6.23 should not available")
-	}
-
-	if EngineVersion("9.6.23").MajorVersion() != "9.6" {
-		t.Errorf("wrong major version for postgres")
-	}
-
-	if EngineVersion("11.13").MajorVersion() != "11" {
-		t.Errorf("wrong major version for postgres")
-	}
-}

+ 1 - 1
build/Dockerfile.osx

@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.16
+ARG GO_VERSION=1.17
 
 FROM golang:${GO_VERSION}
 

+ 1 - 1
build/Dockerfile.win

@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.16
+ARG GO_VERSION=1.17
 
 FROM golang:${GO_VERSION}
 

+ 1 - 1
cli/cmd/login/server.go

@@ -67,7 +67,7 @@ func Login(
 	err = utils.OpenBrowser(loginURL)
 
 	if err != nil {
-		return "", fmt.Errorf("Could not open browser: %v", err)
+		fmt.Printf("Could not open browser. Please navigate to the link manually.")
 	}
 
 	for {

+ 84 - 38
dashboard/src/components/DocsHelper.tsx

@@ -1,15 +1,21 @@
-import React, { Component, useState } from "react";
-import styled, { createGlobalStyle } from "styled-components";
-import Button from "@material-ui/core/Button";
-import Tooltip from "@material-ui/core/Tooltip";
-import { ClickAwayListener, TooltipProps } from "@material-ui/core";
+import React from "react";
+import styled from "styled-components";
+
+import { ClickAwayListener } from "@material-ui/core";
 
 type Props = {
   tooltipText: string;
   link: string;
+  placement?: TooltipPlacement;
+  disableMargin?: boolean;
 };
 
-const DocsHelper: React.FC<Props> = ({ tooltipText, link }) => {
+const DocsHelper: React.FC<Props> = ({
+  tooltipText,
+  link,
+  placement,
+  disableMargin,
+}) => {
   const [open, setOpen] = React.useState(false);
 
   const handleTooltipClose = () => {
@@ -25,46 +31,87 @@ const DocsHelper: React.FC<Props> = ({ tooltipText, link }) => {
   };
 
   return (
-    <DocsHelperContainer>
+    <DocsHelperContainer disableMargin={disableMargin}>
       <ClickAwayListener
         onClickAway={() => {
           handleTooltipClose();
         }}
       >
         <div>
-          <Tooltip
-            PopperProps={{
-              disablePortal: true,
-              placement: "top-end",
-            }}
-            onClose={handleTooltipClose}
-            open={open}
-            interactive
-            disableFocusListener
-            disableHoverListener
-            disableTouchListener
-            title={
+          <HelperButton onClick={handleTooltipToggle}>
+            <i className="material-icons">help_outline</i>
+          </HelperButton>
+          {open && (
+            <Tooltip placement={placement}>
               <StyledContent onClick={handleTooltipOpen}>
                 {tooltipText}
                 <A target="_blank" href={link}>
                   Documentation {">"}
                 </A>
               </StyledContent>
-            }
-          >
-            <HelperButton onClick={handleTooltipToggle}>
-              <i className="material-icons">help_outline</i>
-            </HelperButton>
-          </Tooltip>
+            </Tooltip>
+          )}
         </div>
       </ClickAwayListener>
-      <TooltipStyle />
     </DocsHelperContainer>
   );
 };
 
 export default DocsHelper;
 
+type TooltipPlacement = "top-end" | "bottom-end" | "top-start" | "bottom-start";
+
+const Tooltip = styled.div<{ placement: TooltipPlacement }>`
+  position: absolute;
+  ${({ placement }) => {
+    switch (placement) {
+      case "top-start":
+        return `
+          bottom: 25px;
+          left: 0px;
+        `;
+      case "bottom-end":
+        return `
+          top: 25px;
+          right: 0px;
+        `;
+      case "bottom-start":
+        return `
+          top: 25px;
+          left: 0px;
+        `;
+      case "top-end":
+      default:
+        return `
+          bottom: 25px;
+          right: 0px;
+        `;
+    }
+  }}
+  word-wrap: break-word;
+  min-height: 18px;
+  width: fit-content;
+  padding: 5px 7px;
+  z-index: 999;
+  display: flex;
+  flex-direction: column;
+  justify-content: center;
+  flex: 1;
+  color: white;
+  text-transform: none;
+  opacity: 0;
+  animation: faded-in 0.2s 0.15s;
+  animation-fill-mode: forwards;
+  @keyframes faded-in {
+    from {
+      opacity: 0;
+    }
+    to {
+      opacity: 1;
+    }
+  }
+`;
+
 const StyledContent = styled.div`
   font-family: "Work Sans", sans-serif;
   font-size: 12px;
@@ -72,7 +119,8 @@ const StyledContent = styled.div`
   padding: 12px 14px;
   line-height: 1.5em;
   user-select: text;
-  width: calc(100% + 14px);
+  width: max-content;
+  max-width: 300px;
   height: calc(100% + 10px);
   margin-left: -7px;
   height: 100%;
@@ -96,26 +144,24 @@ const HelperButton = styled.div`
   }
 `;
 
-const TooltipStyle = createGlobalStyle`
-  .MuiTooltip-tooltip {
-    background-color: #00000000 !important;
-    font-size: 12px !important;
-    padding: 0px;
-    max-width: 300px !important;    
-  }
-`;
-
 const A = styled.a`
   display: inline-block;
   height: 20px;
   color: #8590ff;
   text-decoration: underline;
   cursor: pointer;
+  margin-top: 10px;
   width: 100%;
   text-align: right;
   user-select: none;
 `;
 
-const DocsHelperContainer = styled.div`
-  margin-left: auto;
+const DocsHelperContainer = styled.div<{ disableMargin: boolean }>`
+  ${(props) => {
+    if (props.disableMargin) {
+      return "";
+    }
+    return `margin-left: auto;`;
+  }}
+  position: relative;
 `;

+ 1 - 0
dashboard/src/components/porter-form/PorterFormContextProvider.tsx

@@ -308,6 +308,7 @@ export const PorterFormContextProvider: React.FC<Props> = (props) => {
                       envLoader: true,
                       fileUpload: true,
                       settings: {
+                        ...(field.settings || {}),
                         type: "env",
                       },
                     };

+ 59 - 16
dashboard/src/components/porter-form/field-components/KeyValueArray.tsx

@@ -40,7 +40,9 @@ const KeyValueArray: React.FC<Props> = (props) => {
             : [],
           showEnvModal: false,
           showEditorModal: false,
-          synced_env_groups: null,
+          synced_env_groups: props.settings?.options?.enable_synced_env_groups
+            ? null
+            : [],
         };
       },
     }
@@ -48,9 +50,17 @@ const KeyValueArray: React.FC<Props> = (props) => {
 
   const { currentProject } = useContext(Context);
 
+  // If the variable includes normal it means that the form corresponds to an old job template version
+  // The "normal" keyword doesn't exist for applications as well as the enable_synced_env_groups setting.
+  // This is why we have to check if the form corresponds to a job or not.
+  const enableSyncedEnvGroups = props.variable.includes("normal")
+    ? !!props.settings?.options?.enable_synced_env_groups
+    : true;
+
   useEffect(() => {
     if (hasSetValue(props) && !Array.isArray(state?.synced_env_groups)) {
       const values = props.value[0];
+      console.log(values);
       const envGroups = values?.synced || [];
       const promises = Promise.all(
         envGroups.map(async (envGroup: any) => {
@@ -86,7 +96,7 @@ const KeyValueArray: React.FC<Props> = (props) => {
 
   if (state == undefined) return <></>;
 
-  if (!Array.isArray(state.synced_env_groups)) {
+  if (!Array.isArray(state.synced_env_groups) && enableSyncedEnvGroups) {
     return <Loading />;
   }
 
@@ -205,11 +215,12 @@ const KeyValueArray: React.FC<Props> = (props) => {
               return { showEnvModal: false };
             })
           }
-          width="765px"
+          width="800px"
           height="542px"
         >
           <LoadEnvGroupModal
             existingValues={getProcessedValues(state.values)}
+            enableSyncedEnvGroups={enableSyncedEnvGroups}
             syncedEnvGroups={state.synced_env_groups}
             namespace={variables.namespace}
             clusterId={variables.clusterId}
@@ -296,9 +307,11 @@ const KeyValueArray: React.FC<Props> = (props) => {
 
     if (env_group) {
       return (
-        <Helper color="#f5cb42" style={{ marginLeft: "10px" }}>
-          This variable will be overrided by env group {env_group?.name}
-        </Helper>
+        <Wrapper>
+          <Helper color="#f5cb42" style={{ marginLeft: "10px" }}>
+            Overridden by the env group "{env_group?.name}"
+          </Helper>
+        </Wrapper>
       );
     }
 
@@ -429,9 +442,10 @@ const KeyValueArray: React.FC<Props> = (props) => {
             )}
           </InputWrapper>
         )}
-        {!!state.synced_env_groups?.length && (
+        {enableSyncedEnvGroups && !!state.synced_env_groups?.length && (
           <>
-            <Heading>Synced env vars</Heading>
+            <Heading>Synced Environment Groups</Heading>
+            <Br />
             {state.synced_env_groups?.map((envGroup: any) => {
               return (
                 <ExpandableEnvGroup
@@ -507,8 +521,15 @@ export const getFinalVariablesForKeyValueArray: GetFinalVariablesFunction = (
     }));
   }
 
+  const variableContent = props.variable.split(".");
+  let variable = props.variable;
+
+  if (variable.includes("normal")) {
+    variable = `${variableContent[0]}.${variableContent[1]}`;
+  }
+
   return {
-    [props.variable]: obj,
+    [variable]: obj,
   };
 };
 
@@ -529,7 +550,6 @@ const ExpandableEnvGroup: React.FC<{
             </EventInformation>
           </ContentContainer>
           <ActionContainer>
-            <ActionButton></ActionButton>
             <ActionButton onClick={() => onDelete()}>
               <span className="material-icons">delete</span>
             </ActionButton>
@@ -542,6 +562,7 @@ const ExpandableEnvGroup: React.FC<{
         </Flex>
         {isExpanded && (
           <>
+            <Buffer />
             {Object.entries(envGroup.variables || {})?.map(
               ([key, value], i: number) => {
                 // Preprocess non-string env values set via raw Helm values
@@ -573,14 +594,24 @@ const ExpandableEnvGroup: React.FC<{
                 );
               }
             )}
+            <Br />
           </>
         )}
       </StyledCard>
     </>
   );
-  return null;
 };
 
+const Br = styled.div`
+  width: 100%;
+  height: 1px;
+`;
+
+const Buffer = styled.div`
+  width: 100%;
+  height: 10px;
+`;
+
 const Spacer = styled.div`
   width: 10px;
   height: 20px;
@@ -684,6 +715,14 @@ const HideButton = styled(DeleteButton)`
   }
 `;
 
+const Wrapper = styled.div`
+  margin-left: 5px;
+  height: 20px;
+  display: flex;
+  align-items: center;
+  margin-top: -7px;
+`;
+
 const InputWrapper = styled.div`
   display: flex;
   align-items: center;
@@ -731,26 +770,27 @@ const fadeIn = keyframes`
 `;
 
 const StyledCard = styled.div`
-  border: 1px solid #ffffff00;
-  background: #ffffff08;
+  border: 1px solid #ffffff44;
+  background: #ffffff11;
   margin-bottom: 5px;
   border-radius: 8px;
-  padding: 14px;
+  margin-top: 15px;
+  padding: 10px 14px;
   overflow: hidden;
-  min-height: 60px;
   font-size: 13px;
   animation: ${fadeIn} 0.5s;
 `;
 
 const Flex = styled.div`
   display: flex;
+  height: 25px;
   align-items: center;
   justify-content: space-between;
 `;
 
 const ContentContainer = styled.div`
   display: flex;
-  height: 100%;
+  height: 40px;
   width: 100%;
   align-items: center;
 `;
@@ -781,6 +821,9 @@ const ActionButton = styled.button`
   background: none;
   color: white;
   padding: 5px;
+  width: 30px;
+  height: 30px;
+  margin-left: 5px;
   display: flex;
   justify-content: center;
   align-items: center;

+ 3 - 0
dashboard/src/components/porter-form/types.ts

@@ -83,6 +83,9 @@ export interface KeyValueArrayField extends GenericInputField {
   envLoader?: boolean;
   fileUpload?: boolean;
   settings?: {
+    options?: {
+      enable_synced_env_groups: boolean;
+    },
     type: "env" | "normal";
   };
 }

+ 2 - 2
dashboard/src/main/home/ModalHandler.tsx

@@ -216,8 +216,8 @@ const ModalHandler: React.FC<{
         <Modal
           onRequestClose={() => setCurrentModal(null, null)}
           width="600px"
-          height="240px"
-          title="How to connect my database"
+          height="350px"
+          title="Connecting to the Database"
         >
           <ConnectToDatabaseInstructionsModal />
         </Modal>

+ 0 - 302
dashboard/src/main/home/cluster-dashboard/databases/CreateDatabaseForm.tsx

@@ -1,302 +0,0 @@
-import Helper from "components/form-components/Helper";
-import InputRow from "components/form-components/InputRow";
-import SelectRow from "components/form-components/SelectRow";
-import SaveButton from "components/SaveButton";
-import React, { useContext, useEffect, useState } from "react";
-import { Link } from "react-router-dom";
-import api from "shared/api";
-import useAuth from "shared/auth/useAuth";
-import { Context } from "shared/Context";
-import { useRouting } from "shared/routing";
-import styled from "styled-components";
-import DashboardHeader from "../DashboardHeader";
-import {
-  DATABASE_INSTANCE_TYPES,
-  DEFAULT_DATABASE_INSTANCE_TYPE,
-  FORM_DEFAULT_VALUES,
-  LAST_POSTGRES_ENGINE_VERSION,
-  POSTGRES_DB_FAMILIES,
-  POSTGRES_ENGINE_VERSIONS,
-  DEFAULT_DB_FAMILY,
-} from "./static_data";
-
-type ValidationError = {
-  hasError: boolean;
-  description?: string;
-};
-
-const CreateDatabaseForm = () => {
-  const { currentProject, currentCluster } = useContext(Context);
-  const [databaseName, setDatabaseName] = useState(
-    () => `${currentProject.name}-database`
-  );
-  const [masterUser, setMasterUser] = useState("");
-  const [masterPassword, setMasterPassword] = useState("");
-  const [dbFamily, setDbFamily] = useState(DEFAULT_DB_FAMILY);
-  const [engineVersion, setEngineVersion] = useState(
-    LAST_POSTGRES_ENGINE_VERSION
-  );
-  const [instanceType, setInstanceType] = useState(
-    DEFAULT_DATABASE_INSTANCE_TYPE
-  );
-  const [submitStatus, setSubmitStatus] = useState("");
-  const [availableNamespaces, setAvailableNamespaces] = useState([]);
-  const [selectedNamespace, setSelectedNamespace] = useState("default");
-  const [isAuthorized] = useAuth();
-
-  const { pushFiltered } = useRouting();
-
-  const validateForm = (): ValidationError => {
-    if (!databaseName.length) {
-      return {
-        hasError: true,
-        description: "Database name cannot be empty",
-      };
-    }
-
-    if (!masterUser.length) {
-      return {
-        hasError: true,
-        description: "Master user cannot be empty",
-      };
-    }
-
-    if (!masterPassword.length) {
-      return {
-        hasError: true,
-        description: "Master password cannot be empty",
-      };
-    }
-
-    return {
-      hasError: false,
-    };
-  };
-
-  const handleSubmit = async () => {
-    const validation = validateForm();
-    if (validation.hasError) {
-      setSubmitStatus(validation.description);
-      return;
-    }
-
-    try {
-      await api.provisionDatabase(
-        "<token>",
-        {
-          ...FORM_DEFAULT_VALUES,
-          db_family: dbFamily,
-          db_name: databaseName,
-          username: masterUser,
-          password: masterPassword,
-          db_engine_version: engineVersion,
-          machine_type: instanceType,
-        },
-        {
-          project_id: currentProject.id,
-          cluster_id: currentCluster.id,
-          namespace: selectedNamespace,
-        }
-      );
-      setSubmitStatus("successful");
-      pushFiltered("/databases", []);
-    } catch (error) {
-      console.error(error);
-      setSubmitStatus("We couldn't process your request, please try again.");
-    }
-  };
-
-  const updateNamespaces = async () => {
-    try {
-      const res = await api.getNamespaces(
-        "<token>",
-        {},
-        {
-          id: currentProject.id,
-          cluster_id: currentCluster.id,
-        }
-      );
-      if (res.data) {
-        const availableNamespaces = res.data.items.filter((namespace: any) => {
-          return namespace.status.phase !== "Terminating";
-        });
-        const namespaceOptions: {
-          label: string;
-          value: string;
-        }[] = availableNamespaces.map((x: { metadata: { name: string } }) => {
-          return { label: x.metadata.name, value: x.metadata.name };
-        });
-
-        if (availableNamespaces.length > 0) {
-          setAvailableNamespaces(namespaceOptions);
-        }
-      }
-    } catch (error) {
-      console.error(error);
-    }
-  };
-
-  useEffect(() => {
-    updateNamespaces();
-  }, []);
-
-  useEffect(() => {
-    setEngineVersion(
-      POSTGRES_ENGINE_VERSIONS[dbFamily][
-        POSTGRES_ENGINE_VERSIONS[dbFamily].length - 1
-      ].value
-    );
-  }, [dbFamily]);
-
-  return (
-    <>
-      <DashboardHeader
-        image="storage"
-        title="New database"
-        materialIconClass="material-icons-outlined"
-      />
-      <ControlRow>
-        <BackButton to="/databases">
-          <i className="material-icons">close</i>
-        </BackButton>
-      </ControlRow>
-
-      <FormWrapper>
-        <SelectRow
-          label="Namespace"
-          selectorProps={{
-            refreshOptions: () => {
-              updateNamespaces();
-            },
-            addButton: isAuthorized("namespace", "", ["get", "create"]),
-            dropdownWidth: "335px",
-            closeOverlay: true,
-          }}
-          value={selectedNamespace}
-          setActiveValue={setSelectedNamespace}
-          options={availableNamespaces}
-          width="100%"
-        />
-        <InputRow
-          type="string"
-          label="Database name"
-          isRequired
-          value={databaseName}
-          setValue={(value: string) => {
-            setDatabaseName(value);
-          }}
-          width="100%"
-        />
-        <InputRow
-          type="string"
-          label="Master user"
-          isRequired
-          value={masterUser}
-          setValue={(value: string) => {
-            setMasterUser(value);
-          }}
-          width="100%"
-        />
-        <InputRow
-          type="password"
-          label="Master password"
-          isRequired
-          value={masterPassword}
-          setValue={(value: string) => {
-            setMasterPassword(value);
-          }}
-          width="100%"
-        />
-        <SelectRow
-          label="DB Family"
-          options={POSTGRES_DB_FAMILIES}
-          setActiveValue={(value) => {
-            setDbFamily(value);
-          }}
-          value={dbFamily}
-          width="100%"
-        />
-        <SelectRow
-          label="Engine version"
-          options={POSTGRES_ENGINE_VERSIONS[dbFamily]}
-          setActiveValue={(value) => {
-            setEngineVersion(value);
-          }}
-          value={engineVersion}
-          width="100%"
-        />
-        <SelectRow
-          label="Instance type"
-          options={DATABASE_INSTANCE_TYPES}
-          setActiveValue={(value) => {
-            setInstanceType(value);
-          }}
-          value={instanceType}
-          width="100%"
-        />
-        <Helper>
-          Please remember that this feature is still on development, this means
-          that if you update the values provided here from your AWS Console
-          porter <b>WILL NOT</b> be able to track those changes. In case is
-          mandatory to change anything please contact the Porter team.
-        </Helper>
-
-        <SubmitButton
-          clearPosition
-          text="Create database"
-          onClick={() => {
-            handleSubmit();
-          }}
-          statusPosition="right"
-          status={submitStatus}
-        />
-      </FormWrapper>
-    </>
-  );
-};
-
-export default CreateDatabaseForm;
-
-const BackButton = styled(Link)`
-  display: flex;
-  width: 37px;
-  z-index: 1;
-  cursor: pointer;
-  height: 37px;
-  align-items: center;
-  justify-content: center;
-  border: 1px solid #ffffff55;
-  border-radius: 100px;
-  background: #ffffff11;
-  text-decoration: none;
-  color: white;
-
-  > i {
-    font-size: 20px;
-  }
-
-  :hover {
-    background: #ffffff22;
-    > img {
-      opacity: 1;
-    }
-  }
-`;
-
-const ControlRow = styled.div`
-  display: flex;
-  margin-left: auto;
-  justify-content: space-between;
-  align-items: center;
-  margin-bottom: 35px;
-  padding-left: 0px;
-`;
-
-const FormWrapper = styled.div`
-  max-width: 600px;
-  margin: auto;
-`;
-
-const SubmitButton = styled(SaveButton)`
-  margin-top: 20px;
-`;

+ 30 - 4
dashboard/src/main/home/cluster-dashboard/databases/DatabasesHome.tsx

@@ -1,13 +1,39 @@
-import React, { useContext, useState } from "react";
+import React, { useContext, useEffect, useState } from "react";
 import TabSelector from "components/TabSelector";
 import DashboardHeader from "../DashboardHeader";
 import DatabasesList from "./DatabasesList";
 import { StatusPage } from "main/home/onboarding/steps/ProvisionResources/forms/StatusPage";
 import { Context } from "shared/Context";
+import { useHistory, useLocation, useRouteMatch } from "react-router";
+import { getQueryParam, useRouting } from "shared/routing";
+
+const AvailableTabs = ["databases-list", "provisioner-status"] as const;
+
+type AvailableTabsType = typeof AvailableTabs[number];
 
 const DatabasesHome = () => {
   const { currentProject } = useContext(Context);
-  const [currentTab, setCurrentTab] = useState("databases-list");
+  const [currentTab, setCurrentTab] = useState<AvailableTabsType>(
+    "databases-list"
+  );
+  const { pushQueryParams } = useRouting();
+  const location = useLocation();
+  const history = useHistory();
+
+  useEffect(() => {
+    const current_tab = getQueryParam(
+      { location },
+      "current_tab"
+    ) as AvailableTabsType;
+
+    if (!AvailableTabs.includes(current_tab)) {
+      return;
+    }
+
+    if (current_tab !== currentTab) {
+      setCurrentTab(current_tab);
+    }
+  }, [location.search, history]);
 
   return (
     <div>
@@ -38,8 +64,8 @@ const DatabasesHome = () => {
             ),
           },
         ]}
-        setCurrentTab={(newTab) => {
-          setCurrentTab(newTab);
+        setCurrentTab={(newTab: AvailableTabsType) => {
+          pushQueryParams({ current_tab: newTab });
         }}
       />
     </div>

+ 122 - 11
dashboard/src/main/home/cluster-dashboard/databases/DatabasesList.tsx

@@ -1,20 +1,23 @@
 import CopyToClipboard from "components/CopyToClipboard";
-import SaveButton from "components/SaveButton";
 import Table from "components/Table";
 import React, { useContext, useEffect, useMemo, useState } from "react";
-import { useHistory, useLocation, useRouteMatch } from "react-router";
+import { useRouteMatch } from "react-router";
 import { Link } from "react-router-dom";
-import { Column } from "react-table";
+import { Column, Row } from "react-table";
 import api from "shared/api";
+import useAuth from "shared/auth/useAuth";
 import { Context } from "shared/Context";
+import { useRouting } from "shared/routing";
 import styled from "styled-components";
 import { mock_database_list } from "./mock_data";
 
 export type DatabaseObject = {
   cluster_id: number;
   project_id: number;
+  infra_id: number;
   instance_id: string;
   instance_name: string;
+  status: string;
   instance_endpoint: string;
 };
 
@@ -24,10 +27,14 @@ const DatabasesList = () => {
     currentProject,
     setCurrentError,
     setCurrentModal,
+    setCurrentOverlay,
+    user,
   } = useContext(Context);
   const { url } = useRouteMatch();
   const [isLoading, setIsLoading] = useState(true);
   const [databases, setDatabases] = useState<DatabaseObject[]>([]);
+  const [isAuth] = useAuth();
+  const { pushQueryParams } = useRouting();
 
   useEffect(() => {
     let isSubscribed = true;
@@ -57,18 +64,45 @@ const DatabasesList = () => {
     };
   }, [currentCluster, currentProject]);
 
-  const columns = useMemo<Column<DatabaseObject>[]>(
-    () => [
+  const handleDeleteDatabase = async (project_id: number, infra_id: number) => {
+    try {
+      await api.destroyInfra(
+        "<token>",
+        {},
+        {
+          project_id,
+          infra_id,
+        }
+      );
+
+      setCurrentOverlay(null);
+      pushQueryParams({ current_tab: "provisioner-status" });
+    } catch (error) {
+      console.error(error);
+      setCurrentError("We couldn't delete the infra, please try again.");
+    }
+  };
+
+  const columns = useMemo<Column<DatabaseObject>[]>(() => {
+    let columns: Column<DatabaseObject>[] = [
       {
         Header: "Instance id",
         accessor: "instance_id",
       },
       {
-        Header: "Instance name",
+        Header: "Name",
         accessor: "instance_name",
       },
       {
-        Header: "Instance endpoint",
+        Header: "Status",
+        accessor: "status",
+        Cell: ({ cell }) => {
+          const status: "running" | "destroying" = cell.value as any;
+          return <Status status={status}>{status}</Status>;
+        },
+      },
+      {
+        Header: "Endpoint",
         accessor: "instance_endpoint",
         Cell: ({ row }) => {
           return (
@@ -90,6 +124,7 @@ const DatabasesList = () => {
                 onClick={() =>
                   setCurrentModal("ConnectToDatabaseInstructionsModal", {
                     endpoint: row.original.instance_endpoint,
+                    name: row.original.instance_name,
                   })
                 }
               >
@@ -100,9 +135,40 @@ const DatabasesList = () => {
         },
         width: 50,
       },
-    ],
-    []
-  );
+    ];
+
+    if (isAuth("cluster", "", ["get", "delete"])) {
+      columns.push({
+        id: "delete_button",
+        Cell: ({ row }: { row: Row<DatabaseObject> }) => {
+          return (
+            <>
+              <DeleteButton
+                onClick={() =>
+                  setCurrentOverlay({
+                    message: `Are you sure you want to delete ${row.original.instance_name}?`,
+                    onYes: () =>
+                      handleDeleteDatabase(
+                        row.original.project_id,
+                        row.original.infra_id
+                      ),
+                    onNo: () => setCurrentOverlay(null),
+                  })
+                }
+              >
+                <i className="material-icons">delete</i>
+              </DeleteButton>
+            </>
+          );
+        },
+        width: 50,
+      });
+    } else {
+      columns = columns.filter((col) => col.id !== "delete_button");
+    }
+
+    return columns;
+  }, [user]);
 
   const data = useMemo<Array<DatabaseObject>>(() => {
     return databases;
@@ -111,7 +177,11 @@ const DatabasesList = () => {
   return (
     <DatabasesListWrapper>
       <ControlRow>
-        <Button to={`${url}/provision-database`}>
+        <Button
+          to={`/infrastructure/provision/RDS?origin=${encodeURIComponent(
+            "/databases"
+          )}`}
+        >
           <i className="material-icons">add</i>
           Create database
         </Button>
@@ -125,6 +195,47 @@ const DatabasesList = () => {
 
 export default DatabasesList;
 
+const Status = styled.div<{ status: "running" | "destroying" }>`
+  padding: 5px 10px;
+  margin-right: 12px;
+  background: ${(props) => {
+    if (props.status === "running") return "#38a88a";
+    if (props.status === "destroying") return "#cc3d42";
+  }};
+  font-size: 13px;
+  border-radius: 3px;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  max-height: 25px;
+  max-width: 80px;
+  text-transform: capitalize;
+  font-weight: 400;
+  user-select: none;
+`;
+
+const DeleteButton = styled.div`
+  display: flex;
+  visibility: ${(props: { invis?: boolean }) =>
+    props.invis ? "hidden" : "visible"};
+  align-items: center;
+  justify-content: center;
+  width: 30px;
+  float: right;
+  height: 30px;
+  :hover {
+    background: #ffffff11;
+    border-radius: 20px;
+    cursor: pointer;
+  }
+
+  > i {
+    font-size: 20px;
+    color: #ffffff44;
+    border-radius: 20px;
+  }
+`;
+
 const DatabasesListWrapper = styled.div`
   margin-top: 35px;
 `;

+ 9 - 3
dashboard/src/main/home/cluster-dashboard/databases/mock_data.ts

@@ -5,21 +5,27 @@ export const mock_database_list: DatabaseObject[] = [
     cluster_id: 1,
     instance_endpoint: "some/some",
     instance_id: "my-id",
-    instance_name: "Instance name",
+    instance_name: "instance-name",
     project_id: 3,
+    infra_id: 1,
+    status: "running",
   },
   {
     cluster_id: 1,
     instance_endpoint: "some/some",
     instance_id: "my-id",
-    instance_name: "Instance name",
+    instance_name: "instance-name",
     project_id: 3,
+    infra_id: 2,
+    status: "running",
   },
   {
     cluster_id: 1,
     instance_endpoint: "some/some",
     instance_id: "my-id",
-    instance_name: "Instance name",
+    instance_name: "instance-name",
     project_id: 3,
+    infra_id: 3,
+    status: "running",
   },
 ];

+ 2 - 6
dashboard/src/main/home/cluster-dashboard/databases/routes.tsx

@@ -2,7 +2,6 @@ import React, { useContext, useEffect, useLayoutEffect } from "react";
 import { Route, Switch, useRouteMatch } from "react-router";
 import { Context } from "shared/Context";
 import { useRouting } from "shared/routing";
-import CreateDatabaseForm from "./CreateDatabaseForm";
 import DatabasesHome from "./DatabasesHome";
 
 const DatabasesRoutes = () => {
@@ -12,8 +11,8 @@ const DatabasesRoutes = () => {
 
   useLayoutEffect(() => {
     if (
-      currentCluster.service !== "eks" &&
-      currentCluster.infra_id <= 0 &&
+      currentCluster.service !== "eks" ||
+      currentCluster.infra_id <= 0 ||
       !currentProject.enable_rds_databases
     ) {
       pushFiltered("/cluster-dashboard", []);
@@ -23,9 +22,6 @@ const DatabasesRoutes = () => {
   return (
     <>
       <Switch>
-        <Route path={`${url}/provision-database`}>
-          <CreateDatabaseForm />
-        </Route>
         <Route path={`${url}/`}>
           <DatabasesHome />
         </Route>

+ 62 - 29
dashboard/src/main/home/cluster-dashboard/env-groups/ExpandedEnvGroup.tsx

@@ -29,6 +29,7 @@ import { isAuthorized } from "shared/auth/authorization-helpers";
 import useAuth from "shared/auth/useAuth";
 import { fillWithDeletedVariables } from "components/porter-form/utils";
 import DynamicLink from "components/DynamicLink";
+import DocsHelper from "components/DocsHelper";
 
 type PropsType = WithAuthProps & {
   namespace: string;
@@ -65,12 +66,14 @@ export const ExpandedEnvGroupFC = ({
   namespace,
   closeExpanded,
 }: PropsType) => {
-  const { currentProject, currentCluster, setCurrentOverlay } = useContext(
-    Context
-  );
+  const {
+    currentProject,
+    currentCluster,
+    setCurrentOverlay,
+    setCurrentError,
+  } = useContext(Context);
   const [isAuthorized] = useAuth();
 
-  const [isLoading, setIsLoading] = useState(true);
   const [currentTab, setCurrentTab] = useState("variables-editor");
   const [isDeleting, setIsDeleting] = useState(false);
   const [buttonStatus, setButtonStatus] = useState("");
@@ -96,14 +99,14 @@ export const ExpandedEnvGroupFC = ({
     ) {
       return [
         { value: "variables-editor", label: "Environment Variables" },
-        { value: "applications", label: "Linked applications" },
+        { value: "applications", label: "Linked Applications" },
       ];
     }
 
     if (currentEnvGroup?.applications?.length) {
       return [
         { value: "variables-editor", label: "Environment Variables" },
-        { value: "applications", label: "Linked applications" },
+        { value: "applications", label: "Linked Applications" },
         { value: "settings", label: "Settings" },
       ];
     }
@@ -185,6 +188,7 @@ export const ExpandedEnvGroupFC = ({
   };
 
   const handleUpdateValues = async () => {
+    setButtonStatus("loading");
     const name = currentEnvGroup.name;
     let variables = currentEnvGroup.variables;
 
@@ -223,8 +227,14 @@ export const ExpandedEnvGroupFC = ({
             }
           )
           .then((res) => res.data);
+        setButtonStatus("successful");
         updateEnvGroup(updatedEnvGroup);
-      } catch (error) {}
+        setTimeout(() => setButtonStatus(""), 1000);
+      } catch (error) {
+        setButtonStatus("Couldn't update, please try again");
+        setCurrentError(error);
+        setTimeout(() => setButtonStatus(""), 1000);
+      }
     } else {
       const configMapSecretVariables = fillWithDeletedVariables(
         originalEnvVars.filter((variable) => {
@@ -254,23 +264,31 @@ export const ExpandedEnvGroupFC = ({
         }),
         {}
       );
-      console.log({ configMapVariables, configMapSecretVariables });
-      const updatedEnvGroup = await api
-        .updateConfigMap(
-          "<token>",
-          {
-            name,
-            variables: configMapVariables,
-            secret_variables: configMapSecretVariables,
-          },
-          {
-            id: currentProject.id,
-            cluster_id: currentCluster.id,
-            namespace,
-          }
-        )
-        .then((res) => res.data);
-      updateEnvGroup(updatedEnvGroup);
+
+      try {
+        const updatedEnvGroup = await api
+          .updateConfigMap(
+            "<token>",
+            {
+              name,
+              variables: configMapVariables,
+              secret_variables: configMapSecretVariables,
+            },
+            {
+              id: currentProject.id,
+              cluster_id: currentCluster.id,
+              namespace,
+            }
+          )
+          .then((res) => res.data);
+        setButtonStatus("successful");
+        updateEnvGroup(updatedEnvGroup);
+        setTimeout(() => setButtonStatus(""), 1000);
+      } catch (error) {
+        setButtonStatus("Couldn't update, please try again");
+        setCurrentError(error);
+        setTimeout(() => setButtonStatus(""), 1000);
+      }
     }
   };
 
@@ -480,6 +498,15 @@ const ApplicationsList = ({ envGroup }: { envGroup: EditableEnvGroup }) => {
 
   return (
     <>
+      <HeadingWrapper>
+        <Heading isAtTop>Linked applications:</Heading>
+        <DocsHelper
+          link="https://docs.porter.run/deploying-applications/environment-groups#syncing-environment-groups-to-applications"
+          tooltipText="When env group sync is enabled, the applications are automatically restarted when the env groups are updated."
+          placement="top-start"
+          disableMargin
+        />
+      </HeadingWrapper>
       {envGroup.applications.map((appName) => {
         return (
           <StyledCard>
@@ -505,6 +532,11 @@ const ApplicationsList = ({ envGroup }: { envGroup: EditableEnvGroup }) => {
   );
 };
 
+const HeadingWrapper = styled.div`
+  display: flex;
+  margin-bottom: 15px;
+`;
+
 const Header = styled.div`
   font-weight: 500;
   color: #aaaabb;
@@ -710,15 +742,16 @@ const fadeIn = keyframes`
 `;
 
 const StyledCard = styled.div`
-  border: 1px solid #ffffff00;
-  background: #ffffff08;
-  margin-bottom: 5px;
   border-radius: 8px;
-  padding: 14px;
+  padding: 10px 18px;
   overflow: hidden;
-  min-height: 60px;
   font-size: 13px;
   animation: ${fadeIn} 0.5s;
+
+  background: #2b2e36;
+  margin-bottom: 15px;
+  overflow: hidden;
+  border: 1px solid #ffffff0a;
 `;
 
 const Flex = styled.div`

+ 26 - 0
dashboard/src/main/home/cluster-dashboard/expanded-chart/ExpandedChart.tsx

@@ -655,6 +655,32 @@ const ExpandedChart: React.FC<Props> = (props) => {
   const handleUninstallChart = async () => {
     setDeleting(true);
     setCurrentOverlay(null);
+    const syncedEnvGroups = currentChart.config?.container?.env?.synced || [];
+    const removeApplicationToEnvGroupPromises = syncedEnvGroups.map(
+      (envGroup: any) => {
+        return api.removeApplicationFromEnvGroup(
+          "<token>",
+          {
+            name: envGroup?.name,
+            app_name: currentChart.name,
+          },
+          {
+            project_id: currentProject.id,
+            cluster_id: currentCluster.id,
+            namespace: currentChart.namespace,
+          }
+        );
+      }
+    );
+    try {
+      await Promise.all(removeApplicationToEnvGroupPromises);
+    } catch (error) {
+      setCurrentError(
+        "We couldn't remove the synced env group from the application, please remove it manually before uninstalling the chart, or try again."
+      );
+      return;
+    }
+
     try {
       await api.uninstallTemplate(
         "<token>",

+ 27 - 4
dashboard/src/main/home/cluster-dashboard/expanded-chart/ExpandedChartWrapper.tsx

@@ -3,7 +3,11 @@ import styled from "styled-components";
 import { Context } from "shared/Context";
 import { RouteComponentProps, withRouter } from "react-router";
 
-import { ChartType, StorageType } from "shared/types";
+import {
+  ChartType,
+  ChartTypeWithExtendedConfig,
+  StorageType,
+} from "shared/types";
 import api from "shared/api";
 import { getQueryParam, pushFiltered } from "shared/routing";
 import ExpandedJobChart from "./ExpandedJobChart";
@@ -11,7 +15,10 @@ import ExpandedChart from "./ExpandedChart";
 import Loading from "components/Loading";
 import PageNotFound from "components/PageNotFound";
 
-type PropsType = RouteComponentProps & {
+type PropsType = RouteComponentProps<{
+  baseRoute: string;
+  namespace: string;
+}> & {
   setSidebar: (x: boolean) => void;
   isMetricsInstalled: boolean;
 };
@@ -34,7 +41,7 @@ class ExpandedChartWrapper extends Component<PropsType, StateType> {
     let { currentProject, currentCluster } = this.context;
     if (currentProject && currentCluster) {
       api
-        .getChart(
+        .getChart<ChartTypeWithExtendedConfig>(
           "<token>",
           {},
           {
@@ -46,10 +53,26 @@ class ExpandedChartWrapper extends Component<PropsType, StateType> {
           }
         )
         .then((res) => {
+          const chart = res.data;
           this.setState({ currentChart: res.data, loading: false });
+          const isJob = res.data.form?.name?.toLowerCase() === "job";
+          let route = `${isJob ? "/jobs" : "/applications"}/${
+            currentCluster.name
+          }/${chart.namespace}/${chart.name}`;
+
+          if (isJob && this.props.match.params?.baseRoute === "applications") {
+            pushFiltered(this.props, route, ["project_id"]);
+            return;
+          }
+
+          if (!isJob && this.props.match.params?.baseRoute !== "applications") {
+            pushFiltered(this.props, route, ["project_id"]);
+            return;
+          }
         })
         .catch((err) => {
-          console.log("err", err.response.data);
+          console.log(err);
+          console.log("err", err?.response?.data);
           this.setState({ loading: false });
         });
     }

+ 3 - 0
dashboard/src/main/home/infrastructure/InfrastructureRouter.tsx

@@ -9,6 +9,9 @@ import ProvisionInfra from "./components/ProvisionInfra";
 const InfrastructureRouter = () => {
   return (
     <Switch>
+      <Route path="/infrastructure/provision/:name">
+        <ProvisionInfra />
+      </Route>
       <Route path="/infrastructure/provision">
         <ProvisionInfra />
       </Route>

+ 101 - 40
dashboard/src/main/home/infrastructure/components/ProvisionInfra.tsx

@@ -13,7 +13,7 @@ import AWSCredentialsList from "./credentials/AWSCredentialList";
 import Heading from "components/form-components/Heading";
 import GCPCredentialsList from "./credentials/GCPCredentialList";
 import DOCredentialsList from "./credentials/DOCredentialList";
-import { useRouting } from "shared/routing";
+import { getQueryParam, useRouting } from "shared/routing";
 import {
   InfraTemplateMeta,
   InfraTemplate,
@@ -23,10 +23,24 @@ import {
 import Description from "components/Description";
 import Select from "components/porter-form/field-components/Select";
 import ClusterList from "./credentials/ClusterList";
+import { useLocation, useParams } from "react-router";
+import qs from "qs";
 
 type Props = {};
 
+type ProvisionParams = {
+  name: string;
+};
+
+type ProvisionQueryParams = {
+  version?: string;
+};
+
 const ProvisionInfra: React.FunctionComponent<Props> = () => {
+  const { name } = useParams<ProvisionParams>();
+  const location = useLocation<ProvisionQueryParams>();
+  const version = getQueryParam({ location }, "version");
+  const origin = getQueryParam({ location }, "origin");
   const { currentProject, setCurrentError } = useContext(Context);
   const [templates, setTemplates] = useState<InfraTemplateMeta[]>([]);
   const [currentTemplate, setCurrentTemplate] = useState<InfraTemplate>(null);
@@ -41,7 +55,7 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
   const { pushFiltered } = useRouting();
 
   useEffect(() => {
-    if (currentProject) {
+    if (currentProject && !name) {
       api
         .listInfraTemplates(
           "<token>",
@@ -69,7 +83,38 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
           setIsLoading(false);
         });
     }
-  }, [currentProject]);
+  }, [currentProject, name]);
+
+  useEffect(() => {
+    if (currentProject && name) {
+      let templateVersion = version || "latest";
+
+      setIsLoading(true);
+
+      api
+        .getInfraTemplate(
+          "<token>",
+          {},
+          {
+            project_id: currentProject.id,
+            version: templateVersion,
+            name: name,
+          }
+        )
+        .then(({ data }) => {
+          setCurrentTemplate(data);
+          setIsLoading(false);
+        })
+        .catch((err) => {
+          console.error(err);
+          setHasError(true);
+          setCurrentError(err.response?.data?.error);
+          setIsLoading(false);
+        });
+    } else if (!name) {
+      setCurrentTemplate(null);
+    }
+  }, [currentProject, name, version]);
 
   const onSubmit = (values: any) => {
     setIsLoading(true);
@@ -92,7 +137,9 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
       .then(({ data }) => {
         setIsLoading(false);
 
-        if (data?.infra_id) {
+        if (origin) {
+          pushFiltered(origin, ["project_id"]);
+        } else if (data?.infra_id) {
           pushFiltered(`/infrastructure/${data?.infra_id}`, ["project_id"]);
         } else {
           pushFiltered(`/infrastructure`, ["project_id"]);
@@ -128,37 +175,23 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
     );
   };
 
-  const selectTemplate = (templateMeta: InfraTemplateMeta) => {
-    setIsLoading(true);
-
-    api
-      .getInfraTemplate(
-        "<token>",
-        {},
-        {
-          project_id: currentProject.id,
-          version: templateMeta.version,
-          name: templateMeta.name,
-        }
-      )
-      .then(({ data }) => {
-        setCurrentTemplate(data);
-        setIsLoading(false);
-      })
-      .catch((err) => {
-        console.error(err);
-        setHasError(true);
-        setCurrentError(err.response?.data?.error);
-        setIsLoading(false);
-      });
-  };
-
   const renderTemplates = () => {
     return templates.map((template) => {
       let { name, icon, description } = template;
 
       return (
-        <TemplateBlock key={name} onClick={() => selectTemplate(template)}>
+        <TemplateBlock
+          key={name}
+          onClick={() =>
+            pushFiltered(
+              `/infrastructure/provision/${template.name}`,
+              ["project_id"],
+              {
+                version: template.version,
+              }
+            )
+          }
+        >
           {renderIcon(icon)}
           <TemplateTitle>{name}</TemplateTitle>
           <TemplateDescription>{description}</TemplateDescription>
@@ -252,15 +285,50 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
     );
   };
 
+  const renderTitleSection = () => {
+    if (currentTemplate) {
+      return (
+        <>
+          <TitleSection>{`Provision ${currentTemplate.name}`}</TitleSection>
+          <InfoSection>
+            <Description>
+              {`Input the required configuration settings.`}
+            </Description>
+          </InfoSection>
+          <LineBreak />
+        </>
+      );
+    }
+
+    return (
+      <>
+        <TitleSection>Provision Infrastructure</TitleSection>
+        <InfoSection>
+          <Description>
+            Select the infrastructure template you would like to use for
+            provisioning.
+          </Description>
+        </InfoSection>
+        <LineBreak />
+      </>
+    );
+  };
+
   const renderContents = () => {
     if (currentTemplate) {
       let { name, icon, description } = currentTemplate;
       return (
         <ExpandedContainer>
           <BackArrowContainer>
-            <BackArrow onClick={() => setCurrentTemplate(null)}>
+            <BackArrow
+              onClick={() =>
+                pushFiltered(origin || `/infrastructure/provision`, [
+                  "project_id",
+                ])
+              }
+            >
               <i className="material-icons next-icon">navigate_before</i>
-              All Templates
+              {origin ? "Back" : "All Templates"}
             </BackArrow>
           </BackArrowContainer>
           <StepContainer>
@@ -280,14 +348,7 @@ const ProvisionInfra: React.FunctionComponent<Props> = () => {
 
   return (
     <TemplatesWrapper>
-      <TitleSection>Provision Infrastructure</TitleSection>
-      <InfoSection>
-        <Description>
-          Select the infrastructure template you would like to use for
-          provisioning.
-        </Description>
-      </InfoSection>
-      <LineBreak />
+      {renderTitleSection()}
       {renderContents()}
     </TemplatesWrapper>
   );

+ 45 - 1
dashboard/src/main/home/modals/ConnectToDatabaseInstructionsModal.tsx

@@ -1,10 +1,54 @@
+import Helper from "components/form-components/Helper";
 import React, { useContext } from "react";
 import { Context } from "shared/Context";
+import styled from "styled-components";
 
 const ConnectToDatabaseInstructionsModal = () => {
   const { currentModalData } = useContext(Context);
 
-  return <div>{currentModalData?.endpoint || ""}</div>;
+  return (
+    <Container>
+      In order to get connection credentials for your RDS Postgres database,
+      select <b>Load from Env Group</b> when launching or updating your
+      application. Then, select the rds-credentials-{currentModalData?.name}{" "}
+      database.
+      <p>
+        This will set the following environment variables in your application:
+      </p>
+      <CodeBlock>
+        <span>- PGHOST</span>
+        <span>- PGPORT</span>
+        <span>- PGUSER</span>
+        <span>- PGPASSWORD</span>
+      </CodeBlock>
+      <Helper>Note: the database automatically listens on port 5432.</Helper>
+    </Container>
+  );
 };
 
 export default ConnectToDatabaseInstructionsModal;
+
+const CodeBlock = styled.span`
+  display: block;
+  background-color: #1b1d26;
+  color: white;
+  border-radius: 5px;
+  font-family: monospace;
+  user-select: text;
+  max-height: 400px;
+  width: 90%;
+  margin-left: 5%;
+  margin-top: 20px;
+  overflow-x: hidden;
+  overflow-y: auto;
+  padding: 10px;
+  overflow-wrap: break-word;
+  > span {
+    display: block;
+  }
+`;
+
+const Container = styled.div`
+  margin-top: 30px;
+  line-height: 1.3rem;
+`;

+ 51 - 22
dashboard/src/main/home/modals/LoadEnvGroupModal.tsx

@@ -19,6 +19,7 @@ import {
   PopulatedEnvGroup,
 } from "components/porter-form/types";
 import Helper from "components/form-components/Helper";
+import DocsHelper from "components/DocsHelper";
 
 type PropsType = {
   namespace: string;
@@ -26,6 +27,7 @@ type PropsType = {
   closeModal: () => void;
   existingValues: Record<string, string>;
   setValues: (values: Record<string, string>) => void;
+  enableSyncedEnvGroups?: boolean;
   syncedEnvGroups?: PopulatedEnvGroup[];
   setSyncedEnvGroups?: (values: PopulatedEnvGroup) => void;
 };
@@ -34,7 +36,7 @@ type StateType = {
   envGroups: any[];
   loading: boolean;
   error: boolean;
-  selectedEnvGroup: EnvGroupData | null;
+  selectedEnvGroup: PopulatedEnvGroup | null;
   buttonStatus: string;
   shouldSync: boolean;
 };
@@ -162,7 +164,7 @@ export default class LoadEnvGroupModal extends Component<PropsType, StateType> {
       return "No env group selected";
     }
     if (hasClashingKeys) {
-      return "There are variables defined in this group that will override existing variables.";
+      return "";
     }
   }
 
@@ -236,24 +238,39 @@ export default class LoadEnvGroupModal extends Component<PropsType, StateType> {
               )}
             </SidebarSection>
           )}
-          <CheckboxRow
-            checked={this.state.shouldSync}
-            toggle={() =>
-              this.setState((prevState) => ({
-                shouldSync: !prevState.shouldSync,
-              }))
-            }
-            label="Enable env var synchronization"
-            disabled={this.state.selectedEnvGroup?.meta_version === 1}
-          />
-
-          {this.state.selectedEnvGroup?.meta_version === 1 && (
-            <Helper color="#f5cb42">
-              Looks like the env group you selected belongs to an old version
-              and is not available for syncing. You can fix this by updating the
-              env group from the env groups tab.
-            </Helper>
-          )}
+          <AbsoluteWrapper>
+            {this.props.enableSyncedEnvGroups ? (
+              <>
+                {this.state.selectedEnvGroup?.meta_version === 1 ? (
+                  <Helper color="#f5cb42">
+                    Upgrade this env group from the env groups tab to sync.
+                  </Helper>
+                ) : (
+                  <CheckboxRow
+                    checked={this.state.shouldSync}
+                    toggle={() =>
+                      this.setState((prevState) => ({
+                        shouldSync: !prevState.shouldSync,
+                      }))
+                    }
+                    label="Sync environment group"
+                    disabled={this.state.selectedEnvGroup?.meta_version === 1}
+                  />
+                )}
+                <IconWrapper>
+                  <DocsHelper
+                    link="https://docs.porter.run/deploying-applications/environment-groups#syncing-environment-groups-to-applications"
+                    tooltipText="When env group sync is enabled, the applications are automatically restarted when the env groups are updated."
+                    placement="top-start"
+                  />
+                </IconWrapper>
+              </>
+            ) : (
+              <Helper color="#f5cb42">
+                Upgrade the job template to enable syncing env groups
+              </Helper>
+            )}
+          </AbsoluteWrapper>
         </GroupModalSections>
 
         <SaveButton
@@ -269,6 +286,19 @@ export default class LoadEnvGroupModal extends Component<PropsType, StateType> {
 
 LoadEnvGroupModal.contextType = Context;
 
+const IconWrapper = styled.div`
+  margin-bottom: -10px;
+`;
+
+const AbsoluteWrapper = styled.div`
+  position: absolute;
+  z-index: 999;
+  bottom: 18px;
+  left: 25px;
+  display: flex;
+  align-items: center;
+`;
+
 const SidebarSection = styled.section<{ $expanded?: boolean }>`
   height: 100%;
   overflow-y: auto;
@@ -461,8 +491,7 @@ const StyledLoadEnvGroupModal = styled.div`
   top: 0;
   height: 100%;
   padding: 25px 30px;
-  overflow: hidden;
-  border-radius: 6px;
+  border-radius: 8px;
   background: #202227;
 `;
 

+ 1 - 1
dashboard/src/main/home/modals/Modal.tsx

@@ -92,7 +92,7 @@ const Overlay = styled.div`
   width: 100%;
   height: 100%;
   background-color: rgba(0, 0, 0, 0.6);
-  z-index: 3;
+  z-index: 999;
   display: flex;
   align-items: center;
   justify-content: center;

+ 6 - 1
dashboard/src/main/home/navbar/Help.tsx

@@ -29,7 +29,12 @@ export default class Help extends Component<PropsType, StateType> {
           <Dropdown dropdownWidth="155px" dropdownMaxHeight="300px">
             <Option
               onClick={() => {
-                window.open("https://docs.porter.run", "_blank").focus();
+                window
+                  .open(
+                    "https://porter-docs-demo-22fd462fef4dcd45.onporter.run",
+                    "_blank"
+                  )
+                  .focus();
               }}
             >
               <i className="material-icons-outlined">book</i>

+ 1 - 3
dashboard/src/shared/api.tsx

@@ -976,9 +976,7 @@ const getReleaseSteps = baseApi<
 });
 
 const destroyInfra = baseApi<
-  {
-    name: string;
-  },
+  {},
   {
     project_id: number;
     infra_id: number;

+ 1 - 1
docker/Dockerfile

@@ -2,7 +2,7 @@
 
 # Base Go environment
 # -------------------
-FROM golang:1.16-alpine as base
+FROM golang:1.17-alpine as base
 WORKDIR /porter
 
 RUN apk update && apk add --no-cache gcc musl-dev git

+ 1 - 1
docker/cli.Dockerfile

@@ -2,7 +2,7 @@
 
 # Base Go environment
 # -------------------
-FROM golang:1.16 as base
+FROM golang:1.17 as base
 WORKDIR /porter
 
 RUN apt-get update && apt-get install -y gcc musl-dev git make

+ 1 - 1
docker/dev.Dockerfile

@@ -1,6 +1,6 @@
 # Development environment
 # -----------------------
-FROM golang:1.16-alpine
+FROM golang:1.17-alpine
 WORKDIR /porter
 
 RUN apk update && apk add --no-cache gcc musl-dev git

+ 1 - 1
ee/docker/ee.Dockerfile

@@ -2,7 +2,7 @@
 
 # Base Go environment
 # -------------------
-FROM golang:1.16-alpine as base
+FROM golang:1.17-alpine as base
 WORKDIR /porter
 
 RUN apk update && apk add --no-cache gcc musl-dev git

+ 207 - 32
go.mod

@@ -1,11 +1,10 @@
 module github.com/porter-dev/porter
 
-go 1.16
+go 1.17
 
 require (
-	cloud.google.com/go v0.81.0
+	cloud.google.com/go v0.99.0
 	github.com/AlecAivazis/survey/v2 v2.2.9
-	github.com/BurntSushi/toml v0.4.1 // indirect
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/aws/aws-sdk-go v1.35.4
 	github.com/bradleyfalzon/ghinstallation/v2 v2.0.3
@@ -13,12 +12,12 @@ require (
 	github.com/cli/cli v1.11.0
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/digitalocean/godo v1.56.0
-	github.com/docker/cli v20.10.7+incompatible
+	github.com/docker/cli v20.10.11+incompatible
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v20.10.7+incompatible
-	github.com/docker/docker-credential-helpers v0.6.3
+	github.com/docker/docker v20.10.12+incompatible
+	github.com/docker/docker-credential-helpers v0.6.4
 	github.com/docker/go-connections v0.4.0
-	github.com/fatih/color v1.10.0
+	github.com/fatih/color v1.13.0
 	github.com/getsentry/sentry-go v0.11.0
 	github.com/go-chi/chi v4.1.2+incompatible
 	github.com/go-playground/validator/v10 v10.3.0
@@ -35,45 +34,221 @@ require (
 	github.com/itchyny/gojq v0.12.1
 	github.com/joeshaw/envdecode v0.0.0-20200121155833-099f1fc765bd
 	github.com/kris-nova/logger v0.0.0-20181127235838-fd0d87064b06
-	github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b // indirect
-	github.com/mattn/go-runewidth v0.0.12 // indirect
 	github.com/mitchellh/mapstructure v1.4.3
 	github.com/moby/moby v20.10.6+incompatible
 	github.com/moby/term v0.0.0-20210610120745-9d4ed1856297
-	github.com/onsi/gomega v1.16.0 // indirect
-	github.com/opencontainers/image-spec v1.0.1
-	github.com/pelletier/go-toml v1.9.4 // indirect
+	github.com/opencontainers/image-spec v1.0.2
 	github.com/pkg/errors v0.9.1
 	github.com/porter-dev/switchboard v0.0.0-20220109170702-ea2a4450e034
 	github.com/rs/zerolog v1.26.0
-	github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3 // indirect
-	github.com/sendgrid/rest v2.6.3+incompatible // indirect
 	github.com/sendgrid/sendgrid-go v3.8.0+incompatible
-	github.com/spf13/cobra v1.2.1
+	github.com/spf13/cobra v1.3.0
 	github.com/spf13/pflag v1.0.5
-	github.com/spf13/viper v1.8.1
+	github.com/spf13/viper v1.10.0
 	github.com/stretchr/testify v1.7.0
-	github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
 	golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b
-	golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba // indirect
-	golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
-	google.golang.org/api v0.44.0
+	golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba
+	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
+	google.golang.org/api v0.62.0
 	google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5
 	google.golang.org/grpc v1.44.0
 	google.golang.org/protobuf v1.27.1
-	gopkg.in/segmentio/analytics-go.v3 v3.1.0
-	gopkg.in/yaml.v2 v2.4.0
-	gorm.io/driver/postgres v1.0.2
 	gorm.io/driver/sqlite v1.1.3
-	gorm.io/gorm v1.20.2
-	helm.sh/helm/v3 v3.7.1
-	k8s.io/api v0.22.3
-	k8s.io/apimachinery v0.22.3
-	k8s.io/cli-runtime v0.22.3
-	k8s.io/client-go v0.22.3
+	gorm.io/gorm v1.22.3
+	helm.sh/helm/v3 v3.8.0
+	k8s.io/api v0.23.1
+	k8s.io/apimachinery v0.23.1
+	k8s.io/cli-runtime v0.23.1
+	k8s.io/client-go v0.23.1
 	k8s.io/helm v2.17.0+incompatible
-	k8s.io/kubectl v0.22.1
+	k8s.io/kubectl v0.23.1
 	sigs.k8s.io/aws-iam-authenticator v0.5.2
-	sigs.k8s.io/yaml v1.2.0
+	sigs.k8s.io/yaml v1.3.0
+)
+
+require (
+	gopkg.in/segmentio/analytics-go.v3 v3.1.0
+	gopkg.in/yaml.v2 v2.4.0
+	gorm.io/driver/postgres v1.2.3
+)
+
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest v0.11.20 // indirect
+	github.com/Azure/go-autorest/autorest/adal v0.9.15 // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+	github.com/BurntSushi/toml v0.4.1 // indirect
+	github.com/MakeNowJust/heredoc v1.0.0 // indirect
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver v1.5.0 // indirect
+	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
+	github.com/Masterminds/squirrel v1.5.2 // indirect
+	github.com/Microsoft/go-winio v0.5.1 // indirect
+	github.com/Microsoft/hcsshim v0.9.1 // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/apex/log v1.9.0 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bits-and-blooms/bitset v1.2.0 // indirect
+	github.com/buildpacks/imgutil v0.0.0-20210510154637-009f91f52918 // indirect
+	github.com/buildpacks/lifecycle v0.11.3 // indirect
+	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
+	github.com/cli/safeexec v1.0.0 // indirect
+	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
+	github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
+	github.com/containerd/cgroups v1.0.2 // indirect
+	github.com/containerd/containerd v1.5.9 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.4.1 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/docker/go-metrics v0.0.1 // indirect
+	github.com/docker/go-units v0.4.0 // indirect
+	github.com/emirpasic/gods v1.12.0 // indirect
+	github.com/envoyproxy/go-control-plane v0.10.1 // indirect
+	github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
+	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-errors/errors v1.0.1 // indirect
+	github.com/go-logr/logr v1.2.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.5 // indirect
+	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-playground/locales v0.13.0 // indirect
+	github.com/go-playground/universal-translator v0.17.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gofrs/flock v0.8.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/google/btree v1.0.1 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/go-containerregistry v0.5.1 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/google/uuid v1.2.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.1.1 // indirect
+	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
+	github.com/gosuri/uitable v0.0.4 // indirect
+	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/heroku/color v0.0.6 // indirect
+	github.com/huandu/xstrings v1.3.2 // indirect
+	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/itchyny/astgen-go v0.0.0-20210113000433-0da0671862a3 // indirect
+	github.com/itchyny/timefmt-go v0.1.1 // indirect
+	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
+	github.com/jackc/pgconn v1.10.1 // indirect
+	github.com/jackc/pgio v1.0.0 // indirect
+	github.com/jackc/pgpassfile v1.0.0 // indirect
+	github.com/jackc/pgproto3/v2 v2.2.0 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
+	github.com/jackc/pgtype v1.9.0 // indirect
+	github.com/jackc/pgx/v4 v4.14.0 // indirect
+	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.2 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/jmoiron/sqlx v1.3.4 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect
+	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b // indirect
+	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
+	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+	github.com/leodido/go-urn v1.2.0 // indirect
+	github.com/lib/pq v1.10.4 // indirect
+	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/magiconair/properties v1.8.5 // indirect
+	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.12 // indirect
+	github.com/mattn/go-sqlite3 v1.14.6 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
+	github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/moby/locker v1.0.1 // indirect
+	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/sys/mount v0.2.0 // indirect
+	github.com/moby/sys/mountinfo v0.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/onsi/ginkgo v1.16.4 // indirect
+	github.com/onsi/gomega v1.16.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/runc v1.0.2 // indirect
+	github.com/opencontainers/selinux v1.8.2 // indirect
+	github.com/pelletier/go-toml v1.9.4 // indirect
+	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.28.0 // indirect
+	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc // indirect
+	github.com/russross/blackfriday v1.5.2 // indirect
+	github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0 // indirect
+	github.com/segmentio/backo-go v0.0.0-20200129164019-23eae7c10bd3 // indirect
+	github.com/sendgrid/rest v2.6.3+incompatible // indirect
+	github.com/sergi/go-diff v1.1.0 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/cast v1.4.1 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/src-d/gcfg v1.4.0 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/xanzy/ssh-agent v0.3.0 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+	github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
+	github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
+	go.opencensus.io v0.23.0 // indirect
+	go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
+	golang.org/x/mod v0.5.0 // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	gopkg.in/gorp.v1 v1.7.2 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.66.2 // indirect
+	gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
+	gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
+	gopkg.in/warnings.v0 v0.1.2 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	k8s.io/apiextensions-apiserver v0.23.1 // indirect
+	k8s.io/apiserver v0.23.1 // indirect
+	k8s.io/component-base v0.23.1 // indirect
+	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
+	oras.land/oras-go v1.1.0 // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/kustomize/api v0.10.1 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
 )

File diff ditekan karena terlalu besar
+ 175 - 135
go.sum


+ 22 - 27
internal/helm/agent.go

@@ -191,19 +191,17 @@ func (a *Agent) UpgradeReleaseByValues(
 	cmd := action.NewUpgrade(a.ActionConfig)
 	cmd.Namespace = rel.Namespace
 
-	if conf.Cluster != nil && a.K8sAgent != nil && conf.Registries != nil && len(conf.Registries) > 0 {
-		cmd.PostRenderer, err = NewDockerSecretsPostRenderer(
-			conf.Cluster,
-			conf.Repo,
-			a.K8sAgent,
-			rel.Namespace,
-			conf.Registries,
-			doAuth,
-		)
-
-		if err != nil {
-			return nil, err
-		}
+	cmd.PostRenderer, err = NewPorterPostrenderer(
+		conf.Cluster,
+		conf.Repo,
+		a.K8sAgent,
+		rel.Namespace,
+		conf.Registries,
+		doAuth,
+	)
+
+	if err != nil {
+		return nil, err
 	}
 
 	res, err := cmd.Run(conf.Name, ch, conf.Values)
@@ -264,20 +262,17 @@ func (a *Agent) InstallChart(
 
 	var err error
 
-	// only add the postrenderer if required fields exist
-	if conf.Cluster != nil && a.K8sAgent != nil && conf.Registries != nil && len(conf.Registries) > 0 {
-		cmd.PostRenderer, err = NewDockerSecretsPostRenderer(
-			conf.Cluster,
-			conf.Repo,
-			a.K8sAgent,
-			conf.Namespace,
-			conf.Registries,
-			doAuth,
-		)
-
-		if err != nil {
-			return nil, err
-		}
+	cmd.PostRenderer, err = NewPorterPostrenderer(
+		conf.Cluster,
+		conf.Repo,
+		a.K8sAgent,
+		conf.Namespace,
+		conf.Registries,
+		doAuth,
+	)
+
+	if err != nil {
+		return nil, err
 	}
 
 	if req := conf.Chart.Metadata.Dependencies; req != nil {

+ 304 - 7
internal/helm/postrenderer.go

@@ -18,6 +18,58 @@ import (
 	"github.com/docker/distribution/reference"
 )
 
+type PorterPostrenderer struct {
+	DockerSecretsPostRenderer       *DockerSecretsPostRenderer
+	EnvironmentVariablePostrenderer *EnvironmentVariablePostrenderer
+}
+
+func NewPorterPostrenderer(
+	cluster *models.Cluster,
+	repo repository.Repository,
+	agent *kubernetes.Agent,
+	namespace string,
+	regs []*models.Registry,
+	doAuth *oauth2.Config,
+) (postrender.PostRenderer, error) {
+	var dockerSecretsPostrenderer *DockerSecretsPostRenderer
+	var err error
+
+	if cluster != nil && agent != nil && regs != nil && len(regs) > 0 {
+		dockerSecretsPostrenderer, err = NewDockerSecretsPostRenderer(cluster, repo, agent, namespace, regs, doAuth)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	envVarPostrenderer, err := NewEnvironmentVariablePostrenderer()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &PorterPostrenderer{
+		DockerSecretsPostRenderer:       dockerSecretsPostrenderer,
+		EnvironmentVariablePostrenderer: envVarPostrenderer,
+	}, nil
+}
+
+func (p *PorterPostrenderer) Run(
+	renderedManifests *bytes.Buffer,
+) (modifiedManifests *bytes.Buffer, err error) {
+	if p.DockerSecretsPostRenderer != nil {
+		renderedManifests, err = p.DockerSecretsPostRenderer.Run(renderedManifests)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	renderedManifests, err = p.EnvironmentVariablePostrenderer.Run(renderedManifests)
+
+	return renderedManifests, err
+}
+
 // DockerSecretsPostRenderer is a Helm post-renderer that adds image pull secrets to
 // pod specs that would otherwise be unable to pull an image.
 //
@@ -49,7 +101,7 @@ func NewDockerSecretsPostRenderer(
 	namespace string,
 	regs []*models.Registry,
 	doAuth *oauth2.Config,
-) (postrender.PostRenderer, error) {
+) (*DockerSecretsPostRenderer, error) {
 	// Registries is a map of registry URLs to registry ids
 	registries := make(map[string]*models.Registry)
 
@@ -200,7 +252,8 @@ func (d *DockerSecretsPostRenderer) getRegistriesToLink(renderedManifests *bytes
 	// that a secret will be generated for, if it does not exist
 	linkedRegs := make(map[string]*models.Registry)
 
-	err := d.decodeRenderedManifests(renderedManifests)
+	var err error
+	d.resources, err = decodeRenderedManifests(renderedManifests)
 
 	if err != nil {
 		return linkedRegs, err
@@ -242,9 +295,11 @@ func (d *DockerSecretsPostRenderer) getRegistriesToLink(renderedManifests *bytes
 	return linkedRegs, nil
 }
 
-func (d *DockerSecretsPostRenderer) decodeRenderedManifests(
+func decodeRenderedManifests(
 	renderedManifests *bytes.Buffer,
-) error {
+) ([]resource, error) {
+	resArr := make([]resource, 0)
+
 	// use the yaml decoder to parse the multi-document yaml.
 	decoder := yaml.NewDecoder(renderedManifests)
 
@@ -256,13 +311,13 @@ func (d *DockerSecretsPostRenderer) decodeRenderedManifests(
 		}
 
 		if err != nil {
-			return err
+			return resArr, err
 		}
 
-		d.resources = append(d.resources, res)
+		resArr = append(resArr, res)
 	}
 
-	return nil
+	return resArr, nil
 }
 
 func (d *DockerSecretsPostRenderer) getPodSpecs(resources []resource) {
@@ -457,6 +512,248 @@ func (d *DockerSecretsPostRenderer) isRegistryNative(regName string) bool {
 	return isNative
 }
 
+// EnvironmentVariablePostrenderer removes duplicated environment variables, giving preference to synced
+// env vars
+type EnvironmentVariablePostrenderer struct {
+	podSpecs  []resource
+	resources []resource
+}
+
+func NewEnvironmentVariablePostrenderer() (*EnvironmentVariablePostrenderer, error) {
+	return &EnvironmentVariablePostrenderer{
+		podSpecs:  make([]resource, 0),
+		resources: make([]resource, 0),
+	}, nil
+}
+
+func (e *EnvironmentVariablePostrenderer) Run(
+	renderedManifests *bytes.Buffer,
+) (modifiedManifests *bytes.Buffer, err error) {
+	e.resources, err = decodeRenderedManifests(renderedManifests)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Check to see if the resources loaded into the postrenderer contain a configmap
+	// with a manifest that needs env var cleanup as well. If this is the case, create and
+	// run another postrenderer for this specific manifest.
+	for i, res := range e.resources {
+		kindVal, hasKind := res["kind"]
+
+		if !hasKind {
+			continue
+		}
+
+		kind, ok := kindVal.(string)
+
+		if !ok {
+			continue
+		}
+
+		if kind == "ConfigMap" {
+			labelVal := getNestedResource(res, "metadata", "labels")
+
+			if labelVal == nil {
+				continue
+			}
+
+			porterLabelVal, exists := labelVal["getporter.dev/manifest"]
+
+			if !exists {
+				continue
+			}
+
+			if labelValStr, ok := porterLabelVal.(string); ok && labelValStr == "true" {
+				data := getNestedResource(res, "data")
+				manifestData, exists := data["manifest"]
+
+				if !exists {
+					continue
+				}
+
+				manifestDataStr, ok := manifestData.(string)
+
+				if !ok {
+					continue
+				}
+
+				dCopy := &EnvironmentVariablePostrenderer{
+					podSpecs:  make([]resource, 0),
+					resources: make([]resource, 0),
+				}
+
+				newData, err := dCopy.Run(bytes.NewBufferString(manifestDataStr))
+
+				if err != nil {
+					continue
+				}
+
+				data["manifest"] = string(newData.Bytes())
+
+				e.resources[i] = res
+			}
+		}
+	}
+
+	e.getPodSpecs(e.resources)
+	e.updatePodSpecs()
+
+	modifiedManifests = bytes.NewBuffer([]byte{})
+	encoder := yaml.NewEncoder(modifiedManifests)
+	defer encoder.Close()
+
+	for _, resource := range e.resources {
+		err = encoder.Encode(resource)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return modifiedManifests, nil
+}
+
+func (e *EnvironmentVariablePostrenderer) getPodSpecs(resources []resource) {
+	for _, res := range resources {
+		kindVal, hasKind := res["kind"]
+		if !hasKind {
+			continue
+		}
+
+		kind, ok := kindVal.(string)
+		if !ok {
+			continue
+		}
+
+		// manifests of list type will have an items field, items should
+		// be recursively parsed
+		if itemsVal, isList := res["items"]; isList {
+			if items, ok := itemsVal.([]interface{}); ok {
+				// convert items to resource
+				resArr := make([]resource, 0)
+				for _, item := range items {
+					if arrVal, ok := item.(resource); ok {
+						resArr = append(resArr, arrVal)
+					}
+				}
+
+				e.getPodSpecs(resArr)
+			}
+
+			continue
+		}
+
+		// otherwise, get the pod spec based on the type of resource
+		podSpec := getPodSpecFromResource(kind, res)
+
+		if podSpec == nil {
+			continue
+		}
+
+		e.podSpecs = append(e.podSpecs, podSpec)
+	}
+
+	return
+}
+
+func (e *EnvironmentVariablePostrenderer) updatePodSpecs() error {
+	// for each pod spec, remove duplicate env variables
+	for _, podSpec := range e.podSpecs {
+		containersVal, hasContainers := podSpec["containers"]
+
+		if !hasContainers {
+			continue
+		}
+
+		containers, ok := containersVal.([]interface{})
+
+		if !ok {
+			continue
+		}
+
+		newContainers := make([]interface{}, 0)
+
+		for _, container := range containers {
+			envVars := make(map[string]interface{})
+
+			_container, ok := container.(resource)
+
+			if !ok {
+				continue
+			}
+
+			// read container env variables
+			envInter, ok := _container["env"]
+
+			if !ok {
+				newContainers = append(newContainers, _container)
+				continue
+			}
+
+			env, ok := envInter.([]interface{})
+
+			if !ok {
+				newContainers = append(newContainers, _container)
+				continue
+			}
+
+			for _, envVar := range env {
+				envVarMap, ok := envVar.(resource)
+
+				if !ok {
+					continue
+				}
+
+				envVarName, ok := envVarMap["name"]
+
+				if !ok {
+					continue
+				}
+
+				envVarNameStr, ok := envVarName.(string)
+
+				if !ok {
+					continue
+				}
+
+				// check if the env var already exists, if it does perform reconciliation
+				if currVal, exists := envVars[envVarNameStr]; exists {
+					currValMap, ok := currVal.(resource)
+
+					if !ok {
+						continue
+					}
+
+					// if the current value has a valueFrom field, this should override the existing env var
+					if _, currValFromFieldExists := currValMap["valueFrom"]; currValFromFieldExists {
+						continue
+					} else {
+						envVars[envVarNameStr] = envVarMap
+					}
+				} else {
+					envVars[envVarNameStr] = envVarMap
+				}
+			}
+
+			// flatten env var map to array
+			envVarArr := make([]interface{}, 0)
+
+			for _, envVar := range envVars {
+				envVarArr = append(envVarArr, envVar)
+			}
+
+			_container["env"] = envVarArr
+			newContainers = append(newContainers, _container)
+		}
+
+		podSpec["containers"] = newContainers
+	}
+
+	return nil
+}
+
+// HELPERS
 func getPodSpecFromResource(kind string, res resource) resource {
 	switch kind {
 	case "Pod":

+ 241 - 138
internal/kubernetes/agent.go

@@ -12,6 +12,7 @@ import (
 	"io/ioutil"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	goerrors "errors"
@@ -951,13 +952,29 @@ func (a *Agent) GetPodLogs(namespace string, name string, selectedContainer stri
 		return fmt.Errorf("Cannot open log stream for pod %s: %s", name, err.Error())
 	}
 
-	defer podLogs.Close()
-
 	r := bufio.NewReader(podLogs)
 	errorchan := make(chan error)
 
+	var wg sync.WaitGroup
+	var once sync.Once
+	wg.Add(2)
+
+	go func() {
+		wg.Wait()
+		close(errorchan)
+	}()
+
 	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				// TODO: add method to alert on panic
+				return
+			}
+		}()
+
 		// listens for websocket closing handshake
+		defer wg.Done()
+
 		for {
 			if _, _, err := rw.ReadMessage(); err != nil {
 				errorchan <- nil
@@ -967,36 +984,39 @@ func (a *Agent) GetPodLogs(namespace string, name string, selectedContainer stri
 	}()
 
 	go func() {
-		for {
-			select {
-			case <-errorchan:
-				defer close(errorchan)
+		defer func() {
+			if r := recover(); r != nil {
+				// TODO: add method to alert on panic
 				return
-			default:
 			}
+		}()
+
+		defer wg.Done()
 
+		for {
 			bytes, err := r.ReadBytes('\n')
-			if _, writeErr := rw.Write(bytes); writeErr != nil {
-				errorchan <- writeErr
+
+			if err != nil {
+				errorchan <- err
 				return
 			}
-			if err != nil {
-				if err != io.EOF {
-					errorchan <- err
-					return
-				}
-				errorchan <- nil
+
+			if _, writeErr := rw.Write(bytes); writeErr != nil {
+				errorchan <- writeErr
 				return
 			}
 		}
 	}()
 
-	for {
-		select {
-		case err = <-errorchan:
-			return err
-		}
+	for err = range errorchan {
+		// only call these methods a single time
+		once.Do(func() {
+			rw.Close()
+			podLogs.Close()
+		})
 	}
+
+	return err
 }
 
 // GetPodLogs streams real-time logs from a given pod.
@@ -1184,43 +1204,29 @@ func (a *Agent) StreamControllerStatus(kind string, selectors string, rw *websoc
 
 		stopper := make(chan struct{})
 		errorchan := make(chan error)
-		defer close(stopper)
 
-		informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
-			if strings.HasSuffix(err.Error(), ": Unauthorized") {
-				errorchan <- &AuthError{}
-			}
-		})
+		var wg sync.WaitGroup
+		var once sync.Once
+		var err error
 
-		informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-			UpdateFunc: func(oldObj, newObj interface{}) {
-				msg := Message{
-					EventType: "UPDATE",
-					Object:    newObj,
-					Kind:      strings.ToLower(kind),
-				}
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-			AddFunc: func(obj interface{}) {
-				msg := Message{
-					EventType: "ADD",
-					Object:    obj,
-					Kind:      strings.ToLower(kind),
-				}
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-			DeleteFunc: func(obj interface{}) {
-				msg := Message{
-					EventType: "DELETE",
-					Object:    obj,
-					Kind:      strings.ToLower(kind),
-				}
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-		})
+		wg.Add(2)
+
+		go func() {
+			wg.Wait()
+			close(errorchan)
+		}()
 
 		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					// TODO: add method to alert on panic
+					return
+				}
+			}()
+
 			// listens for websocket closing handshake
+			defer wg.Done()
+
 			for {
 				if _, _, err := rw.ReadMessage(); err != nil {
 					errorchan <- nil
@@ -1229,14 +1235,75 @@ func (a *Agent) StreamControllerStatus(kind string, selectors string, rw *websoc
 			}
 		}()
 
-		go informer.Run(stopper)
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					// TODO: add method to alert on panic
+					return
+				}
+			}()
 
-		for {
-			select {
-			case err := <-errorchan:
-				return err
-			}
+			// listens for websocket closing handshake
+			defer wg.Done()
+
+			informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
+				if strings.HasSuffix(err.Error(), ": Unauthorized") {
+					errorchan <- &AuthError{}
+				}
+			})
+
+			informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+				UpdateFunc: func(oldObj, newObj interface{}) {
+					msg := Message{
+						EventType: "UPDATE",
+						Object:    newObj,
+						Kind:      strings.ToLower(kind),
+					}
+					err := rw.WriteJSON(msg)
+
+					if err != nil {
+						errorchan <- err
+					}
+				},
+				AddFunc: func(obj interface{}) {
+					msg := Message{
+						EventType: "ADD",
+						Object:    obj,
+						Kind:      strings.ToLower(kind),
+					}
+
+					err := rw.WriteJSON(msg)
+
+					if err != nil {
+						errorchan <- err
+					}
+				},
+				DeleteFunc: func(obj interface{}) {
+					msg := Message{
+						EventType: "DELETE",
+						Object:    obj,
+						Kind:      strings.ToLower(kind),
+					}
+
+					err := rw.WriteJSON(msg)
+
+					if err != nil {
+						errorchan <- err
+					}
+				},
+			})
+
+			informer.Run(stopper)
+		}()
+
+		for err = range errorchan {
+			once.Do(func() {
+				close(stopper)
+				rw.Close()
+			})
 		}
+
+		return err
 	}
 
 	return a.RunWebsocketTask(run)
@@ -1328,113 +1395,146 @@ func (a *Agent) StreamHelmReleases(namespace string, chartList []string, selecto
 
 		stopper := make(chan struct{})
 		errorchan := make(chan error)
-		defer close(stopper)
 
-		informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
-			if strings.HasSuffix(err.Error(), ": Unauthorized") {
-				errorchan <- &AuthError{}
-			}
-		})
+		var wg sync.WaitGroup
+		var once sync.Once
+		var err error
+
+		wg.Add(2)
 
-		informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-			UpdateFunc: func(oldObj, newObj interface{}) {
-				secretObj, ok := newObj.(*v1.Secret)
+		go func() {
+			wg.Wait()
+			close(errorchan)
+		}()
 
-				if !ok {
-					errorchan <- fmt.Errorf("could not cast to secret")
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					// TODO: add method to alert on panic
 					return
 				}
+			}()
 
-				helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
+			// listens for websocket closing handshake
+			defer wg.Done()
 
-				if isNotHelmRelease && err == nil {
+			for {
+				if _, _, err := rw.ReadMessage(); err != nil {
+					errorchan <- nil
 					return
 				}
+			}
+		}()
 
-				if err != nil {
-					errorchan <- err
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					// TODO: add method to alert on panic
 					return
 				}
+			}()
 
-				msg := Message{
-					EventType: "UPDATE",
-					Object:    helm_object,
+			// listens for websocket closing handshake
+			defer wg.Done()
+
+			informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
+				if strings.HasSuffix(err.Error(), ": Unauthorized") {
+					errorchan <- &AuthError{}
 				}
+			})
 
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-			AddFunc: func(obj interface{}) {
-				secretObj, ok := obj.(*v1.Secret)
+			informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+				UpdateFunc: func(oldObj, newObj interface{}) {
+					secretObj, ok := newObj.(*v1.Secret)
 
-				if !ok {
-					errorchan <- fmt.Errorf("could not cast to secret")
-					return
-				}
+					if !ok {
+						errorchan <- fmt.Errorf("could not cast to secret")
+						return
+					}
 
-				helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
+					helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
 
-				if isNotHelmRelease && err == nil {
-					return
-				}
+					if isNotHelmRelease && err == nil {
+						return
+					}
 
-				if err != nil {
-					errorchan <- err
-					return
-				}
+					if err != nil {
+						errorchan <- err
+						return
+					}
 
-				msg := Message{
-					EventType: "ADD",
-					Object:    helm_object,
-				}
+					msg := Message{
+						EventType: "UPDATE",
+						Object:    helm_object,
+					}
 
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-			DeleteFunc: func(obj interface{}) {
-				secretObj, ok := obj.(*v1.Secret)
+					rw.WriteJSON(msg)
+				},
+				AddFunc: func(obj interface{}) {
+					secretObj, ok := obj.(*v1.Secret)
 
-				if !ok {
-					errorchan <- fmt.Errorf("could not cast to secret")
-					return
-				}
+					if !ok {
+						errorchan <- fmt.Errorf("could not cast to secret")
+						return
+					}
 
-				helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
+					helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
 
-				if isNotHelmRelease && err == nil {
-					return
-				}
+					if isNotHelmRelease && err == nil {
+						return
+					}
 
-				if err != nil {
-					errorchan <- err
-					return
-				}
+					if err != nil {
+						errorchan <- err
+						return
+					}
 
-				msg := Message{
-					EventType: "DELETE",
-					Object:    helm_object,
-				}
+					msg := Message{
+						EventType: "ADD",
+						Object:    helm_object,
+					}
 
-				rw.WriteJSONWithChannel(msg, errorchan)
-			},
-		})
+					rw.WriteJSON(msg)
+				},
+				DeleteFunc: func(obj interface{}) {
+					secretObj, ok := obj.(*v1.Secret)
 
-		go func() {
-			// listens for websocket closing handshake
-			for {
-				if _, _, err := rw.ReadMessage(); err != nil {
-					errorchan <- nil
-					return
-				}
-			}
-		}()
+					if !ok {
+						errorchan <- fmt.Errorf("could not cast to secret")
+						return
+					}
 
-		go informer.Run(stopper)
+					helm_object, isNotHelmRelease, err := ParseSecretToHelmRelease(*secretObj, chartList)
 
-		for {
-			select {
-			case err := <-errorchan:
-				return err
-			}
+					if isNotHelmRelease && err == nil {
+						return
+					}
+
+					if err != nil {
+						errorchan <- err
+						return
+					}
+
+					msg := Message{
+						EventType: "DELETE",
+						Object:    helm_object,
+					}
+
+					rw.WriteJSON(msg)
+				},
+			})
+
+			informer.Run(stopper)
+		}()
+
+		for err = range errorchan {
+			once.Do(func() {
+				close(stopper)
+				rw.Close()
+			})
 		}
+
+		return err
 	}
 
 	return a.RunWebsocketTask(run)
@@ -1549,11 +1649,12 @@ func (a *Agent) waitForPod(pod *v1.Pod) (error, bool) {
 		return err, false
 	}
 	defer w.Stop()
-	for {
+
+	expireTime := time.Now().Add(time.Second * 30)
+
+	for time.Now().Unix() <= expireTime.Unix() {
 		select {
-		case <-time.After(time.Second * 30):
-			return goerrors.New("timed out waiting for pod"), false
-		case <-time.Tick(time.Second):
+		case <-time.NewTicker(time.Second).C:
 			// poll every second in case we already missed the ready event while
 			// creating the listener.
 			pod, err = a.Clientset.CoreV1().
@@ -1579,6 +1680,8 @@ func (a *Agent) waitForPod(pod *v1.Pod) (error, bool) {
 			}
 		}
 	}
+
+	return goerrors.New("timed out waiting for pod"), false
 }
 
 func isPodReady(pod *v1.Pod) bool {

+ 2 - 0
internal/models/database.go

@@ -19,6 +19,7 @@ type Database struct {
 	InstanceID       string `json:"rds_instance_id"`
 	InstanceEndpoint string `json:"rds_connection_endpoint"`
 	InstanceName     string `json:"rds_instance_name"`
+	Status           string
 }
 
 func (d *Database) ToDatabaseType() *types.Database {
@@ -30,5 +31,6 @@ func (d *Database) ToDatabaseType() *types.Database {
 		InstanceID:       d.InstanceID,
 		InstanceEndpoint: d.InstanceEndpoint,
 		InstanceName:     d.InstanceName,
+		Status:           d.Status,
 	}
 }

+ 584 - 0
internal/redis_stream/global_stream.go

@@ -0,0 +1,584 @@
+package redis_stream
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/porter-dev/porter/internal/analytics"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/kubernetes/envgroup"
+	"gorm.io/gorm"
+
+	redis "github.com/go-redis/redis/v8"
+
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+)
+
// GlobalStreamName is the name of the Redis stream on which the provisioner
// publishes global status updates ("created" / "error" / "destroyed").
const GlobalStreamName = "global"

// GlobalStreamGroupName is the name of the Redis consumer group that this server
// is a part of when reading from the global stream.
const GlobalStreamGroupName = "portersvr"
+
+// InitGlobalStream initializes the global stream if it does not exist, and the
+// global consumer group if it does not exist
+func InitGlobalStream(client *redis.Client) error {
+	// determine if the stream exists
+	x, err := client.Exists(
+		context.Background(),
+		GlobalStreamName,
+	).Result()
+
+	// if it does not exist, create group and stream
+	if x == 0 {
+		_, err := client.XGroupCreateMkStream(
+			context.Background(),
+			GlobalStreamName,
+			GlobalStreamGroupName,
+			">",
+		).Result()
+
+		return err
+	}
+
+	// otherwise, check if the group exists
+	xInfoGroups, err := client.XInfoGroups(
+		context.Background(),
+		GlobalStreamName,
+	).Result()
+
+	// if error is not NOGROUP error, return
+	if err != nil && !strings.Contains(err.Error(), "NOGROUP") {
+		return err
+	}
+
+	for _, group := range xInfoGroups {
+		// if the group exists, return with no error
+		if group.Name == GlobalStreamGroupName {
+			return nil
+		}
+	}
+
+	// if the group does not exist, create it
+	_, err = client.XGroupCreate(
+		context.Background(),
+		GlobalStreamName,
+		GlobalStreamGroupName,
+		"$",
+	).Result()
+
+	return err
+}
+
// ResourceCRUDHandler is a handler for updates to an infra resource.
type ResourceCRUDHandler interface {
	// OnCreate is invoked with the ID of a newly created resource.
	OnCreate(id uint) error
}
+
+// GlobalStreamListener performs an XREADGROUP operation on a given stream and
+// updates models in the database as necessary
+func GlobalStreamListener(
+	client *redis.Client,
+	config *config.Config,
+	repo repository.Repository,
+	analyticsClient analytics.AnalyticsSegmentClient,
+	errorChan chan error,
+) {
+	for {
+		xstreams, err := client.XReadGroup(
+			context.Background(),
+			&redis.XReadGroupArgs{
+				Group:    GlobalStreamGroupName,
+				Consumer: "portersvr-0", // just static consumer for now
+				Streams:  []string{GlobalStreamName, ">"},
+				Block:    0,
+			},
+		).Result()
+
+		if err != nil {
+			errorChan <- err
+			return
+		}
+
+		// parse messages from the global stream
+		for _, msg := range xstreams[0].Messages {
+			// parse the id to identify the infra
+			kind, projID, infraID, err := models.ParseUniqueName(fmt.Sprintf("%v", msg.Values["id"]))
+
+			if fmt.Sprintf("%v", msg.Values["status"]) == "created" {
+				infra, err := repo.Infra().ReadInfra(projID, infraID)
+
+				if err != nil {
+					continue
+				}
+
+				infra.Status = types.StatusCreated
+
+				infra, err = repo.Infra().UpdateInfra(infra)
+
+				if err != nil {
+					continue
+				}
+
+				// create ECR/EKS
+				if kind == string(types.InfraECR) {
+					reg := &models.Registry{
+						ProjectID:        projID,
+						AWSIntegrationID: infra.AWSIntegrationID,
+						InfraID:          infra.ID,
+					}
+
+					// parse raw data into ECR type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), reg)
+					}
+
+					awsInt, err := repo.AWSIntegration().ReadAWSIntegration(reg.ProjectID, reg.AWSIntegrationID)
+
+					if err != nil {
+						continue
+					}
+
+					sess, err := awsInt.GetSession()
+
+					if err != nil {
+						continue
+					}
+
+					ecrSvc := ecr.New(sess)
+
+					output, err := ecrSvc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+
+					if err != nil {
+						continue
+					}
+
+					reg.URL = *output.AuthorizationData[0].ProxyEndpoint
+
+					reg, err = repo.Registry().CreateRegistry(reg)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.RegistryProvisioningSuccessTrack(
+						&analytics.RegistryProvisioningSuccessTrackOpts{
+							RegistryScopedTrackOpts: analytics.GetRegistryScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, reg.ID),
+							RegistryType:            infra.Kind,
+							InfraID:                 infra.ID,
+						},
+					))
+				} else if kind == string(types.InfraRDS) {
+					// parse the last applied field to get the cluster id
+					rdsRequest := &types.RDSInfraLastApplied{}
+					err := json.Unmarshal(infra.LastApplied, rdsRequest)
+
+					if err != nil {
+						continue
+					}
+
+					database := &models.Database{
+						Status: "running",
+					}
+
+					// parse raw data into ECR type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						err = json.Unmarshal([]byte(dataString), database)
+
+						if err != nil {
+						}
+					}
+
+					database.Model = gorm.Model{}
+					database.ProjectID = projID
+					database.ClusterID = rdsRequest.ClusterID
+					database.InfraID = infra.ID
+
+					database, err = repo.Database().CreateDatabase(database)
+
+					if err != nil {
+						continue
+					}
+
+					infra.DatabaseID = database.ID
+					infra, err = repo.Infra().UpdateInfra(infra)
+
+					if err != nil {
+						continue
+					}
+
+					err = createRDSEnvGroup(repo, config, infra, database, rdsRequest)
+
+					if err != nil {
+						continue
+					}
+				} else if kind == string(types.InfraEKS) {
+					cluster := &models.Cluster{
+						AuthMechanism:    models.AWS,
+						ProjectID:        projID,
+						AWSIntegrationID: infra.AWSIntegrationID,
+						InfraID:          infra.ID,
+					}
+
+					// parse raw data into ECR type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), cluster)
+					}
+
+					re := regexp.MustCompile(`^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$`)
+
+					// if it matches the base64 regex, decode it
+					caData := string(cluster.CertificateAuthorityData)
+					if re.MatchString(caData) {
+						decoded, err := base64.StdEncoding.DecodeString(caData)
+
+						if err != nil {
+							continue
+						}
+
+						cluster.CertificateAuthorityData = []byte(decoded)
+					}
+
+					cluster, err := repo.Cluster().CreateCluster(cluster)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.ClusterProvisioningSuccessTrack(
+						&analytics.ClusterProvisioningSuccessTrackOpts{
+							ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, cluster.ID),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				} else if kind == string(types.InfraGCR) {
+					reg := &models.Registry{
+						ProjectID:        projID,
+						GCPIntegrationID: infra.GCPIntegrationID,
+						InfraID:          infra.ID,
+						Name:             "gcr-registry",
+					}
+
+					// parse raw data into ECR type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), reg)
+					}
+
+					reg, err = repo.Registry().CreateRegistry(reg)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.RegistryProvisioningSuccessTrack(
+						&analytics.RegistryProvisioningSuccessTrackOpts{
+							RegistryScopedTrackOpts: analytics.GetRegistryScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, reg.ID),
+							RegistryType:            infra.Kind,
+							InfraID:                 infra.ID,
+						},
+					))
+				} else if kind == string(types.InfraGKE) {
+					cluster := &models.Cluster{
+						AuthMechanism:    models.GCP,
+						ProjectID:        projID,
+						GCPIntegrationID: infra.GCPIntegrationID,
+						InfraID:          infra.ID,
+					}
+
+					// parse raw data into GKE type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), cluster)
+					}
+
+					re := regexp.MustCompile(`^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$`)
+
+					// if it matches the base64 regex, decode it
+					caData := string(cluster.CertificateAuthorityData)
+					if re.MatchString(caData) {
+						decoded, err := base64.StdEncoding.DecodeString(caData)
+
+						if err != nil {
+							continue
+						}
+
+						cluster.CertificateAuthorityData = []byte(decoded)
+					}
+
+					cluster, err := repo.Cluster().CreateCluster(cluster)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.ClusterProvisioningSuccessTrack(
+						&analytics.ClusterProvisioningSuccessTrackOpts{
+							ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, cluster.ID),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				} else if kind == string(types.InfraDOCR) {
+					reg := &models.Registry{
+						ProjectID:       projID,
+						DOIntegrationID: infra.DOIntegrationID,
+						InfraID:         infra.ID,
+					}
+
+					// parse raw data into DOCR type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), reg)
+					}
+
+					reg, err = repo.Registry().CreateRegistry(reg)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.RegistryProvisioningSuccessTrack(
+						&analytics.RegistryProvisioningSuccessTrackOpts{
+							RegistryScopedTrackOpts: analytics.GetRegistryScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, reg.ID),
+							RegistryType:            infra.Kind,
+							InfraID:                 infra.ID,
+						},
+					))
+				} else if kind == string(types.InfraDOKS) {
+					cluster := &models.Cluster{
+						AuthMechanism:   models.DO,
+						ProjectID:       projID,
+						DOIntegrationID: infra.DOIntegrationID,
+						InfraID:         infra.ID,
+					}
+
+					// parse raw data into GKE type
+					dataString, ok := msg.Values["data"].(string)
+
+					if ok {
+						json.Unmarshal([]byte(dataString), cluster)
+					}
+
+					re := regexp.MustCompile(`^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$`)
+
+					// if it matches the base64 regex, decode it
+					caData := string(cluster.CertificateAuthorityData)
+					if re.MatchString(caData) {
+						decoded, err := base64.StdEncoding.DecodeString(caData)
+
+						if err != nil {
+							continue
+						}
+
+						cluster.CertificateAuthorityData = []byte(decoded)
+					}
+
+					cluster, err := repo.Cluster().CreateCluster(cluster)
+
+					if err != nil {
+						continue
+					}
+
+					analyticsClient.Track(analytics.ClusterProvisioningSuccessTrack(
+						&analytics.ClusterProvisioningSuccessTrackOpts{
+							ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, cluster.ID),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				}
+			} else if fmt.Sprintf("%v", msg.Values["status"]) == "error" {
+				infra, err := repo.Infra().ReadInfra(projID, infraID)
+
+				if err != nil {
+					continue
+				}
+
+				infra.Status = types.StatusError
+
+				infra, err = repo.Infra().UpdateInfra(infra)
+
+				if err != nil {
+					continue
+				}
+
+				if infra.Kind == types.InfraDOKS || infra.Kind == types.InfraGKE || infra.Kind == types.InfraEKS {
+					analyticsClient.Track(analytics.ClusterProvisioningErrorTrack(
+						&analytics.ClusterProvisioningErrorTrackOpts{
+							ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				} else if infra.Kind == types.InfraDOCR || infra.Kind == types.InfraGCR || infra.Kind == types.InfraECR {
+					analyticsClient.Track(analytics.RegistryProvisioningErrorTrack(
+						&analytics.RegistryProvisioningErrorTrackOpts{
+							ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID),
+							RegistryType:           infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				}
+			} else if fmt.Sprintf("%v", msg.Values["status"]) == "destroyed" {
+				infra, err := repo.Infra().ReadInfra(projID, infraID)
+
+				if err != nil {
+					continue
+				}
+
+				infra.Status = types.StatusDestroyed
+
+				infra, err = repo.Infra().UpdateInfra(infra)
+
+				if err != nil {
+					continue
+				}
+
+				if infra.Kind == types.InfraDOKS || infra.Kind == types.InfraGKE || infra.Kind == types.InfraEKS {
+					analyticsClient.Track(analytics.ClusterDestroyingSuccessTrack(
+						&analytics.ClusterDestroyingSuccessTrackOpts{
+							ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, 0),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				} else if infra.Kind == types.InfraRDS && infra.DatabaseID != 0 {
+					rdsRequest := &types.RDSInfraLastApplied{}
+					err := json.Unmarshal(infra.LastApplied, rdsRequest)
+
+					if err != nil {
+						continue
+					}
+
+					database, err := repo.Database().ReadDatabase(infra.ProjectID, rdsRequest.ClusterID, infra.DatabaseID)
+
+					if err != nil {
+						continue
+					}
+
+					err = deleteRDSEnvGroup(repo, config, infra, database, rdsRequest)
+
+					if err != nil {
+						continue
+					}
+
+					// delete the database
+					err = repo.Database().DeleteDatabase(infra.ProjectID, rdsRequest.ClusterID, infra.DatabaseID)
+
+					if err != nil {
+						continue
+					}
+				}
+			}
+
+			// acknowledge the message as read
+			_, err = client.XAck(
+				context.Background(),
+				GlobalStreamName,
+				GlobalStreamGroupName,
+				msg.ID,
+			).Result()
+
+			// if error, continue for now
+			if err != nil {
+				continue
+			}
+		}
+	}
+}
+
+func createRDSEnvGroup(repo repository.Repository, config *config.Config, infra *models.Infra, database *models.Database, rdsConfig *types.RDSInfraLastApplied) error {
+
+	cluster, err := repo.Cluster().ReadCluster(infra.ProjectID, rdsConfig.ClusterID)
+
+	if err != nil {
+		return err
+	}
+
+	ooc := &kubernetes.OutOfClusterConfig{
+		Repo:              config.Repo,
+		DigitalOceanOAuth: config.DOConf,
+		Cluster:           cluster,
+	}
+
+	agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
+
+	if err != nil {
+		return fmt.Errorf("failed to get agent: %s", err.Error())
+	}
+
+	// split the instance endpoint on the port
+	port := "5432"
+	host := database.InstanceEndpoint
+
+	if strArr := strings.Split(database.InstanceEndpoint, ":"); len(strArr) == 2 {
+		host = strArr[0]
+		port = strArr[1]
+	}
+
+	_, err = envgroup.CreateEnvGroup(agent, types.ConfigMapInput{
+		Name:      fmt.Sprintf("rds-credentials-%s", rdsConfig.DBName),
+		Namespace: rdsConfig.Namespace,
+		Variables: map[string]string{},
+		SecretVariables: map[string]string{
+			"PGPORT":     port,
+			"PGHOST":     host,
+			"PGPASSWORD": rdsConfig.Password,
+			"PGUSER":     rdsConfig.Username,
+		},
+	})
+
+	if err != nil {
+		return fmt.Errorf("failed to create RDS env group: %s", err.Error())
+	}
+
+	return nil
+}
+
+func deleteRDSEnvGroup(repo repository.Repository, config *config.Config, infra *models.Infra, database *models.Database, rdsConfig *types.RDSInfraLastApplied) error {
+	cluster, err := repo.Cluster().ReadCluster(infra.ProjectID, rdsConfig.ClusterID)
+
+	if err != nil {
+		return err
+	}
+
+	ooc := &kubernetes.OutOfClusterConfig{
+		Repo:              config.Repo,
+		DigitalOceanOAuth: config.DOConf,
+		Cluster:           cluster,
+	}
+
+	agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
+
+	if err != nil {
+		return fmt.Errorf("failed to get agent: %s", err.Error())
+	}
+
+	err = envgroup.DeleteEnvGroup(agent, fmt.Sprintf("rds-credentials-%s", rdsConfig.DBName), rdsConfig.Namespace)
+
+	if err != nil {
+		return fmt.Errorf("failed to create RDS env group: %s", err.Error())
+	}
+
+	return nil
+}

+ 69 - 0
internal/redis_stream/resource_stream.go

@@ -0,0 +1,69 @@
+package redis_stream
+
+import (
+	"context"
+
+	redis "github.com/go-redis/redis/v8"
+	"github.com/porter-dev/porter/api/server/shared/websocket"
+)
+
+// ResourceStream performs an XREAD operation on the given stream and outputs it to the given websocket conn.
+func ResourceStream(client *redis.Client, streamName string, rw *websocket.WebsocketSafeReadWriter) error {
+	errorchan := make(chan error)
+
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				// TODO: add method to alert on panic
+				return
+			}
+		}()
+
+		// listens for websocket closing handshake
+		for {
+			if _, _, err := rw.ReadMessage(); err != nil {
+				errorchan <- nil
+				return
+			}
+		}
+	}()
+
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				// TODO: add method to alert on panic
+				return
+			}
+		}()
+
+		lastID := "0-0"
+
+		for {
+			xstream, err := client.XRead(
+				context.Background(),
+				&redis.XReadArgs{
+					Streams: []string{streamName, lastID},
+					Block:   0,
+				},
+			).Result()
+
+			if err != nil {
+				return
+			}
+
+			messages := xstream[0].Messages
+			lastID = messages[len(messages)-1].ID
+
+			rw.WriteJSON(messages)
+		}
+	}()
+
+	for {
+		select {
+		case err := <-errorchan:
+			close(errorchan)
+			client.Close()
+			return err
+		}
+	}
+}

+ 1 - 0
internal/repository/database.go

@@ -9,5 +9,6 @@ type DatabaseRepository interface {
 	ReadDatabase(projectID, clusterID, databaseID uint) (*models.Database, error)
 	ReadDatabaseByInfraID(projectID, infraID uint) (*models.Database, error)
 	ListDatabases(projectID, clusterID uint) ([]*models.Database, error)
+	UpdateDatabase(database *models.Database) (*models.Database, error)
 	DeleteDatabase(projectID, clusterID, databaseID uint) error
 }

+ 8 - 0
internal/repository/gorm/database.go

@@ -53,6 +53,14 @@ func (repo *DatabaseRepository) ReadDatabaseByInfraID(projectID, infraID uint) (
 	return database, nil
 }
 
+func (repo *DatabaseRepository) UpdateDatabase(database *models.Database) (*models.Database, error) {
+	if err := repo.db.Save(database).Error; err != nil {
+		return nil, err
+	}
+
+	return database, nil
+}
+
 func (repo *DatabaseRepository) DeleteDatabase(projectID, clusterID, databaseID uint) error {
 	if err := repo.db.Where("project_id = ? AND cluster_id = ? AND id = ?", projectID, clusterID, databaseID).Delete(&models.Database{}).Error; err != nil {
 		return err

+ 4 - 0
internal/repository/test/database.go

@@ -24,6 +24,10 @@ func (repo *DatabaseRepository) ReadDatabaseByInfraID(projectID, infraID uint) (
 	panic("unimplemented")
 }
 
// UpdateDatabase is a stub on the test repository; it panics if called.
func (repo *DatabaseRepository) UpdateDatabase(database *models.Database) (*models.Database, error) {
	panic("unimplemented")
}
+
 func (repo *DatabaseRepository) DeleteDatabase(projectID, clusterID, databaseID uint) error {
 	panic("unimplemented")
 }

+ 102 - 0
provisioner/integrations/state/s3/s3.go

@@ -0,0 +1,102 @@
+package s3
+
+// type Client struct {
+// 	client *s3.S3
+// 	bucket string
+// }
+
+// var LOCAL_RUN string
+// var ENCRYPT_KEY string
+
+// func init() {
+// 	LOCAL_RUN = os.Getenv("LOCAL_RUN")
+
+// 	ENCRYPT_KEY = os.Getenv("ENCRYPT_KEY")
+
+// 	if ENCRYPT_KEY == "" {
+// 		if LOCAL_RUN == "true" {
+// 			ENCRYPT_KEY = "the-key-has-to-be-32-bytes-long!"
+// 		} else {
+// 			panic("no encryption key set for storage")
+// 		}
+// 	}
+// }
+
+// func NewS3Client(bucket string) *Client {
+// 	var sess *session.Session
+// 	var err error
+
+// 	if LOCAL_RUN == "true" {
+// 		sess, err = session.NewSession(&aws.Config{
+// 			Region:   aws.String("us-east-1"),
+// 			Endpoint: aws.String("localhost.localstack.cloud:4566"),
+// 		})
+// 	} else {
+// 		sess, err = session.NewSession()
+// 		if err != nil {
+// 			log.Fatal("cannot create aws session", err.Error())
+// 		}
+// 	}
+
+// 	return &Client{
+// 		client: s3.New(sess),
+// 		bucket: bucket,
+// 	}
+// }
+
+// func (s *Client) GetObject(org, filename string) (io.ReadCloser, error) {
+// 	log.Println(org, filename)
+// 	output, err := s.client.GetObject(&s3.GetObjectInput{
+// 		Bucket: &s.bucket,
+// 		Key:    aws.String(fmt.Sprintf("%s/%s", org, filename)),
+// 	})
+
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	var encryptedData bytes.Buffer
+// 	_, err = encryptedData.ReadFrom(output.Body)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	data, err := encryption.Decrypt(encryptedData.Bytes(), []byte(ENCRYPT_KEY))
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	return io.NopCloser(bytes.NewReader(data)), nil
+// }
+
+// func (s *Client) PutObject(org, filename string, body []byte) error {
+// 	encryptedBody, err := encryption.Encrypt(body, []byte(ENCRYPT_KEY))
+// 	if err != nil {
+// 		return err
+// 	}
+
+// 	_, err = s.client.PutObject(&s3.PutObjectInput{
+// 		Body:   aws.ReadSeekCloser(bytes.NewReader(encryptedBody)),
+// 		Bucket: &s.bucket,
+// 		Key:    aws.String(fmt.Sprintf("%s/%s", org, filename)),
+// 	})
+
+// 	if err != nil {
+// 		return err
+// 	}
+
+// 	return nil
+// }
+
+// func (s *Client) DeleteObject(org, filename string) error {
+// 	_, err := s.client.DeleteObject(&s3.DeleteObjectInput{
+// 		Bucket: &s.bucket,
+// 		Key:    aws.String(fmt.Sprintf("%s/%s", org, filename)),
+// 	})
+
+// 	if err != nil {
+// 		return err
+// 	}
+
+// 	return nil
+// }

+ 118 - 0
provisioner/server/handlers/desired.go

@@ -0,0 +1,118 @@
+package handlers
+
+// import (
+// 	"bytes"
+// 	"encoding/json"
+// 	"log"
+// 	"net/http"
+
+// 	"github.com/aws/aws-sdk-go/aws/awserr"
+// 	"github.com/aws/aws-sdk-go/service/s3"
+// 	"github.com/gin-gonic/gin"
+// 	"github.com/porter-dev/tf-http-backend/models"
+// )
+
+// // SetDesiredState is the POST handler that creates or
+// // updates the desired state for a particular provisioning job
+// func SetDesiredState(c *gin.Context) {
+// 	var desiredState models.DesiredTFState
+
+// 	err := c.BindJSON(&desiredState)
+// 	if err != nil {
+// 		log.Println("cannot read request body. error:", err.Error())
+
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+// 		return
+// 	}
+
+// 	orgID := c.Param("org")
+
+// 	data, err := json.Marshal(desiredState)
+// 	if err != nil {
+// 		log.Println("cannot marshal json. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	err = s3Client.PutObject(orgID, "desired.json", data)
+// 	if err != nil {
+// 		log.Printf("cannot create desired state file. error: %s\n", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": "cannot create state file",
+// 		})
+
+// 		return
+// 	}
+
+// 	c.JSON(http.StatusCreated, gin.H{})
+// 	return
+// }
+
+// // GetDesiredState is the GET handler for returning
+// // the desired state. To be used mostly by the API server
+// func GetDesiredState(c *gin.Context) {
+// 	var desiredState models.DesiredTFState
+
+// 	orgID := c.Param("org")
+
+// 	reader, err := s3Client.GetObject(orgID, "desired.json")
+// 	if err != nil {
+// 		if aerr, ok := err.(awserr.Error); ok {
+// 			switch aerr.Code() {
+// 			case s3.ErrCodeNoSuchKey:
+// 				log.Println(aerr.Error())
+// 				c.JSON(http.StatusNotFound, gin.H{
+// 					"error": aerr.Error(),
+// 				})
+
+// 				return
+// 			default:
+// 				log.Println(aerr.Error())
+// 				c.JSON(http.StatusInternalServerError, gin.H{
+// 					"error": aerr.Error(),
+// 				})
+
+// 				return
+// 			}
+// 		}
+
+// 		log.Println("cannot cast to awserr. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	var data bytes.Buffer
+// 	_, err = data.ReadFrom(reader)
+// 	if err != nil {
+// 		log.Println("cannot read from reader. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	err = json.Unmarshal(data.Bytes(), &desiredState)
+// 	if err != nil {
+// 		log.Println("cannot unmarshal desired state. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	c.JSON(http.StatusOK, gin.H{
+// 		"data": desiredState,
+// 	})
+
+// 	return
+// }

+ 28 - 0
provisioner/server/handlers/init.go

@@ -0,0 +1,28 @@
+package handlers
+
+// var s3Client *s3.Client
+// var redisClient *redis.Client
+// var eventProcessor *processor.EventProcessor
+
+// func init() {
+// 	BUCKET := os.Getenv("BUCKET")
+
+// 	// construct redis client, fallback to localhost
+// 	host := os.Getenv("REDIS_HOST")
+
+// 	if host == "" {
+// 		host = "localhost"
+// 	}
+
+// 	redisClient = redis.NewClient(
+// 		host,
+// 		"6379",
+// 		os.Getenv("REDIS_USER"),
+// 		os.Getenv("REDIS_PASS"),
+// 		0,
+// 	)
+
+// 	s3Client = s3.NewS3Client(BUCKET)
+
+// 	eventProcessor = processor.NewEventProcessor()
+// }

+ 144 - 0
provisioner/server/handlers/log.go

@@ -0,0 +1,144 @@
+package handlers
+
+// import (
+// 	"encoding/json"
+// 	"io/ioutil"
+// 	"log"
+// 	"net/http"
+// 	"strings"
+
+// 	"github.com/gin-gonic/gin"
+// 	"github.com/porter-dev/tf-http-backend/models"
+// 	"github.com/porter-dev/tf-http-backend/pkg/processor"
+// )
+
+// // StreamLogMsg is responsible for handling the POST of the
+// // log message from provisioner cli and pushing the content
+// // to a redis stream for showing on the UI. It is also
+// // responsible for testing and performing some additional
+// // tasks like specially handling error messages and resource
+// // provisioning completion events
+// func StreamLogMsg(c *gin.Context) {
+// 	var logMsg models.TFLogLine
+// 	orgID := c.Param("org")
+
+// 	err := c.BindJSON(&logMsg)
+// 	if err != nil {
+// 		log.Println("cannot get json from log msg body. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	filterAndProcessError(orgID, &logMsg)
+
+// 	data, err := json.Marshal(logMsg)
+// 	if err != nil {
+// 		log.Println("cannot marshal to json for pushing to redis stream, error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	// check if type is error/created/destroyed,
+// 	// also push to global stream in that case
+// 	if logMsg.Type == models.ApplyErrored {
+// 		_, err = redisClient.AddToStream("global", map[string]interface{}{
+// 			"id":     orgID,
+// 			"status": "error",
+// 		})
+
+// 		if err != nil {
+// 			log.Println("cannot push to global stream, error:", err.Error())
+// 		}
+// 	} else if logMsg.Type == models.ChangeSummary {
+// 		if strings.Contains(logMsg.Message, "Destroy complete") {
+// 			_, err = redisClient.AddToStream("global", map[string]interface{}{
+// 				"id":     orgID,
+// 				"status": "destroyed",
+// 			})
+// 		}
+
+// 		if err != nil {
+// 			log.Println("cannot push to global stream, error:", err.Error())
+// 		}
+// 	}
+
+// 	// push to redis
+// 	id, err := redisClient.AddToStream(orgID, map[string]interface{}{
+// 		"data": data,
+// 	})
+
+// 	if err != nil {
+// 		log.Println("cannot add to redis stream. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	c.JSON(http.StatusCreated, gin.H{
+// 		"id": id,
+// 	})
+
+// 	return
+// }
+
+// // StreamOutput is the handler responsible for
+// // posting the terraform output to the global stream
+// func StreamOutput(c *gin.Context) {
+// 	orgID := c.Param("org")
+
+// 	body, err := ioutil.ReadAll(c.Request.Body)
+// 	if err != nil {
+// 		log.Println("cannot read body. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	c.Request.Body.Close()
+
+// 	id, err := redisClient.AddToStream("global", map[string]interface{}{
+// 		"id":     orgID,
+// 		"status": "created",
+// 		"data":   body,
+// 	})
+
+// 	if err != nil {
+// 		log.Println("cannot push output to global stream. error:", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	log.Println("successfully added output to global stream. id:", id)
+// 	c.JSON(http.StatusOK, gin.H{
+// 		"id": id,
+// 	})
+
+// 	return
+// }
+
+// func filterAndProcessError(orgID string, logMsg *models.TFLogLine) {
+// 	// process log message to filter out error
+// 	err := eventProcessor.Filter(&processor.Event{
+// 		OrgID:     orgID,
+// 		TFLogLine: logMsg,
+// 	})
+
+// 	if err != nil {
+// 		log.Printf("cannot mark errored resource %s in desired state\nerror: %s\n",
+// 			logMsg.Hook.Resource.Resource,
+// 			err.Error())
+// 	}
+// }

+ 100 - 0
provisioner/server/handlers/tfstate.go

@@ -0,0 +1,100 @@
+package handlers
+
+// import (
+// 	"bytes"
+// 	"encoding/json"
+// 	"log"
+// 	"net/http"
+
+// 	"github.com/gin-gonic/gin"
+// 	"github.com/porter-dev/tf-http-backend/models"
+// )
+
+// func GetState(c *gin.Context) {
+// 	orgID := c.Param("org")
+
+// 	stateFile, err := s3Client.GetObject(orgID, "default.tfstate")
+// 	if err != nil {
+// 		log.Println("cannot find state file. error: ", err.Error())
+// 		log.Println("must be an init operation")
+
+// 		c.JSON(http.StatusNotFound, gin.H{
+// 			"error": err.Error(),
+// 		})
+// 		return
+// 	}
+
+// 	defer stateFile.Close()
+
+// 	var data bytes.Buffer
+
+// 	readCount, err := data.ReadFrom(stateFile)
+// 	if err != nil {
+// 		log.Println("cannot read state file. error: ", err.Error())
+
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	log.Println("read", readCount, "bytes")
+// 	if readCount == 0 {
+// 		// empty state file, delete it
+// 		log.Println("detected empty state file, deleting for sanity")
+// 		s3Client.DeleteObject(orgID, "default.tfstate")
+
+// 		c.JSON(http.StatusNotFound, gin.H{})
+// 		return
+// 	}
+
+// 	body := make(gin.H)
+
+// 	err = json.Unmarshal(data.Bytes(), &body)
+// 	if err != nil {
+// 		log.Println("error unmarshaling the state. error: ", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": err.Error(),
+// 		})
+
+// 		return
+// 	}
+
+// 	c.JSON(http.StatusOK, body)
+// }
+
+// func UpdateState(c *gin.Context) {
+// 	log.Println("updating/creating state")
+
+// 	var state models.TFState
+// 	err := c.BindJSON(&state)
+// 	if err != nil {
+// 		log.Fatalln("cannot read request body", err)
+// 	}
+
+// 	orgID := c.Param("org")
+
+// 	data, err := json.Marshal(state)
+// 	if err != nil {
+// 		log.Printf("cannot marshal json. error: %s\n", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": "cannot marshal JSON",
+// 		})
+
+// 		return
+// 	}
+
+// 	err = s3Client.PutObject(orgID, "default.tfstate", data)
+// 	if err != nil {
+// 		log.Printf("cannot create state file. error: %s\n", err.Error())
+// 		c.JSON(http.StatusInternalServerError, gin.H{
+// 			"error": "cannot create state file",
+// 		})
+
+// 		return
+// 	}
+
+// 	c.JSON(http.StatusCreated, gin.H{})
+// 	return
+// }

+ 41 - 0
provisioner/test_client/client.go

@@ -0,0 +1,41 @@
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/porter-dev/porter/provisioner/pb"
+	"google.golang.org/grpc"
+)
+
+// printStateUpdate requests the state stream for the given infra and logs the first update.
+func printStateUpdate(client pb.ProvisionerClient, infra *pb.Infra) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	stream, err := client.GetState(ctx, infra)
+	if err != nil {
+		log.Fatalf("%v.GetState(_) = _, %v: ", client, err)
+	}
+	stateUpdate, err := stream.Recv()
+
+	if err != nil {
+		log.Fatalf("%v.GetState(_).Recv() = _, %v: ", client, err)
+	}
+
+	log.Println(stateUpdate)
+}
+
+func main() {
+	serverAddr := "localhost:8082"
+	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
+	if err != nil {
+		log.Fatalf("fail to dial: %v", err)
+	}
+	defer conn.Close()
+	client := pb.NewProvisionerClient(conn)
+
+	printStateUpdate(client, &pb.Infra{
+		Id: 1,
+	})
+}

+ 3 - 0
provisioner/types/transformers.go

@@ -0,0 +1,3 @@
+package types
+
+// Package types converts protobuf (PB) types to API types and API types to PB types.

+ 18 - 1
services/job_sidecar_container/job_killer.sh

@@ -31,6 +31,14 @@ else
   sidecar=$3
 fi  
 
+global_timeout=$TIMEOUT
+
+if [ -z "$global_timeout" ]; then
+  global_timeout=3600
+fi
+
+echo "set global timeout value of $global_timeout"
+
 pattern="$(printf '[%s]%s' $(echo $target | cut -c 1) $(echo $target | cut -c 2-))"
 
 graceful_shutdown() {
@@ -93,12 +101,21 @@ target_pid_name=$(pgrep -f $pattern -l | grep -v 'job_killer.sh' | grep -v 'wait

if [ -n "$target_pid" ]; then
    echo "targeting pids $target_pid matched by $target_pid_name"
+    # schedule a forced shutdown once the global timeout elapses; a flag file is
+    # used because variable assignments inside a backgrounded subshell never
+    # propagate back to this (parent) shell
+    global_shutdown_flag="/tmp/job_killer_global_shutdown.$$"
+    (sleep ${global_timeout}; echo "triggering global shutdown" && touch "$global_shutdown_flag" && graceful_shutdown $grace_period_seconds $target || true) &
+    global_killer=${!}
+    
    tail --pid=$target_pid -f /dev/null &
    child=$!

    wait "$child"

+    if [ ! -f "$global_shutdown_flag" ]; then
+      # global shutdown did not fire: cancel the pending timer, then shut down gracefully
+      sleep 0.1 && kill -9 ${global_killer} 2>/dev/null || true
+      graceful_shutdown $grace_period_seconds $target
+    fi
else 
  echo "no process could be targeted within 10s, initiating shutdown"
 

+ 1 - 1
services/porter_cli_container/dev.Dockerfile

@@ -2,7 +2,7 @@
 
 # Base Go environment
 # -------------------
-FROM golang:1.16 as base
+FROM golang:1.17 as base
 WORKDIR /porter
 
 RUN apt-get update && apt-get install -y gcc musl-dev git

Beberapa file tidak ditampilkan karena terlalu banyak file yang berubah dalam diff ini