Explorar el Código

Merge branch 'master' of github.com:porter-dev/porter into nico/rbac-crud-operations

jnfrati hace 3 años
padre
commit
b93f0dd1f2

+ 0 - 4
README.md

@@ -63,10 +63,6 @@ Below are instructions for a quickstart. For full documentation, please visit ou
 
 3. 🚀 Deploy your applications from a [git repository](https://docs.getporter.dev/docs/applications) or [Docker image registry](https://docs.getporter.dev/docs/cli-documentation#porter-docker-configure).
 
-## Running Porter Locally
-
-While it requires a few additional steps, it is possible to run Porter locally. Follow [this guide](https://docs.getporter.dev/docs/running-porter-locally) to run the local version of Porter.
-
 ## Want to Help?
 
 We welcome all contributions. If you're interested in contributing, please read our [contributing guide](https://github.com/porter-dev/porter/blob/master/CONTRIBUTING.md) and [join our Discord community](https://discord.gg/GJynMR3KXK).

+ 6 - 0
api/server/handlers/infra/forms.go

@@ -91,6 +91,12 @@ tabs:
           value: db.t3.xlarge
         - label: db.t3.2xlarge
           value: db.t3.2xlarge
+        - label: db.r6g.large
+          value: db.r6g.large
+        - label: db.r6g.xlarge
+          value: db.r6g.xlarge
+        - label: db.r6g.2xlarge
+          value: db.r6g.2xlarge
   - name: family-versions
     contents:
     - type: select

+ 1 - 1
api/server/router/cluster.go

@@ -697,7 +697,7 @@ func getClusterRoutes(
 	// GET /api/projects/{project_id}/clusters/{cluster_id}/kubeconfig -> cluster.NewGetTemporaryKubeconfigHandler
 	getTemporaryKubeconfigEndpoint := factory.NewAPIEndpoint(
 		&types.APIRequestMetadata{
-			Verb:   types.APIVerbGet,
+			Verb:   types.APIVerbUpdate, // we do not want users with no-write access to be able to use this
 			Method: types.HTTPVerbGet,
 			Path: &types.Path{
 				Parent:       basePath,

+ 1 - 1
api/server/shared/config/env/envconfs.go

@@ -100,7 +100,7 @@ type ServerConf struct {
 	ProvisionerTest bool `env:"PROVISIONER_TEST,default=false"`
 
 	// Disable filtering for project creation
-	DisableAllowlist bool `env:"DISABLE_ALLOWLIST,default=false"`
+	DisableAllowlist bool `env:"DISABLE_ALLOWLIST,default=true"`
 
 	// Enable gitlab integration
 	EnableGitlab bool `env:"ENABLE_GITLAB,default=false"`

+ 83 - 19
cli/cmd/deploy.go

@@ -231,6 +231,7 @@ var updateEnvGroupCmd = &cobra.Command{
 var updateSetEnvGroupCmd = &cobra.Command{
 	Use:   "set",
 	Short: "Sets the desired value of an environment variable in an env group in the form VAR=VALUE.",
+	Args:  cobra.MaximumNArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
 		err := checkLoginAndRun(args, updateSetEnvGroup)
 
@@ -243,6 +244,7 @@ var updateSetEnvGroupCmd = &cobra.Command{
 var updateUnsetEnvGroupCmd = &cobra.Command{
 	Use:   "unset",
 	Short: "Removes an environment variable from an env group.",
+	Args:  cobra.MinimumNArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
 		err := checkLoginAndRun(args, updateUnsetEnvGroup)
 
@@ -262,9 +264,10 @@ var stream bool
 var buildFlagsEnv []string
 var forcePush bool
 var useCache bool
-var value string
 var version uint
 var varType string
+var normalEnvGroupVars []string
+var secretEnvGroupVars []string
 
 func init() {
 	buildFlagsEnv = []string{}
@@ -407,6 +410,22 @@ func init() {
 		"the type of environment variable (either \"normal\" or \"secret\")",
 	)
 
+	updateSetEnvGroupCmd.PersistentFlags().StringArrayVarP(
+		&normalEnvGroupVars,
+		"normal",
+		"n",
+		[]string{},
+		"list of variables to set, in the form VAR=VALUE",
+	)
+
+	updateSetEnvGroupCmd.PersistentFlags().StringArrayVarP(
+		&secretEnvGroupVars,
+		"secret",
+		"s",
+		[]string{},
+		"list of secret variables to set, in the form VAR=VALUE",
+	)
+
 	updateEnvGroupCmd.AddCommand(updateSetEnvGroupCmd)
 	updateEnvGroupCmd.AddCommand(updateUnsetEnvGroupCmd)
 
@@ -573,14 +592,8 @@ func updateUpgrade(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 }
 
 func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	if len(args) == 0 {
-		return fmt.Errorf("required variable in the form of VAR=VALUE")
-	}
-
-	key, value, found := strings.Cut(args[0], "=")
-
-	if !found {
-		return fmt.Errorf("variable should be in the form of VAR=VALUE")
+	if len(normalEnvGroupVars) == 0 && len(secretEnvGroupVars) == 0 && len(args) == 0 {
+		return fmt.Errorf("please provide one or more variables to update")
 	}
 
 	s := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
@@ -606,17 +619,56 @@ func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client
 		Variables: envGroupResp.Variables,
 	}
 
-	delete(newEnvGroup.Variables, key)
+	// first check for multiple variables being set using the -e or -s flags
+	if len(normalEnvGroupVars) > 0 || len(secretEnvGroupVars) > 0 {
+		for _, v := range normalEnvGroupVars {
+			key, value, err := validateVarValue(v)
+
+			if err != nil {
+				return err
+			}
+
+			delete(newEnvGroup.Variables, key)
+
+			newEnvGroup.Variables[key] = value
+		}
+
+		if len(secretEnvGroupVars) > 0 {
+			newEnvGroup.SecretVariables = make(map[string]string)
+		}
+
+		for _, v := range secretEnvGroupVars {
+			key, value, err := validateVarValue(v)
+
+			if err != nil {
+				return err
+			}
+
+			delete(newEnvGroup.Variables, key)
+
+			newEnvGroup.SecretVariables[key] = value
+		}
+
+		s.Suffix = fmt.Sprintf(" Updating env group '%s' in namespace '%s'", name, namespace)
+	} else { // legacy usage
+		key, value, err := validateVarValue(args[0])
+
+		if err != nil {
+			return err
+		}
+
+		delete(newEnvGroup.Variables, key)
 
-	if varType == "secret" {
-		newEnvGroup.SecretVariables = make(map[string]string)
-		newEnvGroup.SecretVariables[key] = value
+		if varType == "secret" {
+			newEnvGroup.SecretVariables = make(map[string]string)
+			newEnvGroup.SecretVariables[key] = value
 
-		s.Suffix = fmt.Sprintf(" Adding new secret variable '%s' to env group '%s' in namespace '%s'", key, name, namespace)
-	} else {
-		newEnvGroup.Variables[key] = value
+			s.Suffix = fmt.Sprintf(" Adding new secret variable '%s' to env group '%s' in namespace '%s'", key, name, namespace)
+		} else {
+			newEnvGroup.Variables[key] = value
 
-		s.Suffix = fmt.Sprintf(" Adding new variable '%s' to env group '%s' in namespace '%s'", key, name, namespace)
+			s.Suffix = fmt.Sprintf(" Adding new variable '%s' to env group '%s' in namespace '%s'", key, name, namespace)
+		}
 	}
 
 	s.Start()
@@ -636,6 +688,16 @@ func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client
 	return nil
 }
 
+func validateVarValue(in string) (string, string, error) {
+	key, value, found := strings.Cut(in, "=")
+
+	if !found {
+		return "", "", fmt.Errorf("%s is not in the form of VAR=VALUE", in)
+	}
+
+	return key, value, nil
+}
+
 func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
 	if len(args) == 0 {
 		return fmt.Errorf("required variable name")
@@ -664,9 +726,11 @@ func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 		Variables: envGroupResp.Variables,
 	}
 
-	delete(newEnvGroup.Variables, args[0])
+	for _, v := range args {
+		delete(newEnvGroup.Variables, v)
+	}
 
-	s.Suffix = fmt.Sprintf(" Removing variable '%s' from env group '%s' in namespace '%s'", args[0], name, namespace)
+	s.Suffix = fmt.Sprintf(" Removing variables from env group '%s' in namespace '%s'", name, namespace)
 
 	s.Start()
 

+ 60 - 0
cli/cmd/helm.go

@@ -0,0 +1,60 @@
+package cmd
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+
+	api "github.com/porter-dev/porter/api/client"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/spf13/cobra"
+)
+
+var helmCmd = &cobra.Command{
+	Use:   "helm",
+	Short: "Use helm to interact with a Porter cluster",
+	Run: func(cmd *cobra.Command, args []string) {
+		err := checkLoginAndRun(args, runHelm)
+
+		if err != nil {
+			os.Exit(1)
+		}
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(helmCmd)
+}
+
+func runHelm(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+	_, err := exec.LookPath("helm")
+
+	if err != nil {
+		return fmt.Errorf("error finding helm: %w", err)
+	}
+
+	tmpFile, err := downloadTempKubeconfig(client)
+
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		os.Remove(tmpFile)
+	}()
+
+	os.Setenv("KUBECONFIG", tmpFile)
+
+	cmd := exec.Command("helm", args...)
+
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	err = cmd.Run()
+
+	if err != nil {
+		return fmt.Errorf("error running helm: %w", err)
+	}
+
+	return nil
+}

+ 85 - 0
cli/cmd/kubectl.go

@@ -0,0 +1,85 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+
+	api "github.com/porter-dev/porter/api/client"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/spf13/cobra"
+)
+
+var kubectlCmd = &cobra.Command{
+	Use:   "kubectl",
+	Short: "Use kubectl to interact with a Porter cluster",
+	Run: func(cmd *cobra.Command, args []string) {
+		err := checkLoginAndRun(args, runKubectl)
+
+		if err != nil {
+			os.Exit(1)
+		}
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(kubectlCmd)
+}
+
+func runKubectl(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+	_, err := exec.LookPath("kubectl")
+
+	if err != nil {
+		return fmt.Errorf("error finding kubectl: %w", err)
+	}
+
+	tmpFile, err := downloadTempKubeconfig(client)
+
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		os.Remove(tmpFile)
+	}()
+
+	os.Setenv("KUBECONFIG", tmpFile)
+
+	cmd := exec.Command("kubectl", args...)
+
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	err = cmd.Run()
+
+	if err != nil {
+		return fmt.Errorf("error running kubectl: %w", err)
+	}
+
+	return nil
+}
+
+func downloadTempKubeconfig(client *api.Client) (string, error) {
+	tmpFile, err := os.CreateTemp("", "porter_kubeconfig_*.yaml")
+
+	if err != nil {
+		return "", fmt.Errorf("error creating temp file for kubeconfig: %w", err)
+	}
+
+	defer tmpFile.Close()
+
+	resp, err := client.GetKubeconfig(context.Background(), cliConf.Project, cliConf.Cluster, cliConf.Kubeconfig)
+
+	if err != nil {
+		return "", fmt.Errorf("error fetching kubeconfig for cluster: %w", err)
+	}
+
+	_, err = tmpFile.Write(resp.Kubeconfig)
+
+	if err != nil {
+		return "", fmt.Errorf("error writing kubeconfig to temp file: %w", err)
+	}
+
+	return tmpFile.Name(), nil
+}

+ 2 - 2
dashboard/docker/dev.Dockerfile

@@ -7,9 +7,9 @@ COPY package*.json ./
 
 ENV NODE_ENV=development
 
-RUN npm install
+RUN npm ci --legacy-peer-deps
 RUN npm i -g http-parser-js
 
 COPY . ./
 
-CMD npm start
+CMD npm start

+ 4 - 2
dashboard/src/main/home/cluster-dashboard/expanded-chart/status/ControllerTab.tsx

@@ -28,6 +28,7 @@ export type ControllerTabPodType = {
   restartCount: number | string;
   podAge: string;
   revisionNumber?: number;
+  containerStatus: any;
 };
 
 const formatCreationTimestamp = timeFormat("%H:%M:%S %b %d, '%y");
@@ -125,6 +126,7 @@ const ControllerTabFC: React.FunctionComponent<Props> = ({
             status: pod?.status,
             replicaSetName,
             restartCount,
+            containerStatus,
             podAge: pod?.metadata?.creationTimestamp ? podAge : "N/A",
             revisionNumber:
               (pod?.metadata?.annotations &&
@@ -233,8 +235,8 @@ const ControllerTabFC: React.FunctionComponent<Props> = ({
         {},
         {
           cluster_id: currentCluster.id,
-          name: pod.metadata?.name,
-          namespace: pod.metadata?.namespace,
+          name: pod?.name,
+          namespace: pod?.namespace,
           id: currentProject.id,
         }
       )

+ 16 - 6
dashboard/src/main/home/cluster-dashboard/expanded-chart/status/Logs.tsx

@@ -77,14 +77,15 @@ const LogsFC: React.FC<{
     ) {
       return previousLogs?.map((log, i) => {
         return (
-          <Log key={i}>
-            {log.map((ansi, j) => {
+          <Log key={[log.lineNumber, i].join(".")}>
+            <span className="line-number">{log.lineNumber}</span>
+            {log.line.map((ansi, j) => {
               if (ansi.clearLine) {
                 return null;
               }
 
               return (
-                <LogSpan key={i + "." + j} ansi={ansi}>
+                <LogSpan key={[log.lineNumber, i, j].join(".")} ansi={ansi}>
                   {ansi.content.replace(/ /g, "\u00a0")}
                 </LogSpan>
               );
@@ -108,14 +109,15 @@ const LogsFC: React.FC<{
 
     return logs?.map((log, i) => {
       return (
-        <Log key={i}>
-          {log.map((ansi, j) => {
+        <Log key={[log.lineNumber, i].join(".")}>
+          <span className="line-number">{log.lineNumber}</span>
+          {log.line.map((ansi, j) => {
             if (ansi.clearLine) {
               return null;
             }
 
             return (
-              <LogSpan key={i + "." + j} ansi={ansi}>
+              <LogSpan key={[log.lineNumber, i, j].join(".")} ansi={ansi}>
                 {ansi.content.replace(/ /g, "\u00a0")}
               </LogSpan>
             );
@@ -351,6 +353,14 @@ const Message = styled.div`
 
 const Log = styled.div`
   font-family: monospace;
+  & > .line-number {
+    display: inline-block;
+    text-align: right;
+    min-width: 35px;
+    margin-right: 8px;
+    opacity: 0.3;
+    font-family: monospace;
+  }
 `;
 
 const LogSpan = styled.span`

+ 13 - 0
dashboard/src/main/home/cluster-dashboard/expanded-chart/status/PodRow.tsx

@@ -43,6 +43,12 @@ const PodRow: React.FunctionComponent<PodRowProps> = ({
           {pod?.name}
           <Grey>Restart count: {pod.restartCount}</Grey>
           <Grey>Created on: {pod.podAge}</Grey>
+          {podStatus === "failed" ? (
+            <FailedStatusContainer>
+              <Grey>Failure Reason: {pod?.containerStatus?.state?.waiting?.reason}</Grey>
+              <Grey>{pod?.containerStatus?.state?.waiting?.message}</Grey>
+            </FailedStatusContainer>
+          ) : null}
         </Tooltip>
       )}
 
@@ -73,6 +79,13 @@ const Grey = styled.div`
   color: #aaaabb;
 `;
 
+const FailedStatusContainer = styled.div`
+  width: 100%;
+  border: 1px solid hsl(0deg, 100%, 30%);
+  padding: 5px;
+  margin-block: 5px;
+`;
+
 const Tooltip = styled.div`
   position: absolute;
   left: 35px;

+ 127 - 33
dashboard/src/main/home/cluster-dashboard/expanded-chart/status/useLogs.ts

@@ -5,23 +5,29 @@ import { Context } from "shared/Context";
 import { useWebsockets, NewWebsocketOptions } from "shared/hooks/useWebsockets";
 import { SelectedPodType } from "./types";
 
-const MAX_LOGS = 250;
+const MAX_LOGS = 5000;
+const LOGS_BUFFER_SIZE = 1000;
+
+interface Log {
+  line: Anser.AnserJsonEntry[];
+  lineNumber: number;
+}
 
 export const useLogs = (
   currentPod: SelectedPodType,
   scroll?: (smooth: boolean) => void
 ) => {
+  let logsBufferRef = useRef<Record<string, Log[]>>({});
   const currentPodName = useRef<string>();
 
   const { currentCluster, currentProject } = useContext(Context);
   const [containers, setContainers] = useState<string[]>([]);
   const [currentContainer, setCurrentContainer] = useState<string>("");
   const [logs, setLogs] = useState<{
-    [key: string]: Anser.AnserJsonEntry[][];
+    [key: string]: Log[];
   }>({});
-
   const [prevLogs, setPrevLogs] = useState<{
-    [key: string]: Anser.AnserJsonEntry[][];
+    [key: string]: Log[];
   }>({});
 
   const {
@@ -46,14 +52,16 @@ export const useLogs = (
       )
       .then((res) => res.data);
 
-    let processedLogs = [] as Anser.AnserJsonEntry[][];
-
-    events.items.forEach((evt: any) => {
+    const processedLogs: Log[] = events.items.map((evt: any, idx: number) => {
       let ansiEvtType = evt.type == "Warning" ? "\u001b[31m" : "\u001b[32m";
       let ansiLog = Anser.ansiToJson(
         `${ansiEvtType}${evt.type}\u001b[0m \t \u001b[43m\u001b[34m\t${evt.reason} \u001b[0m \t ${evt.message}`
       );
-      processedLogs.push(ansiLog);
+
+      return {
+        line: ansiLog,
+        lineNumber: idx + 1,
+      };
     });
 
     // SET LOGS FOR SYSTEM
@@ -80,12 +88,13 @@ export const useLogs = (
         )
         .then((res) => res.data);
       // Process logs
-      const processedLogs: Anser.AnserJsonEntry[][] = logs.previous_logs.map(
-        (currentLog) => {
-          let ansiLog = Anser.ansiToJson(currentLog);
-          return ansiLog;
-        }
-      );
+      const processedLogs: Log[] = logs.previous_logs.map((currentLog, idx) => {
+        let ansiLog = Anser.ansiToJson(currentLog);
+        return {
+          line: ansiLog,
+          lineNumber: idx + 1,
+        };
+      });
 
       setPrevLogs((pl) => ({
         ...pl,
@@ -94,6 +103,60 @@ export const useLogs = (
     } catch (error) {}
   };
 
+  /**
+   * Updates the `logs` for `containerName` with `newLogs`
+   * @param containerName Name of the container
+   * @param newLogs New logs to update for
+   */
+  const updateContainerLogs = (containerName: string, newLogs: Log[]) => {
+    setLogs((logs) => {
+      let containerLogs = logs[containerName] || [];
+      const lastLineNumber = containerLogs?.at(-1)?.lineNumber || 0;
+
+      containerLogs.push(
+        ...newLogs.map((l) => ({
+          ...l,
+          lineNumber: lastLineNumber + l.lineNumber,
+        }))
+      );
+      // this is technically not as efficient as things could be
+      // if there are performance issues, a deque can be used in place of a list
+      // for storing logs
+      if (containerLogs.length > MAX_LOGS) {
+        const logsToBeRemoved =
+          newLogs.length < LOGS_BUFFER_SIZE ? newLogs.length : LOGS_BUFFER_SIZE;
+        containerLogs = containerLogs.slice(logsToBeRemoved);
+      }
+
+      if (typeof scroll === "function") {
+        scroll(true);
+      }
+      return {
+        ...logs,
+        [containerName]: containerLogs,
+      };
+    });
+  };
+
+  /**
+   * Flushes the logs buffer. If `containerName` is provided,
+   * it will update logs for the `containerName` before executing
+   * the flush operation
+   * @param containerName Name of the container
+   */
+  const flushLogsBuffer = (containerName?: string) => {
+    if (containerName) {
+      updateContainerLogs(containerName, [
+        ...(logsBufferRef.current[containerName] || []),
+      ]);
+      logsBufferRef.current[containerName] = [];
+      return;
+    }
+
+    // If no container name is provided flush all,
+    logsBufferRef.current = {};
+  };
+
   const setupWebsocket = (containerName: string, websocketKey: string) => {
     if (!currentPod?.metadata?.name) return;
 
@@ -105,25 +168,20 @@ export const useLogs = (
       },
       onmessage: (evt: MessageEvent) => {
         let ansiLog = Anser.ansiToJson(evt.data);
-        setLogs((logs) => {
-          const tmpLogs = { ...logs };
-          let containerLogs = tmpLogs[containerName] || [];
-
-          containerLogs.push(ansiLog);
-          // this is technically not as efficient as things could be
-          // if there are performance issues, a deque can be used in place of a list
-          // for storing logs
-          if (containerLogs.length > MAX_LOGS) {
-            containerLogs.shift();
-          }
-          if (typeof scroll === "function") {
-            scroll(true);
-          }
-          return {
-            ...logs,
-            [containerName]: containerLogs,
-          };
+
+        if (!logsBufferRef.current[containerName]) {
+          logsBufferRef.current[containerName] = [];
+        }
+
+        logsBufferRef.current[containerName].push({
+          line: ansiLog,
+          lineNumber: logsBufferRef.current[containerName].length + 1,
         });
+
+        // If size of the logs buffer is exceeded, immediately flush the buffer
+        if (logsBufferRef.current[containerName].length > LOGS_BUFFER_SIZE) {
+          flushLogsBuffer(containerName);
+        }
       },
       onclose: () => {
         console.log("Closed websocket:", websocketKey);
@@ -138,6 +196,8 @@ export const useLogs = (
     const websocketKey = `${currentPodName.current}-${currentContainer}-websocket`;
     closeWebsocket(websocketKey);
 
+    // Flush and re-initialize empty buffer
+    flushLogsBuffer(currentContainer);
     setPrevLogs((prev) => ({ ...prev, [currentContainer]: [] }));
     setLogs((prev) => ({ ...prev, [currentContainer]: [] }));
 
@@ -174,6 +234,7 @@ export const useLogs = (
 
     closeAllWebsockets();
 
+    flushLogsBuffer();
     setPrevLogs({});
     setLogs({});
 
@@ -199,6 +260,35 @@ export const useLogs = (
     };
   }, []);
 
+  useEffect(() => {
+    flushLogsBuffer(currentContainer);
+  }, []);
+
+  /**
+   * In some situations, we might never hit the limit for the max buffer size.
+   * An example is if the total logs for the pod < LOGS_BUFFER_SIZE.
+   *
+   * For handling situations like this, we would want to force a flush operation
+   * on the buffer so that we dont have any stale logs
+   */
+  useEffect(() => {
+    const flushAllLogs = () =>
+      Object.keys(logsBufferRef.current).forEach((container) =>
+        flushLogsBuffer(container)
+      );
+
+    /**
+     * We don't want users to wait for too long for the initial
+     * logs to appear. So we use a setTimeout for 1s to force-flush
+     * logs after 1s of load
+     */
+    const initialFlushTimeout = setTimeout(flushAllLogs, 1000);
+
+    const flushLogsBufferInterval = setInterval(flushAllLogs, 5000);
+
+    return () => {
+      clearTimeout(initialFlushTimeout);
+      clearInterval(flushLogsBufferInterval);
+    };
+  }, []);
+
   const currentLogs = useMemo(() => {
     return logs[currentContainer] || [];
   }, [currentContainer, logs]);
@@ -210,7 +300,11 @@ export const useLogs = (
   return {
     containers,
     currentContainer,
-    setCurrentContainer,
+    setCurrentContainer: (newContainer: string) => {
+      // First flush the logs of the older container
+      flushLogsBuffer(currentContainer);
+      setCurrentContainer(newContainer);
+    },
     logs: currentLogs,
     previousLogs: currentPreviousLogs,
     refresh,

+ 1 - 1
docker-compose.dev-secure.yaml

@@ -18,7 +18,7 @@ services:
       - postgres
     env_file:
       - ./docker/.env
-    command: /bin/sh -c '/porter/bin/migrate; air -c .air.toml;'
+    command: air -c .air.toml
     restart: on-failure
     volumes:
       - ./cmd:/porter/cmd

+ 1 - 1
docker-compose.dev.yaml

@@ -18,7 +18,7 @@ services:
       - postgres
     env_file:
       - ./docker/.env
-    command: /bin/sh -c '/porter/bin/migrate; air -c .air.toml;'
+    command: air -c .air.toml
     restart: on-failure
     volumes:
       - ./cmd:/porter/cmd

+ 4 - 4
docker/dev.Dockerfile

@@ -5,6 +5,9 @@ WORKDIR /porter
 
 RUN apk update && apk add --no-cache gcc musl-dev git
 
+# for live reloading of go container
+RUN go install github.com/cosmtrek/air@latest
+
 COPY go.mod go.sum ./
 RUN go mod download
 
@@ -12,7 +15,4 @@ COPY . ./
 
 RUN chmod +x /porter/docker/bin/*
 
-# for live reloading of go container
-RUN go get github.com/cosmtrek/air
-
-CMD air -c .air.toml
+CMD air -c .air.toml