Bladeren bron

Remove globals, add context propagation to CLI (#3400)

Stefan McShane 2 jaren geleden
bovenliggende
commit
0fff42d1a7
66 gewijzigde bestanden met toevoegingen van 1485 en 1550 verwijderingen
  1. 1 1
      .github/golangci-lint.yaml
  2. 54 17
      api/client/api.go
  3. 88 83
      cli/cmd/app.go
  4. 130 99
      cli/cmd/apply.go
  5. 56 30
      cli/cmd/auth.go
  6. 24 23
      cli/cmd/bluegreen.go
  7. 10 9
      cli/cmd/cluster.go
  8. 62 18
      cli/cmd/config.go
  9. 127 96
      cli/cmd/config/config.go
  10. 18 19
      cli/cmd/config/docker.go
  11. 27 16
      cli/cmd/connect.go
  12. 4 3
      cli/cmd/connect/dockerhub.go
  13. 4 3
      cli/cmd/connect/docr.go
  14. 12 10
      cli/cmd/connect/ecr.go
  15. 4 3
      cli/cmd/connect/gar.go
  16. 4 3
      cli/cmd/connect/gcr.go
  17. 4 3
      cli/cmd/connect/helmrepo.go
  18. 8 5
      cli/cmd/connect/kubeconfig.go
  19. 4 3
      cli/cmd/connect/registry.go
  20. 15 15
      cli/cmd/create.go
  21. 20 25
      cli/cmd/delete.go
  22. 76 83
      cli/cmd/deploy.go
  23. 7 3
      cli/cmd/deploy/build.go
  24. 37 32
      cli/cmd/deploy/create.go
  25. 29 25
      cli/cmd/deploy/deploy.go
  26. 3 2
      cli/cmd/deploy/shared.go
  27. 4 3
      cli/cmd/deploy/wait/job.go
  28. 4 3
      cli/cmd/docker.go
  29. 41 41
      cli/cmd/docker/agent.go
  30. 27 20
      cli/cmd/docker/auth.go
  31. 2 2
      cli/cmd/docker/builder.go
  32. 4 5
      cli/cmd/docker/config.go
  33. 47 46
      cli/cmd/docker/porter.go
  34. 18 5
      cli/cmd/errors.go
  35. 14 6
      cli/cmd/errors/error_handler.go
  36. 7 10
      cli/cmd/get.go
  37. 8 8
      cli/cmd/github/release.go
  38. 5 3
      cli/cmd/helm.go
  39. 15 18
      cli/cmd/job.go
  40. 6 5
      cli/cmd/kubectl.go
  41. 16 21
      cli/cmd/list.go
  42. 9 6
      cli/cmd/logs.go
  43. 23 6
      cli/cmd/open.go
  44. 6 4
      cli/cmd/pack/pack.go
  45. 26 22
      cli/cmd/porter_app/apply.go
  46. 14 9
      cli/cmd/porter_app/hooks.go
  47. 4 4
      cli/cmd/porter_app/preDeploy.go
  48. 42 32
      cli/cmd/preview/build_image_driver.go
  49. 25 17
      cli/cmd/preview/env_group_driver.go
  50. 30 22
      cli/cmd/preview/push_image_driver.go
  51. 34 30
      cli/cmd/preview/update_config_driver.go
  52. 17 18
      cli/cmd/preview/utils.go
  53. 17 30
      cli/cmd/preview/v2beta1/apply.go
  54. 0 375
      cli/cmd/preview/v2beta1/default_driver.go
  55. 13 12
      cli/cmd/project.go
  56. 1 2
      cli/cmd/providers/gcp/local/config.go
  57. 13 12
      cli/cmd/registry.go
  58. 6 11
      cli/cmd/root.go
  59. 77 72
      cli/cmd/run.go
  60. 31 11
      cli/cmd/server.go
  61. 9 12
      cli/cmd/stack.go
  62. 1 1
      cli/cmd/v2/apply.go
  63. 6 5
      cli/cmd/v2/build.go
  64. 7 1
      cli/main.go
  65. 22 10
      cmd/docker-credential-porter/helper/helper.go
  66. 6 1
      cmd/docker-credential-porter/main.go

+ 1 - 1
.github/golangci-lint.yaml

@@ -14,7 +14,7 @@ linters-settings:
       - name: exported
         severity: error
   gocyclo:
-    min-complexity: 15
+    min-complexity: 40 # should drop to 15 max
   gomoddirectives:
     replace-local: false
   gosec:

+ 54 - 17
api/client/api.go

@@ -1,8 +1,10 @@
 package client
 
 import (
+	"context"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -25,7 +27,59 @@ type Client struct {
 	CookieFilePath string
 	Token          string
 
+	// cfToken is a cloudflare token for accessing the API
 	cfToken string
+
+	// // Config contains all config read from flags, environment variables, or porter.yaml config. This is used to automatically pull hosts, projectIDs, clusterIDs etc. in API calls
+	// Config config.CLIConfig
+}
+
+// NewClientInput contains all information required to create a new API Client
+type NewClientInput struct {
+	// BaseURL is the url for the API. This usually ends with /api, and should not end with a /
+	BaseURL string
+
+	// CookieFileName allows you to authenticate with a cookie file, if one is present in the porter directory.
+	// If both CookieFileName and BearerToken are specified, BearerToken will be preferred
+	CookieFileName string
+
+	// BearerToken uses a JWT to authenticate with the Porter API. If both BearerToken and CookieFileName are specified, BearerToken will be used
+	BearerToken string
+
+	// CloudflareToken allows for authenticating with a Porter API behind Cloudflare Zero Trust. If not specified, we will check PORTER_CF_ACCESS_TOKEN for a token.
+	// If one is found, it will be added to all API calls.
+	CloudflareToken string
+}
+
+// NewClientWithConfig creates a new API client with the provided config
+func NewClientWithConfig(ctx context.Context, input NewClientInput) (Client, error) {
+	client := Client{
+		BaseURL: input.BaseURL,
+		HTTPClient: &http.Client{
+			Timeout: time.Minute,
+		},
+	}
+	if cfToken := os.Getenv("PORTER_CF_ACCESS_TOKEN"); cfToken != "" {
+		client.cfToken = cfToken
+	}
+
+	if input.BearerToken != "" {
+		client.Token = input.BearerToken
+		return client, nil
+	}
+
+	if input.CookieFileName != "" {
+		client.CookieFilePath = input.CookieFileName
+		cookie, err := client.getCookie()
+		if err != nil {
+			return client, fmt.Errorf("error getting cookie from path: %w", err)
+		}
+		if cookie == nil {
+			return client, errors.New("no cookie found at location")
+		}
+		return client, nil
+	}
+	return client, errors.New("unable to create an API session with either cookie or token")
 }
 
 // NewClient constructs a new client based on a set of options
@@ -55,23 +109,6 @@ func NewClient(baseURL string, cookieFileName string) *Client {
 	return client
 }
 
-func NewClientWithToken(baseURL, token string) *Client {
-	client := &Client{
-		BaseURL: baseURL,
-		Token:   token,
-		HTTPClient: &http.Client{
-			Timeout: time.Minute,
-		},
-	}
-
-	// look for a cloudflare access token specifically for Porter
-	if cfToken := os.Getenv("PORTER_CF_ACCESS_TOKEN"); cfToken != "" {
-		client.cfToken = cfToken
-	}
-
-	return client
-}
-
 func (c *Client) getRequest(relPath string, data interface{}, response interface{}) error {
 	vals := make(map[string][]string)
 	err := schema.NewEncoder().Encode(data, vals)

+ 88 - 83
cli/cmd/app.go

@@ -12,6 +12,7 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 	batchv1 "k8s.io/api/batch/v1"
@@ -55,7 +56,7 @@ var appRunCmd = &cobra.Command{
 	Args:  cobra.MinimumNArgs(2),
 	Short: "Runs a command inside a connected cluster container.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, appRun)
+		err := checkLoginAndRun(cmd.Context(), args, appRun)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -68,7 +69,7 @@ var appRunCleanupCmd = &cobra.Command{
 	Args:  cobra.NoArgs,
 	Short: "Delete any lingering ephemeral pods that were created with \"porter app run\".",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, appCleanup)
+		err := checkLoginAndRun(cmd.Context(), args, appCleanup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -81,7 +82,7 @@ var appUpdateTagCmd = &cobra.Command{
 	Args:  cobra.MinimumNArgs(1),
 	Short: "Updates the image tag for an application.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, appUpdateTag)
+		err := checkLoginAndRun(cmd.Context(), args, appUpdateTag)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -150,7 +151,7 @@ func init() {
 	appCmd.AddCommand(appUpdateTagCmd)
 }
 
-func appRun(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func appRun(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error {
 	execArgs := args[1:]
 
 	color.New(color.FgGreen).Println("Attempting to run", strings.Join(execArgs, " "), "for application", args[0])
@@ -158,12 +159,12 @@ func appRun(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []st
 	appNamespace = fmt.Sprintf("porter-stack-%s", args[0])
 
 	if len(execArgs) > 0 {
-		res, err := client.GetPorterApp(context.Background(), cliConf.Project, cliConf.Cluster, args[0])
+		res, err := client.GetPorterApp(ctx, cliConfig.Project, cliConfig.Cluster, args[0])
 		if err != nil {
 			return fmt.Errorf("Unable to run command: %w", err)
 		}
 		if res.Name == "" {
-			return fmt.Errorf("An application named \"%s\" was not found in your project (ID: %d). Please check your spelling and try again.", args[0], cliConf.Project)
+			return fmt.Errorf("An application named \"%s\" was not found in your project (ID: %d). Please check your spelling and try again.", args[0], cliConfig.Project)
 		}
 
 		if res.Builder != "" &&
@@ -176,7 +177,7 @@ func appRun(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []st
 		}
 	}
 
-	podsSimple, err := appGetPods(client, appNamespace, args[0])
+	podsSimple, err := appGetPods(ctx, cliConfig, client, appNamespace, args[0])
 	if err != nil {
 		return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
 	}
@@ -248,10 +249,11 @@ func appRun(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []st
 	}
 
 	config := &AppPorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err = config.setSharedConfig()
+	err = config.setSharedConfig(ctx)
 
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
@@ -261,15 +263,16 @@ func appRun(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []st
 		return appExecuteRun(config, appNamespace, selectedPod.Name, selectedContainerName, execArgs)
 	}
 
-	return appExecuteRunEphemeral(config, appNamespace, selectedPod.Name, selectedContainerName, execArgs)
+	return appExecuteRunEphemeral(ctx, config, appNamespace, selectedPod.Name, selectedContainerName, execArgs)
 }
 
-func appCleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func appCleanup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, _ []string) error {
 	config := &AppPorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err := config.setSharedConfig()
+	err := config.setSharedConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
 	}
@@ -291,20 +294,20 @@ func appCleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []s
 	color.New(color.FgGreen).Println("Fetching ephemeral pods for cleanup")
 
 	if proceed == "All namespaces" {
-		namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+		namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return err
 		}
 
 		for _, namespace := range namespaces.Items {
-			if pods, err := appGetEphemeralPods(namespace.Name, config.Clientset); err == nil {
+			if pods, err := appGetEphemeralPods(ctx, namespace.Name, config.Clientset); err == nil {
 				podNames = append(podNames, pods...)
 			} else {
 				return err
 			}
 		}
 	} else {
-		if pods, err := appGetEphemeralPods(appNamespace, config.Clientset); err == nil {
+		if pods, err := appGetEphemeralPods(ctx, appNamespace, config.Clientset); err == nil {
 			podNames = append(podNames, pods...)
 		} else {
 			return err
@@ -325,7 +328,7 @@ func appCleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []s
 		color.New(color.FgBlue).Printf("Deleting ephemeral pod: %s\n", podName)
 
 		err = config.Clientset.CoreV1().Pods(appNamespace).Delete(
-			context.Background(), podName, metav1.DeleteOptions{},
+			ctx, podName, metav1.DeleteOptions{},
 		)
 		if err != nil {
 			return err
@@ -335,11 +338,11 @@ func appCleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []s
 	return nil
 }
 
-func appGetEphemeralPods(namespace string, clientset *kubernetes.Clientset) ([]string, error) {
+func appGetEphemeralPods(ctx context.Context, namespace string, clientset *kubernetes.Clientset) ([]string, error) {
 	var podNames []string
 
 	pods, err := clientset.CoreV1().Pods(namespace).List(
-		context.Background(), metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
+		ctx, metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
 	)
 	if err != nil {
 		return nil, err
@@ -353,17 +356,18 @@ func appGetEphemeralPods(namespace string, clientset *kubernetes.Clientset) ([]s
 }
 
 type AppPorterRunSharedConfig struct {
-	Client     *api.Client
+	Client     api.Client
 	RestConf   *rest.Config
 	Clientset  *kubernetes.Clientset
 	RestClient *rest.RESTClient
+	CLIConfig  config.CLIConfig
 }
 
-func (p *AppPorterRunSharedConfig) setSharedConfig() error {
-	pID := cliConf.Project
-	cID := cliConf.Cluster
+func (p *AppPorterRunSharedConfig) setSharedConfig(ctx context.Context) error {
+	pID := p.CLIConfig.Project
+	cID := p.CLIConfig.Cluster
 
-	kubeResp, err := p.Client.GetKubeconfig(context.Background(), pID, cID, cliConf.Kubeconfig)
+	kubeResp, err := p.Client.GetKubeconfig(ctx, pID, cID, p.CLIConfig.Kubeconfig)
 	if err != nil {
 		return err
 	}
@@ -411,11 +415,11 @@ type appPodSimple struct {
 	ContainerNames []string
 }
 
-func appGetPods(client *api.Client, namespace, releaseName string) ([]appPodSimple, error) {
-	pID := cliConf.Project
-	cID := cliConf.Cluster
+func appGetPods(ctx context.Context, cliConfig config.CLIConfig, client api.Client, namespace, releaseName string) ([]appPodSimple, error) {
+	pID := cliConfig.Project
+	cID := cliConfig.Cluster
 
-	resp, err := client.GetK8sAllPods(context.TODO(), pID, cID, namespace, releaseName)
+	resp, err := client.GetK8sAllPods(ctx, pID, cID, namespace, releaseName)
 	if err != nil {
 		return nil, err
 	}
@@ -482,28 +486,28 @@ func appExecuteRun(config *AppPorterRunSharedConfig, namespace, name, container
 	})
 }
 
-func appExecuteRunEphemeral(config *AppPorterRunSharedConfig, namespace, name, container string, args []string) error {
-	existing, err := appGetExistingPod(config, name, namespace)
+func appExecuteRunEphemeral(ctx context.Context, config *AppPorterRunSharedConfig, namespace, name, container string, args []string) error {
+	existing, err := appGetExistingPod(ctx, config, name, namespace)
 	if err != nil {
 		return err
 	}
 
-	newPod, err := appCreateEphemeralPodFromExisting(config, existing, container, args)
+	newPod, err := appCreateEphemeralPodFromExisting(ctx, config, existing, container, args)
 	if err != nil {
 		return err
 	}
 	podName := newPod.ObjectMeta.Name
 
 	// delete the ephemeral pod no matter what
-	defer appDeletePod(config, podName, namespace)
+	defer appDeletePod(ctx, config, podName, namespace) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	color.New(color.FgYellow).Printf("Waiting for pod %s to be ready...", podName)
-	if err = appWaitForPod(config, newPod); err != nil {
+	if err = appWaitForPod(ctx, config, newPod); err != nil {
 		color.New(color.FgRed).Println("failed")
-		return appHandlePodAttachError(err, config, namespace, podName, container)
+		return appHandlePodAttachError(ctx, err, config, namespace, podName, container)
 	}
 
-	err = appCheckForPodDeletionCronJob(config)
+	err = appCheckForPodDeletionCronJob(ctx, config)
 	if err != nil {
 		return err
 	}
@@ -511,7 +515,7 @@ func appExecuteRunEphemeral(config *AppPorterRunSharedConfig, namespace, name, c
 	// refresh pod info for latest status
 	newPod, err = config.Clientset.CoreV1().
 		Pods(newPod.Namespace).
-		Get(context.Background(), newPod.Name, metav1.GetOptions{})
+		Get(ctx, newPod.Name, metav1.GetOptions{})
 
 	// pod exited while we were waiting.  maybe an error maybe not.
 	// we dont know if the user wanted an interactive shell or not.
@@ -519,11 +523,11 @@ func appExecuteRunEphemeral(config *AppPorterRunSharedConfig, namespace, name, c
 	if appIsPodExited(newPod) {
 		color.New(color.FgGreen).Println("complete!")
 		var writtenBytes int64
-		writtenBytes, _ = appPipePodLogsToStdout(config, namespace, podName, container, false)
+		writtenBytes, _ = appPipePodLogsToStdout(ctx, config, namespace, podName, container, false)
 
 		if appVerbose || writtenBytes == 0 {
 			color.New(color.FgYellow).Println("Could not get logs. Pod events:")
-			appPipeEventsToStdout(config, namespace, podName, container, false)
+			_ = appPipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 		}
 		return nil
 	}
@@ -564,44 +568,44 @@ func appExecuteRunEphemeral(config *AppPorterRunSharedConfig, namespace, name, c
 		})
 	}); err != nil {
 		// ugly way to catch no TTY errors, such as when running command "echo \"hello\""
-		return appHandlePodAttachError(err, config, namespace, podName, container)
+		return appHandlePodAttachError(ctx, err, config, namespace, podName, container)
 	}
 
 	if appVerbose {
 		color.New(color.FgYellow).Println("Pod events:")
-		appPipeEventsToStdout(config, namespace, podName, container, false)
+		_ = appPipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 
 	return err
 }
 
-func appCheckForPodDeletionCronJob(config *AppPorterRunSharedConfig) error {
+func appCheckForPodDeletionCronJob(ctx context.Context, config *AppPorterRunSharedConfig) error {
 	// try and create the cron job and all of the other required resources as necessary,
 	// starting with the service account, then role and then a role binding
 
-	err := appCheckForServiceAccount(config)
+	err := appCheckForServiceAccount(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	err = appCheckForClusterRole(config)
+	err = appCheckForClusterRole(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	err = appCheckForRoleBinding(config)
+	err = appCheckForRoleBinding(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 
 	for _, namespace := range namespaces.Items {
 		cronJobs, err := config.Clientset.BatchV1().CronJobs(namespace.Name).List(
-			context.Background(), metav1.ListOptions{},
+			ctx, metav1.ListOptions{},
 		)
 		if err != nil {
 			return err
@@ -617,7 +621,7 @@ func appCheckForPodDeletionCronJob(config *AppPorterRunSharedConfig) error {
 			for _, cronJob := range cronJobs.Items {
 				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
 					err = config.Clientset.BatchV1().CronJobs(namespace.Name).Delete(
-						context.Background(), cronJob.Name, metav1.DeleteOptions{},
+						ctx, cronJob.Name, metav1.DeleteOptions{},
 					)
 					if err != nil {
 						return err
@@ -656,7 +660,7 @@ func appCheckForPodDeletionCronJob(config *AppPorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.BatchV1().CronJobs("default").Create(
-		context.Background(), cronJob, metav1.CreateOptions{},
+		ctx, cronJob, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -665,15 +669,15 @@ func appCheckForPodDeletionCronJob(config *AppPorterRunSharedConfig) error {
 	return nil
 }
 
-func appCheckForServiceAccount(config *AppPorterRunSharedConfig) error {
-	namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+func appCheckForServiceAccount(ctx context.Context, config *AppPorterRunSharedConfig) error {
+	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 
 	for _, namespace := range namespaces.Items {
 		serviceAccounts, err := config.Clientset.CoreV1().ServiceAccounts(namespace.Name).List(
-			context.Background(), metav1.ListOptions{},
+			ctx, metav1.ListOptions{},
 		)
 		if err != nil {
 			return err
@@ -689,7 +693,7 @@ func appCheckForServiceAccount(config *AppPorterRunSharedConfig) error {
 			for _, svcAccount := range serviceAccounts.Items {
 				if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
 					err = config.Clientset.CoreV1().ServiceAccounts(namespace.Name).Delete(
-						context.Background(), svcAccount.Name, metav1.DeleteOptions{},
+						ctx, svcAccount.Name, metav1.DeleteOptions{},
 					)
 					if err != nil {
 						return err
@@ -705,7 +709,7 @@ func appCheckForServiceAccount(config *AppPorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.CoreV1().ServiceAccounts("default").Create(
-		context.Background(), serviceAccount, metav1.CreateOptions{},
+		ctx, serviceAccount, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -714,9 +718,9 @@ func appCheckForServiceAccount(config *AppPorterRunSharedConfig) error {
 	return nil
 }
 
-func appCheckForClusterRole(config *AppPorterRunSharedConfig) error {
+func appCheckForClusterRole(ctx context.Context, config *AppPorterRunSharedConfig) error {
 	roles, err := config.Clientset.RbacV1().ClusterRoles().List(
-		context.Background(), metav1.ListOptions{},
+		ctx, metav1.ListOptions{},
 	)
 	if err != nil {
 		return err
@@ -746,7 +750,7 @@ func appCheckForClusterRole(config *AppPorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.RbacV1().ClusterRoles().Create(
-		context.Background(), role, metav1.CreateOptions{},
+		ctx, role, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -755,9 +759,9 @@ func appCheckForClusterRole(config *AppPorterRunSharedConfig) error {
 	return nil
 }
 
-func appCheckForRoleBinding(config *AppPorterRunSharedConfig) error {
+func appCheckForRoleBinding(ctx context.Context, config *AppPorterRunSharedConfig) error {
 	bindings, err := config.Clientset.RbacV1().ClusterRoleBindings().List(
-		context.Background(), metav1.ListOptions{},
+		ctx, metav1.ListOptions{},
 	)
 	if err != nil {
 		return err
@@ -788,7 +792,7 @@ func appCheckForRoleBinding(config *AppPorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.RbacV1().ClusterRoleBindings().Create(
-		context.Background(), binding, metav1.CreateOptions{},
+		ctx, binding, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -797,7 +801,7 @@ func appCheckForRoleBinding(config *AppPorterRunSharedConfig) error {
 	return nil
 }
 
-func appWaitForPod(config *AppPorterRunSharedConfig, pod *v1.Pod) error {
+func appWaitForPod(ctx context.Context, config *AppPorterRunSharedConfig, pod *v1.Pod) error {
 	var (
 		w   watch.Interface
 		err error
@@ -810,7 +814,7 @@ func appWaitForPod(config *AppPorterRunSharedConfig, pod *v1.Pod) error {
 		selector := fields.OneTermEqualSelector("metadata.name", pod.Name).String()
 		w, err = config.Clientset.CoreV1().
 			Pods(pod.Namespace).
-			Watch(context.Background(), metav1.ListOptions{FieldSelector: selector})
+			Watch(ctx, metav1.ListOptions{FieldSelector: selector})
 
 		if err == nil {
 			break
@@ -828,7 +832,7 @@ func appWaitForPod(config *AppPorterRunSharedConfig, pod *v1.Pod) error {
 			// creating the listener.
 			pod, err = config.Clientset.CoreV1().
 				Pods(pod.Namespace).
-				Get(context.Background(), pod.Name, metav1.GetOptions{})
+				Get(ctx, pod.Name, metav1.GetOptions{})
 			if appIsPodReady(pod) || appIsPodExited(pod) {
 				return nil
 			}
@@ -861,23 +865,23 @@ func appIsPodExited(pod *v1.Pod) bool {
 	return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
 }
 
-func appHandlePodAttachError(err error, config *AppPorterRunSharedConfig, namespace, podName, container string) error {
+func appHandlePodAttachError(ctx context.Context, err error, config *AppPorterRunSharedConfig, namespace, podName, container string) error {
 	if appVerbose {
 		color.New(color.FgYellow).Fprintf(os.Stderr, "Error: %s\n", err)
 	}
 	color.New(color.FgYellow).Fprintln(os.Stderr, "Could not open a shell to this container. Container logs:")
 
 	var writtenBytes int64
-	writtenBytes, _ = appPipePodLogsToStdout(config, namespace, podName, container, false)
+	writtenBytes, _ = appPipePodLogsToStdout(ctx, config, namespace, podName, container, false)
 
 	if appVerbose || writtenBytes == 0 {
 		color.New(color.FgYellow).Fprintln(os.Stderr, "Could not get logs. Pod events:")
-		appPipeEventsToStdout(config, namespace, podName, container, false)
+		_ = appPipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 	return err
 }
 
-func appPipePodLogsToStdout(config *AppPorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
+func appPipePodLogsToStdout(ctx context.Context, config *AppPorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
 	podLogOpts := v1.PodLogOptions{
 		Container: container,
 		Follow:    follow,
@@ -886,7 +890,7 @@ func appPipePodLogsToStdout(config *AppPorterRunSharedConfig, namespace, name, c
 	req := config.Clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
 
 	podLogs, err := req.Stream(
-		context.Background(),
+		ctx,
 	)
 	if err != nil {
 		return 0, err
@@ -897,13 +901,13 @@ func appPipePodLogsToStdout(config *AppPorterRunSharedConfig, namespace, name, c
 	return io.Copy(os.Stdout, podLogs)
 }
 
-func appPipeEventsToStdout(config *AppPorterRunSharedConfig, namespace, name, container string, follow bool) error {
+func appPipeEventsToStdout(ctx context.Context, config *AppPorterRunSharedConfig, namespace, name, _ string, _ bool) error {
 	// update the config in case the operation has taken longer than token expiry time
-	config.setSharedConfig()
+	config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	// creates the clientset
 	resp, err := config.Clientset.CoreV1().Events(namespace).List(
-		context.TODO(),
+		ctx,
 		metav1.ListOptions{
 			FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", name, namespace),
 		},
@@ -919,20 +923,20 @@ func appPipeEventsToStdout(config *AppPorterRunSharedConfig, namespace, name, co
 	return nil
 }
 
-func appGetExistingPod(config *AppPorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
+func appGetExistingPod(ctx context.Context, config *AppPorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
 	return config.Clientset.CoreV1().Pods(namespace).Get(
-		context.Background(),
+		ctx,
 		name,
 		metav1.GetOptions{},
 	)
 }
 
-func appDeletePod(config *AppPorterRunSharedConfig, name, namespace string) error {
+func appDeletePod(ctx context.Context, config *AppPorterRunSharedConfig, name, namespace string) error {
 	// update the config in case the operation has taken longer than token expiry time
-	config.setSharedConfig()
+	config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	err := config.Clientset.CoreV1().Pods(namespace).Delete(
-		context.Background(),
+		ctx,
 		name,
 		metav1.DeleteOptions{},
 	)
@@ -947,6 +951,7 @@ func appDeletePod(config *AppPorterRunSharedConfig, name, namespace string) erro
 }
 
 func appCreateEphemeralPodFromExisting(
+	ctx context.Context,
 	config *AppPorterRunSharedConfig,
 	existing *v1.Pod,
 	container string,
@@ -1032,18 +1037,18 @@ func appCreateEphemeralPodFromExisting(
 
 	// create the pod and return it
 	return config.Clientset.CoreV1().Pods(existing.ObjectMeta.Namespace).Create(
-		context.Background(),
+		ctx,
 		newPod,
 		metav1.CreateOptions{},
 	)
 }
 
-func appUpdateTag(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func appUpdateTag(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error {
 	namespace := fmt.Sprintf("porter-stack-%s", args[0])
 	if appTag == "" {
 		appTag = "latest"
 	}
-	release, err := client.GetRelease(context.TODO(), cliConf.Project, cliConf.Cluster, namespace, args[0])
+	release, err := client.GetRelease(ctx, cliConfig.Project, cliConfig.Cluster, namespace, args[0])
 	if err != nil {
 		return fmt.Errorf("Unable to find application %s", args[0])
 	}
@@ -1056,8 +1061,8 @@ func appUpdateTag(_ *types.GetAuthenticatedUserResponse, client *api.Client, arg
 		Tag:        appTag,
 	}
 	createUpdatePorterAppRequest := &types.CreatePorterAppRequest{
-		ClusterID:       cliConf.Cluster,
-		ProjectID:       cliConf.Project,
+		ClusterID:       cliConfig.Cluster,
+		ProjectID:       cliConfig.Project,
 		ImageInfo:       imageInfo,
 		OverrideRelease: false,
 	}
@@ -1065,9 +1070,9 @@ func appUpdateTag(_ *types.GetAuthenticatedUserResponse, client *api.Client, arg
 	color.New(color.FgGreen).Printf("Updating application %s to build using tag \"%s\"\n", args[0], appTag)
 
 	_, err = client.CreatePorterApp(
-		context.Background(),
-		cliConf.Project,
-		cliConf.Cluster,
+		ctx,
+		cliConfig.Project,
+		cliConfig.Cluster,
 		args[0],
 		createUpdatePorterAppRequest,
 	)

+ 130 - 99
cli/cmd/apply.go

@@ -72,7 +72,7 @@ applying a configuration:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter apply -f porter.yaml"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, apply)
+		err := checkLoginAndRun(cmd.Context(), args, apply)
 		if err != nil {
 			if strings.Contains(err.Error(), "Forbidden") {
 				color.New(color.FgRed).Fprintf(os.Stderr, "You may have to update your GitHub secret token")
@@ -111,23 +111,21 @@ func init() {
 	applyCmd.MarkFlagRequired("file")
 }
 
-func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) (err error) {
-	ctx := context.Background()
-
-	project, err := client.GetProject(ctx, cliConf.Project)
+func apply(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, _ []string) (err error) {
+	project, err := client.GetProject(ctx, cliConfig.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
 	}
 
 	if project.ValidateApplyV2 {
-		err = v2.Apply(ctx, cliConf, client, porterYAML)
+		err = v2.Apply(ctx, cliConfig, client, porterYAML)
 		if err != nil {
 			return err
 		}
 		return nil
 	}
 
-	fileBytes, err := ioutil.ReadFile(porterYAML)
+	fileBytes, err := os.ReadFile(porterYAML) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	if err != nil {
 		stackName := os.Getenv("PORTER_STACK_NAME")
 		if stackName == "" {
@@ -151,7 +149,7 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 	if previewVersion.Version == "v2beta1" {
 		ns := os.Getenv("PORTER_NAMESPACE")
 
-		applier, err := previewV2Beta1.NewApplier(client, fileBytes, ns)
+		applier, err := previewV2Beta1.NewApplier(client, cliConfig, fileBytes, ns)
 		if err != nil {
 			return err
 		}
@@ -193,7 +191,7 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 
 		if parsed.Applications != nil {
 			for appName, app := range parsed.Applications {
-				resources, err := porter_app.CreateApplicationDeploy(client, worker, app, appName, cliConf)
+				resources, err := porter_app.CreateApplicationDeploy(ctx, client, worker, app, appName, cliConfig)
 				if err != nil {
 					return fmt.Errorf("error parsing porter.yaml for build resources: %w", err)
 				}
@@ -230,7 +228,7 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 				return fmt.Errorf("error parsing porter.yaml for build resources: %w", err)
 			}
 
-			resources, err := porter_app.CreateApplicationDeploy(client, worker, app, appName, cliConf)
+			resources, err := porter_app.CreateApplicationDeploy(ctx, client, worker, app, appName, cliConfig)
 			if err != nil {
 				return fmt.Errorf("error parsing porter.yaml for build resources: %w", err)
 			}
@@ -251,12 +249,12 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 		name     string
 		funcName func(resource *switchboardModels.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error)
 	}{
-		{"deploy", NewDeployDriver},
-		{"build-image", preview.NewBuildDriver},
-		{"push-image", preview.NewPushDriver},
-		{"update-config", preview.NewUpdateConfigDriver},
+		{"deploy", NewDeployDriver(ctx, client, cliConfig)},
+		{"build-image", preview.NewBuildDriver(ctx, client, cliConfig)},
+		{"push-image", preview.NewPushDriver(ctx, client, cliConfig)},
+		{"update-config", preview.NewUpdateConfigDriver(ctx, client, cliConfig)},
 		{"random-string", preview.NewRandomStringDriver},
-		{"env-group", preview.NewEnvGroupDriver},
+		{"env-group", preview.NewEnvGroupDriver(ctx, client, cliConfig)},
 		{"os-env", preview.NewOSEnvDriver},
 	}
 	for _, driver := range drivers {
@@ -277,7 +275,7 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 			return
 		}
 
-		deploymentHook, err := NewDeploymentHook(client, resGroup, deplNamespace)
+		deploymentHook, err := NewDeploymentHook(cliConfig, client, resGroup, deplNamespace)
 		if err != nil {
 			err = fmt.Errorf("error creating deployment hook: %w", err)
 			return err
@@ -297,7 +295,7 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 		return err
 	}
 
-	cloneEnvGroupHook := NewCloneEnvGroupHook(client, resGroup)
+	cloneEnvGroupHook := NewCloneEnvGroupHook(client, cliConfig, resGroup)
 	err = worker.RegisterHook("cloneenvgroup", cloneEnvGroupHook)
 	if err != nil {
 		err = fmt.Errorf("error registering clone env group hook: %w", err)
@@ -367,47 +365,55 @@ func hasDeploymentHookEnvVars() bool {
 	return true
 }
 
+// DeployDriver contains all information needed for deploying with switchboard
 type DeployDriver struct {
 	source      *previewInt.Source
 	target      *previewInt.Target
 	output      map[string]interface{}
 	lookupTable *map[string]drivers.Driver
 	logger      *zerolog.Logger
+	cliConfig   config.CLIConfig
+	apiClient   api.Client
 }
 
-func NewDeployDriver(resource *switchboardModels.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
-	driver := &DeployDriver{
-		lookupTable: opts.DriverLookupTable,
-		logger:      opts.Logger,
-		output:      make(map[string]interface{}),
-	}
+// NewDeployDriver creates a deployment driver for use with switchboard
+func NewDeployDriver(ctx context.Context, apiClient api.Client, cliConfig config.CLIConfig) func(resource *switchboardModels.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+	return func(resource *switchboardModels.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+		driver := &DeployDriver{
+			lookupTable: opts.DriverLookupTable,
+			logger:      opts.Logger,
+			output:      make(map[string]interface{}),
+			cliConfig:   cliConfig,
+			apiClient:   apiClient,
+		}
 
-	target, err := preview.GetTarget(resource.Name, resource.Target)
-	if err != nil {
-		return nil, err
-	}
+		target, err := preview.GetTarget(ctx, resource.Name, resource.Target, apiClient, cliConfig)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.target = target
+		driver.target = target
 
-	source, err := preview.GetSource(target.Project, resource.Name, resource.Source)
-	if err != nil {
-		return nil, err
-	}
+		source, err := preview.GetSource(ctx, target.Project, resource.Name, resource.Source, apiClient)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.source = source
+		driver.source = source
 
-	return driver, nil
+		return driver, nil
+	}
 }
 
+// ShouldApply extends switchboard
 func (d *DeployDriver) ShouldApply(_ *switchboardModels.Resource) bool {
 	return true
 }
 
+// Apply extends switchboard
 func (d *DeployDriver) Apply(resource *switchboardModels.Resource) (*switchboardModels.Resource, error) {
-	ctx := context.Background()
-	client := config.GetAPIClient()
-
-	_, err := client.GetRelease(
+	ctx := context.TODO() // blocked from switchboard for now
+	_, err := d.apiClient.GetRelease(
 		ctx,
 		d.target.Project,
 		d.target.Cluster,
@@ -422,14 +428,14 @@ func (d *DeployDriver) Apply(resource *switchboardModels.Resource) (*switchboard
 	}
 
 	if d.source.IsApplication {
-		return d.applyApplication(ctx, resource, client, shouldCreate)
+		return d.applyApplication(ctx, resource, d.apiClient, shouldCreate)
 	}
 
-	return d.applyAddon(resource, client, shouldCreate)
+	return d.applyAddon(ctx, resource, d.apiClient, shouldCreate)
 }
 
 // Simple apply for addons
-func (d *DeployDriver) applyAddon(resource *switchboardModels.Resource, client *api.Client, shouldCreate bool) (*switchboardModels.Resource, error) {
+func (d *DeployDriver) applyAddon(ctx context.Context, resource *switchboardModels.Resource, client api.Client, shouldCreate bool) (*switchboardModels.Resource, error) {
 	addonConfig, err := d.getAddonConfig(resource)
 	if err != nil {
 		return nil, fmt.Errorf("error getting addon config for resource %s: %w", resource.Name, err)
@@ -437,7 +443,7 @@ func (d *DeployDriver) applyAddon(resource *switchboardModels.Resource, client *
 
 	if shouldCreate {
 		err := client.DeployAddon(
-			context.Background(),
+			ctx,
 			d.target.Project,
 			d.target.Cluster,
 			d.target.Namespace,
@@ -461,7 +467,7 @@ func (d *DeployDriver) applyAddon(resource *switchboardModels.Resource, client *
 		}
 
 		err = client.UpgradeRelease(
-			context.Background(),
+			ctx,
 			d.target.Project,
 			d.target.Cluster,
 			d.target.Namespace,
@@ -476,14 +482,14 @@ func (d *DeployDriver) applyAddon(resource *switchboardModels.Resource, client *
 		}
 	}
 
-	if err = d.assignOutput(resource, client); err != nil {
+	if err = d.assignOutput(ctx, resource, client); err != nil {
 		return nil, err
 	}
 
 	return resource, nil
 }
 
-func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboardModels.Resource, client *api.Client, shouldCreate bool) (*switchboardModels.Resource, error) {
+func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboardModels.Resource, client api.Client, shouldCreate bool) (*switchboardModels.Resource, error) {
 	if resource == nil {
 		return nil, fmt.Errorf("nil resource")
 	}
@@ -544,20 +550,20 @@ func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboa
 
 	if appConfig.Build.UseCache {
 		// set the docker config so that pack caching can use the repo credentials
-		err := config.SetDockerConfig(client)
+		err := config.SetDockerConfig(ctx, client, d.target.Project)
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	if shouldCreate {
-		resource, err = d.createApplication(resource, client, sharedOpts, appConfig)
+		resource, err = d.createApplication(ctx, resource, client, sharedOpts, appConfig)
 
 		if err != nil {
 			return nil, fmt.Errorf("error creating app from resource %s: %w", resourceName, err)
 		}
 	} else if !appConfig.OnlyCreate {
-		resource, err = d.updateApplication(resource, client, sharedOpts, appConfig)
+		resource, err = d.updateApplication(ctx, resource, client, sharedOpts, appConfig)
 
 		if err != nil {
 			return nil, fmt.Errorf("error updating application from resource %s: %w", resourceName, err)
@@ -566,7 +572,7 @@ func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboa
 		color.New(color.FgYellow).Printf("Skipping creation for resource %s as onlyCreate is set to true\n", resourceName)
 	}
 
-	if err = d.assignOutput(resource, client); err != nil {
+	if err = d.assignOutput(ctx, resource, client); err != nil {
 		return nil, err
 	}
 
@@ -592,7 +598,7 @@ func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboa
 			predeployEventResponseID = eventResponse.ID
 		}
 
-		err = wait.WaitForJob(client, &wait.WaitOpts{
+		err = wait.WaitForJob(ctx, client, &wait.WaitOpts{
 			ProjectID: d.target.Project,
 			ClusterID: d.target.Cluster,
 			Namespace: d.target.Namespace,
@@ -620,7 +626,7 @@ func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboa
 
 			if appConfig.OnlyCreate {
 				deleteJobErr := client.DeleteRelease(
-					context.Background(),
+					ctx,
 					d.target.Project,
 					d.target.Cluster,
 					d.target.Namespace,
@@ -659,7 +665,7 @@ func (d *DeployDriver) applyApplication(ctx context.Context, resource *switchboa
 	return resource, err
 }
 
-func (d *DeployDriver) createApplication(resource *switchboardModels.Resource, client *api.Client, sharedOpts *deploy.SharedOpts, appConf *previewInt.ApplicationConfig) (*switchboardModels.Resource, error) {
+func (d *DeployDriver) createApplication(ctx context.Context, resource *switchboardModels.Resource, client api.Client, sharedOpts *deploy.SharedOpts, appConf *previewInt.ApplicationConfig) (*switchboardModels.Resource, error) {
 	// create new release
 	color.New(color.FgGreen).Printf("Creating %s release: %s\n", d.source.Name, resource.Name)
 
@@ -698,17 +704,17 @@ func (d *DeployDriver) createApplication(resource *switchboardModels.Resource, c
 	var err error
 
 	if appConf.Build.Method == "registry" {
-		subdomain, err = createAgent.CreateFromRegistry(appConf.Build.Image, appConf.Values)
+		subdomain, err = createAgent.CreateFromRegistry(ctx, appConf.Build.Image, appConf.Values)
 	} else {
 		// if useCache is set, create the image repository first
 		if appConf.Build.UseCache {
-			regID, imageURL, err := createAgent.GetImageRepoURL(resource.Name, sharedOpts.Namespace)
+			regID, imageURL, err := createAgent.GetImageRepoURL(ctx, resource.Name, sharedOpts.Namespace)
 			if err != nil {
 				return nil, err
 			}
 
 			err = client.CreateRepository(
-				context.Background(),
+				ctx,
 				sharedOpts.ProjectID,
 				regID,
 				&types.CreateRegistryRepositoryRequest{
@@ -721,7 +727,7 @@ func (d *DeployDriver) createApplication(resource *switchboardModels.Resource, c
 			}
 		}
 
-		subdomain, err = createAgent.CreateFromDocker(appConf.Values, sharedOpts.OverrideTag, buildConfig)
+		subdomain, err = createAgent.CreateFromDocker(ctx, appConf.Values, sharedOpts.OverrideTag, buildConfig)
 	}
 
 	if err != nil {
@@ -731,14 +737,14 @@ func (d *DeployDriver) createApplication(resource *switchboardModels.Resource, c
 	return resource, handleSubdomainCreate(subdomain, err)
 }
 
-func (d *DeployDriver) updateApplication(resource *switchboardModels.Resource, client *api.Client, sharedOpts *deploy.SharedOpts, appConf *previewInt.ApplicationConfig) (*switchboardModels.Resource, error) {
+func (d *DeployDriver) updateApplication(ctx context.Context, resource *switchboardModels.Resource, client api.Client, sharedOpts *deploy.SharedOpts, appConf *previewInt.ApplicationConfig) (*switchboardModels.Resource, error) {
 	color.New(color.FgGreen).Println("Updating existing release:", resource.Name)
 
 	if len(appConf.Build.Env) > 0 {
 		sharedOpts.AdditionalEnv = appConf.Build.Env
 	}
 
-	updateAgent, err := deploy.NewDeployAgent(client, resource.Name, &deploy.DeployOpts{
+	updateAgent, err := deploy.NewDeployAgent(ctx, client, resource.Name, &deploy.DeployOpts{
 		SharedOpts: sharedOpts,
 		Local:      appConf.Build.Method != "registry",
 	})
@@ -748,7 +754,7 @@ func (d *DeployDriver) updateApplication(resource *switchboardModels.Resource, c
 
 	// if the build method is registry, we do not trigger a build
 	if appConf.Build.Method != "registry" {
-		buildEnv, err := updateAgent.GetBuildEnv(&deploy.GetBuildEnvOpts{
+		buildEnv, err := updateAgent.GetBuildEnv(ctx, &deploy.GetBuildEnvOpts{
 			UseNewConfig: true,
 			NewConfig:    appConf.Values,
 		})
@@ -771,14 +777,14 @@ func (d *DeployDriver) updateApplication(resource *switchboardModels.Resource, c
 			}
 		}
 
-		err = updateAgent.Build(buildConfig)
+		err = updateAgent.Build(ctx, buildConfig)
 
 		if err != nil {
 			return nil, err
 		}
 
 		if !appConf.Build.UseCache {
-			err = updateAgent.Push()
+			err = updateAgent.Push(ctx)
 
 			if err != nil {
 				return nil, err
@@ -802,7 +808,7 @@ func (d *DeployDriver) updateApplication(resource *switchboardModels.Resource, c
 		}
 	}
 
-	err = updateAgent.UpdateImageAndValues(appConf.Values)
+	err = updateAgent.UpdateImageAndValues(ctx, appConf.Values)
 	if err != nil {
 		return nil, err
 	}
@@ -810,9 +816,9 @@ func (d *DeployDriver) updateApplication(resource *switchboardModels.Resource, c
 	return resource, nil
 }
 
-func (d *DeployDriver) assignOutput(resource *switchboardModels.Resource, client *api.Client) error {
+func (d *DeployDriver) assignOutput(ctx context.Context, resource *switchboardModels.Resource, client api.Client) error {
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		d.target.Project,
 		d.target.Cluster,
 		d.target.Namespace,
@@ -827,6 +833,7 @@ func (d *DeployDriver) assignOutput(resource *switchboardModels.Resource, client
 	return nil
 }
 
+// Output extends switchboard
 func (d *DeployDriver) Output() (map[string]interface{}, error) {
 	return d.output, nil
 }
@@ -865,18 +872,22 @@ func (d *DeployDriver) getAddonConfig(resource *switchboardModels.Resource) (map
 	})
 }
 
+// DeploymentHook contains all information needed for deploying with switchboard
 type DeploymentHook struct {
-	client                                                                    *api.Client
+	client                                                                    api.Client
 	resourceGroup                                                             *switchboardTypes.ResourceGroup
 	gitInstallationID, projectID, clusterID, prID, actionID, envID            uint
 	branchFrom, branchInto, namespace, repoName, repoOwner, prName, commitSHA string
+	cliConfig                                                                 config.CLIConfig
 }
 
-func NewDeploymentHook(client *api.Client, resourceGroup *switchboardTypes.ResourceGroup, namespace string) (*DeploymentHook, error) {
+// NewDeploymentHook creates a new deployment using switchboard
+func NewDeploymentHook(cliConfig config.CLIConfig, client api.Client, resourceGroup *switchboardTypes.ResourceGroup, namespace string) (*DeploymentHook, error) {
 	res := &DeploymentHook{
 		client:        client,
 		resourceGroup: resourceGroup,
 		namespace:     namespace,
+		cliConfig:     cliConfig,
 	}
 
 	ghIDStr := os.Getenv("PORTER_GIT_INSTALLATION_ID")
@@ -895,13 +906,13 @@ func NewDeploymentHook(client *api.Client, resourceGroup *switchboardTypes.Resou
 
 	res.prID = uint(prID)
 
-	res.projectID = cliConf.Project
+	res.projectID = cliConfig.Project
 
 	if res.projectID == 0 {
 		return nil, fmt.Errorf("project id must be set")
 	}
 
-	res.clusterID = cliConf.Cluster
+	res.clusterID = cliConfig.Cluster
 
 	if res.clusterID == 0 {
 		return nil, fmt.Errorf("cluster id must be set")
@@ -944,13 +955,16 @@ func (t *DeploymentHook) isBranchDeploy() bool {
 	return t.branchFrom != "" && t.branchInto != "" && t.branchFrom == t.branchInto
 }
 
+// PreApply extends switchboard
 func (t *DeploymentHook) PreApply() error {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	if isSystemNamespace(t.namespace) {
 		color.New(color.FgYellow).Printf("attempting to deploy to system namespace '%s'\n", t.namespace)
 	}
 
 	envList, err := t.client.ListEnvironments(
-		context.Background(), t.projectID, t.clusterID,
+		ctx, t.projectID, t.clusterID,
 	)
 	if err != nil {
 		return err
@@ -974,7 +988,7 @@ func (t *DeploymentHook) PreApply() error {
 	}
 
 	nsList, err := t.client.GetK8sNamespaces(
-		context.Background(), t.projectID, t.clusterID,
+		ctx, t.projectID, t.clusterID,
 	)
 	if err != nil {
 		return fmt.Errorf("error fetching namespaces: %w", err)
@@ -1004,7 +1018,7 @@ func (t *DeploymentHook) PreApply() error {
 		}
 
 		// create the new namespace
-		_, err := t.client.CreateNewK8sNamespace(context.Background(), t.projectID, t.clusterID, createNS)
+		_, err := t.client.CreateNewK8sNamespace(ctx, t.projectID, t.clusterID, createNS)
 
 		if err != nil && !strings.Contains(err.Error(), "namespace already exists") {
 			// ignore the error if the namespace already exists
@@ -1018,7 +1032,7 @@ func (t *DeploymentHook) PreApply() error {
 
 	if t.isBranchDeploy() {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				Branch: t.branchFrom,
@@ -1026,7 +1040,7 @@ func (t *DeploymentHook) PreApply() error {
 		)
 	} else {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				PRNumber: t.prID,
@@ -1057,7 +1071,7 @@ func (t *DeploymentHook) PreApply() error {
 		}
 
 		_, err = t.client.CreateDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, createReq,
 		)
 	} else if err == nil {
@@ -1077,12 +1091,13 @@ func (t *DeploymentHook) PreApply() error {
 			updateReq.PRNumber = 0
 		}
 
-		_, err = t.client.UpdateDeployment(context.Background(), t.projectID, t.clusterID, updateReq)
+		_, err = t.client.UpdateDeployment(ctx, t.projectID, t.clusterID, updateReq)
 	}
 
 	return err
 }
 
+// DataQueries extends switchboard
 func (t *DeploymentHook) DataQueries() map[string]interface{} {
 	res := make(map[string]interface{})
 
@@ -1143,7 +1158,10 @@ func (t *DeploymentHook) DataQueries() map[string]interface{} {
 	return res
 }
 
+// PostApply extends switchboard
 func (t *DeploymentHook) PostApply(populatedData map[string]interface{}) error {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	subdomains := make([]string, 0)
 
 	for _, data := range populatedData {
@@ -1171,8 +1189,8 @@ func (t *DeploymentHook) PostApply(populatedData map[string]interface{}) error {
 	}
 
 	for _, res := range t.resourceGroup.Resources {
-		releaseType := getReleaseType(t.projectID, res)
-		releaseName := getReleaseName(res)
+		releaseType := getReleaseType(ctx, t.projectID, res, t.client)
+		releaseName := getReleaseName(ctx, res, t.client, t.cliConfig)
 
 		if releaseType != "" && releaseName != "" {
 			req.SuccessfulResources = append(req.SuccessfulResources, &types.SuccessfullyDeployedResource{
@@ -1183,17 +1201,20 @@ func (t *DeploymentHook) PostApply(populatedData map[string]interface{}) error {
 	}
 
 	// finalize the deployment
-	_, err := t.client.FinalizeDeployment(context.Background(), t.projectID, t.clusterID, req)
+	_, err := t.client.FinalizeDeployment(ctx, t.projectID, t.clusterID, req)
 
 	return err
 }
 
+// OnError extends switchboard
 func (t *DeploymentHook) OnError(error) {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	var deplErr error
 
 	if t.isBranchDeploy() {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				Branch: t.branchFrom,
@@ -1201,7 +1222,7 @@ func (t *DeploymentHook) OnError(error) {
 		)
 	} else {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				PRNumber: t.prID,
@@ -1228,16 +1249,19 @@ func (t *DeploymentHook) OnError(error) {
 		}
 
 		// FIXME: try to use the error with a custom logger
-		t.client.UpdateDeploymentStatus(context.Background(), t.projectID, t.clusterID, req)
+		t.client.UpdateDeploymentStatus(ctx, t.projectID, t.clusterID, req) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 }
 
+// OnConsolidatedErrors extends switchboard
 func (t *DeploymentHook) OnConsolidatedErrors(allErrors map[string]error) {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	var deplErr error
 
 	if t.isBranchDeploy() {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				Branch: t.branchFrom,
@@ -1245,7 +1269,7 @@ func (t *DeploymentHook) OnConsolidatedErrors(allErrors map[string]error) {
 		)
 	} else {
 		_, deplErr = t.client.GetDeployment(
-			context.Background(),
+			ctx,
 			t.projectID, t.clusterID, t.envID,
 			&types.GetDeploymentRequest{
 				PRNumber: t.prID,
@@ -1270,8 +1294,8 @@ func (t *DeploymentHook) OnConsolidatedErrors(allErrors map[string]error) {
 		for _, res := range t.resourceGroup.Resources {
 			if _, ok := allErrors[res.Name]; !ok {
 				req.SuccessfulResources = append(req.SuccessfulResources, &types.SuccessfullyDeployedResource{
-					ReleaseName: getReleaseName(res),
-					ReleaseType: getReleaseType(t.projectID, res),
+					ReleaseName: getReleaseName(ctx, res, t.client, t.cliConfig),
+					ReleaseType: getReleaseType(ctx, t.projectID, res, t.client),
 				})
 			}
 		}
@@ -1281,23 +1305,29 @@ func (t *DeploymentHook) OnConsolidatedErrors(allErrors map[string]error) {
 		}
 
 		// FIXME: handle the error
-		t.client.FinalizeDeploymentWithErrors(context.Background(), t.projectID, t.clusterID, req)
+		t.client.FinalizeDeploymentWithErrors(ctx, t.projectID, t.clusterID, req) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 }
 
+// CloneEnvGroupHook contains all information needed to clone an env group
 type CloneEnvGroupHook struct {
-	client   *api.Client
-	resGroup *switchboardTypes.ResourceGroup
+	client    api.Client
+	resGroup  *switchboardTypes.ResourceGroup
+	cliConfig config.CLIConfig
 }
 
-func NewCloneEnvGroupHook(client *api.Client, resourceGroup *switchboardTypes.ResourceGroup) *CloneEnvGroupHook {
+// NewCloneEnvGroupHook wraps switchboard for cloning env groups
+func NewCloneEnvGroupHook(client api.Client, cliConfig config.CLIConfig, resourceGroup *switchboardTypes.ResourceGroup) *CloneEnvGroupHook {
 	return &CloneEnvGroupHook{
-		client:   client,
-		resGroup: resourceGroup,
+		client:    client,
+		cliConfig: cliConfig,
+		resGroup:  resourceGroup,
 	}
 }
 
 func (t *CloneEnvGroupHook) PreApply() error {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	for _, res := range t.resGroup.Resources {
 		if res.Driver == "env-group" {
 			continue
@@ -1311,7 +1341,7 @@ func (t *CloneEnvGroupHook) PreApply() error {
 		}
 
 		if appConf != nil && len(appConf.EnvGroups) > 0 {
-			target, err := preview.GetTarget(res.Name, res.Target)
+			target, err := preview.GetTarget(ctx, res.Name, res.Target, t.client, t.cliConfig)
 			if err != nil {
 				return err
 			}
@@ -1322,7 +1352,7 @@ func (t *CloneEnvGroupHook) PreApply() error {
 				}
 
 				_, err := t.client.GetEnvGroup(
-					context.Background(),
+					ctx,
 					target.Project,
 					target.Cluster,
 					target.Namespace,
@@ -1344,7 +1374,7 @@ func (t *CloneEnvGroupHook) PreApply() error {
 							group.Name, group.Namespace, target.Namespace)
 
 					_, err = t.client.CloneEnvGroup(
-						context.Background(), target.Project, target.Cluster, group.Namespace,
+						ctx, target.Project, target.Cluster, group.Namespace,
 						&types.CloneEnvGroupRequest{
 							SourceName:      group.Name,
 							TargetNamespace: target.Namespace,
@@ -1376,10 +1406,10 @@ func (t *CloneEnvGroupHook) OnError(error) {}
 
 func (t *CloneEnvGroupHook) OnConsolidatedErrors(map[string]error) {}
 
-func getReleaseName(res *switchboardTypes.Resource) string {
+func getReleaseName(ctx context.Context, res *switchboardTypes.Resource, apiClient api.Client, cliConfig config.CLIConfig) string {
 	// can ignore the error because this method is called once
 	// GetTarget has already been called and validated previously
-	target, _ := preview.GetTarget(res.Name, res.Target)
+	target, _ := preview.GetTarget(ctx, res.Name, res.Target, apiClient, cliConfig)
 
 	if target.AppName != "" {
 		return target.AppName
@@ -1388,10 +1418,10 @@ func getReleaseName(res *switchboardTypes.Resource) string {
 	return res.Name
 }
 
-func getReleaseType(projectID uint, res *switchboardTypes.Resource) string {
+func getReleaseType(ctx context.Context, projectID uint, res *switchboardTypes.Resource, apiClient api.Client) string {
 	// can ignore the error because this method is called once
 	// GetSource has already been called and validated previously
-	source, _ := preview.GetSource(projectID, res.Name, res.Source)
+	source, _ := preview.GetSource(ctx, projectID, res.Name, res.Source, apiClient)
 
 	if source != nil && source.Name != "" {
 		return source.Name
@@ -1410,7 +1440,8 @@ func isSystemNamespace(namespace string) bool {
 
 type ErrorEmitterHook struct{}
 
-func NewErrorEmitterHook(*api.Client, *switchboardTypes.ResourceGroup) *ErrorEmitterHook {
+// NewErrorEmitterHook handles switchboard errors
+func NewErrorEmitterHook(api.Client, *switchboardTypes.ResourceGroup) *ErrorEmitterHook {
 	return &ErrorEmitterHook{}
 }
 

+ 56 - 30
cli/cmd/auth.go

@@ -24,7 +24,7 @@ var loginCmd = &cobra.Command{
 	Use:   "login",
 	Short: "Authorizes a user for a given Porter server",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := login()
+		err := login(cmd.Context())
 		if err != nil {
 			color.Red("Error logging in: %s\n", err.Error())
 			os.Exit(1)
@@ -36,7 +36,7 @@ var registerCmd = &cobra.Command{
 	Use:   "register",
 	Short: "Creates a user for a given Porter server",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := register()
+		err := register(cmd.Context())
 		if err != nil {
 			color.Red("Error registering: %s\n", err.Error())
 			os.Exit(1)
@@ -48,7 +48,7 @@ var logoutCmd = &cobra.Command{
 	Use:   "logout",
 	Short: "Logs a user out of a given Porter server",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, logout)
+		err := checkLoginAndRun(cmd.Context(), args, logout)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -72,10 +72,21 @@ func init() {
 	)
 }
 
-func login() error {
-	client := api.NewClientWithToken(cliConf.Host+"/api", cliConf.Token)
+func login(ctx context.Context) error {
+	cliConf, err := config.InitAndLoadConfig()
+	if err != nil {
+		return fmt.Errorf("error loading porter config: %w", err)
+	}
 
-	user, err := client.AuthCheck(context.Background())
+	client, err := api.NewClientWithConfig(ctx, api.NewClientInput{
+		BaseURL:     fmt.Sprintf("%s/api", cliConf.Host),
+		BearerToken: cliConf.Token,
+	})
+	if err != nil {
+		return fmt.Errorf("error creating porter API client: %w", err)
+	}
+
+	user, err := client.AuthCheck(ctx)
 
 	if err == nil {
 		// set the token if the user calls login with the --token flag or the PORTER_TOKEN env
@@ -91,7 +102,7 @@ func login() error {
 			// if project ID does not exist for the token, this is a user-issued CLI token, so the project
 			// ID should be queried
 			if !exists {
-				err = setProjectForUser(client, user.ID)
+				err = setProjectForUser(ctx, client, cliConf, user.ID)
 
 				if err != nil {
 					return err
@@ -99,13 +110,13 @@ func login() error {
 			} else {
 				// if the project ID does exist for the token, this is a project-issued token, and
 				// the project should be set automatically
-				err = cliConf.SetProject(projID)
+				err = cliConf.SetProject(ctx, client, projID)
 
 				if err != nil {
 					return err
 				}
 
-				err = setProjectCluster(client, projID)
+				err = setProjectCluster(ctx, client, cliConf, projID)
 
 				if err != nil {
 					return err
@@ -120,7 +131,7 @@ func login() error {
 
 	// check for the --manual flag
 	if manual {
-		return loginManual()
+		return loginManual(ctx, cliConf, client)
 	}
 
 	// log the user in
@@ -136,9 +147,15 @@ func login() error {
 		return err
 	}
 
-	client = api.NewClientWithToken(cliConf.Host+"/api", token)
+	client, err = api.NewClientWithConfig(ctx, api.NewClientInput{
+		BaseURL:     fmt.Sprintf("%s/api", cliConf.Host),
+		BearerToken: token,
+	})
+	if err != nil {
+		return fmt.Errorf("error creating porter API client: %w", err)
+	}
 
-	user, err = client.AuthCheck(context.Background())
+	user, err = client.AuthCheck(ctx)
 
 	if err != nil {
 		color.Red("Invalid token.")
@@ -147,12 +164,12 @@ func login() error {
 
 	color.New(color.FgGreen).Println("Successfully logged in!")
 
-	return setProjectForUser(client, user.ID)
+	return setProjectForUser(ctx, client, cliConf, user.ID)
 }
 
-func setProjectForUser(client *api.Client, userID uint) error {
+func setProjectForUser(ctx context.Context, client api.Client, config config.CLIConfig, _ uint) error {
 	// get a list of projects, and set the current project
-	resp, err := client.ListUserProjects(context.Background())
+	resp, err := client.ListUserProjects(ctx)
 	if err != nil {
 		return err
 	}
@@ -160,9 +177,9 @@ func setProjectForUser(client *api.Client, userID uint) error {
 	projects := *resp
 
 	if len(projects) > 0 {
-		cliConf.SetProject(projects[0].ID)
+		config.SetProject(ctx, client, projects[0].ID) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
-		err = setProjectCluster(client, projects[0].ID)
+		err = setProjectCluster(ctx, client, config, projects[0].ID)
 
 		if err != nil {
 			return err
@@ -172,9 +189,7 @@ func setProjectForUser(client *api.Client, userID uint) error {
 	return nil
 }
 
-func loginManual() error {
-	client := api.NewClient(cliConf.Host+"/api", "cookie.json")
-
+func loginManual(ctx context.Context, cliConf config.CLIConfig, client api.Client) error {
 	var username, pw string
 
 	fmt.Println("Please log in with an email and password:")
@@ -190,7 +205,7 @@ func loginManual() error {
 		return err
 	}
 
-	_, err = client.Login(context.Background(), &types.LoginUserRequest{
+	_, err = client.Login(ctx, &types.LoginUserRequest{
 		Email:    username,
 		Password: pw,
 	})
@@ -205,7 +220,7 @@ func loginManual() error {
 	color.New(color.FgGreen).Println("Successfully logged in!")
 
 	// get a list of projects, and set the current project
-	resp, err := client.ListUserProjects(context.Background())
+	resp, err := client.ListUserProjects(ctx)
 	if err != nil {
 		return err
 	}
@@ -213,9 +228,9 @@ func loginManual() error {
 	projects := *resp
 
 	if len(projects) > 0 {
-		cliConf.SetProject(projects[0].ID)
+		cliConf.SetProject(ctx, client, projects[0].ID) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
-		err = setProjectCluster(client, projects[0].ID)
+		err = setProjectCluster(ctx, client, cliConf, projects[0].ID)
 
 		if err != nil {
 			return err
@@ -225,7 +240,20 @@ func loginManual() error {
 	return nil
 }
 
-func register() error {
+func register(ctx context.Context) error {
+	config, err := config.InitAndLoadConfig()
+	if err != nil {
+		return fmt.Errorf("error loading porter config: %w", err)
+	}
+
+	client, err := api.NewClientWithConfig(ctx, api.NewClientInput{
+		BaseURL:     fmt.Sprintf("%s/api", config.Host),
+		BearerToken: config.Token,
+	})
+	if err != nil {
+		return fmt.Errorf("error creating porter API client: %w", err)
+	}
+
 	fmt.Println("Please register your admin account with an email and password:")
 
 	username, err := utils.PromptPlaintext("Email: ")
@@ -238,9 +266,7 @@ func register() error {
 		return err
 	}
 
-	client := config.GetAPIClient()
-
-	resp, err := client.CreateUser(context.Background(), &types.CreateUserRequest{
+	resp, err := client.CreateUser(ctx, &types.CreateUserRequest{
 		Email:    username,
 		Password: pw,
 	})
@@ -253,8 +279,8 @@ func register() error {
 	return nil
 }
 
-func logout(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	err := client.Logout(context.Background())
+func logout(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	err := client.Logout(ctx)
 	if err != nil {
 		return err
 	}

+ 24 - 23
cli/cmd/bluegreen.go

@@ -6,6 +6,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	"github.com/fatih/color"
@@ -26,7 +27,7 @@ var bluegreenCmd = &cobra.Command{
 	Use:   "blue-green-switch",
 	Short: "Automatically switches the traffic of a blue-green deployment once the new application is ready.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, bluegreenSwitch)
+		err := checkLoginAndRun(cmd.Context(), args, bluegreenSwitch)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -61,10 +62,8 @@ func init() {
 	)
 }
 
-func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
-	project, err := client.GetProject(ctx, cliConf.Project)
+func bluegreenSwitch(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error {
+	project, err := client.GetProject(ctx, cliConfig.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
 	}
@@ -78,7 +77,7 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	}
 
 	// get the web release
-	webRelease, err := client.GetRelease(context.Background(), cliConf.Project, cliConf.Cluster, namespace, app)
+	webRelease, err := client.GetRelease(ctx, cliConfig.Project, cliConfig.Cluster, namespace, app)
 	if err != nil {
 		return err
 	}
@@ -91,11 +90,11 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	currActiveImage := deploy.GetCurrActiveBlueGreenImage(webRelease.Config)
 
 	sharedConf := &PorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err = sharedConf.setSharedConfig()
-
+	err = sharedConf.setSharedConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
 	}
@@ -111,7 +110,7 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	for time.Now().Before(timeWait) {
 		// refresh the client every 10 minutes
 		if time.Now().After(prevRefresh.Add(10 * time.Minute)) {
-			err = sharedConf.setSharedConfig()
+			err = sharedConf.setSharedConfig(ctx)
 
 			if err != nil {
 				return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
@@ -121,7 +120,7 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 		}
 
 		depls, err := sharedConf.Clientset.AppsV1().Deployments(namespace).List(
-			context.Background(),
+			ctx,
 			metav1.ListOptions{
 				LabelSelector: fmt.Sprintf("app.kubernetes.io/instance=%s", app),
 			},
@@ -146,13 +145,13 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 					// push the deployment
 					color.New(color.FgGreen).Printf("Switching traffic for app %s\n", app)
 
-					deployAgent, err := updateGetAgent(client)
+					deployAgent, err := updateGetAgent(ctx, client, cliConfig)
 					if err != nil {
 						return err
 					}
 
 					if currActiveImage == "" {
-						err = deployAgent.UpdateImageAndValues(map[string]interface{}{
+						err = deployAgent.UpdateImageAndValues(ctx, map[string]interface{}{
 							"bluegreen": map[string]interface{}{
 								"enabled":                  true,
 								"disablePrimaryDeployment": true,
@@ -161,7 +160,7 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 							},
 						})
 					} else {
-						err = deployAgent.UpdateImageAndValues(map[string]interface{}{
+						err = deployAgent.UpdateImageAndValues(ctx, map[string]interface{}{
 							"bluegreen": map[string]interface{}{
 								"enabled":                  true,
 								"disablePrimaryDeployment": true,
@@ -199,19 +198,21 @@ func bluegreenSwitch(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	// wait 30 seconds before removing old deployment
 	time.Sleep(30 * time.Second)
 
-	deployAgent, err := updateGetAgent(client)
+	deployAgent, err := updateGetAgent(ctx, client, cliConfig)
 	if err != nil {
 		return err
 	}
 
-	err = deployAgent.UpdateImageAndValues(map[string]interface{}{
-		"bluegreen": map[string]interface{}{
-			"enabled":                  true,
-			"disablePrimaryDeployment": true,
-			"activeImageTag":           tag,
-			"imageTags":                []string{tag},
-		},
-	})
+	err = deployAgent.UpdateImageAndValues( //nolint - do not want to change logic. New linter error
+		ctx,
+		map[string]interface{}{
+			"bluegreen": map[string]interface{}{
+				"enabled":                  true,
+				"disablePrimaryDeployment": true,
+				"activeImageTag":           tag,
+				"imageTags":                []string{tag},
+			},
+		})
 
 	return nil
 }

+ 10 - 9
cli/cmd/cluster.go

@@ -11,6 +11,7 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 )
@@ -27,7 +28,7 @@ var clusterListCmd = &cobra.Command{
 	Use:   "list",
 	Short: "Lists the linked clusters in the current project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listClusters)
+		err := checkLoginAndRun(cmd.Context(), args, listClusters)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -39,7 +40,7 @@ var clusterDeleteCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Deletes the cluster with the given id",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteCluster)
+		err := checkLoginAndRun(cmd.Context(), args, deleteCluster)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -56,7 +57,7 @@ var clusterNamespaceListCmd = &cobra.Command{
 	Use:   "list",
 	Short: "Lists the namespaces in a cluster",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listNamespaces)
+		err := checkLoginAndRun(cmd.Context(), args, listNamespaces)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -73,8 +74,8 @@ func init() {
 	clusterNamespaceCmd.AddCommand(clusterNamespaceListCmd)
 }
 
-func listClusters(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	resp, err := client.ListProjectClusters(context.Background(), cliConf.Project)
+func listClusters(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	resp, err := client.ListProjectClusters(ctx, cliConf.Project)
 	if err != nil {
 		return err
 	}
@@ -101,7 +102,7 @@ func listClusters(user *types.GetAuthenticatedUserResponse, client *api.Client,
 	return nil
 }
 
-func deleteCluster(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func deleteCluster(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	userResp, err := utils.PromptPlaintext(
 		fmt.Sprintf(
 			`Are you sure you'd like to delete the cluster with id %s? %s `,
@@ -119,7 +120,7 @@ func deleteCluster(user *types.GetAuthenticatedUserResponse, client *api.Client,
 			return err
 		}
 
-		err = client.DeleteProjectCluster(context.Background(), cliConf.Project, uint(id))
+		err = client.DeleteProjectCluster(ctx, cliConf.Project, uint(id))
 
 		if err != nil {
 			return err
@@ -131,7 +132,7 @@ func deleteCluster(user *types.GetAuthenticatedUserResponse, client *api.Client,
 	return nil
 }
 
-func listNamespaces(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listNamespaces(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	pID := cliConf.Project
 
 	// get the service account based on the cluster id
@@ -139,7 +140,7 @@ func listNamespaces(user *types.GetAuthenticatedUserResponse, client *api.Client
 
 	// get the list of namespaces
 	namespaceList, err := client.GetK8sNamespaces(
-		context.Background(),
+		ctx,
 		pID,
 		cID,
 	)

+ 62 - 18
cli/cmd/config.go

@@ -14,12 +14,12 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
-	cliConfig "github.com/porter-dev/porter/cli/cmd/config"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 )
 
-var cliConf = cliConfig.GetCLIConfig()
+// var cliConf = cliConfig.GetCLIConfig()
 
 var configCmd = &cobra.Command{
 	Use:   "config",
@@ -37,22 +37,36 @@ var configSetProjectCmd = &cobra.Command{
 	Args:  cobra.MaximumNArgs(1),
 	Short: "Saves the project id in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %s\n", err.Error()) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
+		client, err := api.NewClientWithConfig(cmd.Context(), api.NewClientInput{
+			BaseURL:        fmt.Sprintf("%s/api", cliConf.Host),
+			BearerToken:    cliConf.Token,
+			CookieFileName: "cookie.json",
+		})
+		if err != nil {
+			_, _ = color.New(color.FgRed).Fprintf(os.Stderr, "error creating porter API client: %s\n", err.Error())
+			os.Exit(1)
+		}
+
 		if len(args) == 0 {
-			err := checkLoginAndRun(args, listAndSetProject)
+			err := checkLoginAndRun(cmd.Context(), args, listAndSetProject)
 			if err != nil {
 				os.Exit(1)
 			}
 		} else {
 			projID, err := strconv.ParseUint(args[0], 10, 64)
 			if err != nil {
-				color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %v\n", err)
+				_, _ = color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %s\n", err.Error())
 				os.Exit(1)
 			}
 
-			err = cliConf.SetProject(uint(projID))
-
+			err = cliConf.SetProject(cmd.Context(), client, uint(projID))
 			if err != nil {
-				color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %v\n", err)
+				_, _ = color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %s\n", err.Error())
 				os.Exit(1)
 			}
 		}
@@ -64,8 +78,13 @@ var configSetClusterCmd = &cobra.Command{
 	Args:  cobra.MaximumNArgs(1),
 	Short: "Saves the cluster id in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %v\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
 		if len(args) == 0 {
-			err := checkLoginAndRun(args, listAndSetCluster)
+			err := checkLoginAndRun(cmd.Context(), args, listAndSetCluster)
 			if err != nil {
 				os.Exit(1)
 			}
@@ -91,8 +110,14 @@ var configSetRegistryCmd = &cobra.Command{
 	Args:  cobra.MaximumNArgs(1),
 	Short: "Saves the registry id in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %v\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
+
 		if len(args) == 0 {
-			err := checkLoginAndRun(args, listAndSetRegistry)
+			err := checkLoginAndRun(cmd.Context(), args, listAndSetRegistry)
 			if err != nil {
 				os.Exit(1)
 			}
@@ -118,6 +143,12 @@ var configSetHelmRepoCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Saves the helm repo id in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %v\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
+
 		hrID, err := strconv.ParseUint(args[0], 10, 64)
 		if err != nil {
 			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %v\n", err)
@@ -138,7 +169,12 @@ var configSetHostCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Saves the host in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := cliConf.SetHost(args[0])
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %v\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
+		err = cliConf.SetHost(args[0])
 		if err != nil {
 			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %v\n", err)
 			os.Exit(1)
@@ -151,7 +187,12 @@ var configSetKubeconfigCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Saves the path to kubeconfig in the default configuration",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := cliConf.SetKubeconfig(args[0])
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred loading config: %v\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
+		err = cliConf.SetKubeconfig(args[0])
 		if err != nil {
 			color.New(color.FgRed).Fprintf(os.Stderr, "An error occurred: %v\n", err)
 			os.Exit(1)
@@ -181,13 +222,13 @@ func printConfig() error {
 	return nil
 }
 
-func listAndSetProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listAndSetProject(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	s := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
 	s.Color("cyan")
 	s.Suffix = " Loading list of projects"
 	s.Start()
 
-	resp, err := client.ListUserProjects(context.Background())
+	resp, err := client.ListUserProjects(ctx)
 
 	s.Stop()
 
@@ -217,18 +258,21 @@ func listAndSetProject(_ *types.GetAuthenticatedUserResponse, client *api.Client
 		projID = uint64((*resp)[0].ID)
 	}
 
-	cliConf.SetProject(uint(projID))
+	err = cliConf.SetProject(ctx, client, uint(projID))
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
 
-func listAndSetCluster(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listAndSetCluster(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	s := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
 	s.Color("cyan")
 	s.Suffix = " Loading list of clusters"
 	s.Start()
 
-	resp, err := client.ListProjectClusters(context.Background(), cliConf.Project)
+	resp, err := client.ListProjectClusters(ctx, cliConf.Project)
 
 	s.Stop()
 
@@ -262,13 +306,13 @@ func listAndSetCluster(_ *types.GetAuthenticatedUserResponse, client *api.Client
 	return nil
 }
 
-func listAndSetRegistry(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listAndSetRegistry(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	s := spinner.New(spinner.CharSets[9], 100*time.Millisecond)
 	s.Color("cyan")
 	s.Suffix = " Loading list of registries"
 	s.Start()
 
-	resp, err := client.ListRegistries(context.Background(), cliConf.Project)
+	resp, err := client.ListRegistries(ctx, cliConf.Project)
 
 	s.Stop()
 

+ 127 - 96
cli/cmd/config/config.go

@@ -4,7 +4,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -19,7 +18,7 @@ import (
 var home = homedir.HomeDir()
 
 // config is a shared object used by all commands
-var config = &CLIConfig{}
+// var config = &CLIConfig{}
 
 // CLIConfig is the set of shared configuration options for the CLI commands.
 // This config is used by viper: calling Set() function for any parameter will
@@ -45,78 +44,31 @@ type CLIConfig struct {
 // 2. env
 // 3. config
 // 4. default
-//
-// It populates the shared config object above
-func InitAndLoadConfig() {
-	initAndLoadConfig(config)
+func InitAndLoadConfig() (CLIConfig, error) {
+	return initAndLoadConfig()
 }
 
-func InitAndLoadNewConfig() *CLIConfig {
-	newConfig := &CLIConfig{}
+func initAndLoadConfig() (CLIConfig, error) {
+	var config CLIConfig
 
-	initAndLoadConfig(newConfig)
-
-	return newConfig
-}
-
-func initAndLoadConfig(_config *CLIConfig) {
-	initFlagSet()
-
-	// check that the .porter folder exists; create if not
-	porterDir := filepath.Join(home, ".porter")
-
-	if _, err := os.Stat(porterDir); os.IsNotExist(err) {
-		os.Mkdir(porterDir, 0o700)
-	} else if err != nil {
-		color.New(color.FgRed).Fprintf(os.Stderr, "%v\n", err)
-		os.Exit(1)
+	porterDir, err := getOrCreatePorterDirectoryAndConfig()
+	if err != nil {
+		return config, fmt.Errorf("unable to get or create porter directory: %w", err)
 	}
-
 	viper.SetConfigName("porter")
 	viper.SetConfigType("yaml")
 	viper.AddConfigPath(porterDir)
 
-	// Bind the flagset initialized above
-	viper.BindPFlags(utils.DriverFlagSet)
-	viper.BindPFlags(utils.DefaultFlagSet)
-	viper.BindPFlags(utils.RegistryFlagSet)
-	viper.BindPFlags(utils.HelmRepoFlagSet)
-
-	// Bind the environment variables with prefix "PORTER_"
-	viper.SetEnvPrefix("PORTER")
-	viper.BindEnv("host")
-	viper.BindEnv("project")
-	viper.BindEnv("cluster")
-	viper.BindEnv("token")
-
-	err := viper.ReadInConfig()
-	if err != nil {
-		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
-			// create blank config file
-			err := ioutil.WriteFile(filepath.Join(home, ".porter", "porter.yaml"), []byte{}, 0o644)
-			if err != nil {
-				color.New(color.FgRed).Fprintf(os.Stderr, "%v\n", err)
-				os.Exit(1)
-			}
-		} else {
-			// Config file was found but another error was produced
-			color.New(color.FgRed).Fprintf(os.Stderr, "%v\n", err)
-			os.Exit(1)
-		}
-	}
-
-	// unmarshal the config into the shared config struct
-	viper.Unmarshal(_config)
-}
-
-// initFlagSet initializes the shared flags used by multiple commands
-func initFlagSet() {
 	utils.DriverFlagSet.StringVar(
 		&config.Driver,
 		"driver",
 		"local",
 		"driver to use (local or docker)",
 	)
+	err = viper.BindPFlags(utils.DriverFlagSet)
+	if err != nil {
+		return config, err
+	}
 
 	utils.DefaultFlagSet.StringVar(
 		&config.Host,
@@ -152,6 +104,10 @@ func initFlagSet() {
 		0,
 		"registry ID of connected Porter registry",
 	)
+	err = viper.BindPFlags(utils.RegistryFlagSet)
+	if err != nil {
+		return config, err
+	}
 
 	utils.HelmRepoFlagSet.UintVar(
 		&config.HelmRepo,
@@ -159,26 +115,102 @@ func initFlagSet() {
 		0,
 		"helm repo ID of connected Porter Helm repository",
 	)
-}
+	err = viper.BindPFlags(utils.HelmRepoFlagSet)
+	if err != nil {
+		return config, err
+	}
+	err = viper.BindPFlags(utils.DefaultFlagSet)
+	if err != nil {
+		return config, err
+	}
 
-func GetCLIConfig() *CLIConfig {
-	if config == nil {
-		panic("GetCLIConfig() called before initialisation")
+	viper.SetEnvPrefix("PORTER")
+	err = viper.BindEnv("host")
+	if err != nil {
+		return config, err
+	}
+	err = viper.BindEnv("project")
+	if err != nil {
+		return config, err
+	}
+	err = viper.BindEnv("cluster")
+	if err != nil {
+		return config, err
+	}
+	err = viper.BindEnv("token")
+	if err != nil {
+		return config, err
+	}
+
+	err = createAndLoadPorterYaml(porterDir)
+	if err != nil {
+		return config, fmt.Errorf("unable to load porter config: %w", err)
 	}
 
-	return config
+	err = viper.Unmarshal(&config)
+	if err != nil {
+		return config, fmt.Errorf("unable to unmarshal porter config: %w", err)
+	}
+
+	return config, nil
 }
 
-func GetAPIClient() *api.Client {
-	config := GetCLIConfig()
+// getOrCreatePorterDirectoryAndConfig checks that the .porter folder exists; create if not
+func getOrCreatePorterDirectoryAndConfig() (string, error) {
+	porterDir := filepath.Join(home, ".porter")
 
-	if token := config.Token; token != "" {
-		return api.NewClientWithToken(config.Host+"/api", token)
+	_, err := os.Stat(porterDir)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return "", fmt.Errorf("error reading porter directory: %w", err)
+		}
+		err = os.Mkdir(porterDir, 0o700)
+		if err != nil {
+			return "", fmt.Errorf("error creating porter directory: %w", err)
+		}
 	}
+	return porterDir, nil
+}
 
-	return api.NewClient(config.Host+"/api", "cookie.json")
+// createAndLoadPorterYaml loads a porter.yaml config into Viper if it exists, or creates the file if it does not
+func createAndLoadPorterYaml(porterDir string) error {
+	err := viper.ReadInConfig()
+	if err != nil {
+		_, ok := err.(viper.ConfigFileNotFoundError)
+		if !ok {
+			return fmt.Errorf("unknown error reading ~/.porter/porter.yaml config: %w", err)
+		}
+
+		err := os.WriteFile(filepath.Join(porterDir, "porter.yaml"), []byte{}, 0o644) //nolint:gosec // do not want to change program logic. Should be addressed later
+		if err != nil {
+			return fmt.Errorf("unable to create ~/.porter/porter.yaml config: %w", err)
+		}
+	}
+	return nil
 }
 
+// func GetCLIConfig() *CLIConfig {
+// 	if config == nil {
+// 		panic("GetCLIConfig() called before initialisation")
+// 	}
+
+// 	return config
+// }
+
+// func GetAPIClient() api.Client {
+// 	ctx := ctx
+
+// 	config := GetCLIConfig()
+
+// 	client := api.NewClientWithConfig(ctx, api.NewClientInput{
+// 		BaseURL:        fmt.Sprintf("%s/api", config.Host),
+// 		BearerToken:    config.Token,
+// 		CookieFileName: "cookie.json",
+// 	})
+
+// 	return client
+// }
+
 func (c *CLIConfig) SetDriver(driver string) error {
 	viper.Set("driver", driver)
 	color.New(color.FgGreen).Printf("Set the current driver as %s\n", driver)
@@ -187,7 +219,7 @@ func (c *CLIConfig) SetDriver(driver string) error {
 		return err
 	}
 
-	config.Driver = driver
+	c.Driver = driver
 
 	return nil
 }
@@ -210,20 +242,21 @@ func (c *CLIConfig) SetHost(host string) error {
 
 	color.New(color.FgGreen).Printf("Set the current host as %s\n", host)
 
-	config.Host = host
-	config.Project = 0
-	config.Cluster = 0
-	config.Token = ""
+	c.Host = host
+	c.Project = 0
+	c.Cluster = 0
+	c.Token = ""
 
 	return nil
 }
 
-func (c *CLIConfig) SetProject(projectID uint) error {
+// SetProject sets a project for all API commands
+func (c *CLIConfig) SetProject(ctx context.Context, apiClient api.Client, projectID uint) error {
 	viper.Set("project", projectID)
 
 	color.New(color.FgGreen).Printf("Set the current project as %d\n", projectID)
 
-	if config.Kubeconfig != "" || viper.IsSet("kubeconfig") {
+	if c.Kubeconfig != "" || viper.IsSet("kubeconfig") {
 		color.New(color.FgYellow).Println("Please change local kubeconfig if needed")
 	}
 
@@ -232,16 +265,13 @@ func (c *CLIConfig) SetProject(projectID uint) error {
 		return err
 	}
 
-	config.Project = projectID
+	c.Project = projectID
 
-	client := GetAPIClient()
-	if client != nil {
-		resp, err := client.ListProjectClusters(context.Background(), projectID)
-		if err == nil {
-			clusters := *resp
-			if len(clusters) == 1 {
-				c.SetCluster(clusters[0].ID)
-			}
+	resp, err := apiClient.ListProjectClusters(ctx, projectID)
+	if err == nil {
+		clusters := *resp
+		if len(clusters) == 1 {
+			_ = c.SetCluster(clusters[0].ID)
 		}
 	}
 
@@ -253,7 +283,7 @@ func (c *CLIConfig) SetCluster(clusterID uint) error {
 
 	color.New(color.FgGreen).Printf("Set the current cluster as %d\n", clusterID)
 
-	if config.Kubeconfig != "" || viper.IsSet("kubeconfig") {
+	if c.Kubeconfig != "" || viper.IsSet("kubeconfig") {
 		color.New(color.FgYellow).Println("Please change local kubeconfig if needed")
 	}
 
@@ -262,7 +292,7 @@ func (c *CLIConfig) SetCluster(clusterID uint) error {
 		return err
 	}
 
-	config.Cluster = clusterID
+	c.Cluster = clusterID
 
 	return nil
 }
@@ -274,7 +304,7 @@ func (c *CLIConfig) SetToken(token string) error {
 		return err
 	}
 
-	config.Token = token
+	c.Token = token
 
 	return nil
 }
@@ -287,7 +317,7 @@ func (c *CLIConfig) SetRegistry(registryID uint) error {
 		return err
 	}
 
-	config.Registry = registryID
+	c.Registry = registryID
 
 	return nil
 }
@@ -300,7 +330,7 @@ func (c *CLIConfig) SetHelmRepo(helmRepoID uint) error {
 		return err
 	}
 
-	config.HelmRepo = helmRepoID
+	c.HelmRepo = helmRepoID
 
 	return nil
 }
@@ -323,21 +353,22 @@ func (c *CLIConfig) SetKubeconfig(kubeconfig string) error {
 		return err
 	}
 
-	config.Kubeconfig = kubeconfig
+	c.Kubeconfig = kubeconfig
 
 	return nil
 }
 
-func ValidateCLIEnvironment() error {
-	if GetCLIConfig().Token == "" {
+// ValidateCLIEnvironment checks that all required variables are present for running the CLI
+func (c *CLIConfig) ValidateCLIEnvironment() error {
+	if c.Token == "" {
 		return fmt.Errorf("no auth token present, please run 'porter auth login' to authenticate")
 	}
 
-	if GetCLIConfig().Project == 0 {
+	if c.Project == 0 {
 		return fmt.Errorf("no project selected, please run 'porter config set-project' to select a project")
 	}
 
-	if GetCLIConfig().Cluster == 0 {
+	if c.Cluster == 0 {
 		return fmt.Errorf("no cluster selected, please run 'porter config set-cluster' to select a cluster")
 	}
 

+ 18 - 19
cli/cmd/config/docker.go

@@ -3,7 +3,6 @@ package config
 import (
 	"context"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"net/url"
@@ -19,15 +18,14 @@ import (
 	"github.com/porter-dev/porter/cli/cmd/github"
 )
 
-func SetDockerConfig(client *api.Client) error {
-	pID := GetCLIConfig().Project
-
+// SetDockerConfig sets up the docker config.json
+func SetDockerConfig(ctx context.Context, client api.Client, pID uint) error {
 	// get all registries that should be added
 	regToAdd := make([]string, 0)
 
 	// get the list of namespaces
 	resp, err := client.ListRegistries(
-		context.Background(),
+		ctx,
 		pID,
 	)
 	if err != nil {
@@ -77,14 +75,15 @@ func SetDockerConfig(client *api.Client) error {
 	}
 
 	// read the file bytes
-	configBytes, err := ioutil.ReadFile(dockerConfigFile)
-	if err != nil {
-		return err
-	}
+	// // TODO: STEFAN - figure out why we are parsing the ~/.docker/config.json into the CLI config. Are we using the variables somewhere?
+	// configBytes, err := ioutil.ReadFile(dockerConfigFile)
+	// if err != nil {
+	// 	return err
+	// }
 
 	// check if the docker credential helper exists
 	if !commandExists("docker-credential-porter") {
-		err := downloadCredMatchingRelease()
+		err := downloadCredMatchingRelease(ctx)
 		if err != nil {
 			color.New(color.FgRed).Println("Failed to download credential helper binary:", err.Error())
 			os.Exit(1)
@@ -99,7 +98,7 @@ func SetDockerConfig(client *api.Client) error {
 	err = cmdVersionCred.Run()
 
 	if err != nil || writer.Version != Version {
-		err := downloadCredMatchingRelease()
+		err := downloadCredMatchingRelease(ctx)
 		if err != nil {
 			color.New(color.FgRed).Println("Failed to download credential helper binary:", err.Error())
 			os.Exit(1)
@@ -110,11 +109,11 @@ func SetDockerConfig(client *api.Client) error {
 		Filename: dockerConfigFile,
 	}
 
-	err = json.Unmarshal(configBytes, GetCLIConfig())
-
-	if err != nil {
-		return err
-	}
+	// // TODO: STEFAN - figure out why we are parsing the ~/.docker/config.json into the CLI config. Are we using the variables somewhere?
+	// err = json.Unmarshal(configBytes, GetCLIConfig())
+	// if err != nil {
+	// 	return err
+	// }
 
 	if configFile.CredentialHelpers == nil {
 		configFile.CredentialHelpers = make(map[string]string)
@@ -138,7 +137,7 @@ func SetDockerConfig(client *api.Client) error {
 
 			if !isAuthenticated {
 				// get a dockerhub token from the Porter API
-				tokenResp, err := client.GetDockerhubAuthorizationToken(context.Background(), GetCLIConfig().Project)
+				tokenResp, err := client.GetDockerhubAuthorizationToken(ctx, pID)
 				if err != nil {
 					return err
 				}
@@ -176,7 +175,7 @@ func commandExists(cmd string) bool {
 	return err == nil
 }
 
-func downloadCredMatchingRelease() error {
+func downloadCredMatchingRelease(ctx context.Context) error {
 	// download the porter cred helper
 	z := &github.ZIPReleaseGetter{
 		AssetName:           "docker-credential-porter",
@@ -193,5 +192,5 @@ func downloadCredMatchingRelease() error {
 		},
 	}
 
-	return z.GetRelease(Version)
+	return z.GetRelease(ctx, Version)
 }

+ 27 - 16
cli/cmd/connect.go

@@ -1,10 +1,12 @@
 package cmd
 
 import (
+	"context"
 	"os"
 
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/connect"
 	"github.com/spf13/cobra"
 )
@@ -24,7 +26,7 @@ var connectKubeconfigCmd = &cobra.Command{
 	Use:   "kubeconfig",
 	Short: "Uses the local kubeconfig to add a cluster",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectKubeconfig)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectKubeconfig)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -35,7 +37,7 @@ var connectECRCmd = &cobra.Command{
 	Use:   "ecr",
 	Short: "Adds an ECR instance to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectECR)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectECR)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -46,7 +48,7 @@ var connectDockerhubCmd = &cobra.Command{
 	Use:   "dockerhub",
 	Short: "Adds a Docker Hub registry integration to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectDockerhub)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectDockerhub)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -57,7 +59,7 @@ var connectRegistryCmd = &cobra.Command{
 	Use:   "registry",
 	Short: "Adds a custom image registry to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectRegistry)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectRegistry)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -68,7 +70,7 @@ var connectHelmRepoCmd = &cobra.Command{
 	Use:   "helm",
 	Short: "Adds a custom Helm registry to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectHelmRepo)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectHelmRepo)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -79,7 +81,7 @@ var connectGCRCmd = &cobra.Command{
 	Use:   "gcr",
 	Short: "Adds a GCR instance to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectGCR)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectGCR)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -90,10 +92,11 @@ var connectGARCmd = &cobra.Command{
 	Use:   "gar",
 	Short: "Adds a GAR instance to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectGAR)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectGAR)
 		if err != nil {
 			os.Exit(1)
 		}
+		cmd.Context()
 	},
 }
 
@@ -101,7 +104,7 @@ var connectDOCRCmd = &cobra.Command{
 	Use:   "docr",
 	Short: "Adds a DOCR instance to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runConnectDOCR)
+		err := checkLoginAndRun(cmd.Context(), args, runConnectDOCR)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -136,7 +139,7 @@ func init() {
 	connectCmd.AddCommand(connectHelmRepoCmd)
 }
 
-func runConnectKubeconfig(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectKubeconfig(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	isLocal := false
 
 	if cliConf.Driver == "local" {
@@ -144,6 +147,7 @@ func runConnectKubeconfig(_ *types.GetAuthenticatedUserResponse, client *api.Cli
 	}
 
 	id, err := connect.Kubeconfig(
+		ctx,
 		client,
 		kubeconfigPath,
 		*contexts,
@@ -157,8 +161,9 @@ func runConnectKubeconfig(_ *types.GetAuthenticatedUserResponse, client *api.Cli
 	return cliConf.SetCluster(id)
 }
 
-func runConnectECR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectECR(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.ECR(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -169,8 +174,9 @@ func runConnectECR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectGCR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectGCR(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.GCR(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -181,8 +187,9 @@ func runConnectGCR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectGAR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectGAR(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.GAR(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -193,8 +200,9 @@ func runConnectGAR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectDOCR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectDOCR(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.DOCR(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -205,8 +213,9 @@ func runConnectDOCR(_ *types.GetAuthenticatedUserResponse, client *api.Client, _
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectDockerhub(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectDockerhub(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.Dockerhub(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -217,8 +226,9 @@ func runConnectDockerhub(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectRegistry(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectRegistry(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	regID, err := connect.Registry(
+		ctx,
 		client,
 		cliConf.Project,
 	)
@@ -229,8 +239,9 @@ func runConnectRegistry(_ *types.GetAuthenticatedUserResponse, client *api.Clien
 	return cliConf.SetRegistry(regID)
 }
 
-func runConnectHelmRepo(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func runConnectHelmRepo(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, _ []string) error {
 	hrID, err := connect.HelmRepo(
+		ctx,
 		client,
 		cliConf.Project,
 	)

+ 4 - 3
cli/cmd/connect/dockerhub.go

@@ -13,7 +13,8 @@ import (
 )
 
 func Dockerhub(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -46,7 +47,7 @@ func Dockerhub(
 
 	// create the basic auth integration
 	integration, err := client.CreateBasicAuthIntegration(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateBasicRequest{
 			Username: username,
@@ -60,7 +61,7 @@ func Dockerhub(
 	color.New(color.FgGreen).Printf("created basic auth integration with id %d\n", integration.ID)
 
 	reg, err := client.CreateRegistry(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateRegistryRequest{
 			URL:                fmt.Sprintf("index.docker.io/%s", repoName),

+ 4 - 3
cli/cmd/connect/docr.go

@@ -13,7 +13,8 @@ import (
 
 // DOCR creates a DOCR integration
 func DOCR(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -64,7 +65,7 @@ Registry URL: `))
 	}
 
 	reg, err := client.CreateRegistry(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateRegistryRequest{
 			Name:            regName,
@@ -76,7 +77,7 @@ Registry URL: `))
 	return reg.ID, nil
 }
 
-func triggerDigitalOceanOAuth(client *api.Client, projectID uint) (*types.OAuthIntegration, error) {
+func triggerDigitalOceanOAuth(client api.Client, projectID uint) (*types.OAuthIntegration, error) {
 	var doAuth *types.OAuthIntegration
 
 	oauthURL := fmt.Sprintf("%s/projects/%d/oauth/digitalocean", client.BaseURL, projectID)

+ 12 - 10
cli/cmd/connect/ecr.go

@@ -21,7 +21,8 @@ import (
 
 // ECR creates an ECR integration
 func ECR(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -52,13 +53,13 @@ Would you like to proceed? %s `,
 		creds, err := agent.CreateIAMECRUser(region)
 		if err != nil {
 			color.New(color.FgRed).Fprintf(os.Stderr, "Automatic creation failed, manual input required. Error was: %v\n", err)
-			return ecrManual(client, projectID, region)
+			return ecrManual(ctx, client, projectID, region)
 		}
 
 		waitForAuthorizationToken(region, creds)
 
 		integration, err := client.CreateAWSIntegration(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateAWSRequest{
 				AWSAccessKeyID:     creds.AWSAccessKeyID,
@@ -72,14 +73,15 @@ Would you like to proceed? %s `,
 
 		color.New(color.FgGreen).Printf("created aws integration with id %d\n", integration.ID)
 
-		return linkRegistry(client, projectID, integration.ID)
+		return linkRegistry(ctx, client, projectID, integration.ID)
 	}
 
-	return ecrManual(client, projectID, region)
+	return ecrManual(ctx, client, projectID, region)
 }
 
 func ecrManual(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 	region string,
 ) (uint, error) {
@@ -102,7 +104,7 @@ func ecrManual(
 
 	// create the aws integration
 	integration, err := client.CreateAWSIntegration(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateAWSRequest{
 			AWSAccessKeyID:     accessKeyID,
@@ -116,10 +118,10 @@ func ecrManual(
 
 	color.New(color.FgGreen).Printf("created aws integration with id %d\n", integration.ID)
 
-	return linkRegistry(client, projectID, integration.ID)
+	return linkRegistry(ctx, client, projectID, integration.ID)
 }
 
-func linkRegistry(client *api.Client, projectID uint, intID uint) (uint, error) {
+func linkRegistry(ctx context.Context, client api.Client, projectID uint, intID uint) (uint, error) {
 	// create the registry
 	// query for registry name
 	regName, err := utils.PromptPlaintext(fmt.Sprintf(`Give this registry a name: `))
@@ -128,7 +130,7 @@ func linkRegistry(client *api.Client, projectID uint, intID uint) (uint, error)
 	}
 
 	reg, err := client.CreateRegistry(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateRegistryRequest{
 			Name:             regName,

+ 4 - 3
cli/cmd/connect/gar.go

@@ -15,7 +15,8 @@ import (
 
 // GAR creates a GAR integration
 func GAR(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -39,7 +40,7 @@ Key file location: `)
 
 		// create the gcp integration
 		integration, err := client.CreateGCPIntegration(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateGCPRequest{
 				GCPKeyData: string(bytes),
@@ -81,7 +82,7 @@ Artifact registry region: `)
 		}
 
 		reg, err := client.CreateRegistry(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateRegistryRequest{
 				Name:             regName,

+ 4 - 3
cli/cmd/connect/gcr.go

@@ -15,7 +15,8 @@ import (
 
 // GCR creates a GCR integration
 func GCR(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -39,7 +40,7 @@ Key file location: `))
 
 		// create the gcp integration
 		integration, err := client.CreateGCPIntegration(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateGCPRequest{
 				GCPKeyData: string(bytes),
@@ -65,7 +66,7 @@ Registry URL: `))
 		}
 
 		reg, err := client.CreateRegistry(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateRegistryRequest{
 				Name:             regName,

+ 4 - 3
cli/cmd/connect/helmrepo.go

@@ -13,7 +13,8 @@ import (
 )
 
 func HelmRepo(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -53,7 +54,7 @@ Password:`)
 	if username != "" && password != "" {
 		// create the basic auth integration
 		integration, err := client.CreateBasicAuthIntegration(
-			context.Background(),
+			ctx,
 			projectID,
 			&types.CreateBasicRequest{
 				Username: username,
@@ -70,7 +71,7 @@ Password:`)
 	}
 
 	reg, err := client.CreateHelmRepo(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateUpdateHelmRepoRequest{
 			URL:                repoURL,

+ 8 - 5
cli/cmd/connect/kubeconfig.go

@@ -22,7 +22,8 @@ import (
 // Kubeconfig creates a service account for a project by parsing the local
 // kubeconfig and resolving actions that must be performed.
 func Kubeconfig(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	kubeconfigPath string,
 	contexts []string,
 	projectID uint,
@@ -41,7 +42,7 @@ func Kubeconfig(
 
 	// send kubeconfig to client
 	resp, err := client.CreateProjectCandidates(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateClusterCandidateRequest{
 			Kubeconfig: string(rawBytes),
@@ -166,6 +167,7 @@ func Kubeconfig(
 					}
 				case types.GCPKeyData:
 					err := resolveGCPKeyAction(
+						ctx,
 						cc.Server,
 						cc.Name,
 						allResolver,
@@ -189,7 +191,7 @@ func Kubeconfig(
 			}
 
 			resp, err := client.CreateProjectCluster(
-				context.Background(),
+				ctx,
 				projectID,
 				cc.ID,
 				allResolver,
@@ -203,7 +205,7 @@ func Kubeconfig(
 			cluster = &clExt
 		} else {
 			resp, err := client.GetProjectCluster(
-				context.Background(),
+				ctx,
 				projectID,
 				cc.CreatedClusterID,
 			)
@@ -306,6 +308,7 @@ func resolveTokenDataAction(
 
 // resolves a gcp key data action
 func resolveGCPKeyAction(
+	ctx context.Context,
 	endpoint string,
 	clusterName string,
 	resolver *types.ClusterResolverAll,
@@ -325,7 +328,7 @@ Would you like to proceed? %s `,
 	}
 
 	if userResp := strings.ToLower(userResp); userResp == "y" || userResp == "yes" {
-		agent, err := gcpLocal.NewDefaultAgent()
+		agent, err := gcpLocal.NewDefaultAgent(ctx)
 		if err != nil {
 			color.New(color.FgRed).Fprintf(os.Stderr, "Automatic creation failed, manual input required. Error was: %v\n", err)
 			return resolveGCPKeyActionManual(endpoint, clusterName, resolver)

+ 4 - 3
cli/cmd/connect/registry.go

@@ -13,7 +13,8 @@ import (
 
 // Helm connects a Helm repository using HTTP basic authentication
 func Registry(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID uint,
 ) (uint, error) {
 	// if project ID is 0, ask the user to set the project ID or create a project
@@ -41,7 +42,7 @@ Username: `))
 
 	// create the basic auth integration
 	integration, err := client.CreateBasicAuthIntegration(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateBasicRequest{
 			Username: username,
@@ -55,7 +56,7 @@ Username: `))
 	color.New(color.FgGreen).Printf("created basic auth integration with id %d\n", integration.ID)
 
 	reg, err := client.CreateRegistry(
-		context.Background(),
+		ctx,
 		projectID,
 		&types.CreateRegistryRequest{
 			URL:                repoURL,

+ 15 - 15
cli/cmd/create.go

@@ -70,7 +70,7 @@ To deploy an application from a Docker registry, use "--source registry" and pas
 		color.New(color.FgGreen, color.Bold).Sprintf("porter create web --app example-app --source registry --image gcr.io/snowflake-12345/example-app:latest"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, createFull)
+		err := checkLoginAndRun(cmd.Context(), args, createFull)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -183,9 +183,7 @@ func init() {
 
 var supportedKinds = map[string]string{"web": "", "job": "", "worker": ""}
 
-func createFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func createFull(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -266,13 +264,13 @@ func createFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 
 	if source == "local" {
 		if useCache {
-			regID, imageURL, err := createAgent.GetImageRepoURL(name, namespace)
+			regID, imageURL, err := createAgent.GetImageRepoURL(ctx, name, namespace)
 			if err != nil {
 				return err
 			}
 
 			err = client.CreateRepository(
-				context.Background(),
+				ctx,
 				cliConf.Project,
 				regID,
 				&types.CreateRegistryRepositoryRequest{
@@ -284,21 +282,21 @@ func createFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 				return err
 			}
 
-			err = config.SetDockerConfig(createAgent.Client)
+			err = config.SetDockerConfig(ctx, createAgent.Client, project.ID)
 
 			if err != nil {
 				return err
 			}
 		}
 
-		subdomain, err := createAgent.CreateFromDocker(valuesObj, "default", nil)
+		subdomain, err := createAgent.CreateFromDocker(ctx, valuesObj, "default", nil)
 
 		return handleSubdomainCreate(subdomain, err)
 	} else if source == "github" {
-		return createFromGithub(createAgent, valuesObj)
+		return createFromGithub(ctx, createAgent, valuesObj)
 	}
 
-	subdomain, err := createAgent.CreateFromRegistry(image, valuesObj)
+	subdomain, err := createAgent.CreateFromRegistry(ctx, image, valuesObj)
 
 	return handleSubdomainCreate(subdomain, err)
 }
@@ -317,7 +315,7 @@ func handleSubdomainCreate(subdomain string, err error) error {
 	return nil
 }
 
-func createFromGithub(createAgent *deploy.CreateAgent, overrideValues map[string]interface{}) error {
+func createFromGithub(ctx context.Context, createAgent *deploy.CreateAgent, overrideValues map[string]interface{}) error {
 	fullPath, err := filepath.Abs(localPath)
 	if err != nil {
 		return err
@@ -343,10 +341,12 @@ func createFromGithub(createAgent *deploy.CreateAgent, overrideValues map[string
 		return fmt.Errorf("remote is not a Github repository")
 	}
 
-	subdomain, err := createAgent.CreateFromGithub(&deploy.GithubOpts{
-		Branch: gitBranch,
-		Repo:   remoteRepo,
-	}, overrideValues)
+	subdomain, err := createAgent.CreateFromGithub(
+		ctx,
+		&deploy.GithubOpts{
+			Branch: gitBranch,
+			Repo:   remoteRepo,
+		}, overrideValues)
 
 	return handleSubdomainCreate(subdomain, err)
 }

+ 20 - 25
cli/cmd/delete.go

@@ -6,6 +6,7 @@ import (
 	"os"
 	"strconv"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	"github.com/fatih/color"
@@ -34,7 +35,7 @@ deleting a configuration:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter delete"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteDeployment)
+		err := checkLoginAndRun(cmd.Context(), args, deleteDeployment)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -48,7 +49,7 @@ var deleteAppsCmd = &cobra.Command{
 	Short:   "Deletes an existing app",
 	Args:    cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteApp)
+		err := checkLoginAndRun(cmd.Context(), args, deleteApp)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -62,7 +63,7 @@ var deleteJobsCmd = &cobra.Command{
 	Short:   "Deletes an existing job",
 	Args:    cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteJob)
+		err := checkLoginAndRun(cmd.Context(), args, deleteJob)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -76,7 +77,7 @@ var deleteAddonsCmd = &cobra.Command{
 	Short:   "Deletes an existing addon",
 	Args:    cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteAddon)
+		err := checkLoginAndRun(cmd.Context(), args, deleteAddon)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -90,7 +91,7 @@ var deleteHelmCmd = &cobra.Command{
 	Short:   "Deletes an existing helm repo",
 	Args:    cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteHelm)
+		err := checkLoginAndRun(cmd.Context(), args, deleteHelm)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -113,9 +114,7 @@ func init() {
 	rootCmd.AddCommand(deleteCmd)
 }
 
-func deleteDeployment(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func deleteDeployment(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -155,13 +154,11 @@ func deleteDeployment(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	}
 
 	return client.DeleteDeployment(
-		context.Background(), projectID, clusterID, deploymentID,
+		ctx, projectID, clusterID, deploymentID,
 	)
 }
 
-func deleteApp(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func deleteApp(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -178,7 +175,7 @@ func deleteApp(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	name := args[0]
 
 	resp, err := client.GetRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 	if err != nil {
 		return err
@@ -193,7 +190,7 @@ func deleteApp(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	color.New(color.FgBlue).Printf("Deleting app: %s\n", name)
 
 	err = client.DeleteRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 
 	if err != nil {
@@ -203,9 +200,7 @@ func deleteApp(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	return nil
 }
 
-func deleteJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func deleteJob(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -222,7 +217,7 @@ func deleteJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	name := args[0]
 
 	resp, err := client.GetRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 	if err != nil {
 		return err
@@ -237,7 +232,7 @@ func deleteJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	color.New(color.FgBlue).Printf("Deleting job: %s\n", name)
 
 	err = client.DeleteRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 
 	if err != nil {
@@ -247,11 +242,11 @@ func deleteJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 	return nil
 }
 
-func deleteAddon(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func deleteAddon(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	name := args[0]
 
 	resp, err := client.GetRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 	if err != nil {
 		return err
@@ -266,7 +261,7 @@ func deleteAddon(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	color.New(color.FgBlue).Printf("Deleting addon: %s\n", name)
 
 	err = client.DeleteRelease(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, name,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, name,
 	)
 
 	if err != nil {
@@ -276,10 +271,10 @@ func deleteAddon(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	return nil
 }
 
-func deleteHelm(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func deleteHelm(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	name := args[0]
 
-	resp, err := client.ListHelmRepos(context.Background(), cliConf.Project)
+	resp, err := client.ListHelmRepos(ctx, cliConf.Project)
 	if err != nil {
 		return err
 	}
@@ -299,7 +294,7 @@ func deleteHelm(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 
 	color.New(color.FgBlue).Printf("Deleting helm repo: %s\n", name)
 
-	err = client.DeleteHelmRepo(context.Background(), cliConf.Project, repo.ID)
+	err = client.DeleteHelmRepo(ctx, cliConf.Project, repo.ID)
 
 	if err != nil {
 		return err

+ 76 - 83
cli/cmd/deploy.go

@@ -71,7 +71,7 @@ specify it as follows:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter update --app example-app --method docker --dockerfile ./docker/prod.Dockerfile"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateFull)
+		err := checkLoginAndRun(cmd.Context(), args, updateFull)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -99,7 +99,7 @@ destination path for a .env file. For example:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter update get-env --app example-app --file .env"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateGetEnv)
+		err := checkLoginAndRun(cmd.Context(), args, updateGetEnv)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -141,7 +141,7 @@ for the application:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter update build --app example-app --method docker --dockerfile ./prod.Dockerfile"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateBuild)
+		err := checkLoginAndRun(cmd.Context(), args, updateBuild)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -181,7 +181,7 @@ linked it via "porter connect".
 		color.New(color.FgGreen, color.Bold).Sprintf("porter update push --app nginx --tag new-tag"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updatePush)
+		err := checkLoginAndRun(cmd.Context(), args, updatePush)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -211,7 +211,7 @@ the image that the application uses if no --values file is specified:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter update config --app example-app --tag custom-tag"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateUpgrade)
+		err := checkLoginAndRun(cmd.Context(), args, updateUpgrade)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -232,7 +232,7 @@ var updateSetEnvGroupCmd = &cobra.Command{
 	Short: "Sets the desired value of an environment variable in an env group in the form VAR=VALUE.",
 	Args:  cobra.MaximumNArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateSetEnvGroup)
+		err := checkLoginAndRun(cmd.Context(), args, updateSetEnvGroup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -244,7 +244,7 @@ var updateUnsetEnvGroupCmd = &cobra.Command{
 	Short: "Removes an environment variable from an env group.",
 	Args:  cobra.MinimumNArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, updateUnsetEnvGroup)
+		err := checkLoginAndRun(cmd.Context(), args, updateUnsetEnvGroup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -442,9 +442,7 @@ func init() {
 	updateCmd.AddCommand(updateEnvGroupCmd)
 }
 
-func updateFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func updateFull(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -476,25 +474,22 @@ func updateFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 
 	color.New(color.FgGreen).Println("Deploying app:", app)
 
-	updateAgent, err := updateGetAgent(client)
+	updateAgent, err := updateGetAgent(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
 
-	err = updateBuildWithAgent(updateAgent)
-
+	err = updateBuildWithAgent(ctx, updateAgent)
 	if err != nil {
 		return err
 	}
 
-	err = updatePushWithAgent(updateAgent)
-
+	err = updatePushWithAgent(ctx, updateAgent)
 	if err != nil {
 		return err
 	}
 
-	err = updateUpgradeWithAgent(updateAgent)
-
+	err = updateUpgradeWithAgent(ctx, updateAgent)
 	if err != nil {
 		return err
 	}
@@ -503,7 +498,7 @@ func updateFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 		// solves timing issue where replicasets were not on the cluster, before our initial check
 		time.Sleep(10 * time.Second)
 
-		err := checkDeploymentStatus(client)
+		err := checkDeploymentStatus(ctx, client, cliConf)
 		if err != nil {
 			return err
 		}
@@ -512,13 +507,13 @@ func updateFull(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	return nil
 }
 
-func updateGetEnv(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	updateAgent, err := updateGetAgent(client)
+func updateGetEnv(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	updateAgent, err := updateGetAgent(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
 
-	buildEnv, err := updateAgent.GetBuildEnv(&deploy.GetBuildEnvOpts{
+	buildEnv, err := updateAgent.GetBuildEnv(ctx, &deploy.GetBuildEnvOpts{
 		UseNewConfig: false,
 	})
 	if err != nil {
@@ -527,7 +522,6 @@ func updateGetEnv(_ *types.GetAuthenticatedUserResponse, client *api.Client, arg
 
 	// set the environment variables in the process
 	err = updateAgent.SetBuildEnv(buildEnv)
-
 	if err != nil {
 		return err
 	}
@@ -536,9 +530,7 @@ func updateGetEnv(_ *types.GetAuthenticatedUserResponse, client *api.Client, arg
 	return updateAgent.WriteBuildEnv(getEnvFileDest)
 }
 
-func updateBuild(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func updateBuild(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -552,15 +544,15 @@ func updateBuild(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 		return nil
 	}
 
-	updateAgent, err := updateGetAgent(client)
+	updateAgent, err := updateGetAgent(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
 
-	return updateBuildWithAgent(updateAgent)
+	return updateBuildWithAgent(ctx, updateAgent)
 }
 
-func updatePush(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func updatePush(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	if app == "" {
 		if len(args) == 0 {
 			return fmt.Errorf("please provide the docker image name")
@@ -568,7 +560,7 @@ func updatePush(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 
 		image := args[0]
 
-		registries, err := client.ListRegistries(context.Background(), cliConf.Project)
+		registries, err := client.ListRegistries(ctx, cliConf.Project)
 		if err != nil {
 			return err
 		}
@@ -587,7 +579,7 @@ func updatePush(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 			return fmt.Errorf("could not find registry for image: %s", image)
 		}
 
-		err = client.CreateRepository(context.Background(), cliConf.Project, regID,
+		err = client.CreateRepository(ctx, cliConf.Project, regID,
 			&types.CreateRegistryRepositoryRequest{
 				ImageRepoURI: strings.Split(image, ":")[0],
 			},
@@ -597,12 +589,12 @@ func updatePush(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 			return err
 		}
 
-		agent, err := docker.NewAgentWithAuthGetter(client, cliConf.Project)
+		agent, err := docker.NewAgentWithAuthGetter(ctx, client, cliConf.Project)
 		if err != nil {
 			return err
 		}
 
-		err = agent.PushImage(image)
+		err = agent.PushImage(ctx, image)
 
 		if err != nil {
 			return err
@@ -611,17 +603,15 @@ func updatePush(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 		return nil
 	}
 
-	updateAgent, err := updateGetAgent(client)
+	updateAgent, err := updateGetAgent(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
 
-	return updatePushWithAgent(updateAgent)
+	return updatePushWithAgent(ctx, updateAgent)
 }
 
-func updateUpgrade(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func updateUpgrade(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -635,12 +625,12 @@ func updateUpgrade(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 		return nil
 	}
 
-	updateAgent, err := updateGetAgent(client)
+	updateAgent, err := updateGetAgent(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
 
-	err = updateUpgradeWithAgent(updateAgent)
+	err = updateUpgradeWithAgent(ctx, updateAgent)
 
 	if err != nil {
 		return err
@@ -650,7 +640,7 @@ func updateUpgrade(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 		// solves timing issue where replicasets were not on the cluster, before our initial check
 		time.Sleep(10 * time.Second)
 
-		err := checkDeploymentStatus(client)
+		err := checkDeploymentStatus(ctx, client, cliConf)
 		if err != nil {
 			return err
 		}
@@ -659,7 +649,7 @@ func updateUpgrade(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 	return nil
 }
 
-func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func updateSetEnvGroup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	if len(normalEnvGroupVars) == 0 && len(secretEnvGroupVars) == 0 && len(args) == 0 {
 		return fmt.Errorf("please provide one or more variables to update")
 	}
@@ -670,7 +660,7 @@ func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client
 	s.Suffix = fmt.Sprintf(" Fetching env group '%s' in namespace '%s'", name, namespace)
 	s.Start()
 
-	envGroupResp, err := client.GetEnvGroup(context.Background(), cliConf.Project, cliConf.Cluster, namespace,
+	envGroupResp, err := client.GetEnvGroup(ctx, cliConf.Project, cliConf.Cluster, namespace,
 		&types.GetEnvGroupRequest{
 			Name: name, Version: version,
 		},
@@ -743,7 +733,7 @@ func updateSetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client
 	s.Start()
 
 	_, err = client.CreateEnvGroup(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, newEnvGroup,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, newEnvGroup,
 	)
 
 	s.Stop()
@@ -767,7 +757,7 @@ func validateVarValue(in string) (string, string, error) {
 	return key, value, nil
 }
 
-func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func updateUnsetEnvGroup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	if len(args) == 0 {
 		return fmt.Errorf("required variable name")
 	}
@@ -778,7 +768,7 @@ func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 	s.Suffix = fmt.Sprintf(" Fetching env group '%s' in namespace '%s'", name, namespace)
 	s.Start()
 
-	envGroupResp, err := client.GetEnvGroup(context.Background(), cliConf.Project, cliConf.Cluster, namespace,
+	envGroupResp, err := client.GetEnvGroup(ctx, cliConf.Project, cliConf.Cluster, namespace,
 		&types.GetEnvGroupRequest{
 			Name: name, Version: version,
 		},
@@ -804,7 +794,7 @@ func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 	s.Start()
 
 	_, err = client.CreateEnvGroup(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, newEnvGroup,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, newEnvGroup,
 	)
 
 	s.Stop()
@@ -819,7 +809,7 @@ func updateUnsetEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 }
 
 // HELPER METHODS
-func updateGetAgent(client *api.Client) (*deploy.DeployAgent, error) {
+func updateGetAgent(ctx context.Context, client api.Client, cliConf config.CLIConfig) (*deploy.DeployAgent, error) {
 	var buildMethod deploy.DeployBuildType
 
 	if method != "" {
@@ -836,7 +826,7 @@ func updateGetAgent(client *api.Client) (*deploy.DeployAgent, error) {
 	}
 
 	// initialize the update agent
-	return deploy.NewDeployAgent(client, app, &deploy.DeployOpts{
+	return deploy.NewDeployAgent(ctx, client, app, &deploy.DeployOpts{
 		SharedOpts: &deploy.SharedOpts{
 			ProjectID:       cliConf.Project,
 			ClusterID:       cliConf.Cluster,
@@ -852,22 +842,22 @@ func updateGetAgent(client *api.Client) (*deploy.DeployAgent, error) {
 	})
 }
 
-func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
+func updateBuildWithAgent(ctx context.Context, updateAgent *deploy.DeployAgent) error {
 	// build the deployment
 	color.New(color.FgGreen).Println("Building docker image for", app)
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
+		_ = updateAgent.StreamEvent(ctx, types.SubEvent{
 			EventID: "build",
 			Name:    "Build",
 			Index:   100,
 			Status:  types.EventStatusInProgress,
 			Info:    "",
-		})
+		}) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 
 	if useCache {
-		err := config.SetDockerConfig(updateAgent.Client)
+		err := config.SetDockerConfig(ctx, updateAgent.Client, updateAgent.Opts.ProjectID)
 		if err != nil {
 			return err
 		}
@@ -879,14 +869,14 @@ func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
 		return err
 	}
 
-	buildEnv, err := updateAgent.GetBuildEnv(&deploy.GetBuildEnvOpts{
+	buildEnv, err := updateAgent.GetBuildEnv(ctx, &deploy.GetBuildEnvOpts{
 		UseNewConfig: true,
 		NewConfig:    valuesObj,
 	})
 	if err != nil {
 		if stream {
 			// another concern: is it safe to ignore the error here?
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "build",
 				Name:    "Build",
 				Index:   110,
@@ -902,7 +892,7 @@ func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
 
 	if err != nil {
 		if stream {
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "build",
 				Name:    "Build",
 				Index:   120,
@@ -913,9 +903,9 @@ func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
 		return err
 	}
 
-	if err := updateAgent.Build(nil); err != nil {
+	if err := updateAgent.Build(ctx, nil); err != nil {
 		if stream {
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "build",
 				Name:    "Build",
 				Index:   130,
@@ -927,7 +917,7 @@ func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
 	}
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
+		updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 			EventID: "build",
 			Name:    "Build",
 			Index:   140,
@@ -939,7 +929,7 @@ func updateBuildWithAgent(updateAgent *deploy.DeployAgent) error {
 	return nil
 }
 
-func updatePushWithAgent(updateAgent *deploy.DeployAgent) error {
+func updatePushWithAgent(ctx context.Context, updateAgent *deploy.DeployAgent) error {
 	if useCache {
 		color.New(color.FgGreen).Println("Skipping image push for", app, "as use-cache is set")
 
@@ -950,18 +940,19 @@ func updatePushWithAgent(updateAgent *deploy.DeployAgent) error {
 	color.New(color.FgGreen).Println("Pushing new image for", app)
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
-			EventID: "push",
-			Name:    "Push",
-			Index:   200,
-			Status:  types.EventStatusInProgress,
-			Info:    "",
-		})
+		updateAgent.StreamEvent( //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			ctx, types.SubEvent{
+				EventID: "push",
+				Name:    "Push",
+				Index:   200,
+				Status:  types.EventStatusInProgress,
+				Info:    "",
+			})
 	}
 
-	if err := updateAgent.Push(); err != nil {
+	if err := updateAgent.Push(ctx); err != nil {
 		if stream {
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "push",
 				Name:    "Push",
 				Index:   210,
@@ -973,7 +964,7 @@ func updatePushWithAgent(updateAgent *deploy.DeployAgent) error {
 	}
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
+		updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 			EventID: "push",
 			Name:    "Push",
 			Index:   220,
@@ -985,12 +976,12 @@ func updatePushWithAgent(updateAgent *deploy.DeployAgent) error {
 	return nil
 }
 
-func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
+func updateUpgradeWithAgent(ctx context.Context, updateAgent *deploy.DeployAgent) error {
 	// push the deployment
 	color.New(color.FgGreen).Println("Upgrading configuration for", app)
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
+		updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 			EventID: "upgrade",
 			Name:    "Upgrade",
 			Index:   300,
@@ -1009,7 +1000,7 @@ func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
 
 	if err != nil {
 		if stream {
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "upgrade",
 				Name:    "Upgrade",
 				Index:   310,
@@ -1022,6 +1013,7 @@ func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
 
 	if len(updateAgent.Opts.AdditionalEnv) > 0 {
 		syncedEnv, err := deploy.GetSyncedEnv(
+			ctx,
 			updateAgent.Client,
 			updateAgent.Release.Config,
 			updateAgent.Opts.ProjectID,
@@ -1065,11 +1057,11 @@ func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
 		})
 	}
 
-	err = updateAgent.UpdateImageAndValues(valuesObj)
+	err = updateAgent.UpdateImageAndValues(ctx, valuesObj)
 
 	if err != nil {
 		if stream {
-			updateAgent.StreamEvent(types.SubEvent{
+			updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 				EventID: "upgrade",
 				Name:    "Upgrade",
 				Index:   320,
@@ -1081,7 +1073,7 @@ func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
 	}
 
 	if stream {
-		updateAgent.StreamEvent(types.SubEvent{
+		updateAgent.StreamEvent(ctx, types.SubEvent{ //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 			EventID: "upgrade",
 			Name:    "Upgrade",
 			Index:   330,
@@ -1095,14 +1087,15 @@ func updateUpgradeWithAgent(updateAgent *deploy.DeployAgent) error {
 	return nil
 }
 
-func checkDeploymentStatus(client *api.Client) error {
+func checkDeploymentStatus(ctx context.Context, client api.Client, cliConfig config.CLIConfig) error {
 	color.New(color.FgBlue).Println("waiting for deployment to be ready, this may take a few minutes and will time out if it takes longer than 30 minutes")
 
 	sharedConf := &PorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err := sharedConf.setSharedConfig()
+	err := sharedConf.setSharedConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("could not retrieve kubernetes credentials: %w", err)
 	}
@@ -1112,7 +1105,7 @@ func checkDeploymentStatus(client *api.Client) error {
 	success := false
 
 	depls, err := sharedConf.Clientset.AppsV1().Deployments(namespace).List(
-		context.Background(),
+		ctx,
 		metav1.ListOptions{
 			LabelSelector: fmt.Sprintf("app.kubernetes.io/instance=%s", app),
 		},
@@ -1148,7 +1141,7 @@ func checkDeploymentStatus(client *api.Client) error {
 	}
 
 	pods, err := sharedConf.Clientset.CoreV1().Pods(namespace).List(
-		context.Background(), metav1.ListOptions{
+		ctx, metav1.ListOptions{
 			LabelSelector: fmt.Sprintf("app.kubernetes.io/instance=%s", app),
 		},
 	)
@@ -1167,7 +1160,7 @@ func checkDeploymentStatus(client *api.Client) error {
 			for _, ref := range pod.OwnerReferences {
 				if ref.Kind == "ReplicaSet" {
 					rs, err := sharedConf.Clientset.AppsV1().ReplicaSets(namespace).Get(
-						context.Background(),
+						ctx,
 						ref.Name,
 						metav1.GetOptions{},
 					)
@@ -1194,7 +1187,7 @@ func checkDeploymentStatus(client *api.Client) error {
 	for time.Now().Before(timeWait) {
 		// refresh the client every 10 minutes
 		if time.Now().After(prevRefresh.Add(10 * time.Minute)) {
-			err = sharedConf.setSharedConfig()
+			err = sharedConf.setSharedConfig(ctx)
 
 			if err != nil {
 				return fmt.Errorf("could not retrieve kube credentials: %s", err.Error())
@@ -1204,7 +1197,7 @@ func checkDeploymentStatus(client *api.Client) error {
 		}
 
 		rs, err := sharedConf.Clientset.AppsV1().ReplicaSets(namespace).Get(
-			context.Background(),
+			ctx,
 			rsName,
 			metav1.GetOptions{},
 		)

+ 7 - 3
cli/cmd/deploy/build.go

@@ -1,6 +1,7 @@
 package deploy
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -16,7 +17,7 @@ import (
 type BuildAgent struct {
 	*SharedOpts
 
-	APIClient   *api.Client
+	APIClient   api.Client
 	ImageRepo   string
 	Env         map[string]string
 	ImageExists bool
@@ -24,6 +25,7 @@ type BuildAgent struct {
 
 // BuildDocker uses the local Docker daemon to build the image
 func (b *BuildAgent) BuildDocker(
+	ctx context.Context,
 	dockerAgent *docker.Agent,
 	basePath,
 	buildCtx,
@@ -52,15 +54,17 @@ func (b *BuildAgent) BuildDocker(
 	}
 
 	return dockerAgent.BuildLocal(
+		ctx,
 		opts,
 	)
 }
 
 // BuildPack uses the cloud-native buildpack client to build a container image
-func (b *BuildAgent) BuildPack(dockerAgent *docker.Agent, dst, tag, prevTag string, buildConfig *types.BuildConfig) error {
+func (b *BuildAgent) BuildPack(ctx context.Context, dockerAgent *docker.Agent, dst, tag, prevTag string, buildConfig *types.BuildConfig) error {
 	// retag the image with "pack-cache" tag so that it doesn't re-pull from the registry
 	if b.ImageExists {
 		err := dockerAgent.TagImage(
+			ctx,
 			fmt.Sprintf("%s:%s", b.ImageRepo, prevTag),
 			fmt.Sprintf("%s:%s", b.ImageRepo, "pack-cache"),
 		)
@@ -81,7 +85,7 @@ func (b *BuildAgent) BuildPack(dockerAgent *docker.Agent, dst, tag, prevTag stri
 	}
 
 	// call builder
-	return packAgent.Build(opts, buildConfig, fmt.Sprintf("%s:%s", b.ImageRepo, "pack-cache"))
+	return packAgent.Build(ctx, opts, buildConfig, fmt.Sprintf("%s:%s", b.ImageRepo, "pack-cache"))
 }
 
 // ResolveDockerPaths returns a path to the dockerfile that is either relative or absolute, and a path

+ 37 - 32
cli/cmd/deploy/create.go

@@ -15,7 +15,7 @@ import (
 
 // CreateAgent handles the creation of a new application on Porter
 type CreateAgent struct {
-	Client     *api.Client
+	Client     api.Client
 	CreateOpts *CreateOpts
 }
 
@@ -43,6 +43,7 @@ type GithubOpts struct {
 // This function attempts to find a matching repository in the list of linked repositories
 // on Porter. If one is found, it will use that repository as the app source.
 func (c *CreateAgent) CreateFromGithub(
+	ctx context.Context,
 	ghOpts *GithubOpts,
 	overrideValues map[string]interface{},
 ) (string, error) {
@@ -50,7 +51,7 @@ func (c *CreateAgent) CreateFromGithub(
 
 	// get all linked github repos and find matching repo
 	resp, err := c.Client.ListGitInstallationIDs(
-		context.Background(),
+		ctx,
 		c.CreateOpts.ProjectID,
 	)
 	if err != nil {
@@ -64,7 +65,7 @@ func (c *CreateAgent) CreateFromGithub(
 	for _, gitInstallationID := range gitInstallations {
 		// for each git repo, search for a matching username/owner
 		resp, err := c.Client.ListGitRepos(
-			context.Background(),
+			ctx,
 			c.CreateOpts.ProjectID,
 			gitInstallationID,
 		)
@@ -90,7 +91,7 @@ func (c *CreateAgent) CreateFromGithub(
 		return "", fmt.Errorf("could not find a linked github repo for %s. Make sure you have linked your Github account on the Porter dashboard.", ghOpts.Repo)
 	}
 
-	latestVersion, mergedValues, err := c.GetMergedValues(overrideValues)
+	latestVersion, mergedValues, err := c.GetMergedValues(ctx, overrideValues)
 	if err != nil {
 		return "", err
 	}
@@ -107,18 +108,18 @@ func (c *CreateAgent) CreateFromGithub(
 		}
 	}
 
-	regID, imageURL, err := c.GetImageRepoURL(opts.ReleaseName, opts.Namespace)
+	regID, imageURL, err := c.GetImageRepoURL(ctx, opts.ReleaseName, opts.Namespace)
 	if err != nil {
 		return "", err
 	}
 
-	subdomain, err := c.CreateSubdomainIfRequired(mergedValues)
+	subdomain, err := c.CreateSubdomainIfRequired(ctx, mergedValues)
 	if err != nil {
 		return "", err
 	}
 
 	err = c.Client.DeployTemplate(
-		context.Background(),
+		ctx,
 		opts.ProjectID,
 		opts.ClusterID,
 		opts.Namespace,
@@ -152,6 +153,7 @@ func (c *CreateAgent) CreateFromGithub(
 
 // CreateFromRegistry deploys a new application from an existing Docker repository + tag.
 func (c *CreateAgent) CreateFromRegistry(
+	ctx context.Context,
 	image string,
 	overrideValues map[string]interface{},
 ) (string, error) {
@@ -168,7 +170,7 @@ func (c *CreateAgent) CreateFromRegistry(
 
 	opts := c.CreateOpts
 
-	latestVersion, mergedValues, err := c.GetMergedValues(overrideValues)
+	latestVersion, mergedValues, err := c.GetMergedValues(ctx, overrideValues)
 	if err != nil {
 		return "", err
 	}
@@ -178,13 +180,13 @@ func (c *CreateAgent) CreateFromRegistry(
 		"tag":        imageSpl[1],
 	}
 
-	subdomain, err := c.CreateSubdomainIfRequired(mergedValues)
+	subdomain, err := c.CreateSubdomainIfRequired(ctx, mergedValues)
 	if err != nil {
 		return "", err
 	}
 
 	err = c.Client.DeployTemplate(
-		context.Background(),
+		ctx,
 		opts.ProjectID,
 		opts.ClusterID,
 		opts.Namespace,
@@ -209,6 +211,7 @@ func (c *CreateAgent) CreateFromRegistry(
 // CreateFromDocker uses a local build context and a local Docker daemon to build a new
 // container image, and then deploys it onto Porter.
 func (c *CreateAgent) CreateFromDocker(
+	ctx context.Context,
 	overrideValues map[string]interface{},
 	imageTag string,
 	extraBuildConfig *types.BuildConfig,
@@ -241,12 +244,12 @@ func (c *CreateAgent) CreateFromDocker(
 	}
 
 	// overwrite with docker image repository and tag
-	regID, imageURL, err := c.GetImageRepoURL(opts.ReleaseName, opts.Namespace)
+	regID, imageURL, err := c.GetImageRepoURL(ctx, opts.ReleaseName, opts.Namespace)
 	if err != nil {
 		return "", err
 	}
 
-	latestVersion, mergedValues, err := c.GetMergedValues(overrideValues)
+	latestVersion, mergedValues, err := c.GetMergedValues(ctx, overrideValues)
 	if err != nil {
 		return "", err
 	}
@@ -257,12 +260,12 @@ func (c *CreateAgent) CreateFromDocker(
 	}
 
 	// create docker agent
-	agent, err := docker.NewAgentWithAuthGetter(c.Client, opts.ProjectID)
+	agent, err := docker.NewAgentWithAuthGetter(ctx, c.Client, opts.ProjectID)
 	if err != nil {
 		return "", err
 	}
 
-	env, err := GetEnvForRelease(c.Client, mergedValues, opts.ProjectID, opts.ClusterID, opts.Namespace)
+	env, err := GetEnvForRelease(ctx, c.Client, mergedValues, opts.ProjectID, opts.ClusterID, opts.Namespace)
 	if err != nil {
 		env = make(map[string]string)
 	}
@@ -307,9 +310,9 @@ func (c *CreateAgent) CreateFromDocker(
 			return "", err
 		}
 
-		err = buildAgent.BuildDocker(agent, basePath, opts.LocalPath, opts.LocalDockerfile, imageTag, "")
+		err = buildAgent.BuildDocker(ctx, agent, basePath, opts.LocalPath, opts.LocalDockerfile, imageTag, "")
 	} else {
-		err = buildAgent.BuildPack(agent, opts.LocalPath, imageTag, "", extraBuildConfig)
+		err = buildAgent.BuildPack(ctx, agent, opts.LocalPath, imageTag, "", extraBuildConfig)
 	}
 
 	if err != nil {
@@ -319,7 +322,7 @@ func (c *CreateAgent) CreateFromDocker(
 	if !opts.SharedOpts.UseCache {
 		// create repository
 		err = c.Client.CreateRepository(
-			context.Background(),
+			ctx,
 			opts.ProjectID,
 			regID,
 			&types.CreateRegistryRepositoryRequest{
@@ -331,20 +334,20 @@ func (c *CreateAgent) CreateFromDocker(
 			return "", err
 		}
 
-		err = agent.PushImage(fmt.Sprintf("%s:%s", imageURL, imageTag))
+		err = agent.PushImage(ctx, fmt.Sprintf("%s:%s", imageURL, imageTag))
 
 		if err != nil {
 			return "", err
 		}
 	}
 
-	subdomain, err := c.CreateSubdomainIfRequired(mergedValues)
+	subdomain, err := c.CreateSubdomainIfRequired(ctx, mergedValues)
 	if err != nil {
 		return "", err
 	}
 
 	err = c.Client.DeployTemplate(
-		context.Background(),
+		ctx,
 		opts.ProjectID,
 		opts.ClusterID,
 		opts.Namespace,
@@ -378,11 +381,11 @@ func (c *CreateAgent) HasDefaultDockerfile(buildPath string) bool {
 // GetImageRepoURL creates the image repository url by finding the first valid image
 // registry linked to Porter, and then generates a new name of the form:
 // `{registry}/{name}-{namespace}`
-func (c *CreateAgent) GetImageRepoURL(name, namespace string) (uint, string, error) {
+func (c *CreateAgent) GetImageRepoURL(ctx context.Context, name, namespace string) (uint, string, error) {
 	// get all image registries linked to the project
 	// get the list of namespaces
 	resp, err := c.Client.ListRegistries(
-		context.Background(),
+		ctx,
 		c.CreateOpts.ProjectID,
 	)
 
@@ -436,9 +439,9 @@ func (c *CreateAgent) GetImageRepoURL(name, namespace string) (uint, string, err
 
 // GetLatestTemplateVersion retrieves the latest template version for a specific
 // Porter template from the chart repository.
-func (c *CreateAgent) GetLatestTemplateVersion(templateName string) (string, error) {
+func (c *CreateAgent) GetLatestTemplateVersion(ctx context.Context, templateName string) (string, error) {
 	resp, err := c.Client.ListTemplates(
-		context.Background(),
+		ctx,
 		c.CreateOpts.ProjectID,
 		&types.ListTemplatesRequest{},
 	)
@@ -466,9 +469,9 @@ func (c *CreateAgent) GetLatestTemplateVersion(templateName string) (string, err
 
 // GetLatestTemplateDefaultValues gets the default config (`values.yaml`) set for a specific
 // template.
-func (c *CreateAgent) GetLatestTemplateDefaultValues(projectID uint, templateName, templateVersion string) (map[string]interface{}, error) {
+func (c *CreateAgent) GetLatestTemplateDefaultValues(ctx context.Context, projectID uint, templateName, templateVersion string) (map[string]interface{}, error) {
 	chart, err := c.Client.GetTemplate(
-		context.Background(),
+		ctx,
 		projectID,
 		templateName,
 		templateVersion,
@@ -481,20 +484,21 @@ func (c *CreateAgent) GetLatestTemplateDefaultValues(projectID uint, templateNam
 	return chart.Values, nil
 }
 
-func (c *CreateAgent) GetMergedValues(overrideValues map[string]interface{}) (string, map[string]interface{}, error) {
+// GetMergedValues merges existing values with their overrides
+func (c *CreateAgent) GetMergedValues(ctx context.Context, overrideValues map[string]interface{}) (string, map[string]interface{}, error) {
 	// deploy the template
-	latestVersion, err := c.GetLatestTemplateVersion(c.CreateOpts.Kind)
+	latestVersion, err := c.GetLatestTemplateVersion(ctx, c.CreateOpts.Kind)
 	if err != nil {
 		return "", nil, err
 	}
 
 	// get the values of the template
-	values, err := c.GetLatestTemplateDefaultValues(c.CreateOpts.ProjectID, c.CreateOpts.Kind, latestVersion)
+	values, err := c.GetLatestTemplateDefaultValues(ctx, c.CreateOpts.ProjectID, c.CreateOpts.Kind, latestVersion)
 	if err != nil {
 		return "", nil, err
 	}
 
-	err = coalesceEnvGroups(c.Client, c.CreateOpts.ProjectID, c.CreateOpts.ClusterID,
+	err = coalesceEnvGroups(ctx, c.Client, c.CreateOpts.ProjectID, c.CreateOpts.ClusterID,
 		c.CreateOpts.Namespace, c.CreateOpts.EnvGroups, values)
 
 	if err != nil {
@@ -507,7 +511,8 @@ func (c *CreateAgent) GetMergedValues(overrideValues map[string]interface{}) (st
 	return latestVersion, mergedValues, err
 }
 
-func (c *CreateAgent) CreateSubdomainIfRequired(mergedValues map[string]interface{}) (string, error) {
+// CreateSubdomainIfRequired checks whether a subdomain needs to be created and, if so, creates one
+func (c *CreateAgent) CreateSubdomainIfRequired(ctx context.Context, mergedValues map[string]interface{}) (string, error) {
 	subdomain := ""
 
 	// check for automatic subdomain creation if web kind
@@ -543,7 +548,7 @@ func (c *CreateAgent) CreateSubdomainIfRequired(mergedValues map[string]interfac
 					} else {
 						// in the case of ingress enabled but no custom domain, create subdomain
 						dnsRecord, err := c.Client.CreateDNSRecord(
-							context.Background(),
+							ctx,
 							c.CreateOpts.ProjectID,
 							c.CreateOpts.ClusterID,
 							c.CreateOpts.Namespace,

+ 29 - 25
cli/cmd/deploy/deploy.go

@@ -32,7 +32,7 @@ const (
 type DeployAgent struct {
 	App string
 
-	Client         *client.Client
+	Client         client.Client
 	Opts           *DeployOpts
 	Release        *types.GetReleaseResponse
 	agent          *docker.Agent
@@ -53,7 +53,7 @@ type DeployOpts struct {
 
 // NewDeployAgent creates a new DeployAgent given a Porter API client, application
 // name, and DeployOpts.
-func NewDeployAgent(client *client.Client, app string, opts *DeployOpts) (*DeployAgent, error) {
+func NewDeployAgent(ctx context.Context, client client.Client, app string, opts *DeployOpts) (*DeployAgent, error) {
 	deployAgent := &DeployAgent{
 		App:    app,
 		Opts:   opts,
@@ -75,7 +75,7 @@ func NewDeployAgent(client *client.Client, app string, opts *DeployOpts) (*Deplo
 	))
 
 	// get docker agent
-	agent, err := docker.NewAgentWithAuthGetter(client, opts.ProjectID)
+	agent, err := docker.NewAgentWithAuthGetter(ctx, client, opts.ProjectID)
 	if err != nil {
 		return nil, err
 	}
@@ -134,10 +134,10 @@ func NewDeployAgent(client *client.Client, app string, opts *DeployOpts) (*Deplo
 
 	deployAgent.tag = opts.OverrideTag
 
-	err = coalesceEnvGroups(deployAgent.Client, deployAgent.Opts.ProjectID, deployAgent.Opts.ClusterID,
+	err = coalesceEnvGroups(ctx, deployAgent.Client, deployAgent.Opts.ProjectID, deployAgent.Opts.ClusterID,
 		deployAgent.Opts.Namespace, deployAgent.Opts.EnvGroups, deployAgent.Release.Config)
 
-	deployAgent.imageExists = deployAgent.agent.CheckIfImageExists(deployAgent.imageRepo, deployAgent.tag)
+	deployAgent.imageExists = deployAgent.agent.CheckIfImageExists(ctx, deployAgent.imageRepo, deployAgent.tag)
 
 	return deployAgent, err
 }
@@ -154,7 +154,7 @@ type GetBuildEnvOpts struct {
 //  2. container.env.build from the release config
 //  3. container.env.synced from the release config
 //  4. any additional env var that was passed into the DeployAgent as opts.SharedOpts.AdditionalEnv
-func (d *DeployAgent) GetBuildEnv(opts *GetBuildEnvOpts) (map[string]string, error) {
+func (d *DeployAgent) GetBuildEnv(ctx context.Context, opts *GetBuildEnvOpts) (map[string]string, error) {
 	conf := d.Release.Config
 
 	if opts.UseNewConfig {
@@ -163,7 +163,7 @@ func (d *DeployAgent) GetBuildEnv(opts *GetBuildEnvOpts) (map[string]string, err
 		}
 	}
 
-	env, err := GetEnvForRelease(d.Client, conf, d.Opts.ProjectID, d.Opts.ClusterID, d.Opts.Namespace)
+	env, err := GetEnvForRelease(ctx, d.Client, conf, d.Opts.ProjectID, d.Opts.ClusterID, d.Opts.Namespace)
 	if err != nil {
 		return nil, err
 	}
@@ -240,7 +240,7 @@ func (d *DeployAgent) WriteBuildEnv(fileDest string) error {
 
 // Build uses the deploy agent options to build a new container image from either
 // buildpack or docker.
-func (d *DeployAgent) Build(overrideBuildConfig *types.BuildConfig) error {
+func (d *DeployAgent) Build(ctx context.Context, overrideBuildConfig *types.BuildConfig) error {
 	// retrieve current image to use for cache
 	currImageSection := d.Release.Config["image"].(map[string]interface{})
 	currentTag := currImageSection["tag"].(string)
@@ -263,7 +263,7 @@ func (d *DeployAgent) Build(overrideBuildConfig *types.BuildConfig) error {
 		}
 
 		zipResp, err := d.Client.GetRepoZIPDownloadURL(
-			context.Background(),
+			ctx,
 			d.Opts.ProjectID,
 			int64(d.Release.GitActionConfig.GitRepoID),
 			"github",
@@ -294,7 +294,7 @@ func (d *DeployAgent) Build(overrideBuildConfig *types.BuildConfig) error {
 		}
 	}
 
-	currTag, err := d.pullCurrentReleaseImage()
+	currTag, err := d.pullCurrentReleaseImage(ctx)
 
 	// if image is not found, don't return an error
 	if err != nil && err != docker.PullImageErrNotFound {
@@ -311,6 +311,7 @@ func (d *DeployAgent) Build(overrideBuildConfig *types.BuildConfig) error {
 
 	if d.Opts.Method == DeployBuildTypeDocker {
 		return buildAgent.BuildDocker(
+			ctx,
 			d.agent,
 			basePath,
 			buildCtx,
@@ -326,21 +327,21 @@ func (d *DeployAgent) Build(overrideBuildConfig *types.BuildConfig) error {
 		buildConfig = overrideBuildConfig
 	}
 
-	return buildAgent.BuildPack(d.agent, buildCtx, d.tag, currTag, buildConfig)
+	return buildAgent.BuildPack(ctx, d.agent, buildCtx, d.tag, currTag, buildConfig)
 }
 
 // Push pushes a local image to the remote repository linked in the release
-func (d *DeployAgent) Push() error {
-	return d.agent.PushImage(fmt.Sprintf("%s:%s", d.imageRepo, d.tag))
+func (d *DeployAgent) Push(ctx context.Context) error {
+	return d.agent.PushImage(ctx, fmt.Sprintf("%s:%s", d.imageRepo, d.tag))
 }
 
 // UpdateImageAndValues updates the current image for a release, along with new
 // configuration passed in via overrrideValues. If overrideValues is nil, it just
 // reuses the configuration set for the application. If overrideValues is not nil,
 // it will merge the overriding values with the existing configuration.
-func (d *DeployAgent) UpdateImageAndValues(overrideValues map[string]interface{}) error {
+func (d *DeployAgent) UpdateImageAndValues(ctx context.Context, overrideValues map[string]interface{}) error {
 	// we should fetch the latest release and its config
-	release, err := d.Client.GetRelease(context.TODO(), d.Opts.ProjectID, d.Opts.ClusterID, d.Opts.Namespace, d.App)
+	release, err := d.Client.GetRelease(ctx, d.Opts.ProjectID, d.Opts.ClusterID, d.Opts.Namespace, d.App)
 	if err != nil {
 		return err
 	}
@@ -396,7 +397,7 @@ func (d *DeployAgent) UpdateImageAndValues(overrideValues map[string]interface{}
 	}
 
 	return d.Client.UpgradeRelease(
-		context.Background(),
+		ctx,
 		d.Opts.ProjectID,
 		d.Opts.ClusterID,
 		d.Release.Namespace,
@@ -421,7 +422,8 @@ type SyncedEnvSectionKey struct {
 // GetEnvForRelease gets the env vars for a standard Porter template config. These env
 // vars are found at `container.env.normal` and `container.env.synced`.
 func GetEnvForRelease(
-	client *client.Client,
+	ctx context.Context,
+	client client.Client,
 	config map[string]interface{},
 	projID, clusterID uint,
 	namespace string,
@@ -440,7 +442,7 @@ func GetEnvForRelease(
 
 	// next, get the env vars specified by "container.env.synced"
 	// look for container.env.synced
-	syncedEnv, err := GetSyncedEnv(client, config, projID, clusterID, namespace, true)
+	syncedEnv, err := GetSyncedEnv(ctx, client, config, projID, clusterID, namespace, true)
 	if err != nil {
 		return nil, fmt.Errorf("error while fetching container.env.synced variables: %w", err)
 	}
@@ -453,7 +455,7 @@ func GetEnvForRelease(
 }
 
 func GetNormalEnv(
-	client *client.Client,
+	client client.Client,
 	config map[string]interface{},
 	projID, clusterID uint,
 	namespace string,
@@ -487,7 +489,8 @@ func GetNormalEnv(
 }
 
 func GetSyncedEnv(
-	client *client.Client,
+	ctx context.Context,
+	client client.Client,
 	config map[string]interface{},
 	projID, clusterID uint,
 	namespace string,
@@ -590,7 +593,7 @@ func GetSyncedEnv(
 
 		for _, syncedEG := range syncedArr {
 			// for each synced environment group, get the environment group from the client
-			eg, err := client.GetEnvGroup(context.Background(), projID, clusterID, namespace,
+			eg, err := client.GetEnvGroup(ctx, projID, clusterID, namespace,
 				&types.GetEnvGroupRequest{
 					Name: syncedEG.Name,
 				},
@@ -638,7 +641,7 @@ func (d *DeployAgent) getReleaseImage() (string, error) {
 	return repoStr, nil
 }
 
-func (d *DeployAgent) pullCurrentReleaseImage() (string, error) {
+func (d *DeployAgent) pullCurrentReleaseImage(ctx context.Context) (string, error) {
 	// pull the currently deployed image to use cache, if possible
 	imageConfig, err := GetNestedMap(d.Release.Config, "image")
 	if err != nil {
@@ -665,7 +668,7 @@ func (d *DeployAgent) pullCurrentReleaseImage() (string, error) {
 
 	fmt.Printf("attempting to pull image: %s\n", fmt.Sprintf("%s:%s", d.imageRepo, tagStr))
 
-	return tagStr, d.agent.PullImage(fmt.Sprintf("%s:%s", d.imageRepo, tagStr))
+	return tagStr, d.agent.PullImage(ctx, fmt.Sprintf("%s:%s", d.imageRepo, tagStr))
 }
 
 func (d *DeployAgent) downloadRepoToDir(downloadURL string) (string, error) {
@@ -706,9 +709,10 @@ func (d *DeployAgent) downloadRepoToDir(downloadURL string) (string, error) {
 	return res, nil
 }
 
-func (d *DeployAgent) StreamEvent(event types.SubEvent) error {
+// StreamEvent sends a release sub-event update to the Porter API for the deploy agent's release
+func (d *DeployAgent) StreamEvent(ctx context.Context, event types.SubEvent) error {
 	return d.Client.CreateEvent(
-		context.Background(),
+		ctx,
 		d.Opts.ProjectID, d.Opts.ClusterID,
 		d.Release.Namespace, d.Release.Name,
 		&types.UpdateReleaseStepsRequest{

+ 3 - 2
cli/cmd/deploy/shared.go

@@ -23,7 +23,8 @@ type SharedOpts struct {
 }
 
 func coalesceEnvGroups(
-	client *api.Client,
+	ctx context.Context,
+	client api.Client,
 	projectID, clusterID uint,
 	namespace string,
 	envGroups []types.EnvGroupMeta,
@@ -35,7 +36,7 @@ func coalesceEnvGroups(
 		}
 
 		envGroup, err := client.GetEnvGroup(
-			context.Background(),
+			ctx,
 			projectID,
 			clusterID,
 			namespace,

+ 4 - 3
cli/cmd/deploy/wait/job.go

@@ -17,9 +17,10 @@ type WaitOpts struct {
 }
 
 // WaitForJob waits for a job with a given name/namespace to complete its run
-func WaitForJob(client *api.Client, opts *WaitOpts) error {
+// nolint:revive // bad naming convention
+func WaitForJob(ctx context.Context, client api.Client, opts *WaitOpts) error {
 	// get the job release
-	jobRelease, err := client.GetRelease(context.Background(), opts.ProjectID, opts.ClusterID, opts.Namespace, opts.Name)
+	jobRelease, err := client.GetRelease(ctx, opts.ProjectID, opts.ClusterID, opts.Namespace, opts.Name)
 	if err != nil {
 		return err
 	}
@@ -47,7 +48,7 @@ func WaitForJob(client *api.Client, opts *WaitOpts) error {
 
 	for time.Now().Before(timeWait) {
 		// get the jobs for that job chart
-		jobs, err := client.GetJobs(context.Background(), opts.ProjectID, opts.ClusterID, opts.Namespace, opts.Name)
+		jobs, err := client.GetJobs(ctx, opts.ProjectID, opts.ClusterID, opts.Namespace, opts.Name)
 		if err != nil {
 			return err
 		}

+ 4 - 3
cli/cmd/docker.go

@@ -1,6 +1,7 @@
 package cmd
 
 import (
+	"context"
 	"os"
 
 	api "github.com/porter-dev/porter/api/client"
@@ -18,7 +19,7 @@ var configureCmd = &cobra.Command{
 	Use:   "configure",
 	Short: "Configures the host's Docker instance",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, dockerConfig)
+		err := checkLoginAndRun(cmd.Context(), args, dockerConfig)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -31,6 +32,6 @@ func init() {
 	dockerCmd.AddCommand(configureCmd)
 }
 
-func dockerConfig(user *ptypes.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	return config.SetDockerConfig(client)
+func dockerConfig(ctx context.Context, user *ptypes.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	return config.SetDockerConfig(ctx, client, cliConf.Project)
 }

+ 41 - 41
cli/cmd/docker/agent.go

@@ -28,15 +28,14 @@ import (
 type Agent struct {
 	*client.Client
 	authGetter *AuthGetter
-	ctx        context.Context
 	label      string
 }
 
 // CreateLocalVolumeIfNotExist creates a volume using driver type "local" with the
 // given name if it does not exist. If the volume does exist but does not contain
 // the required label (a.label), an error is thrown.
-func (a *Agent) CreateLocalVolumeIfNotExist(name string) (*types.Volume, error) {
-	volListBody, err := a.VolumeList(a.ctx, filters.Args{})
+func (a *Agent) CreateLocalVolumeIfNotExist(ctx context.Context, name string) (*types.Volume, error) {
+	volListBody, err := a.VolumeList(ctx, filters.Args{})
 	if err != nil {
 		return nil, a.handleDockerClientErr(err, "Could not list volumes")
 	}
@@ -49,14 +48,14 @@ func (a *Agent) CreateLocalVolumeIfNotExist(name string) (*types.Volume, error)
 		}
 	}
 
-	return a.CreateLocalVolume(name)
+	return a.CreateLocalVolume(ctx, name)
 }
 
 // CreateLocalVolume creates a volume using driver type "local" with no
 // configured options. The equivalent of:
 //
 // docker volume create --driver local [name]
-func (a *Agent) CreateLocalVolume(name string) (*types.Volume, error) {
+func (a *Agent) CreateLocalVolume(ctx context.Context, name string) (*types.Volume, error) {
 	labels := make(map[string]string)
 	labels[a.label] = "true"
 
@@ -66,7 +65,7 @@ func (a *Agent) CreateLocalVolume(name string) (*types.Volume, error) {
 		Labels: labels,
 	}
 
-	vol, err := a.VolumeCreate(a.ctx, opts)
+	vol, err := a.VolumeCreate(ctx, opts)
 	if err != nil {
 		return nil, a.handleDockerClientErr(err, "Could not create volume "+name)
 	}
@@ -75,15 +74,15 @@ func (a *Agent) CreateLocalVolume(name string) (*types.Volume, error) {
 }
 
 // RemoveLocalVolume removes a volume by name
-func (a *Agent) RemoveLocalVolume(name string) error {
-	return a.VolumeRemove(a.ctx, name, true)
+func (a *Agent) RemoveLocalVolume(ctx context.Context, name string) error {
+	return a.VolumeRemove(ctx, name, true)
 }
 
 // CreateBridgeNetworkIfNotExist creates a volume using driver type "local" with the
 // given name if it does not exist. If the volume does exist but does not contain
 // the required label (a.label), an error is thrown.
-func (a *Agent) CreateBridgeNetworkIfNotExist(name string) (id string, err error) {
-	networks, err := a.NetworkList(a.ctx, types.NetworkListOptions{})
+func (a *Agent) CreateBridgeNetworkIfNotExist(ctx context.Context, name string) (id string, err error) {
+	networks, err := a.NetworkList(ctx, types.NetworkListOptions{})
 	if err != nil {
 		return "", a.handleDockerClientErr(err, "Could not list volumes")
 	}
@@ -96,12 +95,12 @@ func (a *Agent) CreateBridgeNetworkIfNotExist(name string) (id string, err error
 		}
 	}
 
-	return a.CreateBridgeNetwork(name)
+	return a.CreateBridgeNetwork(ctx, name)
 }
 
 // CreateBridgeNetwork creates a volume using the default driver type (bridge)
 // with the CLI label attached
-func (a *Agent) CreateBridgeNetwork(name string) (id string, err error) {
+func (a *Agent) CreateBridgeNetwork(ctx context.Context, name string) (id string, err error) {
 	labels := make(map[string]string)
 	labels[a.label] = "true"
 
@@ -110,7 +109,7 @@ func (a *Agent) CreateBridgeNetwork(name string) (id string, err error) {
 		Attachable: true,
 	}
 
-	net, err := a.NetworkCreate(a.ctx, name, opts)
+	net, err := a.NetworkCreate(ctx, name, opts)
 	if err != nil {
 		return "", a.handleDockerClientErr(err, "Could not create network "+name)
 	}
@@ -119,9 +118,9 @@ func (a *Agent) CreateBridgeNetwork(name string) (id string, err error) {
 }
 
 // ConnectContainerToNetwork attaches a container to a specified network
-func (a *Agent) ConnectContainerToNetwork(networkID, containerID, containerName string) error {
+func (a *Agent) ConnectContainerToNetwork(ctx context.Context, networkID, containerID, containerName string) error {
 	// check if the container is connected already
-	net, err := a.NetworkInspect(a.ctx, networkID, types.NetworkInspectOptions{})
+	net, err := a.NetworkInspect(ctx, networkID, types.NetworkInspectOptions{})
 	if err != nil {
 		return a.handleDockerClientErr(err, "Could not inspect network"+networkID)
 	}
@@ -133,11 +132,12 @@ func (a *Agent) ConnectContainerToNetwork(networkID, containerID, containerName
 		}
 	}
 
-	return a.NetworkConnect(a.ctx, networkID, containerID, &network.EndpointSettings{})
+	return a.NetworkConnect(ctx, networkID, containerID, &network.EndpointSettings{})
 }
 
-func (a *Agent) TagImage(old, new string) error {
-	return a.ImageTag(a.ctx, old, new)
+// TagImage tags an image
+func (a *Agent) TagImage(ctx context.Context, old, new string) error {
+	return a.ImageTag(ctx, old, new)
 }
 
 // PullImageEvent represents a response from the Docker API with an image pull event
@@ -167,13 +167,13 @@ func getRegistryRepositoryPair(imageRepo string) ([]string, error) {
 }
 
 // CheckIfImageExists checks if the image exists in the registry
-func (a *Agent) CheckIfImageExists(imageRepo, imageTag string) bool {
-	registryToken, err := a.getContainerRegistryToken(imageRepo)
+func (a *Agent) CheckIfImageExists(ctx context.Context, imageRepo, imageTag string) bool {
+	registryToken, err := a.getContainerRegistryToken(ctx, imageRepo)
 	if err != nil {
 		return false
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	ctx, cancel := context.WithTimeout(ctx, time.Second*5)
 	defer cancel()
 
 	if strings.Contains(imageRepo, "gcr.io") {
@@ -248,12 +248,12 @@ func (a *Agent) CheckIfImageExists(imageRepo, imageTag string) bool {
 	}
 
 	image := imageRepo + ":" + imageTag
-	encodedRegistryAuth, err := a.getEncodedRegistryAuth(image)
+	encodedRegistryAuth, err := a.getEncodedRegistryAuth(ctx, image)
 	if err != nil {
 		return false
 	}
 
-	_, err = a.DistributionInspect(context.Background(), image, encodedRegistryAuth)
+	_, err = a.DistributionInspect(ctx, image, encodedRegistryAuth)
 
 	if err == nil {
 		return true
@@ -266,14 +266,14 @@ func (a *Agent) CheckIfImageExists(imageRepo, imageTag string) bool {
 }
 
 // PullImage pulls an image specified by the image string
-func (a *Agent) PullImage(image string) error {
-	opts, err := a.getPullOptions(image)
+func (a *Agent) PullImage(ctx context.Context, image string) error {
+	opts, err := a.getPullOptions(ctx, image)
 	if err != nil {
 		return err
 	}
 
 	// pull the specified image
-	out, err := a.ImagePull(a.ctx, image, opts)
+	out, err := a.ImagePull(ctx, image, opts)
 	if err != nil {
 		if client.IsErrNotFound(err) ||
 			(strings.Contains(image, "gcr.io") && strings.Contains(err.Error(), "or it may not exist")) {
@@ -293,14 +293,14 @@ func (a *Agent) PullImage(image string) error {
 }
 
 // PushImage pushes an image specified by the image string
-func (a *Agent) PushImage(image string) error {
-	opts, err := a.getPushOptions(image)
+func (a *Agent) PushImage(ctx context.Context, image string) error {
+	opts, err := a.getPushOptions(ctx, image)
 	if err != nil {
 		return err
 	}
 
 	out, err := a.ImagePush(
-		context.Background(),
+		ctx,
 		image,
 		opts,
 	)
@@ -323,13 +323,13 @@ func (a *Agent) PushImage(image string) error {
 	return nil
 }
 
-func (a *Agent) getPullOptions(image string) (types.ImagePullOptions, error) {
+func (a *Agent) getPullOptions(ctx context.Context, image string) (types.ImagePullOptions, error) {
 	// check if agent has an auth getter; otherwise, assume public usage
 	if a.authGetter == nil {
 		return types.ImagePullOptions{}, nil
 	}
 
-	authConfigEncoded, err := a.getEncodedRegistryAuth(image)
+	authConfigEncoded, err := a.getEncodedRegistryAuth(ctx, image)
 	if err != nil {
 		return types.ImagePullOptions{}, err
 	}
@@ -340,13 +340,13 @@ func (a *Agent) getPullOptions(image string) (types.ImagePullOptions, error) {
 	}, nil
 }
 
-func (a *Agent) getContainerRegistryToken(image string) (string, error) {
+func (a *Agent) getContainerRegistryToken(ctx context.Context, image string) (string, error) {
 	serverURL, err := GetServerURLFromTag(image)
 	if err != nil {
 		return "", err
 	}
 
-	_, secret, err := a.authGetter.GetCredentials(serverURL)
+	_, secret, err := a.authGetter.GetCredentials(ctx, serverURL)
 	if err != nil {
 		return "", err
 	}
@@ -354,14 +354,14 @@ func (a *Agent) getContainerRegistryToken(image string) (string, error) {
 	return secret, nil
 }
 
-func (a *Agent) getEncodedRegistryAuth(image string) (string, error) {
+func (a *Agent) getEncodedRegistryAuth(ctx context.Context, image string) (string, error) {
 	// get using server url
 	serverURL, err := GetServerURLFromTag(image)
 	if err != nil {
 		return "", err
 	}
 
-	user, secret, err := a.authGetter.GetCredentials(serverURL)
+	user, secret, err := a.authGetter.GetCredentials(ctx, serverURL)
 	if err != nil {
 		return "", err
 	}
@@ -385,8 +385,8 @@ func (a *Agent) getEncodedRegistryAuth(image string) (string, error) {
 	return base64.URLEncoding.EncodeToString(authConfigBytes), nil
 }
 
-func (a *Agent) getPushOptions(image string) (types.ImagePushOptions, error) {
-	pullOpts, err := a.getPullOptions(image)
+func (a *Agent) getPushOptions(ctx context.Context, image string) (types.ImagePushOptions, error) {
+	pullOpts, err := a.getPullOptions(ctx, image)
 
 	return types.ImagePushOptions(pullOpts), err
 }
@@ -430,9 +430,9 @@ func GetServerURLFromTag(image string) (string, error) {
 }
 
 // WaitForContainerStop waits until a container has stopped to exit
-func (a *Agent) WaitForContainerStop(id string) error {
+func (a *Agent) WaitForContainerStop(ctx context.Context, id string) error {
 	// wait for container to stop before exit
-	statusCh, errCh := a.ContainerWait(a.ctx, id, container.WaitConditionNotRunning)
+	statusCh, errCh := a.ContainerWait(ctx, id, container.WaitConditionNotRunning)
 
 	select {
 	case err := <-errCh:
@@ -448,9 +448,9 @@ func (a *Agent) WaitForContainerStop(id string) error {
 // WaitForContainerHealthy waits until a container is returning a healthy status. Streak
 // is the maximum number of failures in a row, while timeout is the length of time between
 // checks.
-func (a *Agent) WaitForContainerHealthy(id string, streak int) error {
+func (a *Agent) WaitForContainerHealthy(ctx context.Context, id string, streak int) error {
 	for {
-		cont, err := a.ContainerInspect(a.ctx, id)
+		cont, err := a.ContainerInspect(ctx, id)
 		if err != nil {
 			return a.handleDockerClientErr(err, "Error waiting for stopped container")
 		}

+ 27 - 20
cli/cmd/docker/auth.go

@@ -44,28 +44,30 @@ type CredentialsCache interface {
 
 // AuthGetter retrieves
 type AuthGetter struct {
-	Client    *api.Client
+	Client    api.Client
 	Cache     CredentialsCache
 	ProjectID uint
 }
 
-func (a *AuthGetter) GetCredentials(serverURL string) (user string, secret string, err error) {
+// GetCredentials returns registry credentials
+func (a *AuthGetter) GetCredentials(ctx context.Context, serverURL string) (user string, secret string, err error) {
 	if strings.Contains(serverURL, "gcr.io") {
-		return a.GetGCRCredentials(serverURL, a.ProjectID)
+		return a.GetGCRCredentials(ctx, serverURL, a.ProjectID)
 	} else if strings.Contains(serverURL, "pkg.dev") {
-		return a.GetGARCredentials(serverURL, a.ProjectID)
+		return a.GetGARCredentials(ctx, serverURL, a.ProjectID)
 	} else if strings.Contains(serverURL, "registry.digitalocean.com") {
-		return a.GetDOCRCredentials(serverURL, a.ProjectID)
+		return a.GetDOCRCredentials(ctx, serverURL, a.ProjectID)
 	} else if strings.Contains(serverURL, "index.docker.io") {
-		return a.GetDockerHubCredentials(serverURL, a.ProjectID)
+		return a.GetDockerHubCredentials(ctx, serverURL, a.ProjectID)
 	} else if strings.Contains(serverURL, "azurecr.io") {
-		return a.GetACRCredentials(serverURL, a.ProjectID)
+		return a.GetACRCredentials(ctx, serverURL, a.ProjectID)
 	}
 
-	return a.GetECRCredentials(serverURL, a.ProjectID)
+	return a.GetECRCredentials(ctx, serverURL, a.ProjectID)
 }
 
-func (a *AuthGetter) GetGCRCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetGCRCredentials returns GCR credentials
+func (a *AuthGetter) GetGCRCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	if err != nil {
 		return "", "", err
 	}
@@ -78,7 +80,7 @@ func (a *AuthGetter) GetGCRCredentials(serverURL string, projID uint) (user stri
 		token = cachedEntry.AuthorizationToken
 	} else {
 		// get a token from the server
-		tokenResp, err := a.Client.GetGCRAuthorizationToken(context.Background(), projID, &types.GetRegistryGCRTokenRequest{
+		tokenResp, err := a.Client.GetGCRAuthorizationToken(ctx, projID, &types.GetRegistryGCRTokenRequest{
 			ServerURL: serverURL,
 		})
 		if err != nil {
@@ -99,7 +101,8 @@ func (a *AuthGetter) GetGCRCredentials(serverURL string, projID uint) (user stri
 	return "oauth2accesstoken", token, nil
 }
 
-func (a *AuthGetter) GetGARCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetGARCredentials returns GAR credentials
+func (a *AuthGetter) GetGARCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	if err != nil {
 		return "", "", err
 	}
@@ -123,7 +126,7 @@ func (a *AuthGetter) GetGARCredentials(serverURL string, projID uint) (user stri
 		token = cachedEntry.AuthorizationToken
 	} else {
 		// get a token from the server
-		tokenResp, err := a.Client.GetGARAuthorizationToken(context.Background(), projID, &types.GetRegistryGARTokenRequest{
+		tokenResp, err := a.Client.GetGARAuthorizationToken(ctx, projID, &types.GetRegistryGARTokenRequest{
 			ServerURL: serverURL,
 		})
 		if err != nil {
@@ -144,7 +147,8 @@ func (a *AuthGetter) GetGARCredentials(serverURL string, projID uint) (user stri
 	return "oauth2accesstoken", token, nil
 }
 
-func (a *AuthGetter) GetDOCRCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetDOCRCredentials returns DOCR credentials
+func (a *AuthGetter) GetDOCRCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	cachedEntry := a.Cache.Get(serverURL)
 
 	var token string
@@ -154,7 +158,7 @@ func (a *AuthGetter) GetDOCRCredentials(serverURL string, projID uint) (user str
 	} else {
 
 		// get a token from the server
-		tokenResp, err := a.Client.GetDOCRAuthorizationToken(context.Background(), projID, &types.GetRegistryGCRTokenRequest{
+		tokenResp, err := a.Client.GetDOCRAuthorizationToken(ctx, projID, &types.GetRegistryGCRTokenRequest{
 			ServerURL: serverURL,
 		})
 		if err != nil {
@@ -180,7 +184,8 @@ func (a *AuthGetter) GetDOCRCredentials(serverURL string, projID uint) (user str
 
 var ecrPattern = regexp.MustCompile(`(^[a-zA-Z0-9][a-zA-Z0-9-_]*)\.dkr\.ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.amazonaws\.com(\.cn)?`)
 
-func (a *AuthGetter) GetECRCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetECRCredentials returns ECR credentials
+func (a *AuthGetter) GetECRCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	// parse the server url for region
 	matches := ecrPattern.FindStringSubmatch(serverURL)
 
@@ -201,7 +206,7 @@ func (a *AuthGetter) GetECRCredentials(serverURL string, projID uint) (user stri
 		token = cachedEntry.AuthorizationToken
 	} else {
 		// get a token from the server
-		tokenResp, err := a.Client.GetECRAuthorizationToken(context.Background(), projID, &types.GetRegistryECRTokenRequest{
+		tokenResp, err := a.Client.GetECRAuthorizationToken(ctx, projID, &types.GetRegistryECRTokenRequest{
 			Region:    matches[3],
 			AccountID: matches[1],
 		})
@@ -223,7 +228,8 @@ func (a *AuthGetter) GetECRCredentials(serverURL string, projID uint) (user stri
 	return decodeDockerToken(token)
 }
 
-func (a *AuthGetter) GetDockerHubCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetDockerHubCredentials returns dockerhub credentials
+func (a *AuthGetter) GetDockerHubCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	cachedEntry := a.Cache.Get(serverURL)
 	var token string
 
@@ -231,7 +237,7 @@ func (a *AuthGetter) GetDockerHubCredentials(serverURL string, projID uint) (use
 		token = cachedEntry.AuthorizationToken
 	} else {
 		// get a token from the server
-		tokenResp, err := a.Client.GetDockerhubAuthorizationToken(context.Background(), projID)
+		tokenResp, err := a.Client.GetDockerhubAuthorizationToken(ctx, projID)
 		if err != nil {
 			return "", "", err
 		}
@@ -250,7 +256,8 @@ func (a *AuthGetter) GetDockerHubCredentials(serverURL string, projID uint) (use
 	return decodeDockerToken(token)
 }
 
-func (a *AuthGetter) GetACRCredentials(serverURL string, projID uint) (user string, secret string, err error) {
+// GetACRCredentials returns ACR credentials
+func (a *AuthGetter) GetACRCredentials(ctx context.Context, serverURL string, projID uint) (user string, secret string, err error) {
 	cachedEntry := a.Cache.Get(serverURL)
 	var token string
 
@@ -258,7 +265,7 @@ func (a *AuthGetter) GetACRCredentials(serverURL string, projID uint) (user stri
 		token = cachedEntry.AuthorizationToken
 	} else {
 		req := &types.GetRegistryACRTokenRequest{ServerURL: serverURL}
-		tokenResp, err := a.Client.GetACRAuthorizationToken(context.Background(), projID, req)
+		tokenResp, err := a.Client.GetACRAuthorizationToken(ctx, projID, req)
 		if err != nil {
 			return "", "", err
 		}

+ 2 - 2
cli/cmd/docker/builder.go

@@ -33,7 +33,7 @@ type BuildOpts struct {
 }
 
 // BuildLocal
-func (a *Agent) BuildLocal(opts *BuildOpts) (err error) {
+func (a *Agent) BuildLocal(ctx context.Context, opts *BuildOpts) (err error) {
 	dockerfilePath := opts.DockerfilePath
 
 	// attempt to read dockerignore file and paths
@@ -84,7 +84,7 @@ func (a *Agent) BuildLocal(opts *BuildOpts) (err error) {
 	inlineCacheVal := "1"
 	buildArgs["BUILDKIT_INLINE_CACHE"] = &inlineCacheVal
 
-	out, err := a.ImageBuild(context.Background(), tar, types.ImageBuildOptions{
+	out, err := a.ImageBuild(ctx, tar, types.ImageBuildOptions{
 		Dockerfile: dockerfilePath,
 		BuildArgs:  buildArgs,
 		Tags: []string{

+ 4 - 5
cli/cmd/docker/config.go

@@ -11,8 +11,7 @@ const label = "CreatedByPorterCLI"
 
 // NewAgentFromEnv creates a new Docker agent using the environment variables set
 // on the host
-func NewAgentFromEnv() (*Agent, error) {
-	ctx := context.Background()
+func NewAgentFromEnv(ctx context.Context) (*Agent, error) {
 	cli, err := client.NewClientWithOpts(
 		client.FromEnv,
 		client.WithAPIVersionNegotiation(),
@@ -23,13 +22,13 @@ func NewAgentFromEnv() (*Agent, error) {
 
 	return &Agent{
 		Client: cli,
-		ctx:    ctx,
 		label:  label,
 	}, nil
 }
 
-func NewAgentWithAuthGetter(client *api.Client, projID uint) (*Agent, error) {
-	agent, err := NewAgentFromEnv()
+// NewAgentWithAuthGetter returns a docker agent which can connect to a given registry
+func NewAgentWithAuthGetter(ctx context.Context, client api.Client, projID uint) (*Agent, error) {
+	agent, err := NewAgentFromEnv(ctx)
 	if err != nil {
 		return nil, err
 	}

+ 47 - 46
cli/cmd/docker/porter.go

@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -33,8 +34,8 @@ type PorterStartOpts struct {
 
 // StartPorter creates a new Docker agent using the host environment, and creates a
 // new Porter instance
-func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
-	agent, err = NewAgentFromEnv()
+func StartPorter(ctx context.Context, opts *PorterStartOpts) (agent *Agent, id string, err error) {
+	agent, err = NewAgentFromEnv(ctx)
 
 	if err != nil {
 		return nil, "", err
@@ -46,7 +47,7 @@ func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
 	// the volumes passed to the Porter container
 	volumesMap := make(map[string]struct{})
 
-	netID, err := agent.CreateBridgeNetworkIfNotExist("porter_network_" + opts.ProcessID)
+	netID, err := agent.CreateBridgeNetworkIfNotExist(ctx, "porter_network_"+opts.ProcessID)
 	if err != nil {
 		return nil, "", err
 	}
@@ -54,7 +55,7 @@ func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
 	switch opts.DB {
 	case SQLite:
 		// check if sqlite volume exists, create it if not
-		vol, err := agent.CreateLocalVolumeIfNotExist("porter_sqlite_" + opts.ProcessID)
+		vol, err := agent.CreateLocalVolumeIfNotExist(ctx, "porter_sqlite_"+opts.ProcessID)
 		if err != nil {
 			return nil, "", err
 		}
@@ -77,7 +78,7 @@ func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
 		}...)
 	case Postgres:
 		// check if postgres volume exists, create it if not
-		vol, err := agent.CreateLocalVolumeIfNotExist("porter_postgres_" + opts.ProcessID)
+		vol, err := agent.CreateLocalVolumeIfNotExist(ctx, "porter_postgres_"+opts.ProcessID)
 		if err != nil {
 			return nil, "", err
 		}
@@ -109,12 +110,12 @@ func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
 			},
 		}
 
-		pgID, err := agent.StartPostgresContainer(startOpts)
+		pgID, err := agent.StartPostgresContainer(ctx, startOpts)
 		if err != nil {
 			return nil, "", err
 		}
 
-		err = agent.WaitForContainerHealthy(pgID, 10)
+		err = agent.WaitForContainerHealthy(ctx, pgID, 10)
 
 		if err != nil {
 			return nil, "", err
@@ -144,13 +145,13 @@ func StartPorter(opts *PorterStartOpts) (agent *Agent, id string, err error) {
 		Env:           opts.Env,
 	}
 
-	id, err = agent.StartPorterContainer(startOpts)
+	id, err = agent.StartPorterContainer(ctx, startOpts)
 
 	if err != nil {
 		return nil, "", err
 	}
 
-	err = agent.WaitForContainerHealthy(id, 10)
+	err = agent.WaitForContainerHealthy(ctx, id, 10)
 
 	if err != nil {
 		return nil, "", err
@@ -174,20 +175,20 @@ type PorterServerStartOpts struct {
 
 // StartPorterContainer pulls a specific Porter image and starts a container
 // using the Docker engine. It returns the container ID
-func (a *Agent) StartPorterContainer(opts PorterServerStartOpts) (string, error) {
-	id, err := a.upsertPorterContainer(opts)
+func (a *Agent) StartPorterContainer(ctx context.Context, opts PorterServerStartOpts) (string, error) {
+	id, err := a.upsertPorterContainer(ctx, opts)
 	if err != nil {
 		return "", err
 	}
 
-	err = a.startPorterContainer(id)
+	err = a.startPorterContainer(ctx, id)
 
 	if err != nil {
 		return "", err
 	}
 
 	// attach container to network
-	err = a.ConnectContainerToNetwork(opts.NetworkID, id, opts.Name)
+	err = a.ConnectContainerToNetwork(ctx, opts.NetworkID, id, opts.Name)
 
 	if err != nil {
 		return "", err
@@ -200,20 +201,20 @@ func (a *Agent) StartPorterContainer(opts PorterServerStartOpts) (string, error)
 // if spec has changed, remove and recreate container
 // if container does not exist, create the container
 // otherwise, return stopped container
-func (a *Agent) upsertPorterContainer(opts PorterServerStartOpts) (id string, err error) {
-	containers, err := a.getContainersCreatedByStart()
+func (a *Agent) upsertPorterContainer(ctx context.Context, opts PorterServerStartOpts) (id string, err error) {
+	containers, err := a.getContainersCreatedByStart(ctx) // nolint:ineffassign,staticcheck // linter complaining, do not want to change logic incase intentional
 
 	// remove the matching container
 	for _, container := range containers {
 		if len(container.Names) > 0 && container.Names[0] == "/"+opts.Name {
 			timeout, _ := time.ParseDuration("15s")
 
-			err := a.ContainerStop(a.ctx, container.ID, &timeout)
+			err := a.ContainerStop(ctx, container.ID, &timeout)
 			if err != nil {
 				return "", a.handleDockerClientErr(err, "Could not stop container "+container.ID)
 			}
 
-			err = a.ContainerRemove(a.ctx, container.ID, types.ContainerRemoveOptions{})
+			err = a.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{})
 
 			if err != nil {
 				return "", a.handleDockerClientErr(err, "Could not remove container "+container.ID)
@@ -221,12 +222,12 @@ func (a *Agent) upsertPorterContainer(opts PorterServerStartOpts) (id string, er
 		}
 	}
 
-	return a.pullAndCreatePorterContainer(opts)
+	return a.pullAndCreatePorterContainer(ctx, opts)
 }
 
 // create the container and return its id
-func (a *Agent) pullAndCreatePorterContainer(opts PorterServerStartOpts) (id string, err error) {
-	a.PullImage(opts.Image)
+func (a *Agent) pullAndCreatePorterContainer(ctx context.Context, opts PorterServerStartOpts) (id string, err error) {
+	_ = a.PullImage(ctx, opts.Image)
 
 	// format the port array for binding to host machine
 	ports := []string{fmt.Sprintf("127.0.0.1:%d:%d/tcp", opts.HostPort, opts.ContainerPort)}
@@ -240,7 +241,7 @@ func (a *Agent) pullAndCreatePorterContainer(opts PorterServerStartOpts) (id str
 	labels[a.label] = "true"
 
 	// create the container with a label specifying this was created via the CLI
-	resp, err := a.ContainerCreate(a.ctx, &container.Config{
+	resp, err := a.ContainerCreate(ctx, &container.Config{
 		Image:   opts.Image,
 		Cmd:     opts.StartCmd,
 		Tty:     false,
@@ -265,8 +266,8 @@ func (a *Agent) pullAndCreatePorterContainer(opts PorterServerStartOpts) (id str
 }
 
 // start the container
-func (a *Agent) startPorterContainer(id string) error {
-	if err := a.ContainerStart(a.ctx, id, types.ContainerStartOptions{}); err != nil {
+func (a *Agent) startPorterContainer(ctx context.Context, id string) error {
+	if err := a.ContainerStart(ctx, id, types.ContainerStartOptions{}); err != nil {
 		return a.handleDockerClientErr(err, "Could not start Porter container")
 	}
 
@@ -285,20 +286,20 @@ type PostgresOpts struct {
 
 // StartPostgresContainer pulls a specific Porter image and starts a container
 // using the Docker engine
-func (a *Agent) StartPostgresContainer(opts PostgresOpts) (string, error) {
-	id, err := a.upsertPostgresContainer(opts)
+func (a *Agent) StartPostgresContainer(ctx context.Context, opts PostgresOpts) (string, error) {
+	id, err := a.upsertPostgresContainer(ctx, opts)
 	if err != nil {
 		return "", err
 	}
 
-	err = a.startPostgresContainer(id)
+	err = a.startPostgresContainer(ctx, id)
 
 	if err != nil {
 		return "", err
 	}
 
 	// attach container to network
-	err = a.ConnectContainerToNetwork(opts.NetworkID, id, opts.Name)
+	err = a.ConnectContainerToNetwork(ctx, opts.NetworkID, id, opts.Name)
 
 	if err != nil {
 		return "", err
@@ -311,15 +312,15 @@ func (a *Agent) StartPostgresContainer(opts PostgresOpts) (string, error) {
 // if it is running, stop it
 // if it is stopped, return id
 // if it does not exist, create it and return it
-func (a *Agent) upsertPostgresContainer(opts PostgresOpts) (id string, err error) {
-	containers, err := a.getContainersCreatedByStart()
+func (a *Agent) upsertPostgresContainer(ctx context.Context, opts PostgresOpts) (id string, err error) {
+	containers, err := a.getContainersCreatedByStart(ctx) // nolint:ineffassign,staticcheck // linter complaining, do not want to change logic incase intentional
 
 	// stop the matching container and return it
 	for _, container := range containers {
 		if len(container.Names) > 0 && container.Names[0] == "/"+opts.Name {
 			timeout, _ := time.ParseDuration("15s")
 
-			err := a.ContainerStop(a.ctx, container.ID, &timeout)
+			err := a.ContainerStop(ctx, container.ID, &timeout)
 			if err != nil {
 				return "", a.handleDockerClientErr(err, "Could not stop postgres container "+container.ID)
 			}
@@ -328,18 +329,18 @@ func (a *Agent) upsertPostgresContainer(opts PostgresOpts) (id string, err error
 		}
 	}
 
-	return a.pullAndCreatePostgresContainer(opts)
+	return a.pullAndCreatePostgresContainer(ctx, opts)
 }
 
 // create the container and return it
-func (a *Agent) pullAndCreatePostgresContainer(opts PostgresOpts) (id string, err error) {
-	a.PullImage(opts.Image)
+func (a *Agent) pullAndCreatePostgresContainer(ctx context.Context, opts PostgresOpts) (id string, err error) {
+	_ = a.PullImage(ctx, opts.Image) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	labels := make(map[string]string)
 	labels[a.label] = "true"
 
 	// create the container with a label specifying this was created via the CLI
-	resp, err := a.ContainerCreate(a.ctx, &container.Config{
+	resp, err := a.ContainerCreate(ctx, &container.Config{
 		Image:   opts.Image,
 		Tty:     false,
 		Labels:  labels,
@@ -365,8 +366,8 @@ func (a *Agent) pullAndCreatePostgresContainer(opts PostgresOpts) (id string, er
 }
 
 // start the container in the background
-func (a *Agent) startPostgresContainer(id string) error {
-	if err := a.ContainerStart(a.ctx, id, types.ContainerStartOptions{}); err != nil {
+func (a *Agent) startPostgresContainer(ctx context.Context, id string) error {
+	if err := a.ContainerStart(ctx, id, types.ContainerStartOptions{}); err != nil {
 		return a.handleDockerClientErr(err, "Could not start Postgres container")
 	}
 
@@ -375,8 +376,8 @@ func (a *Agent) startPostgresContainer(id string) error {
 
 // StopPorterContainers finds all containers that were started via the CLI and stops them
 // -- removes the container if remove is set to true
-func (a *Agent) StopPorterContainers(remove bool) error {
-	containers, err := a.getContainersCreatedByStart()
+func (a *Agent) StopPorterContainers(ctx context.Context, remove bool) error {
+	containers, err := a.getContainersCreatedByStart(ctx)
 	if err != nil {
 		return err
 	}
@@ -385,13 +386,13 @@ func (a *Agent) StopPorterContainers(remove bool) error {
 	for _, container := range containers {
 		timeout, _ := time.ParseDuration("15s")
 
-		err := a.ContainerStop(a.ctx, container.ID, &timeout)
+		err := a.ContainerStop(ctx, container.ID, &timeout)
 		if err != nil {
 			return a.handleDockerClientErr(err, "Could not stop container "+container.ID)
 		}
 
 		if remove {
-			err = a.ContainerRemove(a.ctx, container.ID, types.ContainerRemoveOptions{})
+			err = a.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{})
 
 			if err != nil {
 				return a.handleDockerClientErr(err, "Could not remove container "+container.ID)
@@ -405,8 +406,8 @@ func (a *Agent) StopPorterContainers(remove bool) error {
 // StopPorterContainersWithProcessID finds all containers that were started via the CLI
 // and have a given process id and stops them -- removes the container if remove is set
 // to true
-func (a *Agent) StopPorterContainersWithProcessID(processID string, remove bool) error {
-	containers, err := a.getContainersCreatedByStart()
+func (a *Agent) StopPorterContainersWithProcessID(ctx context.Context, processID string, remove bool) error {
+	containers, err := a.getContainersCreatedByStart(ctx)
 	if err != nil {
 		return err
 	}
@@ -416,13 +417,13 @@ func (a *Agent) StopPorterContainersWithProcessID(processID string, remove bool)
 		if strings.Contains(container.Names[0], "_"+processID) {
 			timeout, _ := time.ParseDuration("15s")
 
-			err := a.ContainerStop(a.ctx, container.ID, &timeout)
+			err := a.ContainerStop(ctx, container.ID, &timeout)
 			if err != nil {
 				return a.handleDockerClientErr(err, "Could not stop container "+container.ID)
 			}
 
 			if remove {
-				err = a.ContainerRemove(a.ctx, container.ID, types.ContainerRemoveOptions{})
+				err = a.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{})
 
 				if err != nil {
 					return a.handleDockerClientErr(err, "Could not remove container "+container.ID)
@@ -436,8 +437,8 @@ func (a *Agent) StopPorterContainersWithProcessID(processID string, remove bool)
 
 // getContainersCreatedByStart gets all containers that were created by the "porter start"
 // command by looking for the label "CreatedByPorterCLI" (or .label of the agent)
-func (a *Agent) getContainersCreatedByStart() ([]types.Container, error) {
-	containers, err := a.ContainerList(a.ctx, types.ContainerListOptions{
+func (a *Agent) getContainersCreatedByStart(ctx context.Context) ([]types.Container, error) {
+	containers, err := a.ContainerList(ctx, types.ContainerListOptions{
 		All: true,
 	})
 	if err != nil {

+ 18 - 5
cli/cmd/errors.go

@@ -3,6 +3,7 @@ package cmd
 import (
 	"context"
 	"errors"
+	"fmt"
 	"os"
 	"strings"
 
@@ -18,10 +19,22 @@ var (
 	ErrCannotConnect error = errors.New("Unable to connect to the Porter server.")
 )
 
-func checkLoginAndRun(args []string, runner func(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error) error {
-	client := config.GetAPIClient()
+func checkLoginAndRun(ctx context.Context, args []string, runner func(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error) error {
+	cliConf, err := config.InitAndLoadConfig()
+	if err != nil {
+		return fmt.Errorf("error loading porter config: %w", err)
+	}
+
+	client, err := api.NewClientWithConfig(ctx, api.NewClientInput{
+		BaseURL:        fmt.Sprintf("%s/api", cliConf.Host),
+		BearerToken:    cliConf.Token,
+		CookieFileName: "cookie.json",
+	})
+	if err != nil {
+		return fmt.Errorf("error creating porter API client: %w", err)
+	}
 
-	user, err := client.AuthCheck(context.Background())
+	user, err := client.AuthCheck(ctx)
 	if err != nil {
 		red := color.New(color.FgRed)
 
@@ -39,7 +52,7 @@ func checkLoginAndRun(args []string, runner func(user *types.GetAuthenticatedUse
 		return err
 	}
 
-	err = runner(user, client, args)
+	err = runner(ctx, user, client, cliConf, args)
 
 	if err != nil {
 		red := color.New(color.FgRed)
@@ -54,7 +67,7 @@ func checkLoginAndRun(args []string, runner func(user *types.GetAuthenticatedUse
 			return nil
 		}
 
-		cliErrors.GetErrorHandler().HandleError(err)
+		cliErrors.GetErrorHandler(cliConf).HandleError(err)
 
 		return err
 	}

+ 14 - 6
cli/cmd/errors/error_handler.go

@@ -10,6 +10,7 @@ import (
 	"github.com/porter-dev/porter/cli/cmd/config"
 )
 
+// SentryDSN is a global value for sentry's dsn. This should be removed
 var SentryDSN string = ""
 
 type errorHandler interface {
@@ -18,21 +19,25 @@ type errorHandler interface {
 
 type standardErrorHandler struct{}
 
+// HandleError implements errorhandler for handling non-sentry errors
 func (h *standardErrorHandler) HandleError(err error) {
 	color.New(color.FgRed).Fprintf(os.Stderr, "error: %s\n", err.Error())
 }
 
-type sentryErrorHandler struct{}
+type sentryErrorHandler struct {
+	cliConfig config.CLIConfig
+}
 
+// HandleError implements errorhandler for handling sentry errors
 func (h *sentryErrorHandler) HandleError(err error) {
 	if SentryDSN != "" {
 		localHub := sentry.CurrentHub().Clone()
 
 		localHub.ConfigureScope(func(scope *sentry.Scope) {
 			scope.SetTags(map[string]string{
-				"host":    config.GetCLIConfig().Host,
-				"project": fmt.Sprintf("%d", config.GetCLIConfig().Project),
-				"cluster": fmt.Sprintf("%d", config.GetCLIConfig().Cluster),
+				"host":    h.cliConfig.Host,
+				"project": fmt.Sprintf("%d", h.cliConfig.Project),
+				"cluster": fmt.Sprintf("%d", h.cliConfig.Cluster),
 			})
 		})
 
@@ -43,9 +48,12 @@ func (h *sentryErrorHandler) HandleError(err error) {
 	color.New(color.FgRed).Fprintf(os.Stderr, "error: %s\n", err.Error())
 }
 
-func GetErrorHandler() errorHandler {
+// GetErrorHandler returns an errorhandler.
+func GetErrorHandler(cliConf config.CLIConfig) errorHandler {
 	if SentryDSN != "" {
-		return &sentryErrorHandler{}
+		return &sentryErrorHandler{
+			cliConfig: cliConf,
+		}
 	}
 
 	return &standardErrorHandler{}

+ 7 - 10
cli/cmd/get.go

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	api "github.com/porter-dev/porter/api/client"
@@ -22,7 +23,7 @@ var getCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Fetches a release.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, get)
+		err := checkLoginAndRun(cmd.Context(), args, get)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -35,7 +36,7 @@ var getValuesCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Fetches the Helm values for a release.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, getValues)
+		err := checkLoginAndRun(cmd.Context(), args, getValues)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -72,9 +73,7 @@ type getReleaseInfo struct {
 	RevisionID   int       `json:"revision_id" yaml:"revision_id"`
 }
 
-func get(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func get(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -88,7 +87,7 @@ func get(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 		return nil
 	}
 
-	rel, err := client.GetRelease(context.Background(), cliConf.Project, cliConf.Cluster, namespace, args[0])
+	rel, err := client.GetRelease(ctx, cliConf.Project, cliConf.Cluster, namespace, args[0])
 	if err != nil {
 		return err
 	}
@@ -126,9 +125,7 @@ func get(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 	return nil
 }
 
-func getValues(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func getValues(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -142,7 +139,7 @@ func getValues(_ *types.GetAuthenticatedUserResponse, client *api.Client, args [
 		return nil
 	}
 
-	rel, err := client.GetRelease(context.Background(), cliConf.Project, cliConf.Cluster, namespace, args[0])
+	rel, err := client.GetRelease(ctx, cliConf.Project, cliConf.Cluster, namespace, args[0])
 	if err != nil {
 		return err
 	}

+ 8 - 8
cli/cmd/github/release.go

@@ -44,8 +44,8 @@ type ZIPReleaseGetter struct {
 }
 
 // GetLatestRelease downloads the latest .zip release from a given Github repository
-func (z *ZIPReleaseGetter) GetLatestRelease() error {
-	releaseURL, err := z.getLatestReleaseDownloadURL()
+func (z *ZIPReleaseGetter) GetLatestRelease(ctx context.Context) error {
+	releaseURL, err := z.getLatestReleaseDownloadURL(ctx)
 	if err != nil {
 		return err
 	}
@@ -54,8 +54,8 @@ func (z *ZIPReleaseGetter) GetLatestRelease() error {
 }
 
 // GetRelease downloads a specific .zip release from a given Github repository
-func (z *ZIPReleaseGetter) GetRelease(releaseTag string) error {
-	releaseURL, err := z.getReleaseDownloadURL(releaseTag)
+func (z *ZIPReleaseGetter) GetRelease(ctx context.Context, releaseTag string) error {
+	releaseURL, err := z.getReleaseDownloadURL(ctx, releaseTag)
 
 	fmt.Printf("getting release %s\n", releaseURL)
 
@@ -85,10 +85,10 @@ func (z *ZIPReleaseGetter) getReleaseFromURL(releaseURL string) error {
 }
 
 // retrieves the download url for the latest release of an asset
-func (z *ZIPReleaseGetter) getLatestReleaseDownloadURL() (string, error) {
+func (z *ZIPReleaseGetter) getLatestReleaseDownloadURL(ctx context.Context) (string, error) {
 	client := github.NewClient(nil)
 
-	rel, _, err := client.Repositories.GetLatestRelease(context.Background(), z.EntityID, z.RepoName)
+	rel, _, err := client.Repositories.GetLatestRelease(ctx, z.EntityID, z.RepoName)
 	if err != nil {
 		return "", err
 	}
@@ -110,10 +110,10 @@ func (z *ZIPReleaseGetter) getLatestReleaseDownloadURL() (string, error) {
 	return releaseURL, nil
 }
 
-func (z *ZIPReleaseGetter) getReleaseDownloadURL(releaseTag string) (string, error) {
+func (z *ZIPReleaseGetter) getReleaseDownloadURL(ctx context.Context, releaseTag string) (string, error) {
 	client := github.NewClient(nil)
 
-	rel, _, err := client.Repositories.GetReleaseByTag(context.Background(), z.EntityID, z.RepoName, releaseTag)
+	rel, _, err := client.Repositories.GetReleaseByTag(ctx, z.EntityID, z.RepoName, releaseTag)
 	if err != nil {
 		return "", fmt.Errorf("release %s does not exist", releaseTag)
 	}

+ 5 - 3
cli/cmd/helm.go

@@ -1,12 +1,14 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
 
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/spf13/cobra"
 )
 
@@ -14,7 +16,7 @@ var helmCmd = &cobra.Command{
 	Use:   "helm",
 	Short: "Use helm to interact with a Porter cluster",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runHelm)
+		err := checkLoginAndRun(cmd.Context(), args, runHelm)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -25,13 +27,13 @@ func init() {
 	rootCmd.AddCommand(helmCmd)
 }
 
-func runHelm(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func runHelm(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	_, err := exec.LookPath("helm")
 	if err != nil {
 		return fmt.Errorf("error finding helm: %w", err)
 	}
 
-	tmpFile, err := downloadTempKubeconfig(client)
+	tmpFile, err := downloadTempKubeconfig(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}

+ 15 - 18
cli/cmd/job.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	"github.com/fatih/color"
@@ -43,7 +44,7 @@ use the --namespace flag:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter job update-images --namespace custom-namespace --image-repo-uri my-image.registry.io --tag newtag"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, batchImageUpdate)
+		err := checkLoginAndRun(cmd.Context(), args, batchImageUpdate)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -73,7 +74,7 @@ use the --namespace flag:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter job wait --name job-example --namespace custom-namespace"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, waitForJob)
+		err := checkLoginAndRun(cmd.Context(), args, waitForJob)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -103,7 +104,7 @@ use the --namespace flag:
 		color.New(color.FgGreen, color.Bold).Sprintf("porter job run --name job-example --namespace custom-namespace"),
 	),
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runJob)
+		err := checkLoginAndRun(cmd.Context(), args, runJob)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -176,9 +177,7 @@ func init() {
 	runJobCmd.MarkPersistentFlagRequired("name")
 }
 
-func batchImageUpdate(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func batchImageUpdate(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -195,7 +194,7 @@ func batchImageUpdate(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	color.New(color.FgGreen).Println("Updating all jobs which use the image:", imageRepoURI)
 
 	return client.UpdateBatchImage(
-		context.TODO(),
+		ctx,
 		cliConf.Project,
 		cliConf.Cluster,
 		namespace,
@@ -207,9 +206,7 @@ func batchImageUpdate(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 }
 
 // waits for a job with a given name/namespace
-func waitForJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func waitForJob(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -223,7 +220,7 @@ func waitForJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 		return nil
 	}
 
-	return wait.WaitForJob(client, &wait.WaitOpts{
+	return wait.WaitForJob(ctx, client, &wait.WaitOpts{
 		ProjectID: cliConf.Project,
 		ClusterID: cliConf.Cluster,
 		Namespace: namespace,
@@ -231,9 +228,7 @@ func waitForJob(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	})
 }
 
-func runJob(authRes *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func runJob(ctx context.Context, authRes *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -263,14 +258,16 @@ func runJob(authRes *types.GetAuthenticatedUserResponse, client *api.Client, arg
 		},
 	}
 
-	err = updateAgent.UpdateImageAndValues(map[string]interface{}{
-		"paused": false,
-	})
+	err = updateAgent.UpdateImageAndValues(
+		ctx,
+		map[string]interface{}{
+			"paused": false,
+		})
 	if err != nil {
 		return fmt.Errorf("error running job: %w", err)
 	}
 
-	err = waitForJob(authRes, client, args)
+	err = waitForJob(ctx, authRes, client, cliConf, args)
 
 	if err != nil {
 		return fmt.Errorf("error waiting for job to complete: %w", err)

+ 6 - 5
cli/cmd/kubectl.go

@@ -8,6 +8,7 @@ import (
 
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/spf13/cobra"
 )
 
@@ -15,7 +16,7 @@ var kubectlCmd = &cobra.Command{
 	Use:   "kubectl",
 	Short: "Use kubectl to interact with a Porter cluster",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, runKubectl)
+		err := checkLoginAndRun(cmd.Context(), args, runKubectl)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -26,13 +27,13 @@ func init() {
 	rootCmd.AddCommand(kubectlCmd)
 }
 
-func runKubectl(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func runKubectl(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	_, err := exec.LookPath("kubectl")
 	if err != nil {
 		return fmt.Errorf("error finding kubectl: %w", err)
 	}
 
-	tmpFile, err := downloadTempKubeconfig(client)
+	tmpFile, err := downloadTempKubeconfig(ctx, client, cliConf)
 	if err != nil {
 		return err
 	}
@@ -57,7 +58,7 @@ func runKubectl(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	return nil
 }
 
-func downloadTempKubeconfig(client *api.Client) (string, error) {
+func downloadTempKubeconfig(ctx context.Context, client api.Client, cliConf config.CLIConfig) (string, error) {
 	tmpFile, err := os.CreateTemp("", "porter_kubeconfig_*.yaml")
 	if err != nil {
 		return "", fmt.Errorf("error creating temp file for kubeconfig: %w", err)
@@ -65,7 +66,7 @@ func downloadTempKubeconfig(client *api.Client) (string, error) {
 
 	defer tmpFile.Close()
 
-	resp, err := client.GetKubeconfig(context.Background(), cliConf.Project, cliConf.Cluster, cliConf.Kubeconfig)
+	resp, err := client.GetKubeconfig(ctx, cliConf.Project, cliConf.Cluster, cliConf.Kubeconfig)
 	if err != nil {
 		return "", fmt.Errorf("error fetching kubeconfig for cluster: %w", err)
 	}

+ 16 - 21
cli/cmd/list.go

@@ -6,6 +6,7 @@ import (
 	"os"
 	"text/tabwriter"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	"github.com/fatih/color"
@@ -23,7 +24,7 @@ var listCmd = &cobra.Command{
 	Short: "List applications, addons or jobs.",
 	Run: func(cmd *cobra.Command, args []string) {
 		if len(args) == 0 || (args[0] == "all") {
-			err := checkLoginAndRun(args, listAll)
+			err := checkLoginAndRun(cmd.Context(), args, listAll)
 			if err != nil {
 				os.Exit(1)
 			}
@@ -38,7 +39,7 @@ var listAppsCmd = &cobra.Command{
 	Aliases: []string{"applications", "app", "application"},
 	Short:   "Lists applications in a specific namespace, or across all namespaces",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listApps)
+		err := checkLoginAndRun(cmd.Context(), args, listApps)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -50,7 +51,7 @@ var listJobsCmd = &cobra.Command{
 	Aliases: []string{"job"},
 	Short:   "Lists jobs in a specific namespace, or across all namespaces",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listJobs)
+		err := checkLoginAndRun(cmd.Context(), args, listJobs)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -62,7 +63,7 @@ var listAddonsCmd = &cobra.Command{
 	Aliases: []string{"addon"},
 	Short:   "Lists addons in a specific namespace, or across all namespaces",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listAddons)
+		err := checkLoginAndRun(cmd.Context(), args, listAddons)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -91,9 +92,7 @@ func init() {
 	rootCmd.AddCommand(listCmd)
 }
 
-func listAll(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func listAll(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -107,7 +106,7 @@ func listAll(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []s
 		return nil
 	}
 
-	err = writeReleases(client, "all")
+	err = writeReleases(ctx, client, cliConf, "all")
 	if err != nil {
 		return err
 	}
@@ -115,9 +114,7 @@ func listAll(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []s
 	return nil
 }
 
-func listApps(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func listApps(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -131,7 +128,7 @@ func listApps(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []
 		return nil
 	}
 
-	err = writeReleases(client, "application")
+	err = writeReleases(ctx, client, cliConf, "application")
 	if err != nil {
 		return err
 	}
@@ -139,9 +136,7 @@ func listApps(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []
 	return nil
 }
 
-func listJobs(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func listJobs(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -155,7 +150,7 @@ func listJobs(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []
 		return nil
 	}
 
-	err = writeReleases(client, "job")
+	err = writeReleases(ctx, client, cliConf, "job")
 	if err != nil {
 		return err
 	}
@@ -163,8 +158,8 @@ func listJobs(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []
 	return nil
 }
 
-func listAddons(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	err := writeReleases(client, "addon")
+func listAddons(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	err := writeReleases(ctx, client, cliConf, "addon")
 	if err != nil {
 		return err
 	}
@@ -172,12 +167,12 @@ func listAddons(_ *types.GetAuthenticatedUserResponse, client *api.Client, args
 	return nil
 }
 
-func writeReleases(client *api.Client, kind string) error {
+func writeReleases(ctx context.Context, client api.Client, cliConf config.CLIConfig, kind string) error {
 	var namespaces []string
 	var releases []*release.Release
 
 	if allNamespaces {
-		resp, err := client.GetK8sNamespaces(context.Background(), cliConf.Project, cliConf.Cluster)
+		resp, err := client.GetK8sNamespaces(ctx, cliConf.Project, cliConf.Cluster)
 		if err != nil {
 			return err
 		}
@@ -192,7 +187,7 @@ func writeReleases(client *api.Client, kind string) error {
 	}
 
 	for _, ns := range namespaces {
-		resp, err := client.ListReleases(context.Background(), cliConf.Project, cliConf.Cluster, ns,
+		resp, err := client.ListReleases(ctx, cliConf.Project, cliConf.Cluster, ns,
 			&types.ListReleasesRequest{
 				ReleaseListFilter: &types.ReleaseListFilter{
 					Limit: 50,

+ 9 - 6
cli/cmd/logs.go

@@ -1,11 +1,13 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 )
@@ -17,7 +19,7 @@ var logsCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Logs the output from a given application.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, logs)
+		err := checkLoginAndRun(cmd.Context(), args, logs)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -45,8 +47,8 @@ func init() {
 	)
 }
 
-func logs(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	podsSimple, err := getPods(client, namespace, args[0])
+func logs(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error {
+	podsSimple, err := getPods(ctx, client, cliConfig, namespace, args[0])
 	if err != nil {
 		return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
 	}
@@ -95,16 +97,17 @@ func logs(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []stri
 	}
 
 	config := &PorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err = config.setSharedConfig()
+	err = config.setSharedConfig(ctx)
 
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
 	}
 
-	_, err = pipePodLogsToStdout(config, namespace, selectedPod.Name, selectedContainerName, follow)
+	_, err = pipePodLogsToStdout(ctx, config, namespace, selectedPod.Name, selectedContainerName, follow)
 
 	return err
 }

+ 23 - 6
cli/cmd/open.go

@@ -1,9 +1,11 @@
 package cmd
 
 import (
-	"context"
 	"fmt"
+	"os"
 
+	"github.com/fatih/color"
+	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 
@@ -14,15 +16,30 @@ var openCmd = &cobra.Command{
 	Use:   "open",
 	Short: "Opens the browser at the currently set Porter instance",
 	Run: func(cmd *cobra.Command, args []string) {
-		client := config.GetAPIClient()
+		ctx := cmd.Context()
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			color.New(color.FgRed).Fprintf(os.Stderr, "error loading porter config: %s\n", err) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
+			os.Exit(1)
+		}
 
-		user, err := client.AuthCheck(context.Background())
+		client, err := api.NewClientWithConfig(ctx, api.NewClientInput{
+			BaseURL:        fmt.Sprintf("%s/api", cliConf.Host),
+			BearerToken:    cliConf.Token,
+			CookieFileName: "cookie.json",
+		})
+		if err != nil {
+			_, _ = color.New(color.FgRed).Fprintf(os.Stderr, "error creating porter API client: %v\n", err)
+			os.Exit(1)
+		}
 
-		if err == nil {
-			utils.OpenBrowser(fmt.Sprintf("%s/login?email=%s", cliConf.Host, user.Email))
-		} else {
+		user, err := client.AuthCheck(ctx)
+		if err != nil {
 			utils.OpenBrowser(fmt.Sprintf("%s/register", cliConf.Host))
+			return
 		}
+
+		utils.OpenBrowser(fmt.Sprintf("%s/login?email=%s", cliConf.Host, user.Email)) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	},
 }
 

+ 6 - 4
cli/cmd/pack/pack.go

@@ -31,9 +31,11 @@ func init() {
 	}
 }
 
+// Agent is a buildpack agent
 type Agent struct{}
 
-func (a *Agent) Build(opts *docker.BuildOpts, buildConfig *types.BuildConfig, cacheImage string) error {
+// Build manages buildpack builds
+func (a *Agent) Build(ctx context.Context, opts *docker.BuildOpts, buildConfig *types.BuildConfig, cacheImage string) error {
 	absPath, err := filepath.Abs(opts.BuildContext)
 	if err != nil {
 		return err
@@ -88,7 +90,7 @@ func (a *Agent) Build(opts *docker.BuildOpts, buildConfig *types.BuildConfig, ca
 					// try to download the repo ZIP from github
 					githubClient := githubApi.NewClient(nil)
 					rel, _, err := githubClient.Repositories.GetLatestRelease(
-						context.Background(),
+						ctx,
 						urlPaths[0],
 						urlPaths[1],
 					)
@@ -97,7 +99,7 @@ func (a *Agent) Build(opts *docker.BuildOpts, buildConfig *types.BuildConfig, ca
 					} else {
 						// default to the current default branch
 						repo, _, err := githubClient.Repositories.Get(
-							context.Background(),
+							ctx,
 							urlPaths[0],
 							urlPaths[1],
 						)
@@ -142,5 +144,5 @@ func (a *Agent) Build(opts *docker.BuildOpts, buildConfig *types.BuildConfig, ca
 		buildOpts.Buildpacks = append(buildOpts.Buildpacks, "heroku/procfile@1.0.1")
 	}
 
-	return sharedPackClient.Build(context.Background(), buildOpts)
+	return sharedPackClient.Build(ctx, buildOpts)
 }

+ 26 - 22
cli/cmd/porter_app/apply.go

@@ -18,16 +18,23 @@ import (
 )
 
 type StackConf struct {
-	apiClient            *api.Client
+	apiClient            api.Client
 	parsed               *Application
 	stackName, namespace string
 	projectID, clusterID uint
 }
 
-func CreateApplicationDeploy(client *api.Client, worker *switchboardWorker.Worker, app *Application, applicationName string, cliConf *config.CLIConfig) ([]*switchboardTypes.Resource, error) {
+// CreateApplicationDeploy creates everything needed to deploy a porter app
+func CreateApplicationDeploy(ctx context.Context, client api.Client, worker *switchboardWorker.Worker, app *Application, applicationName string, cliConf config.CLIConfig) ([]*switchboardTypes.Resource, error) {
+	err := cliConf.ValidateCLIEnvironment()
+	if err != nil {
+		errMsg := composePreviewMessage("porter CLI is not configured correctly", Error)
+		return nil, fmt.Errorf("%s: %w", errMsg, err)
+	}
+
 	// we need to know the builder so that we can inject launcher to the start command later if heroku builder is used
 	var builder string
-	resources, builder, err := createV1BuildResources(client, app, applicationName, cliConf.Project, cliConf.Cluster)
+	resources, builder, err := createV1BuildResources(ctx, client, app, applicationName, cliConf.Project, cliConf.Cluster)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing porter.yaml for build resources: %w", err)
 	}
@@ -39,6 +46,7 @@ func CreateApplicationDeploy(client *api.Client, worker *switchboardWorker.Worke
 
 	deployAppHook := &DeployAppHook{
 		Client:               client,
+		CLIConfig:            cliConf,
 		ApplicationName:      applicationName,
 		ProjectID:            cliConf.Project,
 		ClusterID:            cliConf.Cluster,
@@ -52,7 +60,7 @@ func CreateApplicationDeploy(client *api.Client, worker *switchboardWorker.Worke
 }
 
 // Create app event to signfy start of build
-func createAppEvent(client *api.Client, applicationName string, projectId, clusterId uint) (string, error) {
+func createAppEvent(ctx context.Context, client api.Client, applicationName string, projectId, clusterId uint) (string, error) {
 	var req *types.CreateOrUpdatePorterAppEventRequest
 	if os.Getenv("GITHUB_RUN_ID") != "" {
 		req = &types.CreateOrUpdatePorterAppEventRequest{
@@ -97,7 +105,6 @@ func createAppEvent(client *api.Client, applicationName string, projectId, clust
 		}
 	}
 
-	ctx := context.Background()
 	event, err := client.CreateOrUpdatePorterAppEvent(ctx, projectId, clusterId, applicationName, req)
 	if err != nil {
 		return "", fmt.Errorf("unable to create porter app build event: %w", err)
@@ -106,11 +113,11 @@ func createAppEvent(client *api.Client, applicationName string, projectId, clust
 	return event.ID, nil
 }
 
-func createV1BuildResources(client *api.Client, app *Application, stackName string, projectID uint, clusterID uint) ([]*switchboardTypes.Resource, string, error) {
+func createV1BuildResources(ctx context.Context, client api.Client, app *Application, stackName string, projectID uint, clusterID uint) ([]*switchboardTypes.Resource, string, error) {
 	var builder string
 	resources := make([]*switchboardTypes.Resource, 0)
 
-	stackConf, err := createStackConf(client, app, stackName, projectID, clusterID)
+	stackConf, err := createStackConf(ctx, client, app, stackName, projectID, clusterID)
 	if err != nil {
 		return nil, "", err
 	}
@@ -121,7 +128,7 @@ func createV1BuildResources(client *api.Client, app *Application, stackName stri
 	if stackConf.parsed.Build == nil {
 		color.New(color.FgYellow).Printf("No build values specified in porter.yaml, attempting to load stack build settings instead \n")
 
-		res, err := client.GetPorterApp(context.Background(), stackConf.projectID, stackConf.clusterID, stackConf.stackName)
+		res, err := client.GetPorterApp(ctx, stackConf.projectID, stackConf.clusterID, stackConf.stackName)
 		if err != nil {
 			return nil, "", fmt.Errorf("unable to read build info from DB: %w", err)
 		}
@@ -141,7 +148,9 @@ func createV1BuildResources(client *api.Client, app *Application, stackName stri
 		resources = append(resources, bi, pi)
 
 		// also excluding use of pre-deploy with pre-built imges
-		preDeploy, cmd, err := createPreDeployResource(client,
+		preDeploy, cmd, err := createPreDeployResource(
+			ctx,
+			client,
 			stackConf.parsed.Release,
 			stackConf.stackName,
 			bi.Name,
@@ -165,15 +174,10 @@ func createV1BuildResources(client *api.Client, app *Application, stackName stri
 	return resources, builder, nil
 }
 
-func createStackConf(client *api.Client, app *Application, stackName string, projectID uint, clusterID uint) (*StackConf, error) {
-	err := config.ValidateCLIEnvironment()
-	if err != nil {
-		errMsg := composePreviewMessage("porter CLI is not configured correctly", Error)
-		return nil, fmt.Errorf("%s: %w", errMsg, err)
-	}
-
-	releaseEnvVars := getEnvFromRelease(client, stackName, projectID, clusterID)
-	releaseEnvGroupVars := getEnvGroupFromRelease(client, stackName, projectID, clusterID)
+//nolint:unparam
+func createStackConf(ctx context.Context, client api.Client, app *Application, stackName string, projectID uint, clusterID uint) (*StackConf, error) {
+	releaseEnvVars := getEnvFromRelease(ctx, client, stackName, projectID, clusterID)
+	releaseEnvGroupVars := getEnvGroupFromRelease(ctx, client, stackName, projectID, clusterID)
 	// releaseEnvVars will override releaseEnvGroupVars
 	totalEnv := mergeStringMaps(releaseEnvGroupVars, releaseEnvVars)
 
@@ -260,11 +264,11 @@ func convertToBuild(porterApp *types.PorterApp) Build {
 	}
 }
 
-func getEnvGroupFromRelease(client *api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
+func getEnvGroupFromRelease(ctx context.Context, client api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
 	var envGroups []string
 	envVarsGroupStringMap := make(map[string]string)
 
-	ctx, span := telemetry.NewSpan(context.Background(), "get-env-from-release")
+	ctx, span := telemetry.NewSpan(ctx, "get-env-from-release")
 	telemetry.WithAttributes(span,
 		telemetry.AttributeKV{Key: "project-id", Value: projectID},
 		telemetry.AttributeKV{Key: "stack-name", Value: stackName},
@@ -326,11 +330,11 @@ func getEnvGroupFromRelease(client *api.Client, stackName string, projectID uint
 	return envVarsGroupStringMap
 }
 
-func getEnvFromRelease(client *api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
+func getEnvFromRelease(ctx context.Context, client api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
 	var envVarsStringMap map[string]string
 	namespace := fmt.Sprintf("porter-stack-%s", stackName)
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		projectID,
 		clusterID,
 		namespace,

+ 14 - 9
cli/cmd/porter_app/hooks.go

@@ -13,23 +13,25 @@ import (
 )
 
 type DeployAppHook struct {
-	Client               *api.Client
+	Client               api.Client
 	ApplicationName      string
 	ProjectID, ClusterID uint
 	BuildImageDriverName string
 	PorterYAML           []byte
 	Builder              string
 	BuildEventID         string
+	CLIConfig            config.CLIConfig
 }
 
 func (t *DeployAppHook) PreApply() error {
-	err := config.ValidateCLIEnvironment()
+	err := t.CLIConfig.ValidateCLIEnvironment()
 	if err != nil {
 		errMsg := composePreviewMessage("porter CLI is not configured correctly", Error)
 		return fmt.Errorf("%s: %w", errMsg, err)
 	}
+	ctx := context.TODO() // switchboard blocks being able to change this for now
 
-	buildEventId, err := createAppEvent(t.Client, t.ApplicationName, t.ProjectID, t.ClusterID)
+	buildEventId, err := createAppEvent(ctx, t.Client, t.ApplicationName, t.ProjectID, t.ClusterID)
 	if err != nil {
 		return err
 	}
@@ -47,10 +49,11 @@ func (t *DeployAppHook) DataQueries() map[string]interface{} {
 
 // deploy the app
 func (t *DeployAppHook) PostApply(driverOutput map[string]interface{}) error {
+	ctx := context.TODO() // switchboard blocks being able to change this for now
 	namespace := fmt.Sprintf("porter-stack-%s", t.ApplicationName)
 
 	_, err := t.Client.GetRelease(
-		context.Background(),
+		ctx,
 		t.ProjectID,
 		t.ClusterID,
 		namespace,
@@ -65,7 +68,7 @@ func (t *DeployAppHook) PostApply(driverOutput map[string]interface{}) error {
 		color.New(color.FgGreen).Printf("Found release for app %s: attempting update\n", t.ApplicationName)
 	}
 
-	err = t.createOrUpdateApplication(shouldCreate, driverOutput)
+	err = t.createOrUpdateApplication(ctx, shouldCreate, driverOutput)
 	if err != nil {
 		return err
 	}
@@ -75,12 +78,12 @@ func (t *DeployAppHook) PostApply(driverOutput map[string]interface{}) error {
 		Metadata: map[string]any{},
 		ID:       t.BuildEventID,
 	}
-	_, _ = t.Client.CreateOrUpdatePorterAppEvent(context.Background(), t.ProjectID, t.ClusterID, t.ApplicationName, &eventRequest)
+	_, _ = t.Client.CreateOrUpdatePorterAppEvent(ctx, t.ProjectID, t.ClusterID, t.ApplicationName, &eventRequest)
 
 	return nil
 }
 
-func (t *DeployAppHook) createOrUpdateApplication(shouldCreate bool, driverOutput map[string]interface{}) error {
+func (t *DeployAppHook) createOrUpdateApplication(ctx context.Context, shouldCreate bool, driverOutput map[string]interface{}) error {
 	var imageInfo types.ImageInfo
 	image, ok := driverOutput["image"].(string)
 	// if it contains a $, then it means the query didn't resolve to anything
@@ -97,7 +100,7 @@ func (t *DeployAppHook) createOrUpdateApplication(shouldCreate bool, driverOutpu
 	}
 
 	_, err := t.Client.CreatePorterApp(
-		context.Background(),
+		ctx,
 		t.ProjectID,
 		t.ClusterID,
 		t.ApplicationName,
@@ -121,6 +124,8 @@ func (t *DeployAppHook) createOrUpdateApplication(shouldCreate bool, driverOutpu
 }
 
 func (t *DeployAppHook) OnConsolidatedErrors(errors map[string]error) {
+	ctx := context.TODO() // switchboard blocks being able to change this for now
+
 	errorStringMap := make(map[string]string)
 	for k, v := range errors {
 		errorStringMap[k] = fmt.Sprintf("%+v", v)
@@ -133,7 +138,7 @@ func (t *DeployAppHook) OnConsolidatedErrors(errors map[string]error) {
 		},
 		ID: t.BuildEventID,
 	}
-	_, _ = t.Client.CreateOrUpdatePorterAppEvent(context.Background(), t.ProjectID, t.ClusterID, t.ApplicationName, &eventRequest)
+	_, _ = t.Client.CreateOrUpdatePorterAppEvent(ctx, t.ProjectID, t.ClusterID, t.ApplicationName, &eventRequest)
 }
 
 func (t *DeployAppHook) OnError(err error) {

+ 4 - 4
cli/cmd/porter_app/preDeploy.go

@@ -12,12 +12,12 @@ import (
 	switchboardTypes "github.com/porter-dev/switchboard/pkg/types"
 )
 
-func createPreDeployResource(client *api.Client, release *Service, stackName, buildResourceName, pushResourceName string, projectID, clusterID uint, env map[string]string) (*switchboardTypes.Resource, string, error) {
+func createPreDeployResource(ctx context.Context, client api.Client, release *Service, stackName, buildResourceName, pushResourceName string, projectID, clusterID uint, env map[string]string) (*switchboardTypes.Resource, string, error) {
 	var finalCmd string
 	if release != nil && release.Run != nil {
 		finalCmd = *release.Run
 	} else {
-		finalCmd = getPredeployStartCommandFromRelease(client, stackName, projectID, clusterID)
+		finalCmd = getPredeployStartCommandFromRelease(ctx, client, stackName, projectID, clusterID)
 		if finalCmd == "" {
 			return nil, "", nil
 		}
@@ -64,11 +64,11 @@ func createPreDeployResource(client *api.Client, release *Service, stackName, bu
 	}, finalCmd, nil
 }
 
-func getPredeployStartCommandFromRelease(client *api.Client, stackName string, projectID uint, clusterID uint) string {
+func getPredeployStartCommandFromRelease(ctx context.Context, client api.Client, stackName string, projectID uint, clusterID uint) string {
 	namespace := fmt.Sprintf("porter-stack-%s", stackName)
 	releaseName := fmt.Sprintf("%s-r", stackName)
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		projectID,
 		clusterID,
 		namespace,

+ 42 - 32
cli/cmd/preview/build_image_driver.go

@@ -27,29 +27,36 @@ type BuildDriver struct {
 	config      *preview.BuildDriverConfig
 	lookupTable *map[string]drivers.Driver
 	output      map[string]interface{}
+	apiClient   client.Client
+	cliConfig   config.CLIConfig
 }
 
-func NewBuildDriver(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
-	driver := &BuildDriver{
-		lookupTable: opts.DriverLookupTable,
-		output:      make(map[string]interface{}),
-	}
+// NewBuildDriver extends switchboard with the ability to build images and buildpacks
+func NewBuildDriver(ctx context.Context, apiClient client.Client, cliConfig config.CLIConfig) func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+	return func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+		driver := &BuildDriver{
+			lookupTable: opts.DriverLookupTable,
+			output:      make(map[string]interface{}),
+			cliConfig:   cliConfig,
+			apiClient:   apiClient,
+		}
 
-	target, err := GetTarget(resource.Name, resource.Target)
-	if err != nil {
-		return nil, err
-	}
+		target, err := GetTarget(ctx, resource.Name, resource.Target, apiClient, cliConfig)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.target = target
+		driver.target = target
 
-	source, err := GetSource(target.Project, resource.Name, resource.Source)
-	if err != nil {
-		return nil, err
-	}
+		source, err := GetSource(ctx, target.Project, resource.Name, resource.Source, apiClient)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.source = source
+		driver.source = source
 
-	return driver, nil
+		return driver, nil
+	}
 }
 
 func (d *BuildDriver) ShouldApply(resource *models.Resource) bool {
@@ -57,6 +64,8 @@ func (d *BuildDriver) ShouldApply(resource *models.Resource) bool {
 }
 
 func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error) {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	buildDriverConfig, err := d.getConfig(resource)
 	if err != nil {
 		return nil, err
@@ -64,8 +73,6 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 
 	d.config = buildDriverConfig
 
-	client := config.GetAPIClient()
-
 	// FIXME: give tag option in config build, but override if PORTER_TAG is present
 	tag := os.Getenv("PORTER_TAG")
 
@@ -89,7 +96,7 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		}
 	}
 
-	regList, err := client.ListRegistries(context.Background(), d.target.Project)
+	regList, err := d.apiClient.ListRegistries(ctx, d.target.Project)
 	if err != nil {
 		return nil, err
 	}
@@ -111,7 +118,7 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	}
 
 	createAgent := &deploy.CreateAgent{
-		Client: client,
+		Client: d.apiClient,
 		CreateOpts: &deploy.CreateOpts{
 			SharedOpts: &deploy.SharedOpts{
 				ProjectID:       d.target.Project,
@@ -131,13 +138,13 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		},
 	}
 
-	regID, imageURL, err := createAgent.GetImageRepoURL(d.target.AppName, d.target.Namespace)
+	regID, imageURL, err := createAgent.GetImageRepoURL(ctx, d.target.AppName, d.target.Namespace)
 	if err != nil {
 		return nil, err
 	}
 
 	// create repository if it does not exist
-	repoResp, err := client.ListRegistryRepositories(context.Background(), d.target.Project, regID)
+	repoResp, err := d.apiClient.ListRegistryRepositories(ctx, d.target.Project, regID)
 	if err != nil {
 		return nil, err
 	}
@@ -154,8 +161,8 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	}
 
 	if !found {
-		err = client.CreateRepository(
-			context.Background(),
+		err = d.apiClient.CreateRepository(
+			ctx,
 			d.target.Project,
 			regID,
 			&types.CreateRegistryRepositoryRequest{
@@ -169,7 +176,7 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	}
 
 	if d.config.Build.UsePackCache {
-		err := config.SetDockerConfig(client)
+		err := config.SetDockerConfig(ctx, d.apiClient, d.target.Project)
 		if err != nil {
 			return nil, err
 		}
@@ -200,18 +207,19 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	}
 
 	// create docker agent
-	agent, err := docker.NewAgentWithAuthGetter(client, d.target.Project)
+	agent, err := docker.NewAgentWithAuthGetter(ctx, d.apiClient, d.target.Project)
 	if err != nil {
 		return nil, err
 	}
 
-	_, mergedValues, err := createAgent.GetMergedValues(d.config.Values)
+	_, mergedValues, err := createAgent.GetMergedValues(ctx, d.config.Values)
 	if err != nil {
 		return nil, err
 	}
 
 	env, err := deploy.GetEnvForRelease(
-		client,
+		ctx,
+		d.apiClient,
 		mergedValues,
 		d.target.Project,
 		d.target.Cluster,
@@ -245,7 +253,7 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 
 	buildAgent := &deploy.BuildAgent{
 		SharedOpts:  createAgent.CreateOpts.SharedOpts,
-		APIClient:   client,
+		APIClient:   d.apiClient,
 		ImageRepo:   imageURL,
 		Env:         env,
 		ImageExists: false,
@@ -263,10 +271,11 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		var currentTag string
 		// implement caching for porter stack builds
 		if os.Getenv("PORTER_STACK_NAME") != "" {
-			currentTag = getCurrentImageTagIfExists(client, d.target.Project, d.target.Cluster, os.Getenv("PORTER_STACK_NAME"))
+			currentTag = getCurrentImageTagIfExists(ctx, d.apiClient, d.target.Project, d.target.Cluster, os.Getenv("PORTER_STACK_NAME"))
 		}
 
 		err = buildAgent.BuildDocker(
+			ctx,
 			agent,
 			basePath,
 			d.config.Build.Context,
@@ -285,6 +294,7 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		}
 
 		err = buildAgent.BuildPack(
+			ctx,
 			agent,
 			d.config.Build.Context,
 			tag,
@@ -309,10 +319,10 @@ func (d *BuildDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	return resource, nil
 }
 
-func getCurrentImageTagIfExists(client *client.Client, projectID, clusterID uint, stackName string) string {
+func getCurrentImageTagIfExists(ctx context.Context, client client.Client, projectID, clusterID uint, stackName string) string {
 	namespace := fmt.Sprintf("porter-stack-%s", stackName)
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		projectID,
 		clusterID,
 		namespace,

+ 25 - 17
cli/cmd/preview/env_group_driver.go

@@ -6,6 +6,7 @@ import (
 
 	"github.com/fatih/color"
 	"github.com/mitchellh/mapstructure"
+	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/internal/integrations/preview"
@@ -18,22 +19,29 @@ type EnvGroupDriver struct {
 	lookupTable *map[string]drivers.Driver
 	target      *preview.Target
 	config      *preview.EnvGroupDriverConfig
+	apiClient   api.Client
+	cliConfig   config.CLIConfig
 }
 
-func NewEnvGroupDriver(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
-	driver := &EnvGroupDriver{
-		lookupTable: opts.DriverLookupTable,
-		output:      make(map[string]interface{}),
-	}
+// NewEnvGroupDriver extends switchboard with environment groups
+func NewEnvGroupDriver(ctx context.Context, apiClient api.Client, cliConfig config.CLIConfig) func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+	return func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+		driver := &EnvGroupDriver{
+			lookupTable: opts.DriverLookupTable,
+			output:      make(map[string]interface{}),
+			apiClient:   apiClient,
+			cliConfig:   cliConfig,
+		}
 
-	target, err := GetTarget(resource.Name, resource.Target)
-	if err != nil {
-		return nil, err
-	}
+		target, err := GetTarget(ctx, resource.Name, resource.Target, apiClient, cliConfig)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.target = target
+		driver.target = target
 
-	return driver, nil
+		return driver, nil
+	}
 }
 
 func (d *EnvGroupDriver) ShouldApply(resource *models.Resource) bool {
@@ -41,6 +49,8 @@ func (d *EnvGroupDriver) ShouldApply(resource *models.Resource) bool {
 }
 
 func (d *EnvGroupDriver) Apply(resource *models.Resource) (*models.Resource, error) {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	driverConfig, err := d.getConfig(resource)
 	if err != nil {
 		return nil, err
@@ -48,8 +58,6 @@ func (d *EnvGroupDriver) Apply(resource *models.Resource) (*models.Resource, err
 
 	d.config = driverConfig
 
-	client := config.GetAPIClient()
-
 	for _, group := range d.config.EnvGroups {
 		if group.Name == "" {
 			return nil, fmt.Errorf("env group name cannot be empty")
@@ -62,8 +70,8 @@ func (d *EnvGroupDriver) Apply(resource *models.Resource) (*models.Resource, err
 			group.Namespace = d.target.Namespace
 		}
 
-		envGroupResp, err := client.GetEnvGroup(
-			context.Background(),
+		envGroupResp, err := d.apiClient.GetEnvGroup(
+			ctx,
 			d.target.Project,
 			d.target.Cluster,
 			group.Namespace,
@@ -73,8 +81,8 @@ func (d *EnvGroupDriver) Apply(resource *models.Resource) (*models.Resource, err
 		)
 
 		if err != nil && err.Error() == "env group not found" {
-			newEnvGroup, err := client.CreateEnvGroup(
-				context.Background(), d.target.Project, d.target.Cluster, group.Namespace,
+			newEnvGroup, err := d.apiClient.CreateEnvGroup(
+				ctx, d.target.Project, d.target.Cluster, group.Namespace,
 				&types.CreateEnvGroupRequest{
 					Name:      group.Name,
 					Variables: group.Variables,

+ 30 - 22
cli/cmd/preview/push_image_driver.go

@@ -6,6 +6,7 @@ import (
 	"os"
 
 	"github.com/mitchellh/mapstructure"
+	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/deploy"
@@ -21,22 +22,29 @@ type PushDriver struct {
 	config      *preview.PushDriverConfig
 	lookupTable *map[string]drivers.Driver
 	output      map[string]interface{}
+	apiClient   api.Client
+	cliConfig   config.CLIConfig
 }
 
-func NewPushDriver(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
-	driver := &PushDriver{
-		lookupTable: opts.DriverLookupTable,
-		output:      make(map[string]interface{}),
-	}
+// NewPushDriver extends switchboard with image pushing to registries
+func NewPushDriver(ctx context.Context, apiClient api.Client, cliConfig config.CLIConfig) func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+	return func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+		driver := &PushDriver{
+			lookupTable: opts.DriverLookupTable,
+			output:      make(map[string]interface{}),
+			apiClient:   apiClient,
+			cliConfig:   cliConfig,
+		}
 
-	target, err := GetTarget(resource.Name, resource.Target)
-	if err != nil {
-		return nil, err
-	}
+		target, err := GetTarget(ctx, resource.Name, resource.Target, apiClient, cliConfig)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.target = target
+		driver.target = target
 
-	return driver, nil
+		return driver, nil
+	}
 }
 
 func (d *PushDriver) ShouldApply(resource *models.Resource) bool {
@@ -44,6 +52,8 @@ func (d *PushDriver) ShouldApply(resource *models.Resource) bool {
 }
 
 func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error) {
+	ctx := context.TODO() // switchboard blocks changing this for now
+
 	pushDriverConfig, err := d.getConfig(resource)
 	if err != nil {
 		return nil, err
@@ -57,15 +67,13 @@ func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		return resource, nil
 	}
 
-	client := config.GetAPIClient()
-
-	agent, err := docker.NewAgentWithAuthGetter(client, d.target.Project)
+	agent, err := docker.NewAgentWithAuthGetter(ctx, d.apiClient, d.target.Project)
 	if err != nil {
 		return nil, err
 	}
 
-	_, err = client.GetRelease(
-		context.Background(),
+	_, err = d.apiClient.GetRelease(
+		ctx,
 		d.target.Project,
 		d.target.Cluster,
 		d.target.Namespace,
@@ -75,7 +83,7 @@ func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error)
 	shouldCreate := err != nil
 
 	if shouldCreate {
-		regList, err := client.ListRegistries(context.Background(), d.target.Project)
+		regList, err := d.apiClient.ListRegistries(ctx, d.target.Project)
 		if err != nil {
 			return nil, err
 		}
@@ -103,7 +111,7 @@ func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		}
 
 		createAgent := &deploy.CreateAgent{
-			Client: client,
+			Client: d.apiClient,
 			CreateOpts: &deploy.CreateOpts{
 				SharedOpts:  sharedOpts,
 				ReleaseName: d.target.AppName,
@@ -112,13 +120,13 @@ func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error)
 			},
 		}
 
-		regID, imageURL, err := createAgent.GetImageRepoURL(d.target.AppName, sharedOpts.Namespace)
+		regID, imageURL, err := createAgent.GetImageRepoURL(ctx, d.target.AppName, sharedOpts.Namespace)
 		if err != nil {
 			return nil, err
 		}
 
-		err = client.CreateRepository(
-			context.Background(),
+		err = d.apiClient.CreateRepository(
+			ctx,
 			sharedOpts.ProjectID,
 			regID,
 			&types.CreateRegistryRepositoryRequest{
@@ -131,7 +139,7 @@ func (d *PushDriver) Apply(resource *models.Resource) (*models.Resource, error)
 		}
 	}
 
-	err = agent.PushImage(d.config.Push.Image)
+	err = agent.PushImage(ctx, d.config.Push.Image)
 	if err != nil {
 		return nil, err
 	}

+ 34 - 30
cli/cmd/preview/update_config_driver.go

@@ -26,29 +26,36 @@ type UpdateConfigDriver struct {
 	config      *preview.UpdateConfigDriverConfig
 	lookupTable *map[string]drivers.Driver
 	output      map[string]interface{}
+	apiClient   api.Client
+	cliConfig   config.CLIConfig
 }
 
-func NewUpdateConfigDriver(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
-	driver := &UpdateConfigDriver{
-		lookupTable: opts.DriverLookupTable,
-		output:      make(map[string]interface{}),
-	}
+// NewUpdateConfigDriver extends switchboard with config updating for an app
+func NewUpdateConfigDriver(ctx context.Context, apiClient api.Client, cliConfig config.CLIConfig) func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+	return func(resource *models.Resource, opts *drivers.SharedDriverOpts) (drivers.Driver, error) {
+		driver := &UpdateConfigDriver{
+			lookupTable: opts.DriverLookupTable,
+			output:      make(map[string]interface{}),
+			apiClient:   apiClient,
+			cliConfig:   cliConfig,
+		}
 
-	target, err := GetTarget(resource.Name, resource.Target)
-	if err != nil {
-		return nil, err
-	}
+		target, err := GetTarget(ctx, resource.Name, resource.Target, apiClient, cliConfig)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.target = target
+		driver.target = target
 
-	source, err := GetSource(driver.target.Project, resource.Name, resource.Source)
-	if err != nil {
-		return nil, err
-	}
+		source, err := GetSource(ctx, driver.target.Project, resource.Name, resource.Source, apiClient)
+		if err != nil {
+			return nil, err
+		}
 
-	driver.source = source
+		driver.source = source
 
-	return driver, nil
+		return driver, nil
+	}
 }
 
 func (d *UpdateConfigDriver) ShouldApply(resource *models.Resource) bool {
@@ -56,7 +63,7 @@ func (d *UpdateConfigDriver) ShouldApply(resource *models.Resource) bool {
 }
 
 func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource, error) {
-	ctx := context.Background()
+	ctx := context.TODO() // switchboard blocks changing this for now
 
 	updateConfigDriverConfig, err := d.getConfig(resource)
 	if err != nil {
@@ -65,9 +72,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 
 	d.config = updateConfigDriverConfig
 
-	client := config.GetAPIClient()
-
-	_, err = client.GetRelease(
+	_, err = d.apiClient.GetRelease(
 		ctx,
 		d.target.Project,
 		d.target.Cluster,
@@ -98,7 +103,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 		tag = commit.Sha[:7]
 	}
 
-	regList, err := client.ListRegistries(context.Background(), d.target.Project)
+	regList, err := d.apiClient.ListRegistries(ctx, d.target.Project)
 	if err != nil {
 		return nil, err
 	}
@@ -132,7 +137,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 		color.New(color.FgYellow).Printf("Could not read release %s/%s: attempting creation\n", d.target.Namespace, d.target.AppName)
 
 		createAgent := &deploy.CreateAgent{
-			Client: client,
+			Client: d.apiClient,
 			CreateOpts: &deploy.CreateOpts{
 				SharedOpts:  sharedOpts,
 				Kind:        d.source.Name,
@@ -144,13 +149,13 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 
 		image := fmt.Sprintf("%s:%s", strings.Split(d.config.UpdateConfig.Image, ":")[0], tag)
 
-		_, err = createAgent.CreateFromRegistry(image, d.config.Values)
+		_, err = createAgent.CreateFromRegistry(ctx, image, d.config.Values)
 
 		if err != nil {
 			return nil, err
 		}
 	} else if !updateConfigDriverConfig.OnlyCreate {
-		updateAgent, err := deploy.NewDeployAgent(client, d.target.AppName, &deploy.DeployOpts{
+		updateAgent, err := deploy.NewDeployAgent(ctx, d.apiClient, d.target.AppName, &deploy.DeployOpts{
 			SharedOpts: sharedOpts,
 			Local:      false,
 		})
@@ -158,7 +163,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 			return nil, err
 		}
 
-		err = updateAgent.UpdateImageAndValues(d.config.Values)
+		err = updateAgent.UpdateImageAndValues(ctx, d.config.Values)
 
 		if err != nil {
 			return nil, err
@@ -168,7 +173,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 	if d.source.Name == "job" && updateConfigDriverConfig.WaitForJob && (shouldCreate || !updateConfigDriverConfig.OnlyCreate) {
 		color.New(color.FgYellow).Printf("Waiting for job '%s' to finish\n", resource.Name)
 
-		err = wait.WaitForJob(client, &wait.WaitOpts{
+		err = wait.WaitForJob(ctx, d.apiClient, &wait.WaitOpts{
 			ProjectID: d.target.Project,
 			ClusterID: d.target.Cluster,
 			Namespace: d.target.Namespace,
@@ -179,8 +184,7 @@ func (d *UpdateConfigDriver) Apply(resource *models.Resource) (*models.Resource,
 		}
 	}
 
-	err = d.assignOutput(resource, client)
-
+	err = d.assignOutput(ctx, resource, d.apiClient)
 	if err != nil {
 		return nil, err
 	}
@@ -213,9 +217,9 @@ func (d *UpdateConfigDriver) getConfig(resource *models.Resource) (*preview.Upda
 	return config, nil
 }
 
-func (d *UpdateConfigDriver) assignOutput(resource *models.Resource, client *api.Client) error {
+func (d *UpdateConfigDriver) assignOutput(ctx context.Context, _ *models.Resource, client api.Client) error {
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		d.target.Project,
 		d.target.Cluster,
 		d.target.Namespace,

+ 17 - 18
cli/cmd/preview/utils.go

@@ -6,12 +6,14 @@ import (
 	"os"
 	"strconv"
 
+	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/internal/integrations/preview"
 )
 
-func GetSource(projectID uint, resourceName string, input map[string]interface{}) (*preview.Source, error) {
+// GetSource extends switchboard
+func GetSource(ctx context.Context, projectID uint, resourceName string, input map[string]interface{}, apiClient api.Client) (*preview.Source, error) {
 	output := &preview.Source{}
 
 	// first read from env vars
@@ -59,9 +61,7 @@ func GetSource(projectID uint, resourceName string, input map[string]interface{}
 		output.Version = "latest"
 	}
 
-	apiClient := config.GetAPIClient()
-
-	serverMetadata, err := apiClient.GetPorterInstanceMetadata(context.Background())
+	serverMetadata, err := apiClient.GetPorterInstanceMetadata(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("error fetching Porter instance metadata: %w", err)
 	}
@@ -73,7 +73,7 @@ func GetSource(projectID uint, resourceName string, input map[string]interface{}
 			output.Repo = "https://charts.getporter.dev"
 		}
 
-		values, err := existsInRepo(projectID, output.Name, output.Version, output.Repo)
+		values, err := existsInRepo(ctx, projectID, output.Name, output.Version, output.Repo, apiClient)
 
 		if err == nil {
 			output.SourceValues = values
@@ -88,7 +88,7 @@ func GetSource(projectID uint, resourceName string, input map[string]interface{}
 			output.Repo = "https://chart-addons.getporter.dev"
 		}
 
-		values, err = existsInRepo(projectID, output.Name, output.Version, output.Repo)
+		values, err = existsInRepo(ctx, projectID, output.Name, output.Version, output.Repo, apiClient)
 
 		if err == nil {
 			output.SourceValues = values
@@ -100,7 +100,7 @@ func GetSource(projectID uint, resourceName string, input map[string]interface{}
 			"Helm repositories", resourceName)
 	} else {
 		// we look in the passed-in repo
-		values, err := existsInRepo(projectID, output.Name, output.Version, output.Repo)
+		values, err := existsInRepo(ctx, projectID, output.Name, output.Version, output.Repo, apiClient)
 
 		if err == nil {
 			output.SourceValues = values
@@ -114,7 +114,8 @@ func GetSource(projectID uint, resourceName string, input map[string]interface{}
 		resourceName, output.Name, output.Repo)
 }
 
-func GetTarget(resourceName string, input map[string]interface{}) (*preview.Target, error) {
+// GetTarget extends switchboard
+func GetTarget(ctx context.Context, resourceName string, input map[string]interface{}, apiClient api.Client, cliConfig config.CLIConfig) (*preview.Target, error) {
 	output := &preview.Target{}
 
 	// first read from env vars
@@ -187,11 +188,11 @@ func GetTarget(resourceName string, input map[string]interface{}) (*preview.Targ
 	// lastly, just put in the defaults
 
 	if output.Project == 0 {
-		output.Project = config.GetCLIConfig().Project
+		output.Project = cliConfig.Project
 	}
 
 	if output.Cluster == 0 {
-		output.Cluster = config.GetCLIConfig().Cluster
+		output.Cluster = cliConfig.Cluster
 	}
 
 	if output.Namespace == "" {
@@ -199,10 +200,8 @@ func GetTarget(resourceName string, input map[string]interface{}) (*preview.Targ
 	}
 
 	if output.RegistryURL == "" {
-		apiClient := config.GetAPIClient()
-
-		if config.GetCLIConfig().Registry == 0 {
-			regList, err := apiClient.ListRegistries(context.Background(), output.Project)
+		if cliConfig.Registry == 0 {
+			regList, err := apiClient.ListRegistries(ctx, output.Project)
 			if err != nil {
 				return nil, fmt.Errorf("for resource '%s', error listing registries in project: %w", resourceName, err)
 			}
@@ -213,7 +212,7 @@ func GetTarget(resourceName string, input map[string]interface{}) (*preview.Targ
 
 			output.RegistryURL = (*regList)[0].URL
 		} else {
-			reg, err := apiClient.GetRegistry(context.Background(), output.Project, config.GetCLIConfig().Registry)
+			reg, err := apiClient.GetRegistry(ctx, output.Project, cliConfig.Registry)
 			if err != nil {
 				return nil, fmt.Errorf("for resource '%s', error getting registry from CLI config: %w", resourceName, err)
 			}
@@ -225,9 +224,9 @@ func GetTarget(resourceName string, input map[string]interface{}) (*preview.Targ
 	return output, nil
 }
 
-func existsInRepo(projectID uint, name, version, url string) (map[string]interface{}, error) {
-	chart, err := config.GetAPIClient().GetTemplate(
-		context.Background(),
+func existsInRepo(ctx context.Context, projectID uint, name, version, url string, apiClient api.Client) (map[string]interface{}, error) {
+	chart, err := apiClient.GetTemplate(
+		ctx,
 		projectID,
 		name, version,
 		&types.GetTemplateRequest{

+ 17 - 30
cli/cmd/preview/v2beta1/apply.go

@@ -11,24 +11,16 @@ import (
 	"gopkg.in/yaml.v3"
 )
 
-// const (
-// 	constantsEnvGroup = "preview-env-constants"
-
-// 	defaultCharset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~`!@#$%^&*()_+-={}[]"
-// )
-
 type PreviewApplier struct {
-	apiClient *api.Client
+	apiClient api.Client
+	cliConfig config.CLIConfig
 	rawBytes  []byte
 	namespace string
 	parsed    *PorterYAML
-
-	// variablesMap map[string]string
-	// osEnv        map[string]string
-	// envGroups    map[string]*apiTypes.EnvGroup
 }
 
-func NewApplier(client *api.Client, raw []byte, namespace string) (*PreviewApplier, error) {
+// NewApplier returns an applier for preview environments
+func NewApplier(client api.Client, cliConfig config.CLIConfig, raw []byte, namespace string) (*PreviewApplier, error) {
 	// replace all instances of ${{ porter.env.FOO }} with { .get-env.FOO }
 	re := regexp.MustCompile(`\$\{\{\s*porter\.env\.(.*)\s*\}\}`)
 	raw = re.ReplaceAll(raw, []byte("{.get-env.$1}"))
@@ -41,13 +33,7 @@ func NewApplier(client *api.Client, raw []byte, namespace string) (*PreviewAppli
 		return nil, fmt.Errorf("%s: %w", errMsg, err)
 	}
 
-	// err = validator.ValidatePorterYAML(parsed)
-
-	// if err != nil {
-	// 	return nil, err
-	// }
-
-	err = config.ValidateCLIEnvironment()
+	err = cliConfig.ValidateCLIEnvironment()
 
 	if err != nil {
 		errMsg := composePreviewMessage("porter CLI is not configured correctly", Error)
@@ -56,6 +42,7 @@ func NewApplier(client *api.Client, raw []byte, namespace string) (*PreviewAppli
 
 	return &PreviewApplier{
 		apiClient: client,
+		cliConfig: cliConfig,
 		rawBytes:  raw,
 		namespace: namespace,
 		parsed:    parsed,
@@ -68,13 +55,13 @@ func (a *PreviewApplier) Apply() error {
 	// this is a sanity check to ensure that the user does not see any internal
 	// errors that are caused by the namespace not existing
 	nsList, err := a.apiClient.GetK8sNamespaces(
-		context.Background(),
-		config.GetCLIConfig().Project,
-		config.GetCLIConfig().Cluster,
+		context.TODO(), // can not change because of switchboard
+		a.cliConfig.Project,
+		a.cliConfig.Cluster,
 	)
 	if err != nil {
 		errMsg := composePreviewMessage(fmt.Sprintf("error listing namespaces for project '%d', cluster '%d'",
-			config.GetCLIConfig().Project, config.GetCLIConfig().Cluster), Error)
+			a.cliConfig.Project, a.cliConfig.Cluster), Error)
 		return fmt.Errorf("%s: %w", errMsg, err)
 	}
 
@@ -96,9 +83,9 @@ func (a *PreviewApplier) Apply() error {
 
 	printInfoMessage(fmt.Sprintf("Applying porter.yaml with the following attributes:\n"+
 		"\tHost: %s\n\tProject ID: %d\n\tCluster ID: %d\n\tNamespace: %s",
-		config.GetCLIConfig().Host,
-		config.GetCLIConfig().Project,
-		config.GetCLIConfig().Cluster,
+		a.cliConfig.Host,
+		a.cliConfig.Project,
+		a.cliConfig.Cluster,
 		a.namespace),
 	)
 
@@ -279,7 +266,7 @@ func (a *PreviewApplier) DowngradeToV1() (*types.ResourceGroup, error) {
 // 	if len(constantsMap) > 0 {
 // 		// we need to create these constants in the env group
 // 		_, err := a.apiClient.CreateEnvGroup(
-// 			context.Background(),
+// 			ctx,
 // 			config.GetCLIConfig().Project,
 // 			config.GetCLIConfig().Cluster,
 // 			a.namespace,
@@ -305,7 +292,7 @@ func (a *PreviewApplier) DowngradeToV1() (*types.ResourceGroup, error) {
 
 // func (a *PreviewApplier) constantExistsInEnvGroup(name string) (*bool, error) {
 // 	apiResponse, err := a.apiClient.GetEnvGroup(
-// 		context.Background(),
+// 		ctx,
 // 		config.GetCLIConfig().Project,
 // 		config.GetCLIConfig().Cluster,
 // 		a.namespace,
@@ -343,7 +330,7 @@ func (a *PreviewApplier) DowngradeToV1() (*types.ResourceGroup, error) {
 // 		}
 
 // 		envGroup, err := a.apiClient.GetEnvGroup(
-// 			context.Background(),
+// 			ctx,
 // 			config.GetCLIConfig().Project,
 // 			config.GetCLIConfig().Cluster,
 // 			a.namespace,
@@ -365,7 +352,7 @@ func (a *PreviewApplier) DowngradeToV1() (*types.ResourceGroup, error) {
 
 // 			// clone the env group
 // 			envGroup, err := a.apiClient.CloneEnvGroup(
-// 				context.Background(),
+// 				ctx,
 // 				config.GetCLIConfig().Project,
 // 				config.GetCLIConfig().Cluster,
 // 				egNS,

+ 0 - 375
cli/cmd/preview/v2beta1/default_driver.go

@@ -1,375 +0,0 @@
-package v2beta1
-
-// import (
-// 	"context"
-// 	"fmt"
-// 	"os"
-// 	"strings"
-
-// 	"github.com/cli/cli/git"
-// 	"github.com/fatih/color"
-// 	"github.com/mitchellh/mapstructure"
-// 	api "github.com/porter-dev/porter/api/client"
-// 	apiTypes "github.com/porter-dev/porter/api/types"
-// 	"github.com/porter-dev/porter/cli/cmd/config"
-// 	"github.com/porter-dev/porter/cli/cmd/deploy"
-// 	"github.com/porter-dev/switchboard/v2/pkg/types"
-// )
-
-// type DefaultDriver struct {
-// 	Vars      map[string]string
-// 	Env       map[string]string
-// 	Builds    []*types.Build
-// 	APIClient *api.Client
-// 	Namespace string
-
-// 	allErrors []error
-// }
-
-// func (d *DefaultDriver) PreApply(resource *types.YAMLNode[*types.Resource]) error {
-// 	return nil
-// }
-
-// func (d *DefaultDriver) Apply(resource *types.YAMLNode[*types.Resource]) error {
-// 	if isPorterApp(resource) {
-// 		return d.applyPorterApp(resource)
-// 	}
-
-// 	// everything else is an addon
-// 	return d.applyAddon(resource)
-// }
-
-// func (d *DefaultDriver) PostApply(resource *types.YAMLNode[*types.Resource]) error {
-// 	return nil
-// }
-
-// func (d *DefaultDriver) OnError(resource *types.YAMLNode[*types.Resource], errs []error) {
-
-// }
-
-// func isPorterApp(resource *types.YAMLNode[*types.Resource]) bool {
-// 	if resource.GetValue().ChartURL.GetValue() == "https://charts.getporter.dev" &&
-// 		(resource.GetValue().Type.GetValue() == "web" ||
-// 			resource.GetValue().Type.GetValue() == "worker" ||
-// 			resource.GetValue().Type.GetValue() == "job") {
-// 		return true
-// 	}
-
-// 	return false
-// }
-
-// func (d *DefaultDriver) applyPorterApp(resource *types.YAMLNode[*types.Resource]) error {
-// 	appBuild := &porterAppBuild{}
-// 	appDeploy := &porterAppDeploy{}
-// 	buildNode := resource.GetValue().Build.GetRawYAMLNode()
-// 	deployNode := resource.GetValue().Deploy.GetRawYAMLNode()
-
-// 	err := buildNode.Decode(appBuild)
-
-// 	if err != nil {
-// 		return err // FIXME: descriptive error
-// 	}
-
-// 	err = deployNode.Decode(appDeploy)
-
-// 	if err != nil {
-// 		return err // FIXME: descriptive error
-// 	}
-
-// 	var buildConfig *types.Build
-
-// 	if appBuild.Ref != "" {
-// 		for _, b := range d.Builds {
-// 			if b.Name.GetValue() == appBuild.Ref {
-// 				buildConfig = b
-// 				break
-// 			}
-// 		}
-
-// 		if buildConfig == nil {
-// 			// this should not happen
-// 			return fmt.Errorf("internal error: please let the Porter team know about this and quote the following "+
-// 				"error:\n-----\nERROR: invalid build ref given for app '%s'", resource.GetValue().Name.GetValue())
-// 		}
-// 	} else {
-// 		buildConfig = appBuild.Build
-// 	}
-
-// 	if buildConfig == nil {
-// 		// this should not happen
-// 		return fmt.Errorf("internal error: please let the Porter team know about this and quote the following "+
-// 			"error:\n-----\nERROR: neither build ref nor build body given for app '%s'", resource.GetValue().Name.GetValue())
-// 	}
-
-// 	if resource.GetValue().Type.GetValue() == "job" {
-// 		jobConfig := &porterJob{}
-// 		jobNode := resource.GetRawYAMLNode()
-
-// 		err := jobNode.Decode(jobConfig)
-
-// 		if err != nil {
-// 			return err // FIXME: descriptive error
-// 		}
-
-// 		return d.applyJob(resource, buildConfig, appDeploy, jobConfig)
-// 	} else if oneOf(resource.GetValue().Type.GetValue(), "web", "worker") {
-
-// 	} else {
-// 		// this should not happen
-// 		return fmt.Errorf("internal error: please let the Porter team know about this and quote the following "+
-// 			"error:\n-----\nERROR: app '%s' is not one of 'web', 'worker', 'job'", resource.GetValue().Name.GetValue())
-// 	}
-
-// 	return nil
-// }
-
-// func (d *DefaultDriver) applyAddon(resource *types.YAMLNode[*types.Resource]) error {
-// 	return nil
-// }
-
-// func (d *DefaultDriver) applyJob(
-// 	resource *types.YAMLNode[*types.Resource],
-// 	buildConfig *types.Build,
-// 	appDeploy *porterAppDeploy,
-// 	jobConfig *porterJob,
-// ) error {
-// 	_, err := d.APIClient.GetRelease(
-// 		context.Background(),
-// 		config.GetCLIConfig().Project,
-// 		config.GetCLIConfig().Cluster,
-// 		d.Namespace,
-// 		resource.GetValue().Name.GetValue(),
-// 	)
-
-// 	exists := err == nil
-
-// 	flattenedBuildEnv := make(map[string]string)
-
-// 	for k, v := range buildConfig.Env {
-// 		flattenedBuildEnv[k.GetValue()] = v.GetValue()
-// 	}
-
-// 	var flattenedBuildEnvGroup []apiTypes.EnvGroupMeta
-
-// 	for _, egName := range buildConfig.EnvGroups {
-// 		flattenedBuildEnvGroup = append(flattenedBuildEnvGroup, apiTypes.EnvGroupMeta{
-// 			Name:      egName.GetValue(),
-// 			Namespace: d.Namespace,
-// 		})
-// 	}
-
-// 	tag := getImageTag()
-
-// 	sharedOpts := &deploy.SharedOpts{
-// 		ProjectID:       config.GetCLIConfig().Project,
-// 		ClusterID:       config.GetCLIConfig().Cluster,
-// 		Namespace:       d.Namespace,
-// 		LocalPath:       buildConfig.Context.GetValue(),
-// 		LocalDockerfile: buildConfig.Dockerfile.GetValue(),
-// 		OverrideTag:     tag,
-// 		Method:          deploy.DeployBuildType(buildConfig.Method.GetValue()),
-// 		AdditionalEnv:   flattenedBuildEnv,
-// 		EnvGroups:       flattenedBuildEnvGroup,
-// 	}
-
-// 	if buildConfig.Method.GetValue() == "pack" && buildConfig.UseCache != nil {
-// 		sharedOpts.UseCache = buildConfig.UseCache.GetValue()
-// 	}
-
-// 	if exists {
-// 		if jobConfig.Once {
-// 			// since the job already exists and was marked 'once', simply return
-// 			return nil
-// 		}
-
-// 		updateAgent, err := deploy.NewDeployAgent(d.APIClient, resource.GetValue().Name.GetValue(), &deploy.DeployOpts{
-// 			SharedOpts: sharedOpts,
-// 			Local:      buildConfig.Method.GetValue() != "registry",
-// 		})
-
-// 		if err != nil {
-// 			return fmt.Errorf("[porter.yaml v2][app:%s] error creating deploy agent to update app: %w",
-// 				resource.GetValue().Name.GetValue(), err)
-// 		}
-
-// 		// if the build method is registry, we do not trigger a build
-// 		if buildConfig.Method.GetValue() != "registry" {
-// 			buildEnv, err := updateAgent.GetBuildEnv(&deploy.GetBuildEnvOpts{
-// 				UseNewConfig: true,
-// 				// NewConfig:    appConf.Values,
-// 			})
-
-// 			if err != nil {
-// 				return err // FIXME
-// 			}
-
-// 			err = updateAgent.SetBuildEnv(buildEnv)
-
-// 			if err != nil {
-// 				return err // FIXME
-// 			}
-
-// 			var bc *apiTypes.BuildConfig
-
-// 			if buildConfig.Method.GetValue() == "pack" {
-// 				// FIXME: temporary fix
-// 				var bp []string
-
-// 				for _, b := range buildConfig.Buildpacks {
-// 					bp = append(bp, b.GetValue())
-// 				}
-
-// 				bc = &apiTypes.BuildConfig{
-// 					Builder:    buildConfig.Builder.GetValue(),
-// 					Buildpacks: bp,
-// 				}
-// 			}
-
-// 			err = updateAgent.Build(bc)
-
-// 			if err != nil {
-// 				return err // FIXME
-// 			}
-
-// 			// if !appConf.Build.UseCache { // FIXME
-// 			err = updateAgent.Push()
-
-// 			if err != nil {
-// 				return err // FIXME
-// 			}
-// 			// }
-// 		}
-
-// 		// err = updateAgent.UpdateImageAndValues(appConf.Values) // FIXME
-
-// 		// if err != nil {
-// 		// 	return err // FIXME
-// 		// }
-// 	} else { // create the job
-// 		// attempt to get repo suffix from environment variables
-// 		var repoSuffix string
-
-// 		if repoName := os.Getenv("PORTER_REPO_NAME"); repoName != "" {
-// 			if repoOwner := os.Getenv("PORTER_REPO_OWNER"); repoOwner != "" {
-// 				repoSuffix = strings.ToLower(strings.ReplaceAll(fmt.Sprintf("%s-%s", repoOwner, repoName), "_", "-"))
-// 			}
-// 		}
-
-// 		var registryURL string
-
-// 		if buildConfig.ImageRepoURI != nil {
-// 			registryURL = buildConfig.ImageRepoURI.GetValue()
-// 		}
-
-// 		if registryURL == "" {
-// 			regList, err := d.APIClient.ListRegistries(context.Background(), config.GetCLIConfig().Project)
-
-// 			if err != nil {
-// 				return fmt.Errorf("error fetching list of registries while trying to choose registry to deploy new"+
-// 					" image for app '%s': %w", resource.GetValue().Name.GetValue(), err)
-// 			}
-
-// 			if len(*regList) == 0 {
-// 				return fmt.Errorf("no registries linked with project, needed to deploy new image for app '%s'",
-// 					resource.GetValue().Name.GetValue())
-// 			} else {
-// 				registryURL = (*regList)[0].URL
-// 			}
-// 		}
-
-// 		createAgent := &deploy.CreateAgent{
-// 			Client: d.APIClient,
-// 			CreateOpts: &deploy.CreateOpts{
-// 				SharedOpts:  sharedOpts,
-// 				Kind:        resource.GetValue().Type.GetValue(),
-// 				ReleaseName: resource.GetValue().Name.GetValue(),
-// 				RegistryURL: registryURL,
-// 				RepoSuffix:  repoSuffix,
-// 			},
-// 		}
-
-// 		if buildConfig.Method.GetValue() == "registry" {
-// 			flattenedDeployMap := make(map[string]any)
-
-// 			for k, v := range resource.GetValue().Deploy.GetValue() {
-// 				flattenedDeployMap[k.GetValue()] = v.GetValue()
-// 			}
-
-// 			values := &porterWebChartValues{}
-
-// 			// delete the aliases from the deploy section
-// 			delete(flattenedDeployMap, "command")
-// 			delete(flattenedDeployMap, "cpu")
-// 			delete(flattenedDeployMap, "memory")
-
-// 			// replace alias values to the original expect yaml values
-// 			values.Container.Command = appDeploy.Command
-// 			values.Container.Env.Build = flattenedBuildEnv
-// 			values.Container.Env.Normal = appDeploy.Env
-// 			// values.Container.Env.Synced
-// 			values.Resources.Requests.CPU = appDeploy.CPU
-// 			values.Resources.Requests.Memory = appDeploy.Memory
-// 			if len(appDeploy.Hosts) > 0 {
-// 				values.Ingress.CustomDomain = true
-// 				values.Ingress.Hosts = appDeploy.Hosts
-// 			}
-
-// 			overrideValues := make(map[string]any)
-
-// 			err = mapstructure.Decode(values, &overrideValues)
-
-// 			if err != nil {
-// 				return err // FIXME
-// 			}
-
-// 			_, err := createAgent.CreateFromRegistry("", overrideValues)
-
-// 			if err != nil {
-// 				return fmt.Errorf("[porter.yaml v2][app:%s] error creating job: %w", resource.GetValue().Name.GetValue(), err)
-// 			}
-// 		} else if oneOf(buildConfig.Method.GetValue(), "pack", "docker") {
-// 			_, err := createAgent.CreateFromDocker(nil, "", nil)
-
-// 			if err != nil {
-// 				return fmt.Errorf("[porter.yaml v2][app:%s] error creating job: %w", resource.GetValue().Name.GetValue(), err)
-// 			}
-// 		} else {
-// 			// this should not happen
-// 			return fmt.Errorf("internal error: please let the Porter team know about this and quote the following "+
-// 				"error:\n-----\nERROR: build method was not one of 'pack', 'docker', 'registry' for app '%s'",
-// 				resource.GetValue().Name.GetValue())
-// 		}
-// 	}
-
-// 	return nil
-// }
-
-// // fetching the image tag works in 3 steps
-// //   - read PORTER_TAG env var
-// //   - read the git SHA from the current directory
-// //   - default to 'latest' tag
-// func getImageTag() string {
-// 	tag := os.Getenv("PORTER_TAG")
-
-// 	if tag == "" {
-// 		commit, err := git.LastCommit()
-
-// 		if err == nil {
-// 			tag = commit.Sha[:7]
-
-// 			color.New(color.FgBlue).Printf("[porter.yaml v2] PORTER_TAG not defined, falling back to image tag '%s'"+
-// 				" from git SHA\n", tag)
-// 		}
-// 	} else {
-// 		color.New(color.FgBlue).Printf("[porter.yaml v2] Using image tag '%s' from PORTER_TAG environment variable\n", tag)
-// 	}
-
-// 	if tag == "" {
-// 		color.New(color.FgBlue).Println("[porter.yaml v2] PORTER_TAG not defined, not a git repository, falling back" +
-// 			" to image tag 'latest'")
-
-// 		tag = "latest"
-// 	}
-
-// 	return tag
-// }

+ 13 - 12
cli/cmd/project.go

@@ -11,6 +11,7 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 )
@@ -28,7 +29,7 @@ var createProjectCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Creates a project with the authorized user as admin",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, createProject)
+		err := checkLoginAndRun(cmd.Context(), args, createProject)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -40,7 +41,7 @@ var deleteProjectCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Deletes the project with the given id",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteProject)
+		err := checkLoginAndRun(cmd.Context(), args, deleteProject)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -51,7 +52,7 @@ var listProjectCmd = &cobra.Command{
 	Use:   "list",
 	Short: "Lists the projects for the logged in user",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listProjects)
+		err := checkLoginAndRun(cmd.Context(), args, listProjects)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -66,8 +67,8 @@ func init() {
 	projectCmd.AddCommand(listProjectCmd)
 }
 
-func createProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	resp, err := client.CreateProject(context.Background(), &types.CreateProjectRequest{
+func createProject(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	resp, err := client.CreateProject(ctx, &types.CreateProjectRequest{
 		Name: args[0],
 	})
 	if err != nil {
@@ -76,11 +77,11 @@ func createProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 
 	color.New(color.FgGreen).Printf("Created project with name %s and id %d\n", args[0], resp.ID)
 
-	return cliConf.SetProject(resp.ID)
+	return cliConf.SetProject(ctx, client, resp.ID)
 }
 
-func listProjects(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	resp, err := client.ListUserProjects(context.Background())
+func listProjects(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
+	resp, err := client.ListUserProjects(ctx)
 	if err != nil {
 		return err
 	}
@@ -107,7 +108,7 @@ func listProjects(user *types.GetAuthenticatedUserResponse, client *api.Client,
 	return nil
 }
 
-func deleteProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func deleteProject(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, args []string) error {
 	userResp, err := utils.PromptPlaintext(
 		fmt.Sprintf(
 			`Are you sure you'd like to delete the project with id %s? %s `,
@@ -125,7 +126,7 @@ func deleteProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 			return err
 		}
 
-		err = client.DeleteProject(context.Background(), uint(id))
+		err = client.DeleteProject(ctx, uint(id))
 
 		if err != nil {
 			return err
@@ -137,8 +138,8 @@ func deleteProject(_ *types.GetAuthenticatedUserResponse, client *api.Client, ar
 	return nil
 }
 
-func setProjectCluster(client *api.Client, projectID uint) error {
-	resp, err := client.ListProjectClusters(context.Background(), projectID)
+func setProjectCluster(ctx context.Context, client api.Client, cliConf config.CLIConfig, projectID uint) error {
+	resp, err := client.ListProjectClusters(ctx, projectID)
 	if err != nil {
 		return err
 	}

+ 1 - 2
cli/cmd/providers/gcp/local/config.go

@@ -18,8 +18,7 @@ import (
 // NewDefaultAgent returns an agent using Application Default Credentials. If these are not
 // set and the gcloud utility is installed on the machine, this will spawn a setup process
 // to link these credentials.
-func NewDefaultAgent() (*gcp.Agent, error) {
-	ctx := context.Background()
+func NewDefaultAgent(ctx context.Context) (*gcp.Agent, error) {
 	creds, err := setupDefaultCredentials(ctx)
 	if err != nil {
 		return nil, err

+ 13 - 12
cli/cmd/registry.go

@@ -11,6 +11,7 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 )
@@ -27,7 +28,7 @@ var registryListCmd = &cobra.Command{
 	Use:   "list",
 	Short: "Lists the registries linked to a project",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listRegistries)
+		err := checkLoginAndRun(cmd.Context(), args, listRegistries)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -39,7 +40,7 @@ var registryDeleteCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Deletes the registry with the given id",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, deleteRegistry)
+		err := checkLoginAndRun(cmd.Context(), args, deleteRegistry)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -56,7 +57,7 @@ var registryReposListCmd = &cobra.Command{
 	Use:   "list",
 	Short: "Lists the repositories in an image registry",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listRepos)
+		err := checkLoginAndRun(cmd.Context(), args, listRepos)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -74,7 +75,7 @@ var registryImageListCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Lists the images the specified image repository",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, listImages)
+		err := checkLoginAndRun(cmd.Context(), args, listImages)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -96,12 +97,12 @@ func init() {
 	registryImageCmd.AddCommand(registryImageListCmd)
 }
 
-func listRegistries(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listRegistries(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	pID := cliConf.Project
 
 	// get the list of namespaces
 	resp, err := client.ListRegistries(
-		context.Background(),
+		ctx,
 		pID,
 	)
 	if err != nil {
@@ -130,7 +131,7 @@ func listRegistries(user *types.GetAuthenticatedUserResponse, client *api.Client
 	return nil
 }
 
-func deleteRegistry(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func deleteRegistry(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	userResp, err := utils.PromptPlaintext(
 		fmt.Sprintf(
 			`Are you sure you'd like to delete the registry with id %s? %s `,
@@ -148,7 +149,7 @@ func deleteRegistry(user *types.GetAuthenticatedUserResponse, client *api.Client
 			return err
 		}
 
-		err = client.DeleteProjectRegistry(context.Background(), cliConf.Project, uint(id))
+		err = client.DeleteProjectRegistry(ctx, cliConf.Project, uint(id))
 
 		if err != nil {
 			return err
@@ -160,13 +161,13 @@ func deleteRegistry(user *types.GetAuthenticatedUserResponse, client *api.Client
 	return nil
 }
 
-func listRepos(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listRepos(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	pID := cliConf.Project
 	rID := cliConf.Registry
 
 	// get the list of namespaces
 	resp, err := client.ListRegistryRepositories(
-		context.Background(),
+		ctx,
 		pID,
 		rID,
 	)
@@ -190,14 +191,14 @@ func listRepos(user *types.GetAuthenticatedUserResponse, client *api.Client, arg
 	return nil
 }
 
-func listImages(user *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func listImages(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	pID := cliConf.Project
 	rID := cliConf.Registry
 	repoName := args[0]
 
 	// get the list of namespaces
 	resp, err := client.ListImages(
-		context.Background(),
+		ctx,
 		pID,
 		rID,
 		repoName,

+ 6 - 11
cli/cmd/root.go

@@ -11,7 +11,7 @@ import (
 	"github.com/Masterminds/semver/v3"
 	"github.com/fatih/color"
 	"github.com/google/go-github/v41/github"
-	"github.com/porter-dev/porter/cli/cmd/config"
+	cfg "github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/util/homedir"
@@ -28,20 +28,18 @@ var home = homedir.HomeDir()
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
-func Execute() {
-	Setup()
-
+func Execute(ctx context.Context) error {
 	rootCmd.PersistentFlags().AddFlagSet(utils.DefaultFlagSet)
 
-	if config.Version != "dev" {
+	if cfg.Version != "dev" {
 		ghClient := github.NewClient(nil)
-		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+		ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
 		defer cancel()
 		release, _, err := ghClient.Repositories.GetLatestRelease(ctx, "porter-dev", "porter")
 		if err == nil {
 			release.GetURL()
 			// we do not care for an error here because we do not want to block the user here
-			constraint, err := semver.NewConstraint(fmt.Sprintf("> %s", strings.TrimPrefix(config.Version, "v")))
+			constraint, err := semver.NewConstraint(fmt.Sprintf("> %s", strings.TrimPrefix(cfg.Version, "v")))
 			if err == nil {
 				latestRelease, err := semver.NewVersion(strings.TrimPrefix(release.GetTagName(), "v"))
 				if err == nil {
@@ -63,8 +61,5 @@ func Execute() {
 		color.New(color.FgRed).Println(err)
 		os.Exit(1)
 	}
-}
-
-func Setup() {
-	config.InitAndLoadConfig()
+	return nil
 }

+ 77 - 72
cli/cmd/run.go

@@ -12,6 +12,7 @@ import (
 	"github.com/fatih/color"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/cli/cmd/config"
 	"github.com/porter-dev/porter/cli/cmd/utils"
 	"github.com/spf13/cobra"
 	batchv1 "k8s.io/api/batch/v1"
@@ -48,7 +49,7 @@ var runCmd = &cobra.Command{
 	Args:  cobra.MinimumNArgs(2),
 	Short: "Runs a command inside a connected cluster container.",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, run)
+		err := checkLoginAndRun(cmd.Context(), args, run)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -61,7 +62,7 @@ var cleanupCmd = &cobra.Command{
 	Args:  cobra.NoArgs,
 	Short: "Delete any lingering ephemeral pods that were created with \"porter run\".",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, cleanup)
+		err := checkLoginAndRun(cmd.Context(), args, cleanup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -128,7 +129,7 @@ func init() {
 	runCmd.AddCommand(cleanupCmd)
 }
 
-func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
+func run(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	execArgs := args[1:]
 
 	color.New(color.FgGreen).Println("Running", strings.Join(execArgs, " "), "for release", args[0])
@@ -139,7 +140,7 @@ func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 
 	if len(execArgs) > 0 {
 		release, err := client.GetRelease(
-			context.Background(), cliConf.Project, cliConf.Cluster, namespace, args[0],
+			ctx, cliConf.Project, cliConf.Cluster, namespace, args[0],
 		)
 		if err != nil {
 			return fmt.Errorf("error fetching release %s: %w", args[0], err)
@@ -155,7 +156,7 @@ func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 		}
 	}
 
-	podsSimple, err := getPods(client, namespace, args[0])
+	podsSimple, err := getPods(ctx, client, cliConf, namespace, args[0])
 	if err != nil {
 		return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
 	}
@@ -227,10 +228,11 @@ func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 	}
 
 	config := &PorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConf,
 	}
 
-	err = config.setSharedConfig()
+	err = config.setSharedConfig(ctx)
 
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
@@ -240,15 +242,16 @@ func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []strin
 		return executeRun(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
 	}
 
-	return executeRunEphemeral(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
+	return executeRunEphemeral(ctx, config, namespace, selectedPod.Name, selectedContainerName, execArgs)
 }
 
-func cleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
+func cleanup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, _ []string) error {
 	config := &PorterRunSharedConfig{
-		Client: client,
+		Client:    client,
+		CLIConfig: cliConfig,
 	}
 
-	err := config.setSharedConfig()
+	err := config.setSharedConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
 	}
@@ -270,20 +273,20 @@ func cleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []stri
 	color.New(color.FgGreen).Println("Fetching ephemeral pods for cleanup")
 
 	if proceed == "All namespaces" {
-		namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+		namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return err
 		}
 
 		for _, namespace := range namespaces.Items {
-			if pods, err := getEphemeralPods(namespace.Name, config.Clientset); err == nil {
+			if pods, err := getEphemeralPods(ctx, namespace.Name, config.Clientset); err == nil {
 				podNames = append(podNames, pods...)
 			} else {
 				return err
 			}
 		}
 	} else {
-		if pods, err := getEphemeralPods(namespace, config.Clientset); err == nil {
+		if pods, err := getEphemeralPods(ctx, namespace, config.Clientset); err == nil {
 			podNames = append(podNames, pods...)
 		} else {
 			return err
@@ -304,7 +307,7 @@ func cleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []stri
 		color.New(color.FgBlue).Printf("Deleting ephemeral pod: %s\n", podName)
 
 		err = config.Clientset.CoreV1().Pods(namespace).Delete(
-			context.Background(), podName, metav1.DeleteOptions{},
+			ctx, podName, metav1.DeleteOptions{},
 		)
 		if err != nil {
 			return err
@@ -314,11 +317,11 @@ func cleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []stri
 	return nil
 }
 
-func getEphemeralPods(namespace string, clientset *kubernetes.Clientset) ([]string, error) {
+func getEphemeralPods(ctx context.Context, namespace string, clientset *kubernetes.Clientset) ([]string, error) {
 	var podNames []string
 
 	pods, err := clientset.CoreV1().Pods(namespace).List(
-		context.Background(), metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
+		ctx, metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
 	)
 	if err != nil {
 		return nil, err
@@ -332,17 +335,18 @@ func getEphemeralPods(namespace string, clientset *kubernetes.Clientset) ([]stri
 }
 
 type PorterRunSharedConfig struct {
-	Client     *api.Client
+	Client     api.Client
 	RestConf   *rest.Config
 	Clientset  *kubernetes.Clientset
 	RestClient *rest.RESTClient
+	CLIConfig  config.CLIConfig
 }
 
-func (p *PorterRunSharedConfig) setSharedConfig() error {
-	pID := cliConf.Project
-	cID := cliConf.Cluster
+func (p *PorterRunSharedConfig) setSharedConfig(ctx context.Context) error {
+	pID := p.CLIConfig.Project
+	cID := p.CLIConfig.Cluster
 
-	kubeResp, err := p.Client.GetKubeconfig(context.Background(), pID, cID, cliConf.Kubeconfig)
+	kubeResp, err := p.Client.GetKubeconfig(ctx, pID, cID, p.CLIConfig.Kubeconfig)
 	if err != nil {
 		return err
 	}
@@ -390,11 +394,11 @@ type podSimple struct {
 	ContainerNames []string
 }
 
-func getPods(client *api.Client, namespace, releaseName string) ([]podSimple, error) {
+func getPods(ctx context.Context, client api.Client, cliConf config.CLIConfig, namespace, releaseName string) ([]podSimple, error) {
 	pID := cliConf.Project
 	cID := cliConf.Cluster
 
-	resp, err := client.GetK8sAllPods(context.TODO(), pID, cID, namespace, releaseName)
+	resp, err := client.GetK8sAllPods(ctx, pID, cID, namespace, releaseName)
 	if err != nil {
 		return nil, err
 	}
@@ -461,28 +465,28 @@ func executeRun(config *PorterRunSharedConfig, namespace, name, container string
 	})
 }
 
-func executeRunEphemeral(config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
-	existing, err := getExistingPod(config, name, namespace)
+func executeRunEphemeral(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
+	existing, err := getExistingPod(ctx, config, name, namespace)
 	if err != nil {
 		return err
 	}
 
-	newPod, err := createEphemeralPodFromExisting(config, existing, container, args)
+	newPod, err := createEphemeralPodFromExisting(ctx, config, existing, container, args)
 	if err != nil {
 		return err
 	}
 	podName := newPod.ObjectMeta.Name
 
 	// delete the ephemeral pod no matter what
-	defer deletePod(config, podName, namespace)
+	defer deletePod(ctx, config, podName, namespace) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	color.New(color.FgYellow).Printf("Waiting for pod %s to be ready...", podName)
-	if err = waitForPod(config, newPod); err != nil {
+	if err = waitForPod(ctx, config, newPod); err != nil {
 		color.New(color.FgRed).Println("failed")
-		return handlePodAttachError(err, config, namespace, podName, container)
+		return handlePodAttachError(ctx, err, config, namespace, podName, container)
 	}
 
-	err = checkForPodDeletionCronJob(config)
+	err = checkForPodDeletionCronJob(ctx, config)
 	if err != nil {
 		return err
 	}
@@ -490,7 +494,7 @@ func executeRunEphemeral(config *PorterRunSharedConfig, namespace, name, contain
 	// refresh pod info for latest status
 	newPod, err = config.Clientset.CoreV1().
 		Pods(newPod.Namespace).
-		Get(context.Background(), newPod.Name, metav1.GetOptions{})
+		Get(ctx, newPod.Name, metav1.GetOptions{})
 
 	// pod exited while we were waiting.  maybe an error maybe not.
 	// we dont know if the user wanted an interactive shell or not.
@@ -498,11 +502,11 @@ func executeRunEphemeral(config *PorterRunSharedConfig, namespace, name, contain
 	if isPodExited(newPod) {
 		color.New(color.FgGreen).Println("complete!")
 		var writtenBytes int64
-		writtenBytes, _ = pipePodLogsToStdout(config, namespace, podName, container, false)
+		writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
 
 		if verbose || writtenBytes == 0 {
 			color.New(color.FgYellow).Println("Could not get logs. Pod events:")
-			pipeEventsToStdout(config, namespace, podName, container, false)
+			pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 		}
 		return nil
 	}
@@ -543,44 +547,44 @@ func executeRunEphemeral(config *PorterRunSharedConfig, namespace, name, contain
 		})
 	}); err != nil {
 		// ugly way to catch no TTY errors, such as when running command "echo \"hello\""
-		return handlePodAttachError(err, config, namespace, podName, container)
+		return handlePodAttachError(ctx, err, config, namespace, podName, container)
 	}
 
 	if verbose {
 		color.New(color.FgYellow).Println("Pod events:")
-		pipeEventsToStdout(config, namespace, podName, container, false)
+		pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 
 	return err
 }
 
-func checkForPodDeletionCronJob(config *PorterRunSharedConfig) error {
+func checkForPodDeletionCronJob(ctx context.Context, config *PorterRunSharedConfig) error {
 	// try and create the cron job and all of the other required resources as necessary,
 	// starting with the service account, then role and then a role binding
 
-	err := checkForServiceAccount(config)
+	err := checkForServiceAccount(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	err = checkForClusterRole(config)
+	err = checkForClusterRole(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	err = checkForRoleBinding(config)
+	err = checkForRoleBinding(ctx, config)
 	if err != nil {
 		return err
 	}
 
-	namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 
 	for _, namespace := range namespaces.Items {
 		cronJobs, err := config.Clientset.BatchV1().CronJobs(namespace.Name).List(
-			context.Background(), metav1.ListOptions{},
+			ctx, metav1.ListOptions{},
 		)
 		if err != nil {
 			return err
@@ -596,7 +600,7 @@ func checkForPodDeletionCronJob(config *PorterRunSharedConfig) error {
 			for _, cronJob := range cronJobs.Items {
 				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
 					err = config.Clientset.BatchV1().CronJobs(namespace.Name).Delete(
-						context.Background(), cronJob.Name, metav1.DeleteOptions{},
+						ctx, cronJob.Name, metav1.DeleteOptions{},
 					)
 					if err != nil {
 						return err
@@ -635,7 +639,7 @@ func checkForPodDeletionCronJob(config *PorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.BatchV1().CronJobs("default").Create(
-		context.Background(), cronJob, metav1.CreateOptions{},
+		ctx, cronJob, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -644,15 +648,15 @@ func checkForPodDeletionCronJob(config *PorterRunSharedConfig) error {
 	return nil
 }
 
-func checkForServiceAccount(config *PorterRunSharedConfig) error {
-	namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+func checkForServiceAccount(ctx context.Context, config *PorterRunSharedConfig) error {
+	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
 
 	for _, namespace := range namespaces.Items {
 		serviceAccounts, err := config.Clientset.CoreV1().ServiceAccounts(namespace.Name).List(
-			context.Background(), metav1.ListOptions{},
+			ctx, metav1.ListOptions{},
 		)
 		if err != nil {
 			return err
@@ -668,7 +672,7 @@ func checkForServiceAccount(config *PorterRunSharedConfig) error {
 			for _, svcAccount := range serviceAccounts.Items {
 				if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
 					err = config.Clientset.CoreV1().ServiceAccounts(namespace.Name).Delete(
-						context.Background(), svcAccount.Name, metav1.DeleteOptions{},
+						ctx, svcAccount.Name, metav1.DeleteOptions{},
 					)
 					if err != nil {
 						return err
@@ -684,7 +688,7 @@ func checkForServiceAccount(config *PorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.CoreV1().ServiceAccounts("default").Create(
-		context.Background(), serviceAccount, metav1.CreateOptions{},
+		ctx, serviceAccount, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -693,9 +697,9 @@ func checkForServiceAccount(config *PorterRunSharedConfig) error {
 	return nil
 }
 
-func checkForClusterRole(config *PorterRunSharedConfig) error {
+func checkForClusterRole(ctx context.Context, config *PorterRunSharedConfig) error {
 	roles, err := config.Clientset.RbacV1().ClusterRoles().List(
-		context.Background(), metav1.ListOptions{},
+		ctx, metav1.ListOptions{},
 	)
 	if err != nil {
 		return err
@@ -725,7 +729,7 @@ func checkForClusterRole(config *PorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.RbacV1().ClusterRoles().Create(
-		context.Background(), role, metav1.CreateOptions{},
+		ctx, role, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -734,9 +738,9 @@ func checkForClusterRole(config *PorterRunSharedConfig) error {
 	return nil
 }
 
-func checkForRoleBinding(config *PorterRunSharedConfig) error {
+func checkForRoleBinding(ctx context.Context, config *PorterRunSharedConfig) error {
 	bindings, err := config.Clientset.RbacV1().ClusterRoleBindings().List(
-		context.Background(), metav1.ListOptions{},
+		ctx, metav1.ListOptions{},
 	)
 	if err != nil {
 		return err
@@ -767,7 +771,7 @@ func checkForRoleBinding(config *PorterRunSharedConfig) error {
 		},
 	}
 	_, err = config.Clientset.RbacV1().ClusterRoleBindings().Create(
-		context.Background(), binding, metav1.CreateOptions{},
+		ctx, binding, metav1.CreateOptions{},
 	)
 	if err != nil {
 		return err
@@ -776,7 +780,7 @@ func checkForRoleBinding(config *PorterRunSharedConfig) error {
 	return nil
 }
 
-func waitForPod(config *PorterRunSharedConfig, pod *v1.Pod) error {
+func waitForPod(ctx context.Context, config *PorterRunSharedConfig, pod *v1.Pod) error {
 	var (
 		w   watch.Interface
 		err error
@@ -789,7 +793,7 @@ func waitForPod(config *PorterRunSharedConfig, pod *v1.Pod) error {
 		selector := fields.OneTermEqualSelector("metadata.name", pod.Name).String()
 		w, err = config.Clientset.CoreV1().
 			Pods(pod.Namespace).
-			Watch(context.Background(), metav1.ListOptions{FieldSelector: selector})
+			Watch(ctx, metav1.ListOptions{FieldSelector: selector})
 
 		if err == nil {
 			break
@@ -807,7 +811,7 @@ func waitForPod(config *PorterRunSharedConfig, pod *v1.Pod) error {
 			// creating the listener.
 			pod, err = config.Clientset.CoreV1().
 				Pods(pod.Namespace).
-				Get(context.Background(), pod.Name, metav1.GetOptions{})
+				Get(ctx, pod.Name, metav1.GetOptions{})
 			if isPodReady(pod) || isPodExited(pod) {
 				return nil
 			}
@@ -840,23 +844,23 @@ func isPodExited(pod *v1.Pod) bool {
 	return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
 }
 
-func handlePodAttachError(err error, config *PorterRunSharedConfig, namespace, podName, container string) error {
+func handlePodAttachError(ctx context.Context, err error, config *PorterRunSharedConfig, namespace, podName, container string) error {
 	if verbose {
 		color.New(color.FgYellow).Fprintf(os.Stderr, "Error: %s\n", err)
 	}
 	color.New(color.FgYellow).Fprintln(os.Stderr, "Could not open a shell to this container. Container logs:")
 
 	var writtenBytes int64
-	writtenBytes, _ = pipePodLogsToStdout(config, namespace, podName, container, false)
+	writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
 
 	if verbose || writtenBytes == 0 {
 		color.New(color.FgYellow).Fprintln(os.Stderr, "Could not get logs. Pod events:")
-		pipeEventsToStdout(config, namespace, podName, container, false)
+		pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 	}
 	return err
 }
 
-func pipePodLogsToStdout(config *PorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
+func pipePodLogsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
 	podLogOpts := v1.PodLogOptions{
 		Container: container,
 		Follow:    follow,
@@ -865,7 +869,7 @@ func pipePodLogsToStdout(config *PorterRunSharedConfig, namespace, name, contain
 	req := config.Clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
 
 	podLogs, err := req.Stream(
-		context.Background(),
+		ctx,
 	)
 	if err != nil {
 		return 0, err
@@ -876,13 +880,13 @@ func pipePodLogsToStdout(config *PorterRunSharedConfig, namespace, name, contain
 	return io.Copy(os.Stdout, podLogs)
 }
 
-func pipeEventsToStdout(config *PorterRunSharedConfig, namespace, name, container string, follow bool) error {
+func pipeEventsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, _ string, _ bool) error {
 	// update the config in case the operation has taken longer than token expiry time
-	config.setSharedConfig()
+	config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	// creates the clientset
 	resp, err := config.Clientset.CoreV1().Events(namespace).List(
-		context.TODO(),
+		ctx,
 		metav1.ListOptions{
 			FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", name, namespace),
 		},
@@ -898,20 +902,20 @@ func pipeEventsToStdout(config *PorterRunSharedConfig, namespace, name, containe
 	return nil
 }
 
-func getExistingPod(config *PorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
+func getExistingPod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
 	return config.Clientset.CoreV1().Pods(namespace).Get(
-		context.Background(),
+		ctx,
 		name,
 		metav1.GetOptions{},
 	)
 }
 
-func deletePod(config *PorterRunSharedConfig, name, namespace string) error {
+func deletePod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) error {
 	// update the config in case the operation has taken longer than token expiry time
-	config.setSharedConfig()
+	config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
 
 	err := config.Clientset.CoreV1().Pods(namespace).Delete(
-		context.Background(),
+		ctx,
 		name,
 		metav1.DeleteOptions{},
 	)
@@ -926,6 +930,7 @@ func deletePod(config *PorterRunSharedConfig, name, namespace string) error {
 }
 
 func createEphemeralPodFromExisting(
+	ctx context.Context,
 	config *PorterRunSharedConfig,
 	existing *v1.Pod,
 	container string,
@@ -1011,7 +1016,7 @@ func createEphemeralPodFromExisting(
 
 	// create the pod and return it
 	return config.Clientset.CoreV1().Pods(existing.ObjectMeta.Namespace).Create(
-		context.Background(),
+		ctx,
 		newPod,
 		metav1.CreateOptions{},
 	)

+ 31 - 11
cli/cmd/server.go

@@ -1,6 +1,7 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"os/exec"
@@ -35,10 +36,18 @@ var startCmd = &cobra.Command{
 	Use:   "start",
 	Short: "Starts a Porter server instance on the host",
 	Run: func(cmd *cobra.Command, args []string) {
+		ctx := cmd.Context()
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			os.Exit(1)
+		}
+
 		if cliConf.Driver == "docker" {
 			cliConf.SetDriver("docker")
 
 			err := startDocker(
+				ctx,
+				cliConf,
 				opts.imageTag,
 				opts.db,
 				*opts.port,
@@ -48,7 +57,7 @@ var startCmd = &cobra.Command{
 				red.Println("Error running start:", err.Error())
 				red.Println("Shutting down...")
 
-				err = stopDocker()
+				err = stopDocker(ctx)
 
 				if err != nil {
 					red.Println("Shutdown unsuccessful:", err.Error())
@@ -59,6 +68,8 @@ var startCmd = &cobra.Command{
 		} else {
 			cliConf.SetDriver("local")
 			err := startLocal(
+				ctx,
+				cliConf,
 				opts.db,
 				*opts.port,
 			)
@@ -75,8 +86,13 @@ var stopCmd = &cobra.Command{
 	Use:   "stop",
 	Short: "Stops a Porter instance running on the Docker engine",
 	Run: func(cmd *cobra.Command, args []string) {
+		cliConf, err := config.InitAndLoadConfig()
+		if err != nil {
+			os.Exit(1)
+		}
+
 		if cliConf.Driver == "docker" {
-			if err := stopDocker(); err != nil {
+			if err := stopDocker(cmd.Context()); err != nil {
 				color.New(color.FgRed).Println("Shutdown unsuccessful:", err.Error())
 				os.Exit(1)
 			}
@@ -115,6 +131,8 @@ func init() {
 }
 
 func startDocker(
+	ctx context.Context,
+	cliConf config.CLIConfig,
 	imageTag string,
 	db string,
 	port int,
@@ -141,7 +159,7 @@ func startDocker(
 		Env:            env,
 	}
 
-	_, _, err := docker.StartPorter(startOpts)
+	_, _, err := docker.StartPorter(ctx, startOpts)
 	if err != nil {
 		return err
 	}
@@ -154,6 +172,8 @@ func startDocker(
 }
 
 func startLocal(
+	ctx context.Context,
+	cliConf config.CLIConfig,
 	db string,
 	port int,
 ) error {
@@ -169,7 +189,7 @@ func startLocal(
 	staticFilePath := filepath.Join(home, ".porter", "static")
 
 	if _, err := os.Stat(cmdPath); os.IsNotExist(err) {
-		err := downloadMatchingRelease(porterDir)
+		err := downloadMatchingRelease(ctx, porterDir)
 		if err != nil {
 			color.New(color.FgRed).Println("Failed to download server binary:", err.Error())
 			os.Exit(1)
@@ -184,7 +204,7 @@ func startLocal(
 	err := cmdVersionPorter.Run()
 
 	if err != nil || writer.Version != config.Version {
-		err := downloadMatchingRelease(porterDir)
+		err := downloadMatchingRelease(ctx, porterDir)
 		if err != nil {
 			color.New(color.FgRed).Println("Failed to download server binary:", err.Error())
 			os.Exit(1)
@@ -223,13 +243,13 @@ func startLocal(
 	return nil
 }
 
-func stopDocker() error {
-	agent, err := docker.NewAgentFromEnv()
+func stopDocker(ctx context.Context) error {
+	agent, err := docker.NewAgentFromEnv(ctx)
 	if err != nil {
 		return err
 	}
 
-	err = agent.StopPorterContainersWithProcessID("main", false)
+	err = agent.StopPorterContainersWithProcessID(ctx, "main", false)
 
 	if err != nil {
 		return err
@@ -242,7 +262,7 @@ func stopDocker() error {
 	return nil
 }
 
-func downloadMatchingRelease(porterDir string) error {
+func downloadMatchingRelease(ctx context.Context, porterDir string) error {
 	z := &github.ZIPReleaseGetter{
 		AssetName:           "portersvr",
 		AssetFolderDest:     porterDir,
@@ -258,7 +278,7 @@ func downloadMatchingRelease(porterDir string) error {
 		},
 	}
 
-	err := z.GetRelease(config.Version)
+	err := z.GetRelease(ctx, config.Version)
 	if err != nil {
 		return err
 	}
@@ -278,5 +298,5 @@ func downloadMatchingRelease(porterDir string) error {
 		},
 	}
 
-	return zStatic.GetRelease(config.Version)
+	return zStatic.GetRelease(ctx, config.Version)
 }

+ 9 - 12
cli/cmd/stack.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/porter-dev/porter/cli/cmd/config"
 	v2 "github.com/porter-dev/porter/cli/cmd/v2"
 
 	"github.com/fatih/color"
@@ -37,7 +38,7 @@ var stackEnvGroupAddCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Add an env group to a stack",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, stackAddEnvGroup)
+		err := checkLoginAndRun(cmd.Context(), args, stackAddEnvGroup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -49,7 +50,7 @@ var stackEnvGroupRemoveCmd = &cobra.Command{
 	Args:  cobra.ExactArgs(1),
 	Short: "Remove an existing env group from a stack",
 	Run: func(cmd *cobra.Command, args []string) {
-		err := checkLoginAndRun(args, stackRemoveEnvGroup)
+		err := checkLoginAndRun(cmd.Context(), args, stackRemoveEnvGroup)
 		if err != nil {
 			os.Exit(1)
 		}
@@ -102,9 +103,7 @@ func init() {
 	stackEnvGroupCmd.AddCommand(stackEnvGroupRemoveCmd)
 }
 
-func stackAddEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func stackAddEnvGroup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -128,7 +127,7 @@ func stackAddEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 		return fmt.Errorf("one or more variables are required to create the env group")
 	}
 
-	listStacks, err := client.ListStacks(context.Background(), cliConf.Project, cliConf.Cluster, namespace)
+	listStacks, err := client.ListStacks(ctx, cliConf.Project, cliConf.Cluster, namespace)
 	if err != nil {
 		return err
 	}
@@ -169,7 +168,7 @@ func stackAddEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	}
 
 	err = client.AddEnvGroupToStack(
-		context.Background(), cliConf.Project, cliConf.Cluster, namespace, stackID,
+		ctx, cliConf.Project, cliConf.Cluster, namespace, stackID,
 		&types.CreateStackEnvGroupRequest{
 			Name:               envGroupName,
 			Variables:          normalVariables,
@@ -187,9 +186,7 @@ func stackAddEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client,
 	return nil
 }
 
-func stackRemoveEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
-	ctx := context.Background()
-
+func stackRemoveEnvGroup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
 	project, err := client.GetProject(ctx, cliConf.Project)
 	if err != nil {
 		return fmt.Errorf("could not retrieve project from Porter API. Please contact support@porter.run")
@@ -211,7 +208,7 @@ func stackRemoveEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 		return fmt.Errorf("empty stack name")
 	}
 
-	listStacks, err := client.ListStacks(context.Background(), cliConf.Project, cliConf.Cluster, namespace)
+	listStacks, err := client.ListStacks(ctx, cliConf.Project, cliConf.Cluster, namespace)
 	if err != nil {
 		return err
 	}
@@ -230,7 +227,7 @@ func stackRemoveEnvGroup(_ *types.GetAuthenticatedUserResponse, client *api.Clie
 		return fmt.Errorf("stack not found")
 	}
 
-	err = client.RemoveEnvGroupFromStack(context.Background(), cliConf.Project, cliConf.Cluster, namespace, stackID,
+	err = client.RemoveEnvGroupFromStack(ctx, cliConf.Project, cliConf.Cluster, namespace, stackID,
 		envGroupName)
 
 	if err != nil {

+ 1 - 1
cli/cmd/v2/apply.go

@@ -16,7 +16,7 @@ import (
 )
 
 // Apply implements the functionality of the `porter apply` command for validate apply v2 projects
-func Apply(ctx context.Context, cliConf *config.CLIConfig, client *api.Client, porterYamlPath string) error {
+func Apply(ctx context.Context, cliConf config.CLIConfig, client api.Client, porterYamlPath string) error {
 	if len(porterYamlPath) == 0 {
 		return fmt.Errorf("porter yaml is empty")
 	}

+ 6 - 5
cli/cmd/v2/build.go

@@ -40,7 +40,7 @@ type buildInput struct {
 }
 
 // build will create an image repository if it does not exist, and then build and push the image
-func build(ctx context.Context, client *api.Client, inp buildInput) error {
+func build(ctx context.Context, client api.Client, inp buildInput) error {
 	if inp.ProjectID == 0 {
 		return errors.New("must specify a project id")
 	}
@@ -61,7 +61,7 @@ func build(ctx context.Context, client *api.Client, inp buildInput) error {
 		return fmt.Errorf("error creating image repository: %w", err)
 	}
 
-	dockerAgent, err := docker.NewAgentWithAuthGetter(client, projectID)
+	dockerAgent, err := docker.NewAgentWithAuthGetter(ctx, client, projectID)
 	if err != nil {
 		return fmt.Errorf("error getting docker agent: %w", err)
 	}
@@ -92,6 +92,7 @@ func build(ctx context.Context, client *api.Client, inp buildInput) error {
 		}
 
 		err = dockerAgent.BuildLocal(
+			ctx,
 			opts,
 		)
 		if err != nil {
@@ -111,7 +112,7 @@ func build(ctx context.Context, client *api.Client, inp buildInput) error {
 			Buildpacks: inp.BuildPacks,
 		}
 
-		err := packAgent.Build(opts, buildConfig, "")
+		err := packAgent.Build(ctx, opts, buildConfig, "")
 		if err != nil {
 			return fmt.Errorf("error building image with pack: %w", err)
 		}
@@ -119,7 +120,7 @@ func build(ctx context.Context, client *api.Client, inp buildInput) error {
 		return fmt.Errorf("invalid build method: %s", inp.BuildMethod)
 	}
 
-	err = dockerAgent.PushImage(fmt.Sprintf("%s:%s", imageURL, tag))
+	err = dockerAgent.PushImage(ctx, fmt.Sprintf("%s:%s", imageURL, tag))
 	if err != nil {
 		return fmt.Errorf("error pushing image url: %w\n", err)
 	}
@@ -127,7 +128,7 @@ func build(ctx context.Context, client *api.Client, inp buildInput) error {
 	return nil
 }
 
-func createImageRepositoryIfNotExists(ctx context.Context, client *api.Client, projectID uint, imageURL string) error {
+func createImageRepositoryIfNotExists(ctx context.Context, client api.Client, projectID uint, imageURL string) error {
 	if projectID == 0 {
 		return errors.New("must specify a project id")
 	}

+ 7 - 1
cli/main.go

@@ -4,6 +4,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"time"
 
@@ -15,6 +16,7 @@ import (
 )
 
 func main() {
+	ctx := context.Background()
 	if errors.SentryDSN != "" {
 		err := sentry.Init(sentry.ClientOptions{
 			Dsn:         errors.SentryDSN,
@@ -33,5 +35,9 @@ func main() {
 		defer sentry.Flush(2 * time.Second)
 	}
 
-	cmd.Execute()
+	err := cmd.Execute(ctx)
+	if err != nil {
+		color.New(color.FgRed).Fprintf(os.Stderr, "error executing command: %s\n", err)
+		os.Exit(1)
+	}
 }

+ 22 - 10
cmd/docker-credential-porter/helper/helper.go

@@ -1,6 +1,9 @@
 package helper
 
 import (
+	"context"
+	"fmt"
+
 	"github.com/docker/docker-credential-helpers/credentials"
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/cli/cmd/config"
@@ -17,17 +20,25 @@ type PorterHelper struct {
 	Cache      docker.CredentialsCache
 }
 
-func NewPorterHelper(debug bool) *PorterHelper {
+// NewPorterHelper creates a docker credential helper
+func NewPorterHelper(debug bool) (*PorterHelper, error) {
+	ctx := context.Background()
+
 	// get the current project ID
-	cliConfig := config.InitAndLoadNewConfig()
-	cache := docker.NewFileCredentialsCache()
+	cliConfig, err := config.InitAndLoadConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error loading porter config: %w", err)
+	}
 
-	var client *api.Client
+	cache := docker.NewFileCredentialsCache()
 
-	if token := cliConfig.Token; token != "" {
-		client = api.NewClientWithToken(cliConfig.Host+"/api", token)
-	} else {
-		client = api.NewClient(cliConfig.Host+"/api", "cookie.json")
+	client, err := api.NewClientWithConfig(ctx, api.NewClientInput{
+		BaseURL:        fmt.Sprintf("%s/api", cliConfig.Host),
+		BearerToken:    cliConfig.Token,
+		CookieFileName: "cookie.json",
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to get porter API client: %w", err)
 	}
 
 	return &PorterHelper{
@@ -39,7 +50,7 @@ func NewPorterHelper(debug bool) *PorterHelper {
 			ProjectID: cliConfig.Project,
 		},
 		Cache: cache,
-	}
+	}, nil
 }
 
 // Add appends credentials to the store.
@@ -57,7 +68,8 @@ func (p *PorterHelper) Delete(serverURL string) error {
 // Get retrieves credentials from the store.
 // It returns username and secret as strings.
 func (p *PorterHelper) Get(serverURL string) (user string, secret string, err error) {
-	return p.AuthGetter.GetCredentials(serverURL)
+	ctx := context.TODO() // docker credentials.Serve interface blocks changing this for now
+	return p.AuthGetter.GetCredentials(ctx, serverURL)
 }
 
 // List returns the stored serverURLs and their associated usernames.

+ 6 - 1
cmd/docker-credential-porter/main.go

@@ -6,6 +6,7 @@ import (
 	"os"
 
 	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/fatih/color"
 	"github.com/porter-dev/porter/cmd/docker-credential-porter/helper"
 )
 
@@ -23,7 +24,11 @@ func main() {
 		os.Exit(0)
 	}
 
-	helper := helper.NewPorterHelper(Version == "dev")
+	helper, err := helper.NewPorterHelper(Version == "dev")
+	if err != nil {
+		color.New(color.FgRed).Fprintf(os.Stderr, "%s\n", err.Error()) //nolint:errcheck,gosec
+		os.Exit(1)
+	}
 
 	credentials.Serve(helper)
 }