Просмотр исходного кода

revert kubeconfig cache (#3075)

Separate out capi and non-capi kubeconfig calls and add telemetry

---------

Co-authored-by: David Townley <davidtownley@Davids-MacBook-Air.local>
Co-authored-by: sunguroku <65516095+sunguroku@users.noreply.github.com>
d-g-town 2 года назад
Родитель
Commit
fb1e53fd2d
3 изменённых файла: 246 добавлений и 266 удалений
  1. 1 1
      api/server/authz/cluster.go
  2. 32 27
      api/server/handlers/cluster/get_kubeconfig.go
  3. 213 238
      internal/kubernetes/config.go

+ 1 - 1
api/server/authz/cluster.go

@@ -120,7 +120,7 @@ func (d *OutOfClusterAgentGetter) GetAgent(r *http.Request, cluster *models.Clus
 
 	agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get agent: %w", err)
+		return nil, fmt.Errorf("failed to get agent: %s", err.Error())
 	}
 
 	newCtx := context.WithValue(r.Context(), KubernetesAgentCtxKey, agent)

+ 32 - 27
api/server/handlers/cluster/get_kubeconfig.go

@@ -1,9 +1,14 @@
 package cluster
 
 import (
+	"context"
+	"encoding/base64"
 	"errors"
+	"fmt"
 	"net/http"
 
+	"github.com/bufbuild/connect-go"
+	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
@@ -41,33 +46,33 @@ func (c *GetTemporaryKubeconfigHandler) ServeHTTP(w http.ResponseWriter, r *http
 	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
 
 	outOfClusterConfig := c.GetOutOfClusterConfig(cluster)
-	//
-	//if cluster.ProvisionedBy == "CAPI" {
-	//	kubeconfigResp, err := c.Config().ClusterControlPlaneClient.KubeConfigForCluster(context.Background(), connect.NewRequest(
-	//		&porterv1.KubeConfigForClusterRequest{
-	//			ProjectId: int64(cluster.ProjectID),
-	//			ClusterId: int64(cluster.ID),
-	//		},
-	//	))
-	//	if err != nil {
-	//		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting temporary capi config: %w", err)))
-	//		return
-	//	}
-	//	if kubeconfigResp.Msg == nil {
-	//		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error reading temporary capi config: %w", err)))
-	//		return
-	//	}
-	//	b64, err := base64.StdEncoding.DecodeString(kubeconfigResp.Msg.KubeConfig)
-	//	if err != nil {
-	//		c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("unable to decode base64 kubeconfig: %w", err)))
-	//		return
-	//	}
-	//	res := &types.GetTemporaryKubeconfigResponse{
-	//		Kubeconfig: b64,
-	//	}
-	//	c.WriteResult(w, r, res)
-	//	return
-	//}
+
+	if cluster.ProvisionedBy == "CAPI" {
+		kubeconfigResp, err := c.Config().ClusterControlPlaneClient.KubeConfigForCluster(context.Background(), connect.NewRequest(
+			&porterv1.KubeConfigForClusterRequest{
+				ProjectId: int64(cluster.ProjectID),
+				ClusterId: int64(cluster.ID),
+			},
+		))
+		if err != nil {
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error getting temporary capi config: %w", err)))
+			return
+		}
+		if kubeconfigResp.Msg == nil {
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("error reading temporary capi config: %w", err)))
+			return
+		}
+		b64, err := base64.StdEncoding.DecodeString(kubeconfigResp.Msg.KubeConfig)
+		if err != nil {
+			c.HandleAPIError(w, r, apierrors.NewErrInternal(fmt.Errorf("unable to decode base64 kubeconfig: %w", err)))
+			return
+		}
+		res := &types.GetTemporaryKubeconfigResponse{
+			Kubeconfig: b64,
+		}
+		c.WriteResult(w, r, res)
+		return
+	}
 
 	kubeconfig, err := outOfClusterConfig.CreateRawConfigFromCluster()
 	if err != nil {

+ 213 - 238
internal/kubernetes/config.go

@@ -11,6 +11,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/porter-dev/porter/internal/telemetry"
+
 	"github.com/bufbuild/connect-go"
 	porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
 	"github.com/porter-dev/api-contracts/generated/go/porter/v1/porterv1connect"
@@ -63,33 +65,38 @@ func GetDynamicClientOutOfClusterConfig(conf *OutOfClusterConfig) (dynamic.Inter
 
 // GetAgentOutOfClusterConfig creates a new Agent using the OutOfClusterConfig
 func GetAgentOutOfClusterConfig(conf *OutOfClusterConfig) (*Agent, error) {
+	ctx, span := telemetry.NewSpan(context.Background(), "get-agent-out-of-cluster-config")
+	defer span.End()
+
 	if conf.AllowInClusterConnections && conf.Cluster.AuthMechanism == models.InCluster {
 		return GetAgentInClusterConfig(conf.DefaultNamespace)
 	}
 
 	var restConf *rest.Config
 
-	//if conf.Cluster.ProvisionedBy == "CAPI" {
-	//	rc, err := restConfigForCAPICluster(context.Background(), conf.CAPIManagementClusterClient, *conf.Cluster)
-	//	if err != nil {
-	//		return nil, err
-	//	}
-	//	restConf = rc
-	//} else {
-	rc, err := conf.ToRESTConfig()
-	if err != nil {
-		return nil, fmt.Errorf("failed to convert ooc config to rest config: %w", err)
+	if conf.Cluster.ProvisionedBy == "CAPI" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "capi-provisioned", Value: true})
+
+		rc, err := restConfigForCAPICluster(ctx, conf.CAPIManagementClusterClient, *conf.Cluster)
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting rest config for capi cluster")
+		}
+		restConf = rc
+	} else {
+		rc, err := conf.ToRESTConfig()
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting rest config")
+		}
+		restConf = rc
 	}
-	restConf = rc
-	//}
 
 	if restConf == nil {
-		return nil, fmt.Errorf("error getting rest config for cluster %s", conf.Cluster.ProvisionedBy)
+		return nil, telemetry.Error(ctx, span, nil, "error getting rest config for cluster")
 	}
 
 	clientset, err := kubernetes.NewForConfig(restConf)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get new clientset from rest config: %w", err)
+		return nil, telemetry.Error(ctx, span, err, "error getting new clientset for config")
 	}
 
 	return &Agent{conf, clientset}, nil
@@ -97,38 +104,56 @@ func GetAgentOutOfClusterConfig(conf *OutOfClusterConfig) (*Agent, error) {
 
 // restConfigForCAPICluster gets the kubernetes rest API client for a CAPI cluster
 func restConfigForCAPICluster(ctx context.Context, mgmtClusterConnection porterv1connect.ClusterControlPlaneServiceClient, cluster models.Cluster) (*rest.Config, error) {
+	ctx, span := telemetry.NewSpan(ctx, "rest-config-for-capi-cluster")
+	defer span.End()
+
 	kc, err := kubeConfigForCAPICluster(ctx, mgmtClusterConnection, cluster)
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error getting kubeconfig")
 	}
 
 	rc, err := writeKubeConfigToFileAndRestClient([]byte(kc))
 	if err != nil {
-		return nil, err
+		return nil, telemetry.Error(ctx, span, err, "error writing kubeconfig to file")
 	}
 	return rc, nil
 }
 
 // kubeConfigForCAPICluster grabs the raw kube config for a capi cluster
 func kubeConfigForCAPICluster(ctx context.Context, mgmtClusterConnection porterv1connect.ClusterControlPlaneServiceClient, cluster models.Cluster) (string, error) {
-	kubeconfigResp, err := mgmtClusterConnection.KubeConfigForCluster(context.Background(), connect.NewRequest(
+	ctx, span := telemetry.NewSpan(ctx, "kubeconfig-capi")
+	defer span.End()
+
+	if cluster.ProjectID == 0 {
+		return "", telemetry.Error(ctx, span, nil, "missing project id")
+	}
+	if cluster.ID == 0 {
+		return "", telemetry.Error(ctx, span, nil, "missing cluster id")
+	}
+
+	telemetry.WithAttributes(span,
+		telemetry.AttributeKV{Key: "project-id", Value: cluster.ProjectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cluster.ID},
+	)
+
+	kubeconfigResp, err := mgmtClusterConnection.KubeConfigForCluster(ctx, connect.NewRequest(
 		&porterv1.KubeConfigForClusterRequest{
 			ProjectId: int64(cluster.ProjectID),
 			ClusterId: int64(cluster.ID),
 		},
 	))
 	if err != nil {
-		return "", fmt.Errorf("error getting capi config: %w", err)
+		return "", telemetry.Error(ctx, span, err, "error getting capi config")
 	}
 	if kubeconfigResp.Msg == nil {
-		return "", errors.New("no kubeconfig returned for capi cluster")
+		return "", telemetry.Error(ctx, span, nil, "no msg returned for capi cluster")
 	}
 	if kubeconfigResp.Msg.KubeConfig == "" {
-		return "", errors.New("no kubeconfig returned for capi cluster")
+		return "", telemetry.Error(ctx, span, nil, "no kubeconfig returned for capi cluster")
 	}
 	decodedKubeconfig, err := base64.StdEncoding.DecodeString(kubeconfigResp.Msg.KubeConfig)
 	if err != nil {
-		return "", fmt.Errorf("error decoding kubeconfig: %w", err)
+		return "", telemetry.Error(ctx, span, nil, "error decoding capi cluster")
 	}
 	return string(decodedKubeconfig), nil
 }
@@ -207,22 +232,27 @@ type OutOfClusterConfig struct {
 // the result of ToRawKubeConfigLoader, and also adds a custom http transport layer
 // if necessary (required for GCP auth)
 func (conf *OutOfClusterConfig) ToRESTConfig() (*rest.Config, error) {
-	//if conf.Cluster.ProvisionedBy == "CAPI" {
-	//	rc, err := restConfigForCAPICluster(context.Background(), conf.CAPIManagementClusterClient, *conf.Cluster)
-	//	if err != nil {
-	//		return nil, err
-	//	}
-	//	return rc, nil
-	//}
-
-	cmdConf, err := conf.GetClientConfigFromCluster()
+	ctx, span := telemetry.NewSpan(context.Background(), "ooc-to-rest-config")
+	defer span.End()
+
+	if conf.Cluster.ProvisionedBy == "CAPI" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "capi-provisioned", Value: true})
+
+		rc, err := restConfigForCAPICluster(ctx, conf.CAPIManagementClusterClient, *conf.Cluster)
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting config for capi cluster")
+		}
+		return rc, nil
+	}
+
+	cmdConf, err := conf.GetClientConfigFromCluster(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get cmdConf from cluster: %w", err)
+		return nil, telemetry.Error(ctx, span, err, "error getting client config from cluster")
 	}
 
 	restConf, err := cmdConf.ClientConfig()
 	if err != nil {
-		return nil, fmt.Errorf("failed to get client config from cmdConf: %w", err)
+		return nil, telemetry.Error(ctx, span, err, "error getting client config")
 	}
 
 	restConf.Timeout = conf.Timeout
@@ -234,7 +264,10 @@ func (conf *OutOfClusterConfig) ToRESTConfig() (*rest.Config, error) {
 // ToRawKubeConfigLoader creates a clientcmd.ClientConfig from the raw kubeconfig found in
 // the OutOfClusterConfig. It does not implement loading rules or overrides.
 func (conf *OutOfClusterConfig) ToRawKubeConfigLoader() clientcmd.ClientConfig {
-	cmdConf, _ := conf.GetClientConfigFromCluster()
+	ctx, span := telemetry.NewSpan(context.Background(), "ooc-to-raw-kubeconfig-loader")
+	defer span.End()
+
+	cmdConf, _ := conf.GetClientConfigFromCluster(ctx)
 
 	return cmdConf
 }
@@ -277,41 +310,51 @@ func (conf *OutOfClusterConfig) ToRESTMapper() (meta.RESTMapper, error) {
 
 // GetClientConfigFromCluster will construct new clientcmd.ClientConfig using
 // the configuration saved within a Cluster model
-func (conf *OutOfClusterConfig) GetClientConfigFromCluster() (clientcmd.ClientConfig, error) {
+func (conf *OutOfClusterConfig) GetClientConfigFromCluster(ctx context.Context) (clientcmd.ClientConfig, error) {
+	ctx, span := telemetry.NewSpan(ctx, "ooc-get-client-config-from-cluster")
+	defer span.End()
+
 	if conf.Cluster == nil {
-		return nil, fmt.Errorf("cluster cannot be nil")
-	}
-
-	//if conf.Cluster.ProvisionedBy == "CAPI" {
-	//	rc, err := kubeConfigForCAPICluster(context.Background(), conf.CAPIManagementClusterClient, *conf.Cluster)
-	//	if err != nil {
-	//		return nil, err
-	//	}
-	//	clientConfig, err := clientcmd.NewClientConfigFromBytes([]byte(rc))
-	//	if err != nil {
-	//		return nil, err
-	//	}
-	//	rawConfig, err := clientConfig.RawConfig()
-	//	if err != nil {
-	//		return nil, err
-	//	}
-	//
-	//	overrides := &clientcmd.ConfigOverrides{}
-	//
-	//	overrides.Context = api.Context{
-	//		Namespace: conf.DefaultNamespace,
-	//	}
-	//
-	//	return clientcmd.NewDefaultClientConfig(rawConfig, overrides), nil
-	//}
+		return nil, telemetry.Error(ctx, span, nil, "cluster cannot be nil")
+	}
+
+	if conf.Cluster.ProvisionedBy == "CAPI" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "capi-provisioned", Value: true})
+
+		rc, err := kubeConfigForCAPICluster(ctx, conf.CAPIManagementClusterClient, *conf.Cluster)
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting capi kube config")
+		}
+		clientConfig, err := clientcmd.NewClientConfigFromBytes([]byte(rc))
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting config from bytes")
+		}
+		rawConfig, err := clientConfig.RawConfig()
+		if err != nil {
+			return nil, telemetry.Error(ctx, span, err, "error getting raw config")
+		}
+
+		overrides := &clientcmd.ConfigOverrides{}
+
+		if conf.DefaultNamespace != "" {
+			telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace-override", Value: conf.DefaultNamespace})
+			overrides.Context = api.Context{
+				Namespace: conf.DefaultNamespace,
+			}
+		}
+
+		return clientcmd.NewDefaultClientConfig(rawConfig, overrides), nil
+	}
 
 	if conf.Cluster.AuthMechanism == models.Local {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "local-provisioned", Value: true})
+
 		kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
 			conf.Cluster.ProjectID,
 			conf.Cluster.KubeIntegrationID,
 		)
 		if err != nil {
-			return nil, err
+			return nil, telemetry.Error(ctx, span, err, "error reading kube integration")
 		}
 
 		return clientcmd.NewClientConfigFromBytes(kubeAuth.Kubeconfig)
@@ -319,12 +362,13 @@ func (conf *OutOfClusterConfig) GetClientConfigFromCluster() (clientcmd.ClientCo
 
 	apiConfig, err := conf.CreateRawConfigFromCluster()
 	if err != nil {
-		return nil, fmt.Errorf("failed to create raw config from cluster: %w", err)
+		return nil, telemetry.Error(ctx, span, err, "error creating raw config from cluster")
 	}
 
 	overrides := &clientcmd.ConfigOverrides{}
 
 	if conf.DefaultNamespace != "" {
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "namespace-override", Value: conf.DefaultNamespace})
 		overrides.Context = api.Context{
 			Namespace: conf.DefaultNamespace,
 		}
@@ -364,183 +408,132 @@ func (conf *OutOfClusterConfig) CreateRawConfigFromCluster() (*api.Config, error
 		authInfoMap[authInfoName].ImpersonateGroups = groups
 	}
 
-	if conf.Cluster.ProvisionedBy == "CAPI" {
-
-		decodedCert, err := capiCertAuthData(conf.CAPIManagementClusterClient, int(cluster.ID), int(cluster.ProjectID))
+	switch cluster.AuthMechanism {
+	case models.X509:
+		kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
+			cluster.ProjectID,
+			cluster.KubeIntegrationID,
+		)
 		if err != nil {
-			return nil, fmt.Errorf("error retrieving capi certificate authority data: %w", err)
+			return nil, err
 		}
 
-		clusterMap[cluster.Name].CertificateAuthorityData = decodedCert
-
-		// check cache here so that we don't unnecessarily assume role
-		cache, err := conf.getTokenCache()
-		if cache != nil {
-			if tok := cache.Token; err == nil && !cache.IsExpired() && len(tok) > 0 {
-				authInfoMap[authInfoName].Token = string(tok)
-			}
+		authInfoMap[authInfoName].ClientCertificateData = kubeAuth.ClientCertificateData
+		authInfoMap[authInfoName].ClientKeyData = kubeAuth.ClientKeyData
+	case models.Basic:
+		kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
+			cluster.ProjectID,
+			cluster.KubeIntegrationID,
+		)
+		if err != nil {
+			return nil, err
 		}
 
-		// if we didn't get a valid token from cache, generate a new one
-		if authInfoMap[authInfoName].Token == "" {
-
-			req := connect.NewRequest(&porterv1.AssumeRoleCredentialsRequest{
-				ProjectId: int64(cluster.ProjectID),
-			})
-
-			creds, err := conf.CAPIManagementClusterClient.AssumeRoleCredentials(context.Background(), req)
-			if err != nil {
-				return nil, fmt.Errorf("error getting capi credentials for repository: %w", err)
-			}
-
-			awsAuth := &ints.AWSIntegration{
-				AWSAccessKeyID:     []byte(creds.Msg.AwsAccessId),
-				AWSSecretAccessKey: []byte(creds.Msg.AwsSecretKey),
-				AWSSessionToken:    []byte(creds.Msg.AwsSessionToken),
-			}
-
-			awsClusterID := cluster.Name
-			shouldOverride := false
-
-			if cluster.AWSClusterID != "" {
-				awsClusterID = cluster.AWSClusterID
-				shouldOverride = true
-			}
-			tok, err := awsAuth.GetBearerToken(conf.getTokenCache, conf.setTokenCache, awsClusterID, shouldOverride)
-			if err != nil {
-				return nil, fmt.Errorf("error getting bearer token for repository: %w", err)
-			}
-
-			authInfoMap[authInfoName].Token = tok
+		authInfoMap[authInfoName].Username = string(kubeAuth.Username)
+		authInfoMap[authInfoName].Password = string(kubeAuth.Password)
+	case models.Bearer:
+		kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
+			cluster.ProjectID,
+			cluster.KubeIntegrationID,
+		)
+		if err != nil {
+			return nil, err
 		}
-	} else {
-		switch cluster.AuthMechanism {
-		case models.X509:
-			kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
-				cluster.ProjectID,
-				cluster.KubeIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
-
-			authInfoMap[authInfoName].ClientCertificateData = kubeAuth.ClientCertificateData
-			authInfoMap[authInfoName].ClientKeyData = kubeAuth.ClientKeyData
-		case models.Basic:
-			kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
-				cluster.ProjectID,
-				cluster.KubeIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
-
-			authInfoMap[authInfoName].Username = string(kubeAuth.Username)
-			authInfoMap[authInfoName].Password = string(kubeAuth.Password)
-		case models.Bearer:
-			kubeAuth, err := conf.Repo.KubeIntegration().ReadKubeIntegration(
-				cluster.ProjectID,
-				cluster.KubeIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
-
-			authInfoMap[authInfoName].Token = string(kubeAuth.Token)
-		case models.OIDC:
-			oidcAuth, err := conf.Repo.OIDCIntegration().ReadOIDCIntegration(
-				cluster.ProjectID,
-				cluster.OIDCIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
 
-			authInfoMap[authInfoName].AuthProvider = &api.AuthProviderConfig{
-				Name: "oidc",
-				Config: map[string]string{
-					"idp-issuer-url":                 string(oidcAuth.IssuerURL),
-					"client-id":                      string(oidcAuth.ClientID),
-					"client-secret":                  string(oidcAuth.ClientSecret),
-					"idp-certificate-authority-data": string(oidcAuth.CertificateAuthorityData),
-					"id-token":                       string(oidcAuth.IDToken),
-					"refresh-token":                  string(oidcAuth.RefreshToken),
-				},
-			}
-		case models.GCP:
-			gcpAuth, err := conf.Repo.GCPIntegration().ReadGCPIntegration(
-				cluster.ProjectID,
-				cluster.GCPIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
+		authInfoMap[authInfoName].Token = string(kubeAuth.Token)
+	case models.OIDC:
+		oidcAuth, err := conf.Repo.OIDCIntegration().ReadOIDCIntegration(
+			cluster.ProjectID,
+			cluster.OIDCIntegrationID,
+		)
+		if err != nil {
+			return nil, err
+		}
 
-			tok, err := gcpAuth.GetBearerToken(
-				conf.getTokenCache,
-				conf.setTokenCache,
-				"https://www.googleapis.com/auth/cloud-platform",
-			)
+		authInfoMap[authInfoName].AuthProvider = &api.AuthProviderConfig{
+			Name: "oidc",
+			Config: map[string]string{
+				"idp-issuer-url":                 string(oidcAuth.IssuerURL),
+				"client-id":                      string(oidcAuth.ClientID),
+				"client-secret":                  string(oidcAuth.ClientSecret),
+				"idp-certificate-authority-data": string(oidcAuth.CertificateAuthorityData),
+				"id-token":                       string(oidcAuth.IDToken),
+				"refresh-token":                  string(oidcAuth.RefreshToken),
+			},
+		}
+	case models.GCP:
+		gcpAuth, err := conf.Repo.GCPIntegration().ReadGCPIntegration(
+			cluster.ProjectID,
+			cluster.GCPIntegrationID,
+		)
+		if err != nil {
+			return nil, err
+		}
 
-			if tok == nil && err != nil {
-				return nil, err
-			}
+		tok, err := gcpAuth.GetBearerToken(
+			conf.getTokenCache,
+			conf.setTokenCache,
+			"https://www.googleapis.com/auth/cloud-platform",
+		)
 
-			// add this as a bearer token
-			authInfoMap[authInfoName].Token = tok.AccessToken
-		case models.AWS:
-			awsAuth, err := conf.Repo.AWSIntegration().ReadAWSIntegration(
-				cluster.ProjectID,
-				cluster.AWSIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
+		if tok == nil && err != nil {
+			return nil, err
+		}
 
-			awsClusterID := cluster.Name
-			shouldOverride := false
+		// add this as a bearer token
+		authInfoMap[authInfoName].Token = tok.AccessToken
+	case models.AWS:
+		awsAuth, err := conf.Repo.AWSIntegration().ReadAWSIntegration(
+			cluster.ProjectID,
+			cluster.AWSIntegrationID,
+		)
+		if err != nil {
+			return nil, err
+		}
 
-			if cluster.AWSClusterID != "" {
-				awsClusterID = cluster.AWSClusterID
-				shouldOverride = true
-			}
+		awsClusterID := cluster.Name
+		shouldOverride := false
 
-			tok, err := awsAuth.GetBearerToken(conf.getTokenCache, conf.setTokenCache, awsClusterID, shouldOverride)
-			if err != nil {
-				return nil, err
-			}
+		if cluster.AWSClusterID != "" {
+			awsClusterID = cluster.AWSClusterID
+			shouldOverride = true
+		}
 
-			// add this as a bearer token
-			authInfoMap[authInfoName].Token = tok
-		case models.DO:
-			oauthInt, err := conf.Repo.OAuthIntegration().ReadOAuthIntegration(
-				cluster.ProjectID,
-				cluster.DOIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
+		tok, err := awsAuth.GetBearerToken(conf.getTokenCache, conf.setTokenCache, awsClusterID, shouldOverride)
+		if err != nil {
+			return nil, err
+		}
 
-			tok, _, err := oauth.GetAccessToken(oauthInt.SharedOAuthModel, conf.DigitalOceanOAuth, oauth.MakeUpdateOAuthIntegrationTokenFunction(oauthInt, conf.Repo))
-			if err != nil {
-				return nil, err
-			}
+		// add this as a bearer token
+		authInfoMap[authInfoName].Token = tok
+	case models.DO:
+		oauthInt, err := conf.Repo.OAuthIntegration().ReadOAuthIntegration(
+			cluster.ProjectID,
+			cluster.DOIntegrationID,
+		)
+		if err != nil {
+			return nil, err
+		}
 
-			// add this as a bearer token
-			authInfoMap[authInfoName].Token = tok
-		case models.Azure:
-			azInt, err := conf.Repo.AzureIntegration().ReadAzureIntegration(
-				cluster.ProjectID,
-				cluster.AzureIntegrationID,
-			)
-			if err != nil {
-				return nil, err
-			}
+		tok, _, err := oauth.GetAccessToken(oauthInt.SharedOAuthModel, conf.DigitalOceanOAuth, oauth.MakeUpdateOAuthIntegrationTokenFunction(oauthInt, conf.Repo))
+		if err != nil {
+			return nil, err
+		}
 
-			authInfoMap[authInfoName].Token = string(azInt.AKSPassword)
-		default:
-			return nil, errors.New("not a supported auth mechanism")
+		// add this as a bearer token
+		authInfoMap[authInfoName].Token = tok
+	case models.Azure:
+		azInt, err := conf.Repo.AzureIntegration().ReadAzureIntegration(
+			cluster.ProjectID,
+			cluster.AzureIntegrationID,
+		)
+		if err != nil {
+			return nil, err
 		}
+
+		authInfoMap[authInfoName].Token = string(azInt.AKSPassword)
+	default:
+		return nil, errors.New("not a supported auth mechanism")
 	}
 
 	// create a context of the cluster name
@@ -578,24 +571,6 @@ func (conf *OutOfClusterConfig) setTokenCache(token string, expiry time.Time) er
 	return err
 }
 
-func capiCertAuthData(ccpClient porterv1connect.ClusterControlPlaneServiceClient, clusterId, projectId int) ([]byte, error) {
-	req := connect.NewRequest(&porterv1.CertificateAuthorityDataRequest{
-		ProjectId: int64(projectId),
-		ClusterId: int64(clusterId),
-	})
-	cert, err := ccpClient.CertificateAuthorityData(context.Background(), req)
-	if err != nil {
-		return []byte(""), fmt.Errorf("error getting certificate authority data: %w", err)
-	}
-
-	decodedCert, err := b64.DecodeString(cert.Msg.CertificateAuthorityData)
-	if err != nil {
-		return []byte(""), fmt.Errorf("error decoding certificate authority data: %w", err)
-	}
-
-	return decodedCert, nil
-}
-
 // NewRESTClientGetterFromInClusterConfig returns a RESTClientGetter using
 // default values set from the *rest.Config
 func NewRESTClientGetterFromInClusterConfig(conf *rest.Config, namespace string) genericclioptions.RESTClientGetter {