Explorar el Código

adding basic telemetry to cli for porter apply

Feroze Mohideen hace 2 años
padre
commit
2159281a59
Se han modificado 3 ficheros con 132 adiciones y 37 borrados
  1. 37 15
      cli/cmd/apply.go
  2. 85 18
      cli/cmd/stack/apply.go
  3. 10 4
      cli/cmd/stack/pre-deploy.go

+ 37 - 15
cli/cmd/apply.go

@@ -25,6 +25,7 @@ import (
 	previewV2Beta1 "github.com/porter-dev/porter/cli/cmd/preview/v2beta1"
 	stack "github.com/porter-dev/porter/cli/cmd/stack"
 	previewInt "github.com/porter-dev/porter/internal/integrations/preview"
+	"github.com/porter-dev/porter/internal/telemetry"
 	"github.com/porter-dev/porter/internal/templater/utils"
 	"github.com/porter-dev/switchboard/pkg/drivers"
 	switchboardModels "github.com/porter-dev/switchboard/pkg/models"
@@ -109,11 +110,21 @@ func init() {
 }
 
 func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) (err error) {
+	ctx, span := telemetry.NewSpan(context.Background(), "cli-apply")
+	defer span.End()
+
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "project-id", Value: cliConf.Project},
+		telemetry.AttributeKV{Key: "cluster-id", Value: cliConf.Cluster},
+	)
+
 	fileBytes, err := ioutil.ReadFile(porterYAML)
 	if err != nil {
 		stackName := os.Getenv("PORTER_STACK_NAME")
 		if stackName == "" {
-			return fmt.Errorf("a valid porter.yaml file must be specified. Run porter apply --help for more information")
+			err = telemetry.Error(ctx, span, nil, "a valid porter.yaml file must be specified. Run porter apply --help for more information")
+			return err
 		}
 	}
 
@@ -122,11 +133,16 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 	}
 
 	err = yaml.Unmarshal(fileBytes, &previewVersion)
-
 	if err != nil {
-		return fmt.Errorf("error unmarshaling porter.yaml: %w", err)
+		err = telemetry.Error(ctx, span, err, "error unmarshaling porter.yaml")
+		return err
 	}
 
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "version", Value: previewVersion.Version},
+	)
+
 	var resGroup *switchboardTypes.ResourceGroup
 	worker := switchboardWorker.NewWorker()
 
@@ -159,14 +175,17 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 	} else if previewVersion.Version == "v1stack" || previewVersion.Version == "" {
 		stackName := os.Getenv("PORTER_STACK_NAME")
 		if stackName == "" {
-			return fmt.Errorf("environment variable PORTER_STACK_NAME must be set")
+			err = telemetry.Error(ctx, span, nil, "environment variable PORTER_STACK_NAME must be set")
+			return err
 		}
+		telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "application-name", Value: stackName})
 
 		// we need to know the builder so that we can inject launcher to the start command later if heroku builder is used
 		var builder string
-		resGroup, builder, err = stack.CreateV1BuildResources(client, fileBytes, stackName, cliConf.Project, cliConf.Cluster)
+		resGroup, builder, err = stack.CreateV1BuildResources(ctx, client, fileBytes, stackName, cliConf.Project, cliConf.Cluster)
 		if err != nil {
-			return fmt.Errorf("error parsing porter.yaml for build resources: %w", err)
+			err = telemetry.Error(ctx, span, err, "error creating build resources")
+			return err
 		}
 
 		deployStackHook := &stack.DeployStackHook{
@@ -194,7 +213,8 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 
 			repoNameSplit := strings.Split(os.Getenv("GITHUB_REPOSITORY"), "/")
 			if len(repoNameSplit) != 2 {
-				return fmt.Errorf("unable to parse GITHUB_REPOSITORY")
+				err = telemetry.Error(ctx, span, nil, "unable to parse GITHUB_REPOSITORY")
+				return err
 			}
 			req.Metadata["repo"] = repoNameSplit[1]
 
@@ -202,7 +222,8 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 			if actionRunID != "" {
 				arid, err := strconv.Atoi(actionRunID)
 				if err != nil {
-					return fmt.Errorf("unable to parse GITHUB_RUN_ID as int: %w", err)
+					err = telemetry.Error(ctx, span, err, "unable to parse GITHUB_RUN_ID as int")
+					return err
 				}
 				req.Metadata["action_run_id"] = arid
 			}
@@ -211,15 +232,16 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 			if repoOwnerAccountID != "" {
 				arid, err := strconv.Atoi(repoOwnerAccountID)
 				if err != nil {
-					return fmt.Errorf("unable to parse GITHUB_REPOSITORY_OWNER_ID as int: %w", err)
+					err = telemetry.Error(ctx, span, err, "unable to parse GITHUB_REPOSITORY_OWNER_ID as int")
+					return err
 				}
 				req.Metadata["github_account_id"] = arid
 			}
 
-			ctx := context.Background()
 			_, err := client.CreateOrUpdatePorterAppEvent(ctx, cliConf.Project, cliConf.Cluster, stackName, req)
 			if err != nil {
-				return fmt.Errorf("unable to create porter app build event: %w", err)
+				err = telemetry.Error(ctx, span, err, "unable to create porter app build event")
+				return err
 			}
 		}
 	} else {
@@ -228,8 +250,8 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 
 	basePath, err := os.Getwd()
 	if err != nil {
-		err = fmt.Errorf("error getting working directory: %w", err)
-		return
+		err = telemetry.Error(ctx, span, err, "error getting working directory")
+		return err
 	}
 
 	drivers := []struct {
@@ -247,8 +269,8 @@ func apply(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string
 	for _, driver := range drivers {
 		err = worker.RegisterDriver(driver.name, driver.funcName)
 		if err != nil {
-			err = fmt.Errorf("error registering driver %s: %w", driver.name, err)
-			return
+			err = telemetry.Error(ctx, span, err, "error registering driver")
+			return err
 		}
 	}
 

+ 85 - 18
cli/cmd/stack/apply.go

@@ -9,7 +9,9 @@ import (
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/api/types"
 	"github.com/porter-dev/porter/cli/cmd/config"
+	"github.com/porter-dev/porter/internal/telemetry"
 	switchboardTypes "github.com/porter-dev/switchboard/pkg/types"
+	"go.opentelemetry.io/otel/trace"
 	"gopkg.in/yaml.v2"
 )
 
@@ -21,7 +23,10 @@ type StackConf struct {
 	projectID, clusterID uint
 }
 
-func CreateV1BuildResources(client *api.Client, raw []byte, stackName string, projectID uint, clusterID uint) (*switchboardTypes.ResourceGroup, string, error) {
+func CreateV1BuildResources(ctx context.Context, client *api.Client, raw []byte, stackName string, projectID uint, clusterID uint) (*switchboardTypes.ResourceGroup, string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-v1-build-resources")
+	defer span.End()
+
 	v1File := &switchboardTypes.ResourceGroup{
 		Version: "v1",
 		Resources: []*switchboardTypes.Resource{
@@ -33,33 +38,45 @@ func CreateV1BuildResources(client *api.Client, raw []byte, stackName string, pr
 	}
 	var builder string
 
-	stackConf, err := createStackConf(client, raw, stackName, projectID, clusterID)
+	stackConf, err := createStackConf(ctx, span, client, raw, stackName, projectID, clusterID)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating stack config")
 		return nil, "", err
 	}
 
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "application-name", Value: stackConf.stackName},
+		telemetry.AttributeKV{Key: "project-id", Value: stackConf.projectID},
+		telemetry.AttributeKV{Key: "cluster-id", Value: stackConf.clusterID},
+	)
+
 	var bi, pi *switchboardTypes.Resource
 
 	if stackConf.parsed.Build != nil {
-		bi, pi, builder, err = createV1BuildResourcesFromPorterYaml(stackConf)
+		bi, pi, builder, err = createV1BuildResourcesFromPorterYaml(ctx, stackConf)
 		if err != nil {
 			color.New(color.FgRed).Printf("Could not build using values specified in porter.yaml (%s), attempting to load stack build settings instead \n", err.Error())
-			bi, pi, builder, err = createV1BuildResourcesFromDB(client, stackConf)
+			bi, pi, builder, err = createV1BuildResourcesFromDB(ctx, client, stackConf)
 			if err != nil {
+				err = telemetry.Error(ctx, span, err, "error creating build resources")
 				return nil, "", err
 			}
 		}
 	} else {
 		color.New(color.FgYellow).Printf("No build values specified in porter.yaml, attempting to load stack build settings instead \n")
-		bi, pi, builder, err = createV1BuildResourcesFromDB(client, stackConf)
+		bi, pi, builder, err = createV1BuildResourcesFromDB(ctx, client, stackConf)
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error creating build resources")
 			return nil, "", err
 		}
 	}
 
 	v1File.Resources = append(v1File.Resources, bi, pi)
 
-	release, cmd, err := createReleaseResource(client,
+	preDeploy, cmd, err := maybeCreatePreDeployResource(
+		ctx,
+		client,
 		stackConf.parsed.Release,
 		stackConf.stackName,
 		bi.Name,
@@ -69,20 +86,28 @@ func CreateV1BuildResources(client *api.Client, raw []byte, stackName string, pr
 		stackConf.parsed.Env,
 	)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating pre-deploy resource")
 		return nil, "", err
 	}
 
-	if release != nil {
-		color.New(color.FgYellow).Printf("Found release command to run before deploying apps: %s \n", cmd)
-		v1File.Resources = append(v1File.Resources, release)
+	if preDeploy != nil {
+		telemetry.WithAttributes(
+			span,
+			telemetry.AttributeKV{Key: "pre-deploy-resource-name", Value: preDeploy.Name},
+			telemetry.AttributeKV{Key: "pre-deploy-resource-driver", Value: preDeploy.Driver},
+			telemetry.AttributeKV{Key: "pre-deploy-resource-source", Value: preDeploy.Source},
+			telemetry.AttributeKV{Key: "pre-deploy-resource-target", Value: preDeploy.Target},
+		)
+		color.New(color.FgYellow).Printf("Found pre-deploy command to run before deploying apps: %s \n", cmd)
+		v1File.Resources = append(v1File.Resources, preDeploy)
 	} else {
-		color.New(color.FgYellow).Printf("No release command found in porter.yaml or helm. \n")
+		color.New(color.FgYellow).Printf("No pre-deploy command found in porter.yaml or helm. \n")
 	}
 
 	return v1File, builder, nil
 }
 
-func createStackConf(client *api.Client, raw []byte, stackName string, projectID uint, clusterID uint) (*StackConf, error) {
+func createStackConf(ctx context.Context, span trace.Span, client *api.Client, raw []byte, stackName string, projectID uint, clusterID uint) (*StackConf, error) {
 	var parsed *PorterStackYAML
 	if raw == nil {
 		parsed = createDefaultPorterYaml()
@@ -90,6 +115,7 @@ func createStackConf(client *api.Client, raw []byte, stackName string, projectID
 		parsed = &PorterStackYAML{}
 		err := yaml.Unmarshal(raw, parsed)
 		if err != nil {
+			err = telemetry.Error(ctx, span, err, "error parsing porter.yaml")
 			errMsg := composePreviewMessage("error parsing porter.yaml", Error)
 			return nil, fmt.Errorf("%s: %w", errMsg, err)
 		}
@@ -97,11 +123,12 @@ func createStackConf(client *api.Client, raw []byte, stackName string, projectID
 
 	err := config.ValidateCLIEnvironment()
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "porter CLI is not configured correctly")
 		errMsg := composePreviewMessage("porter CLI is not configured correctly", Error)
 		return nil, fmt.Errorf("%s: %w", errMsg, err)
 	}
 
-	releaseEnvVars := getEnvFromRelease(client, stackName, projectID, clusterID)
+	releaseEnvVars := getEnvFromRelease(ctx, client, stackName, projectID, clusterID)
 	if releaseEnvVars != nil {
 		color.New(color.FgYellow).Printf("Reading build env from release\n")
 		parsed.Env = mergeStringMaps(parsed.Env, releaseEnvVars)
@@ -118,41 +145,78 @@ func createStackConf(client *api.Client, raw []byte, stackName string, projectID
 	}, nil
 }
 
-func createV1BuildResourcesFromPorterYaml(stackConf *StackConf) (*switchboardTypes.Resource, *switchboardTypes.Resource, string, error) {
+func createV1BuildResourcesFromPorterYaml(ctx context.Context, stackConf *StackConf) (*switchboardTypes.Resource, *switchboardTypes.Resource, string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-v1-build-resources-from-porter-yaml")
+	defer span.End()
+
 	bi, err := stackConf.parsed.Build.getV1BuildImage(stackConf.parsed.Env, stackConf.namespace)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating build resource")
 		return nil, nil, "", err
 	}
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "build-resource-name", Value: bi.Name},
+		telemetry.AttributeKV{Key: "build-resource-driver", Value: bi.Driver},
+		telemetry.AttributeKV{Key: "build-resource-source", Value: bi.Source},
+		telemetry.AttributeKV{Key: "build-resource-target", Value: bi.Target},
+	)
 
 	pi, err := stackConf.parsed.Build.getV1PushImage(stackConf.namespace)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating push resource")
 		return nil, nil, "", err
 	}
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "push-resource-name", Value: pi.Name},
+		telemetry.AttributeKV{Key: "push-resource-driver", Value: pi.Driver},
+		telemetry.AttributeKV{Key: "push-resource-source", Value: pi.Source},
+		telemetry.AttributeKV{Key: "push-resource-target", Value: pi.Target},
+	)
 
 	return bi, pi, stackConf.parsed.Build.GetBuilder(), nil
 }
 
-func createV1BuildResourcesFromDB(client *api.Client, stackConf *StackConf) (*switchboardTypes.Resource, *switchboardTypes.Resource, string, error) {
+func createV1BuildResourcesFromDB(ctx context.Context, client *api.Client, stackConf *StackConf) (*switchboardTypes.Resource, *switchboardTypes.Resource, string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "create-v1-build-resources-from-db")
+	defer span.End()
+
-	res, err := client.GetPorterApp(context.Background(), stackConf.projectID, stackConf.clusterID, stackConf.stackName)
+	res, err := client.GetPorterApp(ctx, stackConf.projectID, stackConf.clusterID, stackConf.stackName)
 	if err != nil {
-		return nil, nil, "", fmt.Errorf("unable to read build info from DB: %w", err)
+		err = telemetry.Error(ctx, span, err, "error reading build info from DB")
+		return nil, nil, "", err
 	}
 
 	if res == nil {
-		return nil, nil, "", fmt.Errorf("stack %s not found", stackConf.stackName)
+		err = telemetry.Error(ctx, span, nil, "stack not found")
+		return nil, nil, "", err
 	}
 
 	build := convertToBuild(res)
 
 	bi, err := build.getV1BuildImage(stackConf.parsed.Env, stackConf.namespace)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating build resource")
 		return nil, nil, "", err
 	}
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "build-resource-name", Value: bi.Name},
+		telemetry.AttributeKV{Key: "build-resource-driver", Value: bi.Driver},
+		telemetry.AttributeKV{Key: "build-resource-source", Value: bi.Source},
+		telemetry.AttributeKV{Key: "build-resource-target", Value: bi.Target},
+	)
 
 	pi, err := build.getV1PushImage(stackConf.namespace)
 	if err != nil {
+		err = telemetry.Error(ctx, span, err, "error creating push resource")
 		return nil, nil, "", err
 	}
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "push-resource-name", Value: pi.Name},
+		telemetry.AttributeKV{Key: "push-resource-driver", Value: pi.Driver},
+		telemetry.AttributeKV{Key: "push-resource-source", Value: pi.Source},
+		telemetry.AttributeKV{Key: "push-resource-target", Value: pi.Target},
+	)
 
 	return bi, pi, build.GetBuilder(), nil
 }
@@ -217,11 +281,14 @@ func createDefaultPorterYaml() *PorterStackYAML {
 	}
 }
 
-func getEnvFromRelease(client *api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
+func getEnvFromRelease(ctx context.Context, client *api.Client, stackName string, projectID uint, clusterID uint) map[string]string {
+	ctx, span := telemetry.NewSpan(ctx, "get-env-from-release")
+	defer span.End()
+
 	var envVarsStringMap map[string]string
 	namespace := fmt.Sprintf("porter-stack-%s", stackName)
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		projectID,
 		clusterID,
 		namespace,

+ 10 - 4
cli/cmd/stack/release.go → cli/cmd/stack/pre-deploy.go

@@ -8,21 +8,27 @@ import (
 	api "github.com/porter-dev/porter/api/client"
 	"github.com/porter-dev/porter/cli/cmd/deploy"
 	"github.com/porter-dev/porter/internal/integrations/preview"
+	"github.com/porter-dev/porter/internal/telemetry"
 
 	switchboardTypes "github.com/porter-dev/switchboard/pkg/types"
 )
 
-func createReleaseResource(client *api.Client, release *App, stackName, buildResourceName, pushResourceName string, projectID, clusterID uint, env map[string]string) (*switchboardTypes.Resource, string, error) {
+func maybeCreatePreDeployResource(ctx context.Context, client *api.Client, release *App, stackName, buildResourceName, pushResourceName string, projectID, clusterID uint, env map[string]string) (*switchboardTypes.Resource, string, error) {
+	ctx, span := telemetry.NewSpan(ctx, "maybe-create-pre-deploy-resource")
+	defer span.End()
+
 	var finalCmd string
 	if release != nil && release.Run != nil {
 		finalCmd = *release.Run
 	} else {
-		finalCmd = getReleaseCommandFromRelease(client, stackName, projectID, clusterID)
+		finalCmd = getPreDeployCommandFromRelease(ctx, client, stackName, projectID, clusterID)
 		if finalCmd == "" {
 			return nil, "", nil
 		}
 	}
 
+	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "pre-deploy-cmd", Value: finalCmd})
+
 	config := &preview.ApplicationConfig{}
 
 	config.Build.Method = "registry"
@@ -64,11 +70,11 @@ func createReleaseResource(client *api.Client, release *App, stackName, buildRes
 	}, finalCmd, nil
 }
 
-func getReleaseCommandFromRelease(client *api.Client, stackName string, projectID uint, clusterID uint) string {
+func getPreDeployCommandFromRelease(ctx context.Context, client *api.Client, stackName string, projectID uint, clusterID uint) string {
 	namespace := fmt.Sprintf("porter-stack-%s", stackName)
 	releaseName := fmt.Sprintf("%s-r", stackName)
 	release, err := client.GetRelease(
-		context.Background(),
+		ctx,
 		projectID,
 		clusterID,
 		namespace,