View source code

Report docker build logs on failure (#3753)

Feroze Mohideen — 2 years ago
parent
commit
f9d95cc88a

+ 8 - 3
api/server/handlers/porter_app/analytics.go

@@ -125,13 +125,17 @@ func TrackStackBuildStatus(
 	errorMessage string,
 	status types.PorterAppEventStatus,
 	validateApplyV2 bool,
+	b64BuildLogs string,
 ) error {
 	_, span := telemetry.NewSpan(ctx, "track-build-status")
 	defer span.End()
 
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-build-status", Value: string(status)})
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-name", Value: stackName})
-	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-error-message", Value: errorMessage})
+	telemetry.WithAttributes(
+		span,
+		telemetry.AttributeKV{Key: "porter-app-build-status", Value: string(status)},
+		telemetry.AttributeKV{Key: "porter-app-name", Value: stackName},
+		telemetry.AttributeKV{Key: "porter-app-error-message", Value: errorMessage},
+	)
 
 	if status == types.PorterAppEventStatus_Progressing {
 		return config.AnalyticsClient.Track(analytics.StackBuildProgressingTrack(&analytics.StackBuildOpts{
@@ -162,6 +166,7 @@ func TrackStackBuildStatus(
 			ProjectScopedTrackOpts: analytics.GetProjectScopedTrackOpts(user.ID, project.ID),
 			StackName:              stackName,
 			ErrorMessage:           errorMessage,
+			B64BuildLogs:           b64BuildLogs,
 			Email:                  user.Email,
 			FirstName:              user.FirstName,
 			LastName:               user.LastName,

+ 7 - 2
api/server/handlers/porter_app/create_and_update_events.go

@@ -96,12 +96,17 @@ func reportBuildStatus(ctx context.Context, request *types.CreateOrUpdatePorterA
 	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "porter-app-build-status", Value: string(request.Status)})
 
 	var errStr string
+	var buildLogs string
 	if errors, ok := request.Metadata["errors"]; ok {
 		if errs, ok := errors.(map[string]interface{}); ok {
 			errStringMap := make(map[string]string)
 			for k, v := range errs {
 				if valueStr, ok := v.(string); ok {
-					errStringMap[k] = valueStr
+					if k == "b64-build-logs" {
+						buildLogs = valueStr
+					} else {
+						errStringMap[k] = valueStr
+					}
 				}
 			}
 
@@ -114,7 +119,7 @@ func reportBuildStatus(ctx context.Context, request *types.CreateOrUpdatePorterA
 		}
 	}
 
-	_ = TrackStackBuildStatus(ctx, config, user, project, stackName, errStr, request.Status, validateApplyV2)
+	_ = TrackStackBuildStatus(ctx, config, user, project, stackName, errStr, request.Status, validateApplyV2, buildLogs)
 }
 
 // createNewAppEvent will create a new app event for the given porter app name. If the app event is an agent event, then it will be created only if there is no existing event which has the agent ID. In the case that an existing event is found, that will be returned instead

+ 2 - 2
api/server/handlers/porter_app/list_events.go

@@ -223,10 +223,10 @@ func (p *PorterAppEventListHandler) updateBuildEvent_Github(
 	if *actionRun.Status == "completed" {
 		if *actionRun.Conclusion == "success" {
 			event.Status = string(types.PorterAppEventStatus_Success)
-			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Success, validateApplyV2)
+			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Success, validateApplyV2, "")
 		} else {
 			event.Status = string(types.PorterAppEventStatus_Failed)
-			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Failed, validateApplyV2)
+			_ = TrackStackBuildStatus(ctx, p.Config(), user, project, stackName, "", types.PorterAppEventStatus_Failed, validateApplyV2, "")
 		}
 		event.Metadata["end_time"] = actionRun.GetUpdatedAt().Time
 	}

+ 8 - 1
cli/cmd/docker/builder.go

@@ -30,6 +30,8 @@ type BuildOpts struct {
 	UseCache          bool
 
 	Env map[string]string
+
+	LogFile *os.File
 }
 
 // BuildLocal
@@ -57,6 +59,11 @@ func (a *Agent) BuildLocal(ctx context.Context, opts *BuildOpts) (err error) {
 		return err
 	}
 
+	var writer io.Writer = os.Stderr
+	if opts.LogFile != nil {
+		writer = io.MultiWriter(os.Stderr, opts.LogFile)
+	}
+
 	if !opts.IsDockerfileInCtx {
 		dockerfileCtx, err := os.Open(dockerfilePath)
 		if err != nil {
@@ -104,7 +111,7 @@ func (a *Agent) BuildLocal(ctx context.Context, opts *BuildOpts) (err error) {
 
 	termFd, isTerm := term.GetFdInfo(os.Stderr)
 
-	return jsonmessage.DisplayJSONMessagesStream(out.Body, os.Stderr, termFd, isTerm, nil)
+	return jsonmessage.DisplayJSONMessagesStream(out.Body, writer, termFd, isTerm, nil)
 }
 
 func trimBuildFilesFromExcludes(excludes []string, dockerfile string) []string {

+ 22 - 17
cli/cmd/v2/apply.go

@@ -176,41 +176,41 @@ func Apply(ctx context.Context, inp ApplyInput) error {
 
 		if commitSHA == "" {
 			err := errors.New("Build is required but commit SHA cannot be identified. Please set the PORTER_COMMIT_SHA environment variable or run apply in git repository with access to the git CLI.")
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
 		buildSettings, err := buildSettingsFromBase64AppProto(base64AppProto)
 		if err != nil {
 			err := fmt.Errorf("error getting build settings from base64 app proto: %w", err)
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
 		currentAppRevisionResp, err := client.CurrentAppRevision(ctx, cliConf.Project, cliConf.Cluster, appName, deploymentTargetID)
 		if err != nil {
 			err := fmt.Errorf("error getting current app revision: %w", err)
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
 		if currentAppRevisionResp == nil {
 			err := errors.New("current app revision is nil")
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
 		appRevision := currentAppRevisionResp.AppRevision
 		if appRevision.B64AppProto == "" {
 			err := errors.New("current app revision b64 app proto is empty")
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
 		currentImageTag, err := imageTagFromBase64AppProto(appRevision.B64AppProto)
 		if err != nil {
 			err := fmt.Errorf("error getting image tag from current app revision: %w", err)
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 
@@ -220,15 +220,15 @@ func Apply(ctx context.Context, inp ApplyInput) error {
 		buildEnv, err := client.GetBuildEnv(ctx, cliConf.Project, cliConf.Cluster, appName, appRevision.ID)
 		if err != nil {
 			err := fmt.Errorf("error getting build env: %w", err)
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, "")
 			return err
 		}
 		buildSettings.Env = buildEnv.BuildEnvVariables
 
-		err = build(ctx, client, buildSettings)
-		if err != nil {
-			err := fmt.Errorf("error building app: %w", err)
-			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err)
+		buildOutput := build(ctx, client, buildSettings)
+		if buildOutput.Error != nil {
+			err := fmt.Errorf("error building app: %w", buildOutput.Error)
+			_ = reportBuildFailure(ctx, client, appName, cliConf, deploymentTargetID, applyResp.AppRevisionId, eventID, err, buildOutput.Logs)
 			return err
 		}
 
@@ -509,20 +509,25 @@ func updateEnvGroupsInProto(ctx context.Context, base64AppProto string, envGroup
 	return editedB64AppProto, nil
 }
 
-func reportBuildFailure(ctx context.Context, client api.Client, appName string, cliConf config.CLIConfig, deploymentTargetID string, appRevisionID string, eventID string, buildError error) error {
+func reportBuildFailure(ctx context.Context, client api.Client, appName string, cliConf config.CLIConfig, deploymentTargetID string, appRevisionID string, eventID string, buildError error, buildLogs string) error {
+	_, err := client.UpdateRevisionStatus(ctx, cliConf.Project, cliConf.Cluster, appName, appRevisionID, models.AppRevisionStatus_BuildFailed)
+	if err != nil {
+		return err
+	}
+
 	buildMetadata := make(map[string]interface{})
 	buildMetadata["end_time"] = time.Now().UTC()
 
 	// the below is a temporary solution until we can report build errors via telemetry from the CLI
 	errorStringMap := make(map[string]string)
 	errorStringMap["build-error"] = fmt.Sprintf("%+v", buildError)
+	b64BuildLogs := base64.StdEncoding.EncodeToString([]byte(buildLogs))
+	// the key name below must be kept the same so that reportBuildStatus in the CreateOrUpdatePorterAppEvent handler reports logs correctly
+	errorStringMap["b64-build-logs"] = b64BuildLogs
+
 	buildMetadata["errors"] = errorStringMap
 
-	err := updateExistingEvent(ctx, client, appName, cliConf.Project, cliConf.Cluster, deploymentTargetID, types.PorterAppEventType_Build, eventID, types.PorterAppEventStatus_Failed, buildMetadata)
-	if err != nil {
-		return err
-	}
-	_, err = client.UpdateRevisionStatus(ctx, cliConf.Project, cliConf.Cluster, appName, appRevisionID, models.AppRevisionStatus_BuildFailed)
+	err = updateExistingEvent(ctx, client, appName, cliConf.Project, cliConf.Cluster, deploymentTargetID, types.PorterAppEventType_Build, eventID, types.PorterAppEventStatus_Failed, buildMetadata)
 	if err != nil {
 		return err
 	}

+ 47 - 13
cli/cmd/v2/build.go

@@ -19,6 +19,7 @@ import (
 const (
 	buildMethodPack   = "pack"
 	buildMethodDocker = "docker"
+	buildLogFilename  = "PORTER_BUILD_LOGS"
 )
 
 // buildInput is the input struct for the build method
@@ -41,38 +42,51 @@ type buildInput struct {
 	Env map[string]string
 }
 
+type buildOutput struct {
+	Error error
+	Logs  string
+}
+
 // build will create an image repository if it does not exist, and then build and push the image
-func build(ctx context.Context, client api.Client, inp buildInput) error {
+func build(ctx context.Context, client api.Client, inp buildInput) buildOutput {
+	output := buildOutput{}
+
 	if inp.ProjectID == 0 {
-		return errors.New("must specify a project id")
+		output.Error = errors.New("must specify a project id")
+		return output
 	}
 	projectID := inp.ProjectID
 
 	if inp.ImageTag == "" {
-		return errors.New("must specify an image tag")
+		output.Error = errors.New("must specify an image tag")
+		return output
 	}
 	tag := inp.ImageTag
 
 	if inp.RepositoryURL == "" {
-		return errors.New("must specify a registry url")
+		output.Error = errors.New("must specify a registry url")
+		return output
 	}
 	imageURL := strings.TrimPrefix(inp.RepositoryURL, "https://")
 
 	err := createImageRepositoryIfNotExists(ctx, client, projectID, imageURL)
 	if err != nil {
-		return fmt.Errorf("error creating image repository: %w", err)
+		output.Error = fmt.Errorf("error creating image repository: %w", err)
+		return output
 	}
 
 	dockerAgent, err := docker.NewAgentWithAuthGetter(ctx, client, projectID)
 	if err != nil {
-		return fmt.Errorf("error getting docker agent: %w", err)
+		output.Error = fmt.Errorf("error getting docker agent: %w", err)
+		return output
 	}
 
 	switch inp.BuildMethod {
 	case buildMethodDocker:
 		basePath, err := filepath.Abs(".")
 		if err != nil {
-			return fmt.Errorf("error getting absolute path: %w", err)
+			output.Error = fmt.Errorf("error getting absolute path: %w", err)
+			return output
 		}
 
 		buildCtx, dockerfilePath, isDockerfileInCtx, err := resolveDockerPaths(
@@ -81,9 +95,14 @@ func build(ctx context.Context, client api.Client, inp buildInput) error {
 			inp.Dockerfile,
 		)
 		if err != nil {
-			return fmt.Errorf("error resolving docker paths: %w", err)
+			output.Error = fmt.Errorf("error resolving docker paths: %w", err)
+			return output
 		}
 
+		// create a temp file which build logs will be written to
+		// the temp file is not removed on process exit; it lives in the runner's temp dir, which is discarded when the GHA completes, so no manual cleanup is needed
+		logFile, _ := os.CreateTemp("", buildLogFilename)
+
 		opts := &docker.BuildOpts{
 			ImageRepo:         inp.RepositoryURL,
 			Tag:               tag,
@@ -92,6 +111,7 @@ func build(ctx context.Context, client api.Client, inp buildInput) error {
 			DockerfilePath:    dockerfilePath,
 			IsDockerfileInCtx: isDockerfileInCtx,
 			Env:               inp.Env,
+			LogFile:           logFile,
 		}
 
 		err = dockerAgent.BuildLocal(
@@ -99,7 +119,18 @@ func build(ctx context.Context, client api.Client, inp buildInput) error {
 			opts,
 		)
 		if err != nil {
-			return fmt.Errorf("error building image with docker: %w", err)
+			output.Error = fmt.Errorf("error building image with docker: %w", err)
+			logString := "Error reading contents of build log file"
+
+			if logFile != nil {
+				content, err := os.ReadFile(logFile.Name())
+				// only continue if we can read the file. if we cannot, logString will be the default
+				if err == nil {
+					logString = string(content)
+				}
+			}
+			output.Logs = logString
+			return output
 		}
 	case buildMethodPack:
 		packAgent := &pack.Agent{}
@@ -118,18 +149,21 @@ func build(ctx context.Context, client api.Client, inp buildInput) error {
 
 		err := packAgent.Build(ctx, opts, buildConfig, "")
 		if err != nil {
-			return fmt.Errorf("error building image with pack: %w", err)
+			output.Error = fmt.Errorf("error building image with pack: %w", err)
+			return output
 		}
 	default:
-		return fmt.Errorf("invalid build method: %s", inp.BuildMethod)
+		output.Error = fmt.Errorf("invalid build method: %s", inp.BuildMethod)
+		return output
 	}
 
 	err = dockerAgent.PushImage(ctx, fmt.Sprintf("%s:%s", imageURL, tag))
 	if err != nil {
-		return fmt.Errorf("error pushing image url: %w\n", err)
+		output.Error = fmt.Errorf("error pushing image: %w", err)
+		return output
 	}
 
-	return nil
+	return output
 }
 
 func createImageRepositoryIfNotExists(ctx context.Context, client api.Client, projectID uint, imageURL string) error {

+ 2 - 0
internal/analytics/tracks.go

@@ -922,6 +922,7 @@ type StackBuildOpts struct {
 
 	StackName       string
 	ErrorMessage    string
+	B64BuildLogs    string
 	Email           string
 	FirstName       string
 	LastName        string
@@ -938,6 +939,7 @@ func StackBuildFailureTrack(opts *StackBuildOpts) segmentTrack {
 	additionalProps["name"] = opts.FirstName + " " + opts.LastName
 	additionalProps["company"] = opts.CompanyName
 	additionalProps["validate_apply_v2"] = opts.ValidateApplyV2
+	additionalProps["b64_build_logs"] = opts.B64BuildLogs
 
 	return getSegmentProjectTrack(
 		opts.ProjectScopedTrackOpts,