Browse Source

Release patching (#3070)

* patching missing contexts

* adding go vet to pr
Stefan McShane 3 years ago
parent
commit
6b7804dbb0

+ 2 - 0
.github/workflows/test-backend.yml

@@ -24,5 +24,7 @@ jobs:
         with:
           go-version-file: go.mod
           cache: false
+      - name: Run Go vet
+        run: go vet ./${{ matrix.folder }}/...
       - name: Run Go tests
         run: go test ./${{ matrix.folder }}/...
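
The new step runs go vet over each module folder before the tests, so PRs fail fast on the static issues vet's default analyzers catch. As an illustrative sketch (not code from this repository), the printf analyzer flags a format verb that disagrees with its argument:

	package main

	import "fmt"

	func main() {
		count := 3
		// go vet reports: Printf format %s has arg of wrong type int
		fmt.Printf("found %s releases\n", count)
	}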

+ 3 - 2
api/server/handlers/project_integration/list_gitlab.go

@@ -40,10 +40,11 @@ func (p *ListGitlabHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	for _, gitlabInt := range gitlabInts {
 		username := p.getCurrentUsername(user.ID, project.ID, gitlabInt)
+		glit := gitlabInt.ToGitlabIntegrationType()
 		res = append(res,
 			&types.GitlabIntegrationWithUsername{
-				*gitlabInt.ToGitlabIntegrationType(),
-				username,
+				Username:          username,
+				GitlabIntegration: *glit,
 			},
 		)
 	}
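
The literal now names its fields, which matters for the new vet step: among go vet's default analyzers, the composites check reports unkeyed composite literals of struct types imported from another package. A minimal, self-contained sketch with simplified stand-ins for the real types (the field layout here is hypothetical):

	package main

	import "fmt"

	type GitlabIntegration struct{ InstanceURL string }

	type GitlabIntegrationWithUsername struct {
		GitlabIntegration // embedded; keyed by its type name below
		Username          string
	}

	func main() {
		glit := GitlabIntegration{InstanceURL: "https://gitlab.example.com"}
		// Keyed literal: vet-clean and robust to field reordering.
		res := &GitlabIntegrationWithUsername{
			GitlabIntegration: glit,
			Username:          "someuser",
		}
		fmt.Println(res.Username, res.InstanceURL)
	}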

+ 3 - 2
internal/worker/dispatcher.go

@@ -1,6 +1,7 @@
 package worker
 
 import (
+	"context"
 	"log"
 
 	"github.com/google/uuid"
@@ -29,7 +30,7 @@ func NewDispatcher(maxWorkers int) *Dispatcher {
 
 // Run creates workers in the worker pool with the given
 // job queue and starts the workers
-func (d *Dispatcher) Run(jobQueue chan Job) error {
+func (d *Dispatcher) Run(ctx context.Context, jobQueue chan Job) error {
 	go func() {
 		var workers []*Worker
 
@@ -46,7 +47,7 @@ func (d *Dispatcher) Run(jobQueue chan Job) error {
 
 			log.Printf("starting worker with UUID: %v", uuid)
 
-			worker.Start()
+			worker.Start(ctx)
 		}
 
 		for {
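
Run now takes the caller's context and hands it to every worker it starts. A minimal usage sketch (what goes on the job queue is up to the caller):

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	jobQueue := make(chan Job)
	d := NewDispatcher(10)
	if err := d.Run(ctx, jobQueue); err != nil {
		log.Fatalln(err)
	}
	// Cancelling ctx is now observable from every job the workers execute.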

+ 3 - 2
internal/worker/dispatcher_test.go

@@ -1,6 +1,7 @@
 package worker
 
 import (
+	"context"
 	"testing"
 
 	"go.uber.org/goleak"
@@ -8,11 +9,11 @@ import (
 
 func TestDispatcher(t *testing.T) {
 	defer goleak.VerifyNone(t)
-
+	ctx := context.Background()
 	jobChan := make(chan Job)
 
 	d := NewDispatcher(10)
-	err := d.Run(jobChan)
+	err := d.Run(ctx, jobChan)
 	if err != nil {
 		panic(err)
 	}

+ 4 - 3
internal/worker/worker.go

@@ -1,6 +1,7 @@
 package worker
 
 import (
+	"context"
 	"log"
 	"time"
 
@@ -17,7 +18,7 @@ type Job interface {
 	EnqueueTime() time.Time
 
 	// The main logic and control of a job
-	Run() error
+	Run(ctx context.Context) error
 
 	// To set external data if a job needs it
 	SetData([]byte)
@@ -46,7 +47,7 @@ func NewWorker(uuid uuid.UUID, workerPool chan chan Job) *Worker {
 
 // Start spawns a goroutine to add itself to the global worker pool
 // and listens for incoming jobs as they come, in random order
-func (w *Worker) Start() {
+func (w *Worker) Start(ctx context.Context) {
 	go func() {
 		for {
 			w.WorkerPool <- w.JobChannel
@@ -55,7 +56,7 @@ func (w *Worker) Start() {
 			case job := <-w.JobChannel:
 				log.Printf("attempting to run job ID '%s' via worker '%s'", job.ID(), w.uuid.String())
 
-				if err := job.Run(); err != nil {
+				if err := job.Run(ctx); err != nil {
 					log.Printf("error running job %s: %s", job.ID(), err.Error())
 				}
 			case <-w.exitChan:
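
With a context on the Job interface, implementations can stop early when the caller shuts down. A hypothetical implementation for illustration (the interface's ID method is assumed to return a string, matching its use in the log line above):

	type noopJob struct {
		id       string
		enqueued time.Time
	}

	func (j *noopJob) ID() string             { return j.id }
	func (j *noopJob) EnqueueTime() time.Time { return j.enqueued }
	func (j *noopJob) SetData([]byte)         {}

	func (j *noopJob) Run(ctx context.Context) error {
		// Bail out instead of starting work once the context is cancelled.
		if err := ctx.Err(); err != nil {
			return err
		}
		log.Printf("job %s did nothing, successfully", j.id)
		return nil
	}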

+ 3 - 1
internal/worker/worker_test.go

@@ -1,6 +1,7 @@
 package worker
 
 import (
+	"context"
 	"testing"
 
 	"github.com/google/uuid"
@@ -9,6 +10,7 @@ import (
 
 func TestWorker(t *testing.T) {
 	defer goleak.VerifyNone(t)
+	ctx := context.Background()
 
 	uuid, err := uuid.NewUUID()
 	if err != nil {
@@ -19,6 +21,6 @@ func TestWorker(t *testing.T) {
 
 	w := NewWorker(uuid, workerPool)
 
-	w.Start()
+	w.Start(ctx)
 	w.Stop()
 }

+ 6 - 4
workers/jobs/helm_revisions_count_tracker.go

@@ -20,6 +20,7 @@ backed up to an S3 bucket.
 package jobs
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"log"
@@ -75,6 +76,7 @@ type HelmRevisionsCountTrackerOpts struct {
 }
 
 func NewHelmRevisionsCountTracker(
+	ctx context.Context,
 	db *gorm.DB,
 	enqueueTime time.Time,
 	opts *HelmRevisionsCountTrackerOpts,
@@ -124,7 +126,7 @@ func (t *helmRevisionsCountTracker) EnqueueTime() time.Time {
 	return t.enqueueTime
 }
 
-func (t *helmRevisionsCountTracker) Run() error {
+func (t *helmRevisionsCountTracker) Run(ctx context.Context) error {
 	var count int64
 
 	if err := t.db.Model(&models.Cluster{}).Count(&count).Error; err != nil {
@@ -200,7 +202,7 @@ func (t *helmRevisionsCountTracker) Run() error {
 						continue
 					}
 
-					releases, err := agent.ListReleases(ns.GetName(), &types.ReleaseListFilter{
+					releases, err := agent.ListReleases(ctx, ns.GetName(), &types.ReleaseListFilter{
 						ByDate: true,
 						StatusFilter: []string{
 							"deployed",
@@ -220,7 +222,7 @@ func (t *helmRevisionsCountTracker) Run() error {
 					log.Printf("fetched %d releases for namespace %s in cluster ID %d", len(releases), ns.Name, cluster.ID)
 
 					for _, rel := range releases {
-						revisions, err := agent.GetReleaseHistory(rel.Name)
+						revisions, err := agent.GetReleaseHistory(ctx, rel.Name)
 						if err != nil {
 							log.Printf("error fetching release history for release %s in namespace %s of cluster ID %d: %v."+
 								" skipping release ...", rel.Name, ns.Name, cluster.ID, err)
@@ -263,7 +265,7 @@ func (t *helmRevisionsCountTracker) Run() error {
 							log.Printf("revision %d of release %s in namespace %s of cluster ID %d was successfully backed up.",
 								rev.Version, rel.Name, ns.Name, cluster.ID)
 
-							err = agent.DeleteReleaseRevision(rev.Name, rev.Version)
+							err = agent.DeleteReleaseRevision(ctx, rev.Name, rev.Version)
 
 							if err != nil {
 								log.Printf("error deleting revision %d of release %s in namespace %s of cluster ID %d: %v",

+ 2 - 1
workers/jobs/preview_deployments_ttl_deleter.go

@@ -3,6 +3,7 @@
 package jobs
 
 import (
+	"context"
 	"log"
 	"sync"
 	"time"
@@ -92,7 +93,7 @@ func (n *previewDeploymentsTTLDeleter) EnqueueTime() time.Time {
 	return n.enqueueTime
 }
 
-func (n *previewDeploymentsTTLDeleter) Run() error {
+func (n *previewDeploymentsTTLDeleter) Run(ctx context.Context) error {
 	if n.previewDeploymentsTTL == "" {
 		log.Println("no TTL set for preview deployments, skipping job altogether")
 		return nil

+ 2 - 1
workers/jobs/recommender.go

@@ -11,6 +11,7 @@ This job checks to see if a cluster matches policies set by the OPA config file.
 package jobs
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"log"
@@ -184,7 +185,7 @@ func (n *recommender) EnqueueTime() time.Time {
 	return n.enqueueTime
 }
 
-func (n *recommender) Run() error {
+func (n *recommender) Run(ctx context.Context) error {
 	for _, ids := range n.clusterAndProjectIDs {
 		fmt.Println(ids.projectID, ids.clusterID)
 

+ 8 - 6
workers/main.go

@@ -75,6 +75,8 @@ type EnvConf struct {
 }
 
 func main() {
+	ctx := context.Background()
+
 	if err := envdecode.StrictDecode(&envDecoder); err != nil {
 		log.Fatalf("Failed to decode server conf: %v", err)
 	}
@@ -120,13 +122,13 @@ func main() {
 
 	log.Println("starting worker dispatcher")
 
-	err = d.Run(jobQueue)
+	err = d.Run(ctx, jobQueue)
 
 	if err != nil {
 		log.Fatalln(err)
 	}
 
-	server := &http.Server{Addr: fmt.Sprintf(":%d", envDecoder.Port), Handler: httpService()}
+	server := &http.Server{Addr: fmt.Sprintf(":%d", envDecoder.Port), Handler: httpService(ctx)}
 
 	serverCtx, serverStopCtx := context.WithCancel(context.Background())
 
@@ -171,7 +173,7 @@ func main() {
 	d.Exit()
 }
 
-func httpService() http.Handler {
+func httpService(ctx context.Context) http.Handler {
 	log.Println("setting up HTTP router and adding middleware")
 
 	r := chi.NewRouter()
@@ -192,7 +194,7 @@ func httpService() http.Handler {
 			return
 		}
 
-		job := getJob(chi.URLParam(r, "id"), req)
+		job := getJob(ctx, chi.URLParam(r, "id"), req)
 
 		if job == nil {
 			w.WriteHeader(http.StatusNotFound)
@@ -206,9 +208,9 @@ func httpService() http.Handler {
 	return r
 }
 
-func getJob(id string, input map[string]interface{}) worker.Job {
+func getJob(ctx context.Context, id string, input map[string]interface{}) worker.Job {
 	if id == "helm-revisions-count-tracker" {
-		newJob, err := jobs.NewHelmRevisionsCountTracker(dbConn, time.Now().UTC(), &jobs.HelmRevisionsCountTrackerOpts{
+		newJob, err := jobs.NewHelmRevisionsCountTracker(ctx, dbConn, time.Now().UTC(), &jobs.HelmRevisionsCountTrackerOpts{
 			DBConf:             &envDecoder.DBConf,
 			DOClientID:         envDecoder.DOClientID,
 			DOClientSecret:     envDecoder.DOClientSecret,

+ 7 - 3
workers/utils/retry_helm_agent.go

@@ -3,6 +3,7 @@
 package utils
 
 import (
+	"context"
 	"fmt"
 	"log"
 	"os"
@@ -43,11 +44,12 @@ func NewRetryHelmAgent(
 }
 
 func (a *RetryHelmAgent) ListReleases(
+	ctx context.Context,
 	namespace string,
 	filter *types.ReleaseListFilter,
 ) ([]*release.Release, error) {
 	for i := uint(0); i < a.retryCount; i++ {
-		releases, err := a.agent.ListReleases(namespace, filter)
+		releases, err := a.agent.ListReleases(ctx, namespace, filter)
 
 		if err == nil {
 			return releases, nil
@@ -68,10 +70,11 @@ func (a *RetryHelmAgent) ListReleases(
 }
 
 func (a *RetryHelmAgent) GetReleaseHistory(
+	ctx context.Context,
 	name string,
 ) ([]*release.Release, error) {
 	for i := uint(0); i < a.retryCount; i++ {
-		releases, err := a.agent.GetReleaseHistory(name)
+		releases, err := a.agent.GetReleaseHistory(ctx, name)
 
 		if err == nil {
 			return releases, nil
@@ -92,11 +95,12 @@ func (a *RetryHelmAgent) GetReleaseHistory(
 }
 
 func (a *RetryHelmAgent) DeleteReleaseRevision(
+	ctx context.Context,
 	name string,
 	version int,
 ) error {
 	for i := uint(0); i < a.retryCount; i++ {
-		err := a.agent.DeleteReleaseRevision(name, version)
+		err := a.agent.DeleteReleaseRevision(ctx, name, version)
 
 		if err == nil {
 			return nil
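
Each retrying wrapper simply forwards ctx to the wrapped agent. As a possible extension (not part of this diff), a loop like these could also give up between attempts once the caller cancels; retryDelay here is a hypothetical field:

	for i := uint(0); i < a.retryCount; i++ {
		err := a.agent.DeleteReleaseRevision(ctx, name, version)
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			// The caller gave up; stop retrying and surface why.
			return ctx.Err()
		case <-time.After(a.retryDelay):
		}
	}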