Quellcode durchsuchen

add migration option for vault

Alexander Belanger vor 4 Jahren
Ursprung
Commit
4e39df7def

+ 3 - 0
Makefile

@@ -4,6 +4,9 @@ VERSION ?= dev
 start-dev: install setup-env-files
 	bash ./scripts/dev-environment/StartDevServer.sh
 
+run-migrate-dev: install setup-env-files
+	bash ./scripts/dev-environment/RunMigrateDev.sh
+
 install: 
 	bash ./scripts/dev-environment/SetupEnvironment.sh
 

+ 1 - 1
cmd/migrate/keyrotate/helpers_test.go

@@ -92,7 +92,7 @@ func setupTestEnv(tester *tester, t *testing.T) {
 	tester.Key = &key
 	tester.DB = db
 
-	tester.repo = gorm.NewRepository(db, &key)
+	tester.repo = gorm.NewRepository(db, &key, nil)
 }
 
 func cleanup(tester *tester, t *testing.T) {

+ 3 - 3
cmd/migrate/keyrotate/rotate.go

@@ -574,7 +574,7 @@ func rotateOAuthIntegrationModel(db *_gorm.DB, oldKey, newKey *[32]byte) error {
 	}
 
 	// cluster-scoped repository
-	repo := gorm.NewOAuthIntegrationRepository(db, oldKey).(*gorm.OAuthIntegrationRepository)
+	repo := gorm.NewOAuthIntegrationRepository(db, oldKey, nil).(*gorm.OAuthIntegrationRepository)
 
 	// iterate (count / stepSize) + 1 times using Limit and Offset
 	for i := 0; i < (int(count)/stepSize)+1; i++ {
@@ -629,7 +629,7 @@ func rotateGCPIntegrationModel(db *_gorm.DB, oldKey, newKey *[32]byte) error {
 	}
 
 	// cluster-scoped repository
-	repo := gorm.NewGCPIntegrationRepository(db, oldKey).(*gorm.GCPIntegrationRepository)
+	repo := gorm.NewGCPIntegrationRepository(db, oldKey, nil).(*gorm.GCPIntegrationRepository)
 
 	// iterate (count / stepSize) + 1 times using Limit and Offset
 	for i := 0; i < (int(count)/stepSize)+1; i++ {
@@ -682,7 +682,7 @@ func rotateAWSIntegrationModel(db *_gorm.DB, oldKey, newKey *[32]byte) error {
 	}
 
 	// cluster-scoped repository
-	repo := gorm.NewAWSIntegrationRepository(db, oldKey).(*gorm.AWSIntegrationRepository)
+	repo := gorm.NewAWSIntegrationRepository(db, oldKey, nil).(*gorm.AWSIntegrationRepository)
 
 	// iterate (count / stepSize) + 1 times using Limit and Offset
 	for i := 0; i < (int(count)/stepSize)+1; i++ {

+ 3 - 3
cmd/migrate/keyrotate/rotate_test.go

@@ -471,7 +471,7 @@ func TestOAuthIntegrationModelRotation(t *testing.T) {
 	}
 
 	// verify all oauths decoded properly
-	repo := gorm.NewOAuthIntegrationRepository(tester.DB, &newKey).(*gorm.OAuthIntegrationRepository)
+	repo := gorm.NewOAuthIntegrationRepository(tester.DB, &newKey, nil).(*gorm.OAuthIntegrationRepository)
 
 	oauths := []*ints.OAuthIntegration{}
 
@@ -527,7 +527,7 @@ func TestGCPIntegrationModelRotation(t *testing.T) {
 	}
 
 	// verify all gcps decoded properly
-	repo := gorm.NewGCPIntegrationRepository(tester.DB, &newKey).(*gorm.GCPIntegrationRepository)
+	repo := gorm.NewGCPIntegrationRepository(tester.DB, &newKey, nil).(*gorm.GCPIntegrationRepository)
 
 	gcps := []*ints.GCPIntegration{}
 
@@ -575,7 +575,7 @@ func TestAWSIntegrationModelRotation(t *testing.T) {
 	}
 
 	// verify all awss decoded properly
-	repo := gorm.NewAWSIntegrationRepository(tester.DB, &newKey).(*gorm.AWSIntegrationRepository)
+	repo := gorm.NewAWSIntegrationRepository(tester.DB, &newKey, nil).(*gorm.AWSIntegrationRepository)
 
 	awss := []*ints.AWSIntegration{}
 

+ 4 - 0
cmd/migrate/main.go

@@ -60,6 +60,10 @@ func main() {
 			logger.Fatal().Err(err).Msg("key rotation failed")
 		}
 	}
+
+	if err := InstanceMigrate(db, envConf.DBConf); err != nil {
+		logger.Fatal().Err(err).Msg("vault migration failed")
+	}
 }
 
 type RotateConf struct {

+ 16 - 0
cmd/migrate/migrate_ce.go

@@ -0,0 +1,16 @@
+// +build !ee
+
+package main
+
+import (
+	"fmt"
+
+	"github.com/porter-dev/porter/api/server/shared/config/env"
+	"gorm.io/gorm"
+)
+
+func InstanceMigrate(db *gorm.DB, dbConf *env.DBConf) error {
+	fmt.Println("HIT CE EDITION")
+
+	return nil
+}

+ 39 - 0
cmd/migrate/migrate_ee.go

@@ -0,0 +1,39 @@
+// +build ee
+
+package main
+
+import (
+	"log"
+
+	"github.com/joeshaw/envdecode"
+	"github.com/porter-dev/porter/api/server/shared/config/env"
+	"github.com/porter-dev/porter/ee/migrate"
+	"gorm.io/gorm"
+)
+
+func InstanceMigrate(db *gorm.DB, dbConf *env.DBConf) error {
+	if shouldVaultRotate() {
+		if err := migrate.MigrateVault(db, dbConf); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type VaultMigrateConf struct {
+	// we add a dummy field to avoid empty struct issue with envdecode
+	DummyField   string `env:"ASDF,default=asdf"`
+	VaultMigrate bool   `env:"VAULT_MIGRATE"`
+}
+
+func shouldVaultRotate() bool {
+	var c VaultMigrateConf
+
+	if err := envdecode.StrictDecode(&c); err != nil {
+		log.Fatalf("Failed to decode Vault migration conf: %s", err)
+		return false
+	}
+
+	return c.VaultMigrate
+}

+ 0 - 2
ee/integrations/vault/vault.go

@@ -219,8 +219,6 @@ func (c *Client) writeRequest(method, path string, data interface{}, dst interfa
 	req.Header.Set("Accept", "application/json; charset=utf-8")
 	req.Header.Set("X-Vault-Token", c.apiKey)
 
-	fmt.Println("URL IS", reqURL.String(), string(strData))
-
 	res, err := c.httpClient.Do(req)
 
 	if err != nil {

+ 280 - 0
ee/migrate/migrate_vault.go

@@ -0,0 +1,280 @@
+package migrate
+
+import (
+	"fmt"
+
+	"github.com/porter-dev/porter/api/server/shared/config/env"
+	"github.com/porter-dev/porter/ee/integrations/vault"
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+	"github.com/porter-dev/porter/internal/repository/credentials"
+	"gorm.io/gorm"
+)
+
+// process 100 records at a time
+const stepSize = 100
+
+func MigrateVault(db *gorm.DB, dbConf *env.DBConf) error {
+	var vaultClient *vault.Client
+
+	if dbConf.VaultAPIKey != "" && dbConf.VaultServerURL != "" && dbConf.VaultPrefix != "" {
+		vaultClient = vault.NewClient(
+			dbConf.VaultServerURL,
+			dbConf.VaultAPIKey,
+			dbConf.VaultPrefix,
+		)
+	} else {
+		return fmt.Errorf("env variables not properly set for vault migration")
+	}
+
+	err := migrateOAuthIntegrationModel(db, vaultClient)
+
+	if err != nil {
+		fmt.Printf("failed on oauth migration: %v\n", err)
+
+		return err
+	}
+
+	err = migrateGCPIntegrationModel(db, vaultClient)
+
+	if err != nil {
+		fmt.Printf("failed on gcp migration: %v\n", err)
+
+		return err
+	}
+
+	err = migrateAWSIntegrationModel(db, vaultClient)
+
+	if err != nil {
+		fmt.Printf("failed on aws migration: %v\n", err)
+
+		return err
+	}
+
+	return nil
+}
+
+// migrateOAuthIntegrationModel copies the credential fields of every oauth
+// integration into vault, then clears those fields from the database for the
+// records that reached vault without error. Rows whose vault write failed
+// are left untouched so no credentials are lost.
+func migrateOAuthIntegrationModel(db *gorm.DB, client *vault.Client) error {
+	// get count of model
+	var count int64
+
+	if err := db.Model(&ints.OAuthIntegration{}).Count(&count).Error; err != nil {
+		return err
+	}
+
+	// make a map of ids to errors -- we don't clear the integrations with errors
+	errors := make(map[uint]error)
+
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		oauths := []*ints.OAuthIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&oauths).Error; err != nil {
+			return err
+		}
+
+		// write each credential to vault (nothing is decrypted here)
+		for _, oauth := range oauths {
+			// Check if record already exists in vault client. If so, we don't write anything to vault,
+			// since we don't want to overwrite any data that's been written.
+			if resp, _ := client.GetOAuthCredential(oauth); resp != nil {
+				continue
+			}
+
+			// write the data to the vault client
+			if err := client.WriteOAuthCredential(oauth, &credentials.OAuthCredential{
+				ClientID:     oauth.ClientID,
+				AccessToken:  oauth.AccessToken,
+				RefreshToken: oauth.RefreshToken,
+			}); err != nil {
+				errors[oauth.ID] = err
+				fmt.Printf("oauth vault write error on ID %d: %v\n", oauth.ID, err)
+			}
+		}
+	}
+
+	fmt.Printf("migrated %d oauth integrations with %d errors\n", count, len(errors))
+
+	saveErrors := make(map[uint]error, 0)
+
+	// iterate a second time, clearing the data
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		oauths := []*ints.OAuthIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&oauths).Error; err != nil {
+			return err
+		}
+
+		// clear the db columns only for records that reached vault without error
+		for _, oauth := range oauths {
+			if _, found := errors[oauth.ID]; !found {
+				// clear the data from the db, and save
+				oauth.ClientID = []byte{}
+				oauth.AccessToken = []byte{}
+				oauth.RefreshToken = []byte{}
+
+				if err := db.Save(oauth).Error; err != nil {
+					saveErrors[oauth.ID] = err
+				}
+			}
+		}
+	}
+
+	fmt.Printf("cleared %d oauth integrations with %d errors\n", count, len(saveErrors))
+
+	for saveErrorID, saveError := range saveErrors {
+		fmt.Printf("oauth save error on ID %d: %v\n", saveErrorID, saveError)
+	}
+
+	return nil
+}
+
+// migrateGCPIntegrationModel copies the key data of every gcp integration
+// into vault, then clears that data from the database for the records that
+// reached vault without error. Rows whose vault write failed are left
+// untouched so no credentials are lost.
+func migrateGCPIntegrationModel(db *gorm.DB, client *vault.Client) error {
+	// get count of model
+	var count int64
+
+	if err := db.Model(&ints.GCPIntegration{}).Count(&count).Error; err != nil {
+		return err
+	}
+
+	// make a map of ids to errors -- we don't clear the integrations with errors
+	errors := make(map[uint]error)
+
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		gcps := []*ints.GCPIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&gcps).Error; err != nil {
+			return err
+		}
+
+		// write each credential to vault (nothing is decrypted here)
+		for _, gcp := range gcps {
+			// Check if record already exists in vault client. If so, we don't write anything to vault,
+			// since we don't want to overwrite any data that's been written.
+			if resp, _ := client.GetGCPCredential(gcp); resp != nil {
+				continue
+			}
+
+			// write the data to the vault client
+			if err := client.WriteGCPCredential(gcp, &credentials.GCPCredential{
+				GCPKeyData: gcp.GCPKeyData,
+			}); err != nil {
+				errors[gcp.ID] = err
+				fmt.Printf("gcp vault write error on ID %d: %v\n", gcp.ID, err)
+			}
+		}
+	}
+
+	fmt.Printf("migrated %d gcp integrations with %d errors\n", count, len(errors))
+
+	saveErrors := make(map[uint]error, 0)
+
+	// iterate a second time, clearing the data
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		gcps := []*ints.GCPIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&gcps).Error; err != nil {
+			return err
+		}
+
+		// clear the db columns only for records that reached vault without error
+		for _, gcp := range gcps {
+			if _, found := errors[gcp.ID]; !found {
+				// clear the data from the db, and save
+				gcp.GCPKeyData = []byte{}
+
+				if err := db.Save(gcp).Error; err != nil {
+					saveErrors[gcp.ID] = err
+				}
+			}
+		}
+	}
+
+	fmt.Printf("cleared %d gcp integrations with %d errors\n", count, len(saveErrors))
+
+	for saveErrorID, saveError := range saveErrors {
+		fmt.Printf("gcp save error on ID %d: %v\n", saveErrorID, saveError)
+	}
+
+	return nil
+}
+
+// migrateAWSIntegrationModel copies the credential fields of every aws
+// integration into vault, then clears those fields from the database for the
+// records that reached vault without error. Rows whose vault write failed
+// are left untouched so no credentials are lost.
+func migrateAWSIntegrationModel(db *gorm.DB, client *vault.Client) error {
+	// get count of model
+	var count int64
+
+	if err := db.Model(&ints.AWSIntegration{}).Count(&count).Error; err != nil {
+		return err
+	}
+
+	// make a map of ids to errors -- we don't clear the integrations with errors
+	errors := make(map[uint]error)
+
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		awss := []*ints.AWSIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&awss).Error; err != nil {
+			return err
+		}
+
+		// write each credential to vault (nothing is decrypted here)
+		for _, aws := range awss {
+			// Check if record already exists in vault client. If so, we don't write anything to vault,
+			// since we don't want to overwrite any data that's been written.
+			if resp, _ := client.GetAWSCredential(aws); resp != nil {
+				continue
+			}
+
+			// write the data to the vault client
+			if err := client.WriteAWSCredential(aws, &credentials.AWSCredential{
+				AWSAccessKeyID:     aws.AWSAccessKeyID,
+				AWSClusterID:       aws.AWSClusterID,
+				AWSSecretAccessKey: aws.AWSSecretAccessKey,
+				AWSSessionToken:    aws.AWSSessionToken,
+			}); err != nil {
+				errors[aws.ID] = err
+				fmt.Printf("aws vault write error on ID %d: %v\n", aws.ID, err)
+			}
+		}
+	}
+
+	fmt.Printf("migrated %d aws integrations with %d errors\n", count, len(errors))
+
+	saveErrors := make(map[uint]error, 0)
+
+	// iterate a second time, clearing the data
+	// iterate (count / stepSize) + 1 times using Limit and Offset
+	for i := 0; i < (int(count)/stepSize)+1; i++ {
+		awss := []*ints.AWSIntegration{}
+
+		if err := db.Order("id asc").Offset(i * stepSize).Limit(stepSize).Find(&awss).Error; err != nil {
+			return err
+		}
+
+		// clear the db columns only for records that reached vault without error
+		for _, aws := range awss {
+			if _, found := errors[aws.ID]; !found {
+				// clear the data from the db, and save
+				aws.AWSAccessKeyID = []byte{}
+				aws.AWSClusterID = []byte{}
+				aws.AWSSecretAccessKey = []byte{}
+				aws.AWSSessionToken = []byte{}
+
+				if err := db.Save(aws).Error; err != nil {
+					saveErrors[aws.ID] = err
+				}
+			}
+		}
+	}
+
+	fmt.Printf("cleared %d aws integrations with %d errors\n", count, len(saveErrors))
+
+	for saveErrorID, saveError := range saveErrors {
+		fmt.Printf("aws save error on ID %d: %v\n", saveErrorID, saveError)
+	}
+
+	return nil
+}

+ 14 - 0
scripts/dev-environment/RunMigrateDev.sh

@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Load env variables for the backend from the local dev env file; the
+# migration cannot run without them, so fail with a non-zero status if
+# the file is missing.
+if [[ -e ./docker/.env ]]
+then
+  set -a # automatically export all variables
+  source ./docker/.env
+  set +a
+else
+  echo "Couldn't find any backend env variables, exiting process"
+  exit 1
+fi
+
+# run the migration binary with the ee build tag so vault migration is included
+go run -tags ee ./cmd/migrate