Преглед изворни кода

cluster and git repo model repository

Alexander Belanger пре 5 година
родитељ
комит
8d1735815a

+ 7 - 3
internal/models/cluster.go

@@ -21,9 +21,6 @@ type Cluster struct {
 	// Server endpoint for the cluster
 	Server string `json:"server"`
 
-	// CertificateAuthorityData for the cluster, encrypted at rest
-	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
-
 	// Additional fields optionally used by the kube client
 	ClusterLocationOfOrigin string `json:"location_of_origin,omitempty"`
 	TLSServerName           string `json:"tls-server-name,omitempty"`
@@ -33,6 +30,10 @@ type Cluster struct {
 	UserImpersonate         string `json:"act-as,omitempty"`
 	UserImpersonateGroups   string `json:"act-as-groups,omitempty"`
 
+	// ------------------------------------------------------------------
+	// All fields below this line are encrypted before storage
+	// ------------------------------------------------------------------
+
 	// The various auth mechanisms available to the integration
 	KubeIntegrationID uint
 	OIDCIntegrationID uint
@@ -41,6 +42,9 @@ type Cluster struct {
 
 	// A token cache that can be used by an auth mechanism, if desired
 	TokenCache integrations.TokenCache `json:"token_cache"`
+
+	// CertificateAuthorityData for the cluster, encrypted at rest
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
 }
 
 // ClusterExternal is an external Cluster to be shared over REST

+ 1 - 1
internal/models/gitrepo.go

@@ -16,7 +16,7 @@ type GitRepo struct {
 	RepoEntity string `json:"repo_entity"`
 
 	// The various auth mechanisms available to the integration
-	OIntegrationID uint
+	OAuthIntegrationID uint
 }
 
 // GitRepoExternal is a repository to be shared over REST

+ 10 - 10
internal/models/project.go

@@ -14,7 +14,7 @@ type Project struct {
 	Roles []Role `json:"roles"`
 
 	// linked repos
-	Repos []GitRepo `json:"git_repos,omitempty"`
+	GitRepos []GitRepo `json:"git_repos,omitempty"`
 
 	// linked clusters
 	Clusters          []Cluster          `json:"clusters"`
@@ -30,10 +30,10 @@ type Project struct {
 
 // ProjectExternal represents the Project type that is sent over REST
 type ProjectExternal struct {
-	ID          uint              `json:"id"`
-	Name        string            `json:"name"`
-	Roles       []RoleExternal    `json:"roles"`
-	RepoClients []GitRepoExternal `json:"git_repos,omitempty"`
+	ID       uint              `json:"id"`
+	Name     string            `json:"name"`
+	Roles    []RoleExternal    `json:"roles"`
+	GitRepos []GitRepoExternal `json:"git_repos,omitempty"`
 }
 
 // Externalize generates an external Project to be shared over REST
@@ -46,14 +46,14 @@ func (p *Project) Externalize() *ProjectExternal {
 
 	repos := make([]GitRepoExternal, 0)
 
-	for _, repo := range p.Repos {
+	for _, repo := range p.GitRepos {
 		repos = append(repos, *repo.Externalize())
 	}
 
 	return &ProjectExternal{
-		ID:          p.ID,
-		Name:        p.Name,
-		Roles:       roles,
-		RepoClients: repos,
+		ID:       p.ID,
+		Name:     p.Name,
+		Roles:    roles,
+		GitRepos: repos,
 	}
 }

+ 2 - 1
internal/repository/cluster.go

@@ -12,7 +12,8 @@ type ClusterRepository interface {
 	ReadClusterCandidate(id uint) (*models.ClusterCandidate, error)
 	ListClusterCandidatesByProjectID(projectID uint) ([]*models.ClusterCandidate, error)
 	UpdateClusterCandidateCreatedClusterID(id uint, createdClusterID uint) (*models.ClusterCandidate, error)
-	CreateCluster(sa *models.Cluster) (*models.Cluster, error)
+
+	CreateCluster(cluster *models.Cluster) (*models.Cluster, error)
 	ReadCluster(id uint) (*models.Cluster, error)
 	ListClustersByProjectID(projectID uint) ([]*models.Cluster, error)
 	UpdateClusterTokenCache(tokenCache *ints.TokenCache) (*models.Cluster, error)

+ 317 - 0
internal/repository/gorm/cluster.go

@@ -0,0 +1,317 @@
+package gorm
+
+import (
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/repository"
+	"gorm.io/gorm"
+
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+)
+
+// ClusterRepository uses gorm.DB for querying the database
+type ClusterRepository struct {
+	db  *gorm.DB
+	key *[32]byte
+}
+
+// NewClusterRepository returns a ClusterRepository which uses
+// gorm.DB for querying the database. It accepts an encryption key to encrypt
+// sensitive data
+func NewClusterRepository(db *gorm.DB, key *[32]byte) repository.ClusterRepository {
+	return &ClusterRepository{db, key}
+}
+
+// CreateClusterCandidate creates a new cluster candidate
+func (repo *ClusterRepository) CreateClusterCandidate(
+	cc *models.ClusterCandidate,
+) (*models.ClusterCandidate, error) {
+	err := repo.EncryptClusterCandidateData(cc, repo.key)
+
+	if err != nil {
+		return nil, err
+	}
+
+	project := &models.Project{}
+
+	if err := repo.db.Where("id = ?", cc.ProjectID).First(&project).Error; err != nil {
+		return nil, err
+	}
+
+	assoc := repo.db.Model(&project).Association("ClusterCandidates")
+
+	if assoc.Error != nil {
+		return nil, assoc.Error
+	}
+
+	if err := assoc.Append(cc); err != nil {
+		return nil, err
+	}
+
+	return cc, nil
+}
+
+// ReadClusterCandidate finds a cluster candidate by id
+func (repo *ClusterRepository) ReadClusterCandidate(
+	id uint,
+) (*models.ClusterCandidate, error) {
+	cc := &models.ClusterCandidate{}
+
+	if err := repo.db.Preload("Resolvers").Where("id = ?", id).First(&cc).Error; err != nil {
+		return nil, err
+	}
+
+	repo.DecryptClusterCandidateData(cc, repo.key)
+
+	return cc, nil
+}
+
+// ListClusterCandidatesByProjectID finds all cluster candidates
+// for a given project id
+func (repo *ClusterRepository) ListClusterCandidatesByProjectID(
+	projectID uint,
+) ([]*models.ClusterCandidate, error) {
+	ccs := []*models.ClusterCandidate{}
+
+	if err := repo.db.Preload("Resolvers").Where("project_id = ?", projectID).Find(&ccs).Error; err != nil {
+		return nil, err
+	}
+
+	for _, cc := range ccs {
+		repo.DecryptClusterCandidateData(cc, repo.key)
+	}
+
+	return ccs, nil
+}
+
+// UpdateClusterCandidateCreatedClusterID updates the CreatedClusterID for
+// a candidate, after the candidate has been resolved.
+func (repo *ClusterRepository) UpdateClusterCandidateCreatedClusterID(
+	id uint,
+	createdClusterID uint,
+) (*models.ClusterCandidate, error) {
+	cc := &models.ClusterCandidate{}
+
+	if err := repo.db.Where("id = ?", id).First(&cc).Error; err != nil {
+		return nil, err
+	}
+
+	cc.CreatedClusterID = createdClusterID
+
+	if err := repo.db.Save(cc).Error; err != nil {
+		return nil, err
+	}
+
+	repo.DecryptClusterCandidateData(cc, repo.key)
+
+	return cc, nil
+}
+
+// CreateCluster creates a new cluster
+func (repo *ClusterRepository) CreateCluster(
+	cluster *models.Cluster,
+) (*models.Cluster, error) {
+	err := repo.EncryptClusterData(cluster, repo.key)
+
+	if err != nil {
+		return nil, err
+	}
+
+	project := &models.Project{}
+
+	if err := repo.db.Where("id = ?", cluster.ProjectID).First(&project).Error; err != nil {
+		return nil, err
+	}
+
+	assoc := repo.db.Model(&project).Association("Clusters")
+
+	if assoc.Error != nil {
+		return nil, assoc.Error
+	}
+
+	if err := assoc.Append(cluster); err != nil {
+		return nil, err
+	}
+
+	// create a token cache by default
+	assoc = repo.db.Model(cluster).Association("TokenCache")
+
+	if assoc.Error != nil {
+		return nil, assoc.Error
+	}
+
+	if err := assoc.Append(&cluster.TokenCache); err != nil {
+		return nil, err
+	}
+
+	return cluster, nil
+}
+
+// ReadCluster finds a cluster by id
+func (repo *ClusterRepository) ReadCluster(
+	id uint,
+) (*models.Cluster, error) {
+	cluster := &models.Cluster{}
+
+	// preload the TokenCache association
+	if err := repo.db.Preload("TokenCache").Where("id = ?", id).First(&cluster).Error; err != nil {
+		return nil, err
+	}
+
+	repo.DecryptClusterData(cluster, repo.key)
+
+	return cluster, nil
+}
+
+// ListClustersByProjectID finds all clusters
+// for a given project id
+func (repo *ClusterRepository) ListClustersByProjectID(
+	projectID uint,
+) ([]*models.Cluster, error) {
+	clusters := []*models.Cluster{}
+
+	if err := repo.db.Where("project_id = ?", projectID).Find(&clusters).Error; err != nil {
+		return nil, err
+	}
+
+	for _, cluster := range clusters {
+		repo.DecryptClusterData(cluster, repo.key)
+	}
+
+	return clusters, nil
+}
+
+// UpdateClusterTokenCache updates the token cache for a cluster
+func (repo *ClusterRepository) UpdateClusterTokenCache(
+	tokenCache *ints.TokenCache,
+) (*models.Cluster, error) {
+	if tok := tokenCache.Token; len(tok) > 0 {
+		cipherData, err := repository.Encrypt(tok, repo.key)
+
+		if err != nil {
+			return nil, err
+		}
+
+		tokenCache.Token = cipherData
+	}
+
+	cluster := &models.Cluster{}
+
+	if err := repo.db.Where("id = ?", tokenCache.ClusterID).First(&cluster).Error; err != nil {
+		return nil, err
+	}
+
+	cluster.TokenCache.Token = tokenCache.Token
+	cluster.TokenCache.Expiry = tokenCache.Expiry
+
+	if err := repo.db.Save(cluster).Error; err != nil {
+		return nil, err
+	}
+
+	return cluster, nil
+}
+
+// EncryptClusterData will encrypt the cluster's sensitive data before writing
+// to the DB
+func (repo *ClusterRepository) EncryptClusterData(
+	cluster *models.Cluster,
+	key *[32]byte,
+) error {
+	if len(cluster.CertificateAuthorityData) > 0 {
+		cipherData, err := repository.Encrypt(cluster.CertificateAuthorityData, key)
+
+		if err != nil {
+			return err
+		}
+
+		cluster.CertificateAuthorityData = cipherData
+	}
+
+	return nil
+}
+
+// EncryptClusterCandidateData will encrypt the cluster candidate data before
+// writing to the DB
+func (repo *ClusterRepository) EncryptClusterCandidateData(
+	cc *models.ClusterCandidate,
+	key *[32]byte,
+) error {
+	if len(cc.AWSClusterIDGuess) > 0 {
+		cipherData, err := repository.Encrypt(cc.AWSClusterIDGuess, key)
+
+		if err != nil {
+			return err
+		}
+
+		cc.AWSClusterIDGuess = cipherData
+	}
+
+	if len(cc.Kubeconfig) > 0 {
+		cipherData, err := repository.Encrypt(cc.Kubeconfig, key)
+
+		if err != nil {
+			return err
+		}
+
+		cc.Kubeconfig = cipherData
+	}
+
+	return nil
+}
+
+// DecryptClusterData will decrypt the cluster's sensitive data before
+// returning it from the DB
+func (repo *ClusterRepository) DecryptClusterData(
+	cluster *models.Cluster,
+	key *[32]byte,
+) error {
+	if len(cluster.CertificateAuthorityData) > 0 {
+		plaintext, err := repository.Decrypt(cluster.CertificateAuthorityData, key)
+
+		if err != nil {
+			return err
+		}
+
+		cluster.CertificateAuthorityData = plaintext
+	}
+
+	if tok := cluster.TokenCache.Token; len(tok) > 0 {
+		plaintext, err := repository.Decrypt(tok, key)
+
+		if err != nil {
+			return err
+		}
+
+		cluster.TokenCache.Token = plaintext
+	}
+
+	return nil
+}
+
+// DecryptClusterCandidateData will decrypt the cluster candidate data before
+// returning it from the DB
+func (repo *ClusterRepository) DecryptClusterCandidateData(
+	cc *models.ClusterCandidate,
+	key *[32]byte,
+) error {
+	if len(cc.AWSClusterIDGuess) > 0 {
+		plaintext, err := repository.Decrypt(cc.AWSClusterIDGuess, key)
+
+		if err != nil {
+			return err
+		}
+
+		cc.AWSClusterIDGuess = plaintext
+	}
+
+	if len(cc.Kubeconfig) > 0 {
+		plaintext, err := repository.Decrypt(cc.Kubeconfig, key)
+
+		if err != nil {
+			return err
+		}
+
+		cc.Kubeconfig = plaintext
+	}
+
+	return nil
+}

+ 373 - 0
internal/repository/gorm/cluster_test.go

@@ -0,0 +1,373 @@
+package gorm_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+	"github.com/porter-dev/porter/internal/models"
+	ints "github.com/porter-dev/porter/internal/models/integrations"
+	orm "gorm.io/gorm"
+)
+
+func TestCreateClusterCandidate(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_create_cc.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	defer cleanup(tester, t)
+
+	cc := &models.ClusterCandidate{
+		ProjectID:         tester.initProjects[0].ID,
+		CreatedClusterID:  0,
+		Resolvers:         []models.ClusterResolver{},
+		Name:              "cluster-test",
+		Server:            "https://localhost",
+		ContextName:       "context-test",
+		AWSClusterIDGuess: []byte("example-cluster-0"),
+		Kubeconfig:        []byte("current-context: testing\n"),
+	}
+
+	expCC := *cc
+
+	cc, err := tester.repo.Cluster.CreateClusterCandidate(cc)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	cc, err = tester.repo.Cluster.ReadClusterCandidate(cc.Model.ID)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure id is 1
+	if cc.Model.ID != 1 {
+		t.Errorf("incorrect cluster candidate ID: expected %d, got %d\n", 1, cc.Model.ID)
+	}
+
+	// reset fields for deep.Equal
+	cc.Model = orm.Model{}
+
+	if diff := deep.Equal(expCC, *cc); diff != nil {
+		t.Errorf("incorrect cluster candidate")
+		t.Error(diff)
+	}
+}
+
+func TestCreateClusterCandidateWithResolvers(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_create_cc.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	defer cleanup(tester, t)
+
+	cc := &models.ClusterCandidate{
+		ProjectID:        tester.initProjects[0].ID,
+		CreatedClusterID: 0,
+		Resolvers: []models.ClusterResolver{
+			models.ClusterResolver{
+				Name:     models.ClusterLocalhost,
+				Resolved: false,
+				Docs:     models.ClusterResolverInfos[models.ClusterLocalhost].Docs,
+				Fields:   models.ClusterResolverInfos[models.ClusterLocalhost].Fields,
+			},
+		},
+		Name:              "cluster-test",
+		Server:            "https://localhost",
+		ContextName:       "context-test",
+		AWSClusterIDGuess: []byte("example-cluster-0"),
+		Kubeconfig:        []byte("current-context: testing\n"),
+	}
+
+	expCC := *cc
+
+	cc, err := tester.repo.Cluster.CreateClusterCandidate(cc)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	cc, err = tester.repo.Cluster.ReadClusterCandidate(cc.Model.ID)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure id is 1
+	if cc.Model.ID != 1 {
+		t.Errorf("incorrect cluster candidate ID: expected %d, got %d\n", 1, cc.Model.ID)
+	}
+
+	// make sure length of resolvers is 1
+	if len(cc.Resolvers) != 1 {
+		t.Fatalf("incorrect cluster candidate resolvers length: expected %d, got %d\n", 1, len(cc.Resolvers))
+	}
+
+	// make sure resolver cluster candidate id is 1
+	if cc.Resolvers[0].ClusterCandidateID != 1 {
+		t.Errorf("incorrect resolver ClusterCandidateID: expected %d, got %d\n", 1, cc.Resolvers[0].ClusterCandidateID)
+	}
+
+	// reset fields for deep.Equal
+	cc.Model = orm.Model{}
+	cc.Resolvers[0].Model = orm.Model{}
+	expCC.Resolvers[0].Model = orm.Model{}
+	expCC.Resolvers[0].ClusterCandidateID = 1
+
+	if diff := deep.Equal(expCC, *cc); diff != nil {
+		t.Errorf("incorrect cluster candidate")
+		t.Error(diff)
+	}
+}
+
+func TestListClusterCandidatesByProjectID(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_list_ccs.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	initClusterCandidate(tester, t)
+	defer cleanup(tester, t)
+
+	ccs, err := tester.repo.Cluster.ListClusterCandidatesByProjectID(
+		tester.initProjects[0].Model.ID,
+	)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	if len(ccs) != 1 {
+		t.Fatalf("length of cluster candidates incorrect: expected %d, got %d\n", 1, len(ccs))
+	}
+
+	// make sure data is correct
+	expCC := models.ClusterCandidate{
+		ProjectID:         tester.initProjects[0].ID,
+		CreatedClusterID:  0,
+		Resolvers:         []models.ClusterResolver{},
+		Name:              "cluster-test",
+		Server:            "https://localhost",
+		ContextName:       "context-test",
+		AWSClusterIDGuess: []byte("example-cluster-0"),
+		Kubeconfig:        []byte("current-context: testing\n"),
+	}
+
+	cc := ccs[0]
+
+	// reset fields for reflect.DeepEqual
+	cc.Model = orm.Model{}
+
+	if diff := deep.Equal(expCC, *cc); diff != nil {
+		t.Errorf("incorrect cluster candidate")
+		t.Error(diff)
+	}
+}
+
+func TestUpdateClusterCandidateCreatedClusterID(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_update_cc_cluster_id.db",
+	}
+
+	setupTestEnv(tester, t)
+	initClusterCandidate(tester, t)
+	initCluster(tester, t)
+	defer cleanup(tester, t)
+
+	cc, err := tester.repo.Cluster.UpdateClusterCandidateCreatedClusterID(
+		tester.initCCs[0].ID,
+		tester.initClusters[0].ID,
+	)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	expCC := models.ClusterCandidate{
+		ProjectID:         tester.initProjects[0].ID,
+		CreatedClusterID:  tester.initClusters[0].ID,
+		Name:              "cluster-test",
+		Server:            "https://localhost",
+		ContextName:       "context-test",
+		AWSClusterIDGuess: []byte("example-cluster-0"),
+		Kubeconfig:        []byte("current-context: testing\n"),
+	}
+
+	// reset fields for reflect.DeepEqual
+	cc.Model = orm.Model{}
+
+	if diff := deep.Equal(expCC, *cc); diff != nil {
+		t.Errorf("incorrect cluster candidate")
+		t.Error(diff)
+	}
+}
+
+func TestCreateCluster(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_create_cluster.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	initKubeIntegration(tester, t)
+	defer cleanup(tester, t)
+
+	cluster := &models.Cluster{
+		ProjectID:                tester.initProjects[0].ID,
+		Name:                     "cluster-test",
+		Server:                   "https://localhost",
+		KubeIntegrationID:        tester.initKIs[0].ID,
+		CertificateAuthorityData: []byte("-----BEGIN"),
+	}
+
+	expCluster := *cluster
+
+	cluster, err := tester.repo.Cluster.CreateCluster(cluster)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	cluster, err = tester.repo.Cluster.ReadCluster(cluster.Model.ID)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure id is 1
+	if cluster.Model.ID != 1 {
+		t.Errorf("incorrect cluster ID: expected %d, got %d\n", 1, cluster.Model.ID)
+	}
+
+	// reset fields for deep.Equal
+	cluster.Model = orm.Model{}
+
+	if diff := deep.Equal(expCluster, *cluster); diff != nil {
+		t.Errorf("incorrect cluster")
+		t.Error(diff)
+	}
+}
+
+func TestListClustersByProjectID(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_list_clusters.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	initCluster(tester, t)
+	defer cleanup(tester, t)
+
+	clusters, err := tester.repo.Cluster.ListClustersByProjectID(
+		tester.initProjects[0].Model.ID,
+	)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	if len(clusters) != 1 {
+		t.Fatalf("length of clusters incorrect: expected %d, got %d\n", 1, len(clusters))
+	}
+
+	// make sure data is correct
+	expCluster := models.Cluster{
+		ProjectID:                tester.initProjects[0].ID,
+		Name:                     "cluster-test",
+		Server:                   "https://localhost",
+		KubeIntegrationID:        tester.initKIs[0].ID,
+		CertificateAuthorityData: []byte("-----BEGIN"),
+	}
+
+	cluster := clusters[0]
+
+	// reset fields for reflect.DeepEqual
+	cluster.Model = orm.Model{}
+
+	if diff := deep.Equal(expCluster, *cluster); diff != nil {
+		t.Errorf("incorrect cluster")
+		t.Error(diff)
+	}
+}
+
+func TestUpdateClusterToken(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_test_update_cluster_token.db",
+	}
+
+	setupTestEnv(tester, t)
+	initProject(tester, t)
+	initKubeIntegration(tester, t)
+	defer cleanup(tester, t)
+
+	cluster := &models.Cluster{
+		ProjectID:                tester.initProjects[0].ID,
+		Name:                     "cluster-test",
+		Server:                   "https://localhost",
+		KubeIntegrationID:        tester.initKIs[0].ID,
+		CertificateAuthorityData: []byte("-----BEGIN"),
+		TokenCache: ints.TokenCache{
+			Token:  []byte("token-1"),
+			Expiry: time.Now().Add(-1 * time.Hour),
+		},
+	}
+
+	cluster, err := tester.repo.Cluster.CreateCluster(cluster)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	cluster, err = tester.repo.Cluster.ReadCluster(cluster.Model.ID)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure cluster id of token is 1
+	if cluster.TokenCache.ClusterID != 1 {
+		t.Fatalf("incorrect cluster id in token cache: expected %d, got %d\n", 1, cluster.TokenCache.ClusterID)
+	}
+
+	// make sure old token is expired
+	if isExpired := cluster.TokenCache.IsExpired(); !isExpired {
+		t.Fatalf("token was not expired\n")
+	}
+
+	cluster.TokenCache.Token = []byte("token-2")
+	cluster.TokenCache.Expiry = time.Now().Add(24 * time.Hour)
+
+	cluster, err = tester.repo.Cluster.UpdateClusterTokenCache(&cluster.TokenCache)
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+	cluster, err = tester.repo.Cluster.ReadCluster(cluster.Model.ID)
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure id is 1
+	if cluster.Model.ID != 1 {
+		t.Errorf("incorrect cluster ID: expected %d, got %d\n", 1, cluster.Model.ID)
+	}
+
+	// make sure new token is correct and not expired
+	if cluster.TokenCache.ClusterID != 1 {
+		t.Fatalf("incorrect cluster ID in token cache: expected %d, got %d\n", 1, cluster.TokenCache.ClusterID)
+	}
+
+	if isExpired := cluster.TokenCache.IsExpired(); isExpired {
+		t.Fatalf("token was expired\n")
+	}
+
+	if string(cluster.TokenCache.Token) != "token-2" {
+		t.Errorf("incorrect token in cache: expected %s, got %s\n", "token-2", cluster.TokenCache.Token)
+	}
+}

+ 91 - 102
internal/repository/gorm/gitrepo_test.go

@@ -1,104 +1,93 @@
 package gorm_test
 
-// func TestCreateGitRepo(t *testing.T) {
-// 	tester := &tester{
-// 		dbFileName: "./porter_create_gr.db",
-// 	}
-
-// 	setupTestEnv(tester, t)
-// 	initUser(tester, t)
-// 	initProject(tester, t)
-// 	defer cleanup(tester, t)
-
-// 	repoClient := &models.GitRepo{
-// 		ProjectID:    tester.initProjects[0].ID,
-// 		UserID:       tester.initUsers[0].ID,
-// 		RepoUserID:   1,
-// 		Kind:         models.GitRepoGithub,
-// 		AccessToken:  []byte("accesstoken1234"),
-// 		RefreshToken: []byte("refreshtoken1234"),
-// 	}
-
-// 	repoClient, err := tester.repo.GitRepo.CreateGitRepo(repoClient)
-
-// 	if err != nil {
-// 		t.Fatalf("%v\n", err)
-// 	}
-
-// 	repoClient, err = tester.repo.GitRepo.ReadGitRepo(repoClient.Model.ID)
-
-// 	if err != nil {
-// 		t.Fatalf("%v\n", err)
-// 	}
-
-// 	// make sure id is 1
-// 	if repoClient.Model.ID != 1 {
-// 		t.Errorf("incorrect repo client ID: expected %d, got %d\n", 1, repoClient.Model.ID)
-// 	}
-
-// 	// make sure data is correct
-// 	expGitRepo := &models.GitRepo{
-// 		ProjectID:    tester.initProjects[0].ID,
-// 		UserID:       tester.initUsers[0].ID,
-// 		RepoUserID:   1,
-// 		Kind:         models.GitRepoGithub,
-// 		AccessToken:  []byte("accesstoken1234"),
-// 		RefreshToken: []byte("refreshtoken1234"),
-// 	}
-
-// 	copyGitRepo := repoClient
-
-// 	// reset fields for reflect.DeepEqual
-// 	copyGitRepo.Model = orm.Model{}
-
-// 	if diff := deep.Equal(copyGitRepo, expGitRepo); diff != nil {
-// 		t.Errorf("incorrect repo client")
-// 		t.Error(diff)
-// 	}
-// }
-
-// func TestListGitReposByProjectID(t *testing.T) {
-// 	tester := &tester{
-// 		dbFileName: "./porter_list_grs.db",
-// 	}
-
-// 	setupTestEnv(tester, t)
-// 	initUser(tester, t)
-// 	initProject(tester, t)
-// 	initServiceAccount(tester, t)
-// 	initGitRepo(tester, t)
-// 	defer cleanup(tester, t)
-
-// 	grs, err := tester.repo.GitRepo.ListGitReposByProjectID(
-// 		tester.initProjects[0].Model.ID,
-// 	)
-
-// 	if err != nil {
-// 		t.Fatalf("%v\n", err)
-// 	}
-
-// 	if len(grs) != 1 {
-// 		t.Fatalf("length of grs incorrect: expected %d, got %d\n", 1, len(grs))
-// 	}
-
-// 	// make sure data is correct
-// 	// make sure data is correct
-// 	expGitRepo := &models.GitRepo{
-// 		ProjectID:    tester.initProjects[0].ID,
-// 		UserID:       tester.initUsers[0].ID,
-// 		RepoUserID:   1,
-// 		Kind:         models.GitRepoGithub,
-// 		AccessToken:  []byte("accesstoken1234"),
-// 		RefreshToken: []byte("refreshtoken1234"),
-// 	}
-
-// 	copyGitRepo := grs[0]
-
-// 	// reset fields for reflect.DeepEqual
-// 	copyGitRepo.Model = orm.Model{}
-
-// 	if diff := deep.Equal(copyGitRepo, expGitRepo); diff != nil {
-// 		t.Errorf("incorrect repo client")
-// 		t.Error(diff)
-// 	}
-// }
+import (
+	"testing"
+
+	"github.com/go-test/deep"
+	"github.com/porter-dev/porter/internal/models"
+	orm "gorm.io/gorm"
+)
+
+func TestCreateGitRepo(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_create_gr.db",
+	}
+
+	setupTestEnv(tester, t)
+	initUser(tester, t)
+	initProject(tester, t)
+	initOAuthIntegration(tester, t)
+	defer cleanup(tester, t)
+
+	gr := &models.GitRepo{
+		ProjectID:          tester.initProjects[0].ID,
+		RepoEntity:         "porter-dev",
+		OAuthIntegrationID: tester.initOAuths[0].ID,
+	}
+
+	expGR := *gr
+
+	gr, err := tester.repo.GitRepo.CreateGitRepo(gr)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	gr, err = tester.repo.GitRepo.ReadGitRepo(gr.Model.ID)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	// make sure id is 1
+	if gr.Model.ID != 1 {
+		t.Errorf("incorrect git repo ID: expected %d, got %d\n", 1, gr.Model.ID)
+	}
+
+	// reset fields for reflect.DeepEqual
+	gr.Model = orm.Model{}
+
+	if diff := deep.Equal(expGR, *gr); diff != nil {
+		t.Errorf("incorrect git repo")
+		t.Error(diff)
+	}
+}
+
+func TestListGitReposByProjectID(t *testing.T) {
+	tester := &tester{
+		dbFileName: "./porter_list_grs.db",
+	}
+
+	setupTestEnv(tester, t)
+	initGitRepo(tester, t)
+	defer cleanup(tester, t)
+
+	grs, err := tester.repo.GitRepo.ListGitReposByProjectID(
+		tester.initProjects[0].Model.ID,
+	)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	if len(grs) != 1 {
+		t.Fatalf("length of git repos incorrect: expected %d, got %d\n", 1, len(grs))
+	}
+
+	// make sure data is correct
+	expGR := models.GitRepo{
+		ProjectID:          tester.initProjects[0].ID,
+		RepoEntity:         "porter-dev",
+		OAuthIntegrationID: tester.initOAuths[0].ID,
+	}
+
+	gr := grs[0]
+
+	// reset fields for reflect.DeepEqual
+	gr.Model = orm.Model{}
+
+	if diff := deep.Equal(expGR, *gr); diff != nil {
+		t.Errorf("incorrect git repo")
+		t.Error(diff)
+	}
+}

+ 78 - 66
internal/repository/gorm/helpers_test.go

@@ -19,6 +19,8 @@ type tester struct {
 	initUsers    []*models.User
 	initProjects []*models.Project
 	initGRs      []*models.GitRepo
+	initClusters []*models.Cluster
+	initCCs      []*models.ClusterCandidate
 	initKIs      []*ints.KubeIntegration
 	initOIDCs    []*ints.OIDCIntegration
 	initOAuths   []*ints.OAuthIntegration
@@ -45,11 +47,15 @@ func setupTestEnv(tester *tester, t *testing.T) {
 		&models.User{},
 		&models.Session{},
 		&models.GitRepo{},
+		&models.Cluster{},
+		&models.ClusterCandidate{},
+		&models.ClusterResolver{},
 		&ints.KubeIntegration{},
 		&ints.OIDCIntegration{},
 		&ints.OAuthIntegration{},
 		&ints.GCPIntegration{},
 		&ints.AWSIntegration{},
+		&ints.TokenCache{},
 	)
 
 	if err != nil {
@@ -269,77 +275,83 @@ func initAWSIntegration(tester *tester, t *testing.T) {
 	tester.initAWSs = append(tester.initAWSs, aws)
 }
 
-// func initServiceAccountCandidate(tester *tester, t *testing.T) {
-// 	t.Helper()
-
-// 	saCandidate := &models.ServiceAccountCandidate{
-// 		ProjectID:       1,
-// 		Kind:            "connector",
-// 		ClusterName:     "cluster-test",
-// 		ClusterEndpoint: "https://localhost",
-// 		Integration:   models.X509,
-// 		Kubeconfig:      []byte("current-context: testing\n"),
-// 		Actions: []models.ServiceAccountAction{
-// 			models.ServiceAccountAction{
-// 				Name:     models.TokenDataAction,
-// 				Resolved: false,
-// 			},
-// 		},
-// 	}
-
-// 	saCandidate, err := tester.repo.ServiceAccount.CreateServiceAccountCandidate(saCandidate)
-
-// 	if err != nil {
-// 		t.Fatalf("%v\n", err)
-// 	}
-
-// 	tester.initSACandidates = append(tester.initSACandidates, saCandidate)
-// }
-
-// func initServiceAccount(tester *tester, t *testing.T) {
-// 	t.Helper()
-
-// 	sa := &models.ServiceAccount{
-// 		ProjectID:             1,
-// 		Kind:                  "connector",
-// 		Integration:         models.X509,
-// 		ClientCertificateData: []byte("-----BEGIN"),
-// 		ClientKeyData:         []byte("-----BEGIN"),
-// 		Clusters: []models.Cluster{
-// 			models.Cluster{
-// 				Name:                     "cluster-test",
-// 				Server:                   "https://localhost",
-// 				CertificateAuthorityData: []byte("-----BEGIN"),
-// 			},
-// 		},
-// 	}
-
-// 	sa, err := tester.repo.ServiceAccount.CreateServiceAccount(sa)
-
-// 	if err != nil {
-// 		t.Fatalf("%v\n", err)
-// 	}
-
-// 	tester.initSAs = append(tester.initSAs, sa)
-// }
+func initClusterCandidate(tester *tester, t *testing.T) {
+	t.Helper()
+
+	if len(tester.initProjects) == 0 {
+		initProject(tester, t)
+	}
+
+	cc := &models.ClusterCandidate{
+		ProjectID:         tester.initProjects[0].ID,
+		CreatedClusterID:  0,
+		Resolvers:         []models.ClusterResolver{},
+		Name:              "cluster-test",
+		Server:            "https://localhost",
+		ContextName:       "context-test",
+		AWSClusterIDGuess: []byte("example-cluster-0"),
+		Kubeconfig:        []byte("current-context: testing\n"),
+	}
+
+	cc, err := tester.repo.Cluster.CreateClusterCandidate(cc)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	tester.initCCs = append(tester.initCCs, cc)
+}
+
+func initCluster(tester *tester, t *testing.T) {
+	t.Helper()
+
+	if len(tester.initProjects) == 0 {
+		initProject(tester, t)
+	}
+
+	if len(tester.initKIs) == 0 {
+		initKubeIntegration(tester, t)
+	}
+
+	cluster := &models.Cluster{
+		ProjectID:                tester.initProjects[0].ID,
+		Name:                     "cluster-test",
+		Server:                   "https://localhost",
+		KubeIntegrationID:        tester.initKIs[0].ID,
+		CertificateAuthorityData: []byte("-----BEGIN"),
+	}
+
+	cluster, err := tester.repo.Cluster.CreateCluster(cluster)
+
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
+
+	tester.initClusters = append(tester.initClusters, cluster)
+}
 
 func initGitRepo(tester *tester, t *testing.T) {
 	t.Helper()
 
-	// rc := &models.GitRepo{
-	// 	ProjectID:    tester.initProjects[0].ID,
-	// 	UserID:       tester.initUsers[0].ID,
-	// 	RepoUserID:   1,
-	// 	Kind:         models.RepoClientGithub,
-	// 	AccessToken:  []byte("accesstoken1234"),
-	// 	RefreshToken: []byte("refreshtoken1234"),
-	// }
+	if len(tester.initProjects) == 0 {
+		initProject(tester, t)
+	}
+
+	if len(tester.initOAuths) == 0 {
+		initOAuthIntegration(tester, t)
+	}
+
+	gr := &models.GitRepo{
+		ProjectID:          tester.initProjects[0].ID,
+		RepoEntity:         "porter-dev",
+		OAuthIntegrationID: tester.initOAuths[0].ID,
+	}
 
-	// rc, err := tester.repo.RepoClient.CreateRepoClient(rc)
+	gr, err := tester.repo.GitRepo.CreateGitRepo(gr)
 
-	// if err != nil {
-	// 	t.Fatalf("%v\n", err)
-	// }
+	if err != nil {
+		t.Fatalf("%v\n", err)
+	}
 
-	// tester.initRCs = append(tester.initRCs, rc)
+	tester.initGRs = append(tester.initGRs, gr)
 }

+ 1 - 0
internal/repository/gorm/repository.go

@@ -13,6 +13,7 @@ func NewRepository(db *gorm.DB, key *[32]byte) *repository.Repository {
 		Session:          NewSessionRepository(db),
 		Project:          NewProjectRepository(db),
 		GitRepo:          NewGitRepoRepository(db, key),
+		Cluster:          NewClusterRepository(db, key),
 		KubeIntegration:  NewKubeIntegrationRepository(db, key),
 		OIDCIntegration:  NewOIDCIntegrationRepository(db, key),
 		OAuthIntegration: NewOAuthIntegrationRepository(db, key),