Alexander Belanger 4 lat temu
rodzic
commit
c0e6f1693b

+ 0 - 0
cli/cmd/apply.go


+ 1 - 1
cli/cmd/deploy/build.go

@@ -44,7 +44,7 @@ func (b *BuildAgent) BuildDocker(
 	opts := &docker.BuildOpts{
 		ImageRepo:         b.imageRepo,
 		Tag:               tag,
-		CurrentTag:		   currentTag,
+		CurrentTag:        currentTag,
 		BuildContext:      buildCtx,
 		Env:               b.env,
 		DockerfilePath:    dockerfilePath,

+ 1 - 1
cli/cmd/docker/builder.go

@@ -21,7 +21,7 @@ import (
 type BuildOpts struct {
 	ImageRepo         string
 	Tag               string
-	CurrentTag		  string
+	CurrentTag        string
 	BuildContext      string
 	DockerfilePath    string
 	IsDockerfileInCtx bool

+ 142 - 0
cli/cmd/switchboard/exec/exec.go

@@ -0,0 +1,142 @@
+package exec
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/porter-dev/porter/cli/cmd/switchboard/models"
+)
+
// ExecFunc is the function invoked to apply a single resource.
// TODO: this exec func should probably accept channels or something
type ExecFunc func(resource *models.Resource) error

// ExecNode is a node in the resource dependency graph. A node may begin
// executing only after every parent (dependency) node has finished.
type ExecNode struct {
	isExecFinished bool        // set once the exec func completed successfully
	isExecStarted  bool        // set once the exec func was kicked off
	parents        []*ExecNode // nodes this node depends on
	resource       *models.Resource
}
+
// IsFinished reports whether this node's exec has completed successfully.
func (e *ExecNode) IsFinished() bool {
	return e.isExecFinished
}

// SetFinished marks this node's exec as completed successfully.
func (e *ExecNode) SetFinished() {
	e.isExecFinished = true
}

// IsStarted reports whether this node's exec has been kicked off.
func (e *ExecNode) IsStarted() bool {
	return e.isExecStarted
}

// SetStarted marks this node's exec as kicked off.
func (e *ExecNode) SetStarted() {
	e.isExecStarted = true
}
+
+func (e *ExecNode) ShouldStart() bool {
+	// if the exec has started or finished, return false
+	if e.IsStarted() || e.IsFinished() {
+		return false
+	}
+
+	// if all parents have finished execution, the exec process should start
+	parentsFinished := true
+
+	for _, parent := range e.parents {
+		parentsFinished = parentsFinished && parent.IsFinished()
+	}
+
+	return parentsFinished
+}
+
+// GetExecNodes
+func GetExecNodes(group *models.ResourceGroup) ([]*ExecNode, error) {
+	// create a map of resource names to exec nodes
+	resourceMap := make(map[string]*ExecNode)
+
+	for _, resource := range group.Resources {
+		// check that name does not already exist
+		if _, exists := resourceMap[resource.Name]; exists {
+			return nil, fmt.Errorf("duplicate resource name encountered for \"%s\"", resource.Name)
+		}
+
+		resourceMap[resource.Name] = &ExecNode{
+			resource: resource,
+			parents:  make([]*ExecNode, 0),
+		}
+	}
+
+	// Now that resources are registered, iterate through the resources again
+	// to find the dependencies. If a dependency does not exist, throw an error
+	res := make([]*ExecNode, 0)
+
+	for _, execNode := range resourceMap {
+		for _, dependency := range execNode.resource.Dependencies {
+			// check that the resource described by the dependency exists
+			if _, exists := resourceMap[dependency]; !exists {
+				return nil, fmt.Errorf("parent resource \"%s\" does not exist", dependency)
+			}
+
+			execNode.parents = append(execNode.parents, resourceMap[dependency])
+		}
+
+		res = append(res, execNode)
+	}
+
+	// TODO: check against circular dependencies
+
+	return res, nil
+}
+
+// Execute simply calls exec on nodes in parallel, in batches. This could be much more
+// efficient.
+func Execute(nodes []*ExecNode, execFunc ExecFunc) error {
+	var outErr error
+	for {
+		var wg sync.WaitGroup
+
+		// get the list of nodes which are ready to execute, and execute those nodes
+		for _, node := range nodes {
+			nodeP := node
+			if nodeP.ShouldStart() {
+				wg.Add(1)
+
+				go func() {
+					defer wg.Done()
+
+					nodeP.SetStarted()
+					err := execFunc(nodeP.resource)
+
+					if err != nil {
+						outErr = err
+						return
+					}
+
+					nodeP.SetFinished()
+				}()
+			}
+		}
+
+		if outErr != nil {
+			break
+		}
+
+		wg.Wait()
+
+		if allFinished := areAllNodesFinished(nodes); allFinished {
+			break
+		}
+	}
+
+	return outErr
+}
+
+func areAllNodesFinished(nodes []*ExecNode) bool {
+	areFinished := true
+
+	for _, node := range nodes {
+		areFinished = areFinished && node.IsFinished()
+	}
+
+	return areFinished
+}

+ 57 - 0
cli/cmd/switchboard/models/cluster.go

@@ -0,0 +1,57 @@
+package models
+
// ClusterAuth is an auth mechanism that a cluster candidate can resolve
type ClusterAuth string

// The supported cluster candidate auth mechanisms
const (
	X509   ClusterAuth = "x509"
	Basic  ClusterAuth = "basic"
	Bearer ClusterAuth = "bearerToken"
	OIDC   ClusterAuth = "oidc"
	GCP    ClusterAuth = "gcp-sa"
	AWS    ClusterAuth = "aws-sa"
	DO     ClusterAuth = "do-oauth"
	Local  ClusterAuth = "local"
)
+
// Cluster is an integration that can connect to a Kubernetes cluster via
// a specific auth mechanism
type Cluster struct {
	// The auth mechanism that this cluster will use
	AuthMechanism ClusterAuth `json:"auth_mechanism"`

	// The project that this integration belongs to
	ProjectID uint `json:"project_id"`

	// Name of the cluster
	Name string `json:"name"`

	// Server endpoint for the cluster
	Server string `json:"server"`

	// Additional fields optionally used by the kube client.
	// NOTE(review): the tags below use kebab-case (kubeconfig-style) while the
	// fields above use snake_case — confirm this mix is intended.
	ClusterLocationOfOrigin string `json:"location_of_origin,omitempty"`
	TLSServerName           string `json:"tls-server-name,omitempty"`
	InsecureSkipTLSVerify   bool   `json:"insecure-skip-tls-verify,omitempty"`
	ProxyURL                string `json:"proxy-url,omitempty"`
	UserLocationOfOrigin    string
	UserImpersonate         string `json:"act-as,omitempty"`
	UserImpersonateGroups   string `json:"act-as-groups,omitempty"`

	// InfraID presumably identifies the infrastructure this cluster was
	// provisioned from — TODO confirm against the server-side model
	InfraID uint `json:"infra_id"`

	// ------------------------------------------------------------------
	// All fields below this line are encrypted before storage
	// ------------------------------------------------------------------
	// NOTE(review): this is a CLI-side model; the encryption note looks
	// carried over from the server model — confirm it applies here.

	// The various auth mechanisms available to the integration
	KubeIntegrationID uint
	OIDCIntegrationID uint
	GCPIntegrationID  uint
	AWSIntegrationID  uint
	DOIntegrationID   uint

	// CertificateAuthorityData for the cluster, encrypted at rest
	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
}

+ 15 - 0
cli/cmd/switchboard/models/resource.go

@@ -0,0 +1,15 @@
+package models
+
// ResourceGroup is the internal model of a set of resources that are applied
// together as one unit.
type ResourceGroup struct {
	APIVersion string      // version of the resource group schema
	Resources  []*Resource // the resources contained in this group
}
+
// Resource is the internal model of a single deployable unit, handled by the
// driver named in Driver.
type Resource struct {
	Name         string                 // unique name within the group
	Driver       string                 // driver used to apply this resource (e.g. "helm")
	Config       map[string]interface{} // driver-specific configuration
	Source       map[string]interface{} // source of the resource — TODO confirm exact semantics
	Target       map[string]interface{} // target of the resource — TODO confirm exact semantics
	Dependencies []string               // names of resources that must be applied first
}

+ 94 - 0
cli/cmd/switchboard/objutils/coalesce.go

@@ -0,0 +1,94 @@
+package objutils
+
+import "sigs.k8s.io/yaml"
+
+// MergeYAML merges raw yaml, with preference given to override
+func MergeYAML(base, override []byte) (map[string]interface{}, error) {
+	baseVals := map[string]interface{}{}
+	overrideVals := map[string]interface{}{}
+
+	err := yaml.Unmarshal(base, &baseVals)
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = yaml.Unmarshal(override, &overrideVals)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return CoalesceValues(baseVals, overrideVals), nil
+}
+
+// CoalesceValues replaces arrays and scalar values, merges maps
+func CoalesceValues(base, override map[string]interface{}) map[string]interface{} {
+	if base == nil && override != nil {
+		return override
+	} else if override == nil {
+		return base
+	}
+
+	for key, val := range base {
+		if oVal, ok := override[key]; ok {
+			if oVal == nil {
+				delete(override, key)
+			} else if isYAMLTable(oVal) && isYAMLTable(val) {
+				oMapVal, _ := oVal.(map[string]interface{})
+				bMapVal, _ := val.(map[string]interface{})
+
+				override[key] = mergeMaps(bMapVal, oMapVal)
+			}
+		} else {
+			override[key] = val
+		}
+	}
+
+	return override
+}
+
// isYAMLTable reports whether v is a YAML table (a string-keyed map).
func isYAMLTable(v interface{}) bool {
	if _, ok := v.(map[string]interface{}); ok {
		return true
	}

	return false
}
+
// mergeMaps merges any number of maps together, with maps later in the slice
// taking precedence. The merge mutates the later map in place: keys missing
// from it are copied in from earlier maps, keys explicitly set to nil (where
// the earlier map also has the key) are deleted, and nested string-keyed maps
// are merged recursively.
func mergeMaps(maps ...map[string]interface{}) map[string]interface{} {
	switch len(maps) {
	case 0:
		return nil
	case 1:
		return maps[0]
	case 2:
		base, override := maps[0], maps[1]

		if base == nil {
			return override
		}

		if override == nil {
			return base
		}

		for key, baseVal := range base {
			overrideVal, present := override[key]

			switch {
			case present && overrideVal == nil:
				// explicit nil in the later map removes the key
				delete(override, key)
			case !present:
				// key only in the earlier map: copy it across
				override[key] = baseVal
			default:
				// both present and non-nil: recurse when both are tables
				// (mergeMaps mutates the override table in place)
				if bTable, ok := baseVal.(map[string]interface{}); ok {
					if oTable, ok := overrideVal.(map[string]interface{}); ok {
						mergeMaps(bTable, oTable)
					}
				}
			}
		}

		return override
	default:
		// reduce bottom-up: fold the last two maps into one, then merge again
		mLen := len(maps)
		reduced := append(maps[0:mLen-2], mergeMaps(maps[mLen-2], maps[mLen-1]))

		return mergeMaps(reduced...)
	}
}

+ 46 - 0
cli/cmd/switchboard/objutils/get.go

@@ -0,0 +1,46 @@
+package objutils
+
+import "fmt"
+
// NestedFieldNotFoundError is returned when a field along a nested lookup
// path does not exist in the configuration object.
type NestedFieldNotFoundError struct {
	Field string
}

// Error implements the error interface.
func (e *NestedFieldNotFoundError) Error() string {
	return fmt.Sprintf("could not find field %s in configuration", e.Field)
}

// GetNestedString finds a nested string in a set of map objects. Arrays not supported.
//
// For example, GetNestedString(obj, "a", "b") returns obj["a"]["b"] if it is
// a string. It returns a *NestedFieldNotFoundError if a field along the path
// is missing, and a generic error if an intermediate field is not a nested
// object, if the final field is not a string, or if no fields were given.
func GetNestedString(obj map[string]interface{}, fields ...string) (string, error) {
	// guard: with no fields there is nothing to look up (previously this
	// panicked via the fields[0:-1] slice below)
	if len(fields) == 0 {
		return "", fmt.Errorf("no fields provided")
	}

	curr := obj
	lastIndex := len(fields) - 1

	// descend through all intermediate fields, which must be nested objects
	for _, field := range fields[0:lastIndex] {
		objField, ok := curr[field]

		if !ok {
			return "", &NestedFieldNotFoundError{field}
		}

		curr, ok = objField.(map[string]interface{})

		if !ok {
			return "", fmt.Errorf("%s is not a nested object", field)
		}
	}

	// convert the last field to a string
	strFieldGeneric, ok := curr[fields[lastIndex]]

	if !ok {
		return "", &NestedFieldNotFoundError{fields[lastIndex]}
	}

	res, ok := strFieldGeneric.(string)

	if !ok {
		return "", fmt.Errorf("%s is not a string", fields[lastIndex])
	}

	return res, nil
}

+ 18 - 0
cli/cmd/switchboard/parser/parser.go

@@ -0,0 +1,18 @@
+package parser
+
+import (
+	"github.com/porter-dev/porter/cli/cmd/switchboard/types"
+	"sigs.k8s.io/yaml"
+)
+
+func ParseRawBytes(raw []byte) (*types.ResourceGroup, error) {
+	res := &types.ResourceGroup{}
+
+	err := yaml.Unmarshal(raw, res)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}

+ 36 - 0
cli/cmd/switchboard/query/jsonpath/jsonpath.go

@@ -0,0 +1,36 @@
+package jsonpath
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/util/jsonpath"
+)
+
+func GetResult(data map[string]interface{}, query string) (interface{}, error) {
+	js := jsonpath.New("query")
+
+	err := js.Parse(query)
+
+	if err != nil {
+		return nil, err
+	}
+
+	results, err := js.FindResults(data)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, result := range results {
+		for _, r := range result {
+			// if this cannot be interfaced, throw an error
+			if !r.CanInterface() {
+				return nil, fmt.Errorf("result cannot be interfaced")
+			}
+
+			return r.Interface(), nil
+		}
+	}
+
+	return nil, fmt.Errorf("no query result")
+}

+ 69 - 0
cli/cmd/switchboard/query/query.go

@@ -0,0 +1,69 @@
+package query
+
+import (
+	"regexp"
+
+	"github.com/porter-dev/porter/cli/cmd/switchboard/query/jsonpath"
+)
+
+// PopulateQuery reads through config to detect queries. If a query is found, the data
+// is queried and the relevant field is populated in the config. This method is recursive.
+func PopulateQueries(config map[string]interface{}, data map[string]interface{}) (map[string]interface{}, error) {
+	iter := queryIterator{data, make([]error, 0)}
+	res := iter.iterMap(config)
+
+	return res, nil
+}
+
// queryIterator walks a config tree, resolving jsonpath query strings against
// data and collecting any evaluation errors encountered along the way.
type queryIterator struct {
	data   map[string]interface{} // the data tree queries are evaluated against
	errors []error                // errors recorded while resolving queries
}
+
+func (q *queryIterator) iterSlice(arr []interface{}) []interface{} {
+	res := make([]interface{}, 0)
+
+	for _, arrVal := range arr {
+		res = append(res, q.iterInterface(arrVal))
+	}
+
+	return res
+}
+
+func (q *queryIterator) iterMap(mapVal map[string]interface{}) map[string]interface{} {
+	res := make(map[string]interface{})
+
+	for key, val := range mapVal {
+		res[key] = q.iterInterface(val)
+	}
+
+	return res
+}
+
+func (q *queryIterator) iterInterface(val interface{}) interface{} {
+	switch val.(type) {
+	case []interface{}:
+		return q.iterSlice(val.([]interface{}))
+	case map[string]interface{}:
+		return q.iterMap(val.(map[string]interface{}))
+	case string:
+		// TODO: move out to higher-level func
+		bracesReg := regexp.MustCompile(`\{(.+)\}`)
+
+		if bracesReg.MatchString(val.(string)) {
+			// get the query value from data
+			res, err := jsonpath.GetResult(q.data, val.(string))
+
+			if err != nil {
+				q.errors = append(q.errors, err)
+				return val
+			}
+
+			return res
+		}
+
+		return val
+	default:
+		return val
+	}
+}

+ 15 - 0
cli/cmd/switchboard/types/resource.go

@@ -0,0 +1,15 @@
+package types
+
+type ResourceGroup struct {
+	Version   string      `json:"version"`
+	Resources []*Resource `json:"resources"`
+}
+
+type Resource struct {
+	Name      string                 `json:"name"`
+	Driver    string                 `json:"driver"`
+	Source    map[string]interface{} `json:"source"`
+	Target    map[string]interface{} `json:"target"`
+	Config    map[string]interface{} `json:"config"`
+	DependsOn []string               `json:"depends_on"`
+}

+ 106 - 0
cli/cmd/switchboard/worker/worker.go

@@ -0,0 +1,106 @@
+package worker
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/porter-dev/porter/cli/cmd/switchboard/drivers"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/drivers/helm"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/drivers/kubernetes"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/drivers/terraform"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/exec"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/models"
+	"github.com/porter-dev/porter/cli/cmd/switchboard/types"
+	"github.com/rs/zerolog"
+)
+
// ApplyOpts are the options passed to Apply.
type ApplyOpts struct {
	BasePath       string          // base directory drivers resolve relative paths against
	Logger         *zerolog.Logger // NOTE(review): currently unused — Apply builds its own console logger; confirm intended
	ResourceLogger *zerolog.Logger // NOTE(review): currently unused by Apply — confirm intended
}
+
+// Apply creates a ResourceGroup
+func Apply(group *types.ResourceGroup, opts *ApplyOpts) error {
+	// create a map of resource names to drivers
+	driverLookupTable := make(map[string]drivers.Driver)
+	stdOut := zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout})
+
+	sharedDriverOpts := &drivers.SharedDriverOpts{
+		BaseDir:           opts.BasePath,
+		DriverLookupTable: &driverLookupTable,
+		Logger:            &stdOut,
+	}
+
+	execFunc := getExecFunc(sharedDriverOpts)
+
+	resources := make([]*models.Resource, 0)
+
+	for _, resource := range group.Resources {
+		modelResource := &models.Resource{
+			Name:         resource.Name,
+			Driver:       resource.Driver,
+			Config:       resource.Config,
+			Source:       resource.Source,
+			Target:       resource.Target,
+			Dependencies: resource.DependsOn,
+		}
+
+		resources = append(resources, modelResource)
+
+		var driver drivers.Driver
+		var err error
+
+		// switch on the driver type to construct the driver
+		switch resource.Driver {
+		case "kubernetes":
+			driver, err = kubernetes.NewKubernetesDriver(modelResource, sharedDriverOpts)
+		case "helm":
+			driver, err = helm.NewHelmDriver(modelResource, sharedDriverOpts)
+		case "terraform":
+			driver, err = terraform.NewTerraformDriver(modelResource, sharedDriverOpts)
+		}
+
+		// TODO: append errors, don't exit here
+		if err != nil {
+			stdOut.Err(err).Send()
+			return err
+		}
+
+		driverLookupTable[resource.Name] = driver
+	}
+
+	nodes, err := exec.GetExecNodes(&models.ResourceGroup{
+		APIVersion: group.Version,
+		Resources:  resources,
+	})
+
+	if err != nil {
+		stdOut.Err(err).Send()
+		return err
+	}
+
+	return exec.Execute(nodes, execFunc)
+}
+
+func getExecFunc(opts *drivers.SharedDriverOpts) exec.ExecFunc {
+	return func(resource *models.Resource) error {
+		opts.Logger.Info().Msg(
+			fmt.Sprintf("running apply for resource %s", resource.Name),
+		)
+
+		lookupTable := *opts.DriverLookupTable
+
+		_, err := lookupTable[resource.Name].Apply(resource)
+
+		if err != nil {
+			return err
+		}
+
+		opts.Logger.Info().Msg(
+			fmt.Sprintf("successfully applied resource %s", resource.Name),
+		)
+
+		return nil
+	}
+}