Browse source

create ee folder and update mit license

Alexander Belanger 4 years ago
parent
commit
5797ae24eb

+ 1 - 1
.air.toml

@@ -7,7 +7,7 @@ tmp_dir = "tmp"
 
 [build]
 # Just plain old shell command. You could use `make` as well.
-cmd = "go build -o ./tmp/app ./cmd/app"
+cmd = "go build -o ./tmp/app -tags ee ./cmd/app"
 # Binary file yields from `cmd`.
 bin = "tmp/app"
 # Customize binary.

+ 7 - 3
LICENSE

@@ -1,6 +1,10 @@
-MIT License
+Copyright (c) 2020-2021 Porter Technologies Inc.
 
-Copyright (c) 2020 Porter Technologies Inc.
+Portions of this software are licensed as follows:
+
+* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE".
+* All third party components incorporated into the Porter Software are licensed under the original license provided by the owner of the applicable component.
+* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +22,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.

+ 16 - 0
api/server/handlers/handler.go

@@ -108,3 +108,19 @@ func (d *DefaultPorterHandler) PopulateOAuthSession(w http.ResponseWriter, r *ht
 
 	return nil
 }
+
+type Unavailable struct {
+	config    *config.Config
+	handlerID string
+}
+
+func NewUnavailable(config *config.Config, handlerID string) *Unavailable {
+	return &Unavailable{config, handlerID}
+}
+
+func (u *Unavailable) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	apierrors.HandleAPIError(u.config, w, r, apierrors.NewErrPassThroughToClient(
+		fmt.Errorf("%s not available in community edition", u.handlerID),
+		http.StatusBadRequest,
+	), true)
+}

+ 68 - 0
api/server/handlers/invite/invite_ce.go

@@ -0,0 +1,68 @@
+// +build !ee
+
+package invite
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/handlers"
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+)
+
+type InviteUpdateRoleHandler struct {
+	handlers.PorterHandlerReader
+	handlers.Unavailable
+}
+
+func NewInviteUpdateRoleHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+) http.Handler {
+	return handlers.NewUnavailable(config, "invite_update_role")
+}
+
+type InviteAcceptHandler struct {
+	handlers.PorterHandler
+}
+
+func NewInviteAcceptHandler(
+	config *config.Config,
+) http.Handler {
+	return handlers.NewUnavailable(config, "invite_accept")
+}
+
+type InviteCreateHandler struct {
+	handlers.PorterHandlerReadWriter
+}
+
+func NewInviteCreateHandler(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) http.Handler {
+	return handlers.NewUnavailable(config, "invite_create")
+}
+
+type InviteDeleteHandler struct {
+	handlers.PorterHandler
+	authz.KubernetesAgentGetter
+}
+
+func NewInviteDeleteHandler(
+	config *config.Config,
+) http.Handler {
+	return handlers.NewUnavailable(config, "invite_delete")
+}
+
+type InvitesListHandler struct {
+	handlers.PorterHandlerWriter
+}
+
+func NewInvitesListHandler(
+	config *config.Config,
+	writer shared.ResultWriter,
+) http.Handler {
+	return handlers.NewUnavailable(config, "invite_list")
+}

+ 44 - 0
api/server/handlers/invite/invite_ee.go

@@ -0,0 +1,44 @@
+// +build ee
+
+package invite
+
+import (
+	"net/http"
+
+	"github.com/porter-dev/porter/api/server/shared"
+	"github.com/porter-dev/porter/api/server/shared/config"
+
+	"github.com/porter-dev/porter/ee/api/server/handlers/invite"
+)
+
+var NewInviteUpdateRoleHandler func(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+) http.Handler
+
+var NewInviteAcceptHandler func(
+	config *config.Config,
+) http.Handler
+
+var NewInviteCreateHandler func(
+	config *config.Config,
+	decoderValidator shared.RequestDecoderValidator,
+	writer shared.ResultWriter,
+) http.Handler
+
+var NewInviteDeleteHandler func(
+	config *config.Config,
+) http.Handler
+
+var NewInvitesListHandler func(
+	config *config.Config,
+	writer shared.ResultWriter,
+) http.Handler
+
+func init() {
+	NewInviteUpdateRoleHandler = invite.NewInviteUpdateRoleHandler
+	NewInviteAcceptHandler = invite.NewInviteAcceptHandler
+	NewInviteCreateHandler = invite.NewInviteCreateHandler
+	NewInviteDeleteHandler = invite.NewInviteDeleteHandler
+	NewInvitesListHandler = invite.NewInvitesListHandler
+}

+ 4 - 107
api/server/handlers/project/get_usage.go

@@ -1,20 +1,15 @@
 package project
 
 import (
-	"errors"
-	"fmt"
 	"net/http"
 
-	"github.com/porter-dev/porter/api/server/authz"
 	"github.com/porter-dev/porter/api/server/handlers"
 	"github.com/porter-dev/porter/api/server/shared"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
-	"github.com/porter-dev/porter/internal/kubernetes"
-	"github.com/porter-dev/porter/internal/kubernetes/nodes"
 	"github.com/porter-dev/porter/internal/models"
-	"gorm.io/gorm"
+	"github.com/porter-dev/porter/internal/usage"
 )
 
 type ProjectGetUsageHandler struct {
@@ -31,9 +26,11 @@ func NewProjectGetUsageHandler(
 }
 
 func (p *ProjectGetUsageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+
 	res := &types.GetProjectUsageResponse{}
 
-	currUsage, limit, err := GetUsage(p.Config(), r)
+	currUsage, limit, err := usage.GetUsage(p.Config(), proj)
 
 	if err != nil {
 		p.HandleAPIError(w, r, apierrors.NewErrInternal(err))
@@ -45,103 +42,3 @@ func (p *ProjectGetUsageHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 
 	p.WriteResult(w, r, res)
 }
-
-// GetUsage gets a project's current usage and usage limit
-func GetUsage(config *config.Config, r *http.Request) (
-	current, limit *types.ProjectUsage,
-	err error,
-) {
-	// read the project from the request
-	proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
-
-	// query for the project limit; if not found, default to basic
-	limitModel, err := config.Repo.ProjectUsage().ReadProjectUsage(proj.ID)
-
-	if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-		copyBasic := types.BasicPlan
-		limit = &copyBasic
-	} else if err != nil {
-		return nil, nil, err
-	} else {
-		limit = limitModel.ToProjectUsageType()
-	}
-
-	// query for the linked cluster counts
-	clusters, err := config.Repo.Cluster().ListClustersByProjectID(proj.ID)
-
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// query for the linked user counts
-	roles, err := config.Repo.Project().ListProjectRoles(proj.ID)
-
-	if err != nil {
-		return nil, nil, err
-	}
-
-	usageCache, err := config.Repo.ProjectUsage().ReadProjectUsageCache(proj.ID)
-	isCacheFound := true
-
-	if isCacheFound = !errors.Is(err, gorm.ErrRecordNotFound); err != nil && isCacheFound {
-		return nil, nil, err
-	}
-
-	// if the usage cache is 24 hours old, was not found, or usage is over limit,
-	// re-query for the usage
-	if !isCacheFound || usageCache.Is24HrOld() || usageCache.ResourceMemory > limit.ResourceMemory || usageCache.ResourceCPU > limit.ResourceCPU {
-		cpu, memory, err := getResourceUsage(config, clusters)
-
-		if err != nil {
-			return nil, nil, err
-		}
-
-		if !isCacheFound {
-			usageCache, err = config.Repo.ProjectUsage().CreateProjectUsageCache(&models.ProjectUsageCache{
-				ProjectID:      proj.ID,
-				ResourceCPU:    cpu,
-				ResourceMemory: memory,
-			})
-		} else {
-			usageCache.ResourceCPU = cpu
-			usageCache.ResourceMemory = memory
-
-			usageCache, err = config.Repo.ProjectUsage().UpdateProjectUsageCache(usageCache)
-		}
-	}
-
-	return &types.ProjectUsage{
-		ResourceCPU:    usageCache.ResourceCPU,
-		ResourceMemory: usageCache.ResourceMemory,
-		Clusters:       uint(len(clusters)),
-		Users:          uint(len(roles)),
-	}, limit, nil
-}
-
-// gets the total resource usage across all nodes in all clusters
-func getResourceUsage(config *config.Config, clusters []*models.Cluster) (uint, uint, error) {
-	// TODO; pass this in?
-	var totCPU, totMem uint = 0, 0
-	getter := authz.NewOutOfClusterAgentGetter(config)
-
-	for _, cluster := range clusters {
-		ooc := getter.GetOutOfClusterConfig(cluster)
-
-		agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
-
-		if err != nil {
-			return 0, 0, fmt.Errorf("failed to get agent: %s", err.Error())
-		}
-
-		totAlloc, err := nodes.GetAllocatableResources(agent.Clientset)
-
-		if err != nil {
-			return 0, 0, fmt.Errorf("failed to get alloc: %s", err.Error())
-		}
-
-		totCPU += totAlloc.CPU
-		totMem += totAlloc.Memory
-	}
-
-	return totCPU / 1000, totMem / (1024 * 1024), nil
-}

+ 5 - 2
api/server/router/middleware/usage.go

@@ -4,10 +4,11 @@ import (
 	"fmt"
 	"net/http"
 
-	"github.com/porter-dev/porter/api/server/handlers/project"
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"github.com/porter-dev/porter/internal/usage"
 )
 
 type UsageMiddleware struct {
@@ -23,8 +24,10 @@ var UsageErrFmt = "usage limit reached for metric %s: limit %d, requested %d"
 
 func (b *UsageMiddleware) Middleware(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		proj, _ := r.Context().Value(types.ProjectScope).(*models.Project)
+
 		// get the project usage limits
-		currentUsage, limit, err := project.GetUsage(b.config, r)
+		currentUsage, limit, err := usage.GetUsage(b.config, proj)
 
 		if err != nil {
 			apierrors.HandleAPIError(

+ 3 - 1
api/server/handlers/invite/accept.go → ee/api/server/handlers/invite/accept.go

@@ -1,3 +1,5 @@
+// +build ee
+
 package invite
 
 import (
@@ -21,7 +23,7 @@ type InviteAcceptHandler struct {
 
 func NewInviteAcceptHandler(
 	config *config.Config,
-) *InviteAcceptHandler {
+) http.Handler {
 	return &InviteAcceptHandler{
 		PorterHandler: handlers.NewDefaultPorterHandler(config, nil, nil),
 	}

+ 3 - 1
api/server/handlers/invite/create.go → ee/api/server/handlers/invite/create.go

@@ -1,3 +1,5 @@
+// +build ee
+
 package invite
 
 import (
@@ -23,7 +25,7 @@ func NewInviteCreateHandler(
 	config *config.Config,
 	decoderValidator shared.RequestDecoderValidator,
 	writer shared.ResultWriter,
-) *InviteCreateHandler {
+) http.Handler {
 	return &InviteCreateHandler{
 		PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
 	}

+ 3 - 1
api/server/handlers/invite/delete.go → ee/api/server/handlers/invite/delete.go

@@ -1,3 +1,5 @@
+// +build ee
+
 package invite
 
 import (
@@ -18,7 +20,7 @@ type InviteDeleteHandler struct {
 
 func NewInviteDeleteHandler(
 	config *config.Config,
-) *InviteDeleteHandler {
+) http.Handler {
 	return &InviteDeleteHandler{
 		PorterHandler:         handlers.NewDefaultPorterHandler(config, nil, nil),
 		KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),

+ 3 - 1
api/server/handlers/invite/list.go → ee/api/server/handlers/invite/list.go

@@ -1,3 +1,5 @@
+// +build ee
+
 package invite
 
 import (
@@ -18,7 +20,7 @@ type InvitesListHandler struct {
 func NewInvitesListHandler(
 	config *config.Config,
 	writer shared.ResultWriter,
-) *InvitesListHandler {
+) http.Handler {
 	return &InvitesListHandler{
 		PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
 	}

+ 3 - 1
api/server/handlers/invite/update_role.go → ee/api/server/handlers/invite/update_role.go

@@ -1,3 +1,5 @@
+// +build ee
+
 package invite
 
 import (
@@ -18,7 +20,7 @@ type InviteUpdateRoleHandler struct {
 func NewInviteUpdateRoleHandler(
 	config *config.Config,
 	decoderValidator shared.RequestDecoderValidator,
-) *InviteUpdateRoleHandler {
+) http.Handler {
 	return &InviteUpdateRoleHandler{
 		PorterHandlerReader: handlers.NewDefaultPorterHandler(config, decoderValidator, nil),
 	}

+ 28 - 0
ee/usage/limit.go

@@ -0,0 +1,28 @@
+// +build ee
+
+package usage
+
+import (
+	"errors"
+
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+	"gorm.io/gorm"
+)
+
+func GetLimit(config *config.Config, proj *models.Project) (limit *types.ProjectUsage, err error) {
+	// query for the project limit; if not found, default to basic
+	limitModel, err := config.Repo.ProjectUsage().ReadProjectUsage(proj.ID)
+
+	if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
+		copyBasic := types.BasicPlan
+		limit = &copyBasic
+	} else if err != nil {
+		return nil, err
+	} else {
+		limit = limitModel.ToProjectUsageType()
+	}
+
+	return limit, nil
+}

+ 15 - 0
internal/usage/limit_ce.go

@@ -0,0 +1,15 @@
+// +build !ee
+
+package usage
+
+import (
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+func GetLimit(config *config.Config, proj *models.Project) (limit *types.ProjectUsage, err error) {
+	copyLimit := types.BasicPlan
+
+	return &copyLimit, nil
+}

+ 16 - 0
internal/usage/limit_ee.go

@@ -0,0 +1,16 @@
+// +build ee
+
+package usage
+
+import (
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/ee/usage"
+	"github.com/porter-dev/porter/internal/models"
+)
+
+var GetLimit func(config *config.Config, proj *models.Project) (limit *types.ProjectUsage, err error)
+
+func init() {
+	GetLimit = usage.GetLimit
+}

+ 110 - 0
internal/usage/usage.go

@@ -0,0 +1,110 @@
+package usage
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/porter-dev/porter/api/server/authz"
+	"github.com/porter-dev/porter/api/server/shared/config"
+	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/kubernetes"
+	"github.com/porter-dev/porter/internal/kubernetes/nodes"
+	"github.com/porter-dev/porter/internal/models"
+	"gorm.io/gorm"
+)
+
+// GetUsage gets a project's current usage and usage limit
+func GetUsage(config *config.Config, proj *models.Project) (
+	current, limit *types.ProjectUsage,
+	err error,
+) {
+	limit, err = GetLimit(config, proj)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// query for the linked cluster counts
+	clusters, err := config.Repo.Cluster().ListClustersByProjectID(proj.ID)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// query for the linked user counts
+	roles, err := config.Repo.Project().ListProjectRoles(proj.ID)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	usageCache, err := config.Repo.ProjectUsage().ReadProjectUsageCache(proj.ID)
+	isCacheFound := true
+
+	if isCacheFound = !errors.Is(err, gorm.ErrRecordNotFound); err != nil && isCacheFound {
+		return nil, nil, err
+	}
+
+	// if the usage cache is 24 hours old, was not found, or usage is over limit,
+	// re-query for the usage
+	if !isCacheFound || usageCache.Is24HrOld() || usageCache.ResourceMemory > limit.ResourceMemory || usageCache.ResourceCPU > limit.ResourceCPU {
+		cpu, memory, err := getResourceUsage(config, clusters)
+
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if !isCacheFound {
+			usageCache, err = config.Repo.ProjectUsage().CreateProjectUsageCache(&models.ProjectUsageCache{
+				ProjectID:      proj.ID,
+				ResourceCPU:    cpu,
+				ResourceMemory: memory,
+			})
+		} else {
+			usageCache.ResourceCPU = cpu
+			usageCache.ResourceMemory = memory
+
+			usageCache, err = config.Repo.ProjectUsage().UpdateProjectUsageCache(usageCache)
+		}
+
+		// check the cache write error so usageCache is never nil below
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return &types.ProjectUsage{
+		ResourceCPU:    usageCache.ResourceCPU,
+		ResourceMemory: usageCache.ResourceMemory,
+		Clusters:       uint(len(clusters)),
+		Users:          uint(len(roles)),
+	}, limit, nil
+}
+
+// gets the total resource usage across all nodes in all clusters
+func getResourceUsage(config *config.Config, clusters []*models.Cluster) (uint, uint, error) {
+	// TODO; pass this in?
+	var totCPU, totMem uint = 0, 0
+	getter := authz.NewOutOfClusterAgentGetter(config)
+
+	for _, cluster := range clusters {
+		ooc := getter.GetOutOfClusterConfig(cluster)
+
+		agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
+
+		if err != nil {
+			return 0, 0, fmt.Errorf("failed to get agent: %s", err.Error())
+		}
+
+		totAlloc, err := nodes.GetAllocatableResources(agent.Clientset)
+
+		if err != nil {
+			return 0, 0, fmt.Errorf("failed to get alloc: %s", err.Error())
+		}
+
+		totCPU += totAlloc.CPU
+		totMem += totAlloc.Memory
+	}
+
+	return totCPU / 1000, totMem / (1024 * 1024), nil
+}