| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163 |
- package usage
- import (
- "context"
- "errors"
- "time"
- "github.com/porter-dev/api-contracts/generated/go/porter/v1/porterv1connect"
- "github.com/porter-dev/porter/api/types"
- "github.com/porter-dev/porter/internal/kubernetes"
- "github.com/porter-dev/porter/internal/kubernetes/nodes"
- "github.com/porter-dev/porter/internal/models"
- "github.com/porter-dev/porter/internal/repository"
- "golang.org/x/oauth2"
- "gorm.io/gorm"
- )
// GetUsageOpts bundles the dependencies and inputs needed by GetUsage.
type GetUsageOpts struct {
	// Repo provides access to the persistence layer (project usage cache,
	// clusters, and project roles).
	Repo repository.Repository
	// DOConf is the DigitalOcean OAuth config passed through to the
	// out-of-cluster kubernetes connection.
	DOConf *oauth2.Config
	// Project is the project whose usage is being computed.
	Project *models.Project
	// WhitelistedUsers holds user IDs excluded from the counted user total;
	// only the keys are consulted, the values are ignored.
	WhitelistedUsers map[uint]uint
	// ClusterControlPlaneServiceClient is used for CAPI management cluster
	// connections when building the out-of-cluster config.
	ClusterControlPlaneServiceClient porterv1connect.ClusterControlPlaneServiceClient
}
- // GetUsage gets a project's current usage and usage limit
- func GetUsage(opts *GetUsageOpts) (
- current, limit *types.ProjectUsage,
- resourceUse *models.ProjectUsageCache,
- err error,
- ) {
- limit, err = GetLimit(opts.Repo, opts.Project)
- if err != nil {
- return nil, nil, nil, err
- }
- usageCache, err := opts.Repo.ProjectUsage().ReadProjectUsageCache(opts.Project.ID)
- isCacheFound := true
- if isCacheFound = !errors.Is(err, gorm.ErrRecordNotFound); err != nil && isCacheFound {
- return nil, nil, nil, err
- }
- // query for the linked cluster counts
- clusters, err := opts.Repo.Cluster().ListClustersByProjectID(opts.Project.ID)
- if err != nil {
- return nil, nil, nil, err
- }
- // query for the linked user counts
- roles, err := opts.Repo.Project().ListProjectRoles(opts.Project.ID)
- if err != nil {
- return nil, nil, nil, err
- }
- countedRoles := make([]models.Role, 0)
- for _, role := range roles {
- if _, exists := opts.WhitelistedUsers[role.UserID]; !exists {
- countedRoles = append(countedRoles, role)
- }
- }
- if !isCacheFound {
- usageCache = &models.ProjectUsageCache{
- ProjectID: opts.Project.ID,
- }
- }
- oldUsageCache := *usageCache
- usageCache.Clusters = uint(len(clusters))
- usageCache.Users = uint(len(countedRoles))
- // if the usage cache is 1 hour old, was not found, usage is currently over limit, or the clusters/users
- // counts have changed, re-query for the usage
- if !isCacheFound || usageCache.Is1HrOld() || isUsageExceeded(usageCache, limit) || isUsageChanged(&oldUsageCache, usageCache) {
- cpu, memory, err := getResourceUsage(opts, clusters)
- if err != nil {
- return nil, nil, nil, err
- }
- usageCache.ResourceCPU = cpu
- usageCache.ResourceMemory = memory
- }
- isExceeded := isUsageExceeded(usageCache, limit)
- if !usageCache.Exceeded && isExceeded {
- // update the usage cache with a time exceeded
- currTime := time.Now()
- usageCache.ExceededSince = &currTime
- }
- usageCache.Exceeded = isExceeded
- if !isCacheFound {
- usageCache, err = opts.Repo.ProjectUsage().CreateProjectUsageCache(usageCache)
- } else if isUsageChanged(&oldUsageCache, usageCache) {
- usageCache, err = opts.Repo.ProjectUsage().UpdateProjectUsageCache(usageCache)
- }
- if err != nil {
- return nil, nil, nil, err
- }
- return &types.ProjectUsage{
- ResourceCPU: usageCache.ResourceCPU,
- ResourceMemory: usageCache.ResourceMemory,
- Clusters: usageCache.Clusters,
- Users: usageCache.Users,
- }, limit, usageCache, nil
- }
- func isUsageExceeded(usageCache *models.ProjectUsageCache, limit *types.ProjectUsage) bool {
- isCPUExceeded := limit.ResourceCPU != 0 && usageCache.ResourceCPU > limit.ResourceCPU
- isMemExceeded := limit.ResourceMemory != 0 && usageCache.ResourceMemory > limit.ResourceMemory
- isUsersExceeded := limit.Users != 0 && usageCache.Users > limit.Users
- isClustersExceeded := limit.Clusters != 0 && usageCache.Clusters > limit.Clusters
- return isCPUExceeded || isMemExceeded || isUsersExceeded || isClustersExceeded
- }
- func isUsageChanged(oldUsageCache, currUsageCache *models.ProjectUsageCache) bool {
- return oldUsageCache.Exceeded != currUsageCache.Exceeded ||
- oldUsageCache.Clusters != currUsageCache.Clusters ||
- oldUsageCache.Users != currUsageCache.Users ||
- oldUsageCache.ResourceCPU != currUsageCache.ResourceCPU ||
- oldUsageCache.ResourceMemory != currUsageCache.ResourceMemory
- }
- // gets the total resource usage across all nodes in all clusters
- func getResourceUsage(opts *GetUsageOpts, clusters []*models.Cluster) (uint, uint, error) {
- ctx := context.Background()
- var totCPU, totMem uint = 0, 0
- for _, cluster := range clusters {
- ooc := &kubernetes.OutOfClusterConfig{
- Cluster: cluster,
- Repo: opts.Repo,
- DigitalOceanOAuth: opts.DOConf,
- AllowInClusterConnections: false,
- CAPIManagementClusterClient: opts.ClusterControlPlaneServiceClient,
- }
- agent, err := kubernetes.GetAgentOutOfClusterConfig(ctx, ooc)
- if err != nil {
- continue
- }
- totAlloc, err := nodes.GetAllocatableResources(agent.Clientset)
- if err != nil {
- continue
- }
- totCPU += totAlloc.CPU
- totMem += totAlloc.Memory
- }
- return totCPU / 1000, totMem / (1000 * 1000), nil
- }
|