// Package usage computes a project's current resource usage and its usage limits.
  1. package usage
import (
	"context"
	"errors"
	"time"

	"github.com/porter-dev/api-contracts/generated/go/porter/v1/porterv1connect"
	"github.com/porter-dev/porter/api/types"
	"github.com/porter-dev/porter/internal/kubernetes"
	"github.com/porter-dev/porter/internal/kubernetes/nodes"
	"github.com/porter-dev/porter/internal/models"
	"github.com/porter-dev/porter/internal/repository"
	"golang.org/x/oauth2"
	"gorm.io/gorm"
)
  15. type GetUsageOpts struct {
  16. Repo repository.Repository
  17. DOConf *oauth2.Config
  18. Project *models.Project
  19. WhitelistedUsers map[uint]uint
  20. ClusterControlPlaneServiceClient porterv1connect.ClusterControlPlaneServiceClient
  21. }
  22. // GetUsage gets a project's current usage and usage limit
  23. func GetUsage(opts *GetUsageOpts) (
  24. current, limit *types.ProjectUsage,
  25. resourceUse *models.ProjectUsageCache,
  26. err error,
  27. ) {
  28. limit, err = GetLimit(opts.Repo, opts.Project)
  29. if err != nil {
  30. return nil, nil, nil, err
  31. }
  32. usageCache, err := opts.Repo.ProjectUsage().ReadProjectUsageCache(opts.Project.ID)
  33. isCacheFound := true
  34. if isCacheFound = !errors.Is(err, gorm.ErrRecordNotFound); err != nil && isCacheFound {
  35. return nil, nil, nil, err
  36. }
  37. // query for the linked cluster counts
  38. clusters, err := opts.Repo.Cluster().ListClustersByProjectID(opts.Project.ID)
  39. if err != nil {
  40. return nil, nil, nil, err
  41. }
  42. // query for the linked user counts
  43. roles, err := opts.Repo.Project().ListProjectRoles(opts.Project.ID)
  44. if err != nil {
  45. return nil, nil, nil, err
  46. }
  47. countedRoles := make([]models.Role, 0)
  48. for _, role := range roles {
  49. if _, exists := opts.WhitelistedUsers[role.UserID]; !exists {
  50. countedRoles = append(countedRoles, role)
  51. }
  52. }
  53. if !isCacheFound {
  54. usageCache = &models.ProjectUsageCache{
  55. ProjectID: opts.Project.ID,
  56. }
  57. }
  58. oldUsageCache := *usageCache
  59. usageCache.Clusters = uint(len(clusters))
  60. usageCache.Users = uint(len(countedRoles))
  61. // if the usage cache is 1 hour old, was not found, usage is currently over limit, or the clusters/users
  62. // counts have changed, re-query for the usage
  63. if !isCacheFound || usageCache.Is1HrOld() || isUsageExceeded(usageCache, limit) || isUsageChanged(&oldUsageCache, usageCache) {
  64. cpu, memory, err := getResourceUsage(opts, clusters)
  65. if err != nil {
  66. return nil, nil, nil, err
  67. }
  68. usageCache.ResourceCPU = cpu
  69. usageCache.ResourceMemory = memory
  70. }
  71. isExceeded := isUsageExceeded(usageCache, limit)
  72. if !usageCache.Exceeded && isExceeded {
  73. // update the usage cache with a time exceeded
  74. currTime := time.Now()
  75. usageCache.ExceededSince = &currTime
  76. }
  77. usageCache.Exceeded = isExceeded
  78. if !isCacheFound {
  79. usageCache, err = opts.Repo.ProjectUsage().CreateProjectUsageCache(usageCache)
  80. } else if isUsageChanged(&oldUsageCache, usageCache) {
  81. usageCache, err = opts.Repo.ProjectUsage().UpdateProjectUsageCache(usageCache)
  82. }
  83. if err != nil {
  84. return nil, nil, nil, err
  85. }
  86. return &types.ProjectUsage{
  87. ResourceCPU: usageCache.ResourceCPU,
  88. ResourceMemory: usageCache.ResourceMemory,
  89. Clusters: usageCache.Clusters,
  90. Users: usageCache.Users,
  91. }, limit, usageCache, nil
  92. }
  93. func isUsageExceeded(usageCache *models.ProjectUsageCache, limit *types.ProjectUsage) bool {
  94. isCPUExceeded := limit.ResourceCPU != 0 && usageCache.ResourceCPU > limit.ResourceCPU
  95. isMemExceeded := limit.ResourceMemory != 0 && usageCache.ResourceMemory > limit.ResourceMemory
  96. isUsersExceeded := limit.Users != 0 && usageCache.Users > limit.Users
  97. isClustersExceeded := limit.Clusters != 0 && usageCache.Clusters > limit.Clusters
  98. return isCPUExceeded || isMemExceeded || isUsersExceeded || isClustersExceeded
  99. }
  100. func isUsageChanged(oldUsageCache, currUsageCache *models.ProjectUsageCache) bool {
  101. return oldUsageCache.Exceeded != currUsageCache.Exceeded ||
  102. oldUsageCache.Clusters != currUsageCache.Clusters ||
  103. oldUsageCache.Users != currUsageCache.Users ||
  104. oldUsageCache.ResourceCPU != currUsageCache.ResourceCPU ||
  105. oldUsageCache.ResourceMemory != currUsageCache.ResourceMemory
  106. }
  107. // gets the total resource usage across all nodes in all clusters
  108. func getResourceUsage(opts *GetUsageOpts, clusters []*models.Cluster) (uint, uint, error) {
  109. ctx := context.Background()
  110. var totCPU, totMem uint = 0, 0
  111. for _, cluster := range clusters {
  112. ooc := &kubernetes.OutOfClusterConfig{
  113. Cluster: cluster,
  114. Repo: opts.Repo,
  115. DigitalOceanOAuth: opts.DOConf,
  116. AllowInClusterConnections: false,
  117. CAPIManagementClusterClient: opts.ClusterControlPlaneServiceClient,
  118. }
  119. agent, err := kubernetes.GetAgentOutOfClusterConfig(ctx, ooc)
  120. if err != nil {
  121. continue
  122. }
  123. totAlloc, err := nodes.GetAllocatableResources(agent.Clientset)
  124. if err != nil {
  125. continue
  126. }
  127. totCPU += totAlloc.CPU
  128. totMem += totAlloc.Memory
  129. }
  130. return totCPU / 1000, totMem / (1000 * 1000), nil
  131. }