usage.go 4.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150
  1. package usage
  2. import (
  3. "errors"
  4. "time"
  5. "github.com/porter-dev/porter/api/types"
  6. "github.com/porter-dev/porter/internal/kubernetes"
  7. "github.com/porter-dev/porter/internal/kubernetes/nodes"
  8. "github.com/porter-dev/porter/internal/models"
  9. "github.com/porter-dev/porter/internal/repository"
  10. "golang.org/x/oauth2"
  11. "gorm.io/gorm"
  12. )
// GetUsageOpts holds the dependencies and inputs used by GetUsage to
// compute a project's current resource usage.
type GetUsageOpts struct {
	// Repo provides access to the persistence layer (clusters, roles,
	// usage cache).
	Repo repository.Repository

	// DOConf is the DigitalOcean OAuth config passed through to the
	// out-of-cluster Kubernetes connection.
	DOConf *oauth2.Config

	// Project is the project whose usage is being computed.
	Project *models.Project

	// WhitelistedUsers maps user IDs that are excluded from the counted
	// user total (and therefore from the user limit check).
	WhitelistedUsers map[uint]uint
}
  19. // GetUsage gets a project's current usage and usage limit
  20. func GetUsage(opts *GetUsageOpts) (
  21. current, limit *types.ProjectUsage,
  22. resourceUse *models.ProjectUsageCache,
  23. err error,
  24. ) {
  25. limit, err = GetLimit(opts.Repo, opts.Project)
  26. if err != nil {
  27. return nil, nil, nil, err
  28. }
  29. // query for the linked cluster counts
  30. clusters, err := opts.Repo.Cluster().ListClustersByProjectID(opts.Project.ID)
  31. if err != nil {
  32. return nil, nil, nil, err
  33. }
  34. // query for the linked user counts
  35. roles, err := opts.Repo.Project().ListProjectRoles(opts.Project.ID)
  36. if err != nil {
  37. return nil, nil, nil, err
  38. }
  39. countedRoles := make([]models.Role, 0)
  40. for _, role := range roles {
  41. if _, exists := opts.WhitelistedUsers[role.UserID]; !exists {
  42. countedRoles = append(countedRoles, role)
  43. }
  44. }
  45. usageCache, err := opts.Repo.ProjectUsage().ReadProjectUsageCache(opts.Project.ID)
  46. isCacheFound := true
  47. if isCacheFound = !errors.Is(err, gorm.ErrRecordNotFound); err != nil && isCacheFound {
  48. return nil, nil, nil, err
  49. }
  50. // if the usage cache is 1 hour old, was not found, or usage is over limit,
  51. // re-query for the usage
  52. if true || !isCacheFound || usageCache.Is1HrOld() || usageCache.ResourceMemory > limit.ResourceMemory || usageCache.ResourceCPU > limit.ResourceCPU {
  53. cpu, memory, err := getResourceUsage(opts, clusters)
  54. if err != nil {
  55. return nil, nil, nil, err
  56. }
  57. if !isCacheFound {
  58. usageCache = &models.ProjectUsageCache{
  59. ProjectID: opts.Project.ID,
  60. ResourceCPU: cpu,
  61. ResourceMemory: memory,
  62. }
  63. } else {
  64. usageCache.ResourceCPU = cpu
  65. usageCache.ResourceMemory = memory
  66. }
  67. isExceeded := isUsageExceeded(usageCache, limit, uint(len(countedRoles)), uint(len(clusters)))
  68. if !usageCache.Exceeded && isExceeded {
  69. // update the usage cache with a time exceeded
  70. currTime := time.Now()
  71. usageCache.ExceededSince = &currTime
  72. }
  73. usageCache.Exceeded = isExceeded
  74. if !isCacheFound {
  75. usageCache, err = opts.Repo.ProjectUsage().CreateProjectUsageCache(usageCache)
  76. } else {
  77. usageCache, err = opts.Repo.ProjectUsage().UpdateProjectUsageCache(usageCache)
  78. }
  79. }
  80. // we check whether it's currently exceeded based on the cache every time, since
  81. // it's an inexpensive operation and involves no further DB lookups
  82. usageCache.Exceeded = isUsageExceeded(usageCache, limit, uint(len(countedRoles)), uint(len(clusters)))
  83. return &types.ProjectUsage{
  84. ResourceCPU: usageCache.ResourceCPU,
  85. ResourceMemory: usageCache.ResourceMemory,
  86. Clusters: uint(len(clusters)),
  87. Users: uint(len(countedRoles)),
  88. }, limit, usageCache, nil
  89. }
  90. func isUsageExceeded(usageCache *models.ProjectUsageCache, limit *types.ProjectUsage, numUsers, numClusters uint) bool {
  91. isCPUExceeded := limit.ResourceCPU != 0 && usageCache.ResourceCPU > limit.ResourceCPU
  92. isMemExceeded := limit.ResourceMemory != 0 && usageCache.ResourceMemory > limit.ResourceMemory
  93. isUsersExceeded := limit.Users != 0 && numUsers > limit.Users
  94. isClustersExceeded := limit.Clusters != 0 && numClusters > limit.Clusters
  95. return isCPUExceeded || isMemExceeded || isUsersExceeded || isClustersExceeded
  96. }
  97. // gets the total resource usage across all nodes in all clusters
  98. func getResourceUsage(opts *GetUsageOpts, clusters []*models.Cluster) (uint, uint, error) {
  99. var totCPU, totMem uint = 0, 0
  100. for _, cluster := range clusters {
  101. ooc := &kubernetes.OutOfClusterConfig{
  102. Cluster: cluster,
  103. Repo: opts.Repo,
  104. DigitalOceanOAuth: opts.DOConf,
  105. }
  106. agent, err := kubernetes.GetAgentOutOfClusterConfig(ooc)
  107. if err != nil {
  108. continue
  109. }
  110. totAlloc, err := nodes.GetAllocatableResources(agent.Clientset)
  111. if err != nil {
  112. continue
  113. }
  114. totCPU += totAlloc.CPU
  115. totMem += totAlloc.Memory
  116. }
  117. return totCPU / 1000, totMem / (1000 * 1000), nil
  118. }