get_all_pods.go 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154
  1. package release
  2. import (
  3. "fmt"
  4. "net/http"
  5. "strings"
  6. "github.com/porter-dev/porter/api/server/authz"
  7. "github.com/porter-dev/porter/api/server/handlers"
  8. "github.com/porter-dev/porter/api/server/shared"
  9. "github.com/porter-dev/porter/api/server/shared/apierrors"
  10. "github.com/porter-dev/porter/api/server/shared/config"
  11. "github.com/porter-dev/porter/api/types"
  12. "github.com/porter-dev/porter/internal/helm/grapher"
  13. "github.com/porter-dev/porter/internal/kubernetes"
  14. "github.com/porter-dev/porter/internal/models"
  15. "github.com/stefanmcshane/helm/pkg/release"
  16. v1 "k8s.io/api/core/v1"
  17. )
// GetAllPodsHandler handles requests that list every pod associated with a
// release: pods owned by the release's controllers as well as pods created
// by jobs labeled with the release.
type GetAllPodsHandler struct {
	handlers.PorterHandlerReadWriter
	authz.KubernetesAgentGetter
}
  22. func NewGetAllPodsHandler(
  23. config *config.Config,
  24. writer shared.ResultWriter,
  25. ) *GetAllPodsHandler {
  26. return &GetAllPodsHandler{
  27. PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
  28. KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),
  29. }
  30. }
  31. func (c *GetAllPodsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  32. helmRelease, _ := r.Context().Value(types.ReleaseScope).(*release.Release)
  33. cluster, _ := r.Context().Value(types.ClusterScope).(*models.Cluster)
  34. agent, err := c.GetAgent(r, cluster, "")
  35. if err != nil {
  36. err = fmt.Errorf("error getting agent: %w", err)
  37. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  38. return
  39. }
  40. yamlArr := grapher.ImportMultiDocYAML([]byte(helmRelease.Manifest))
  41. controllers := grapher.ParseControllers(yamlArr)
  42. pods := make([]v1.Pod, 0)
  43. // get current status of each controller
  44. for _, controller := range controllers {
  45. controller.Namespace = helmRelease.Namespace
  46. _, selector, err := getController(controller, agent)
  47. if err != nil {
  48. err = fmt.Errorf("error getting controller %s: %w", controller.Name, err)
  49. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  50. return
  51. }
  52. selectors := make([]string, 0)
  53. if strings.ToLower(controller.Kind) == "cronjob" {
  54. // in the case of cronjobs, getting the pod is non-arbitrary. We only get the pod
  55. // declared by the manifest, which will have a certain revision attached. But the
  56. // label on the pod is the job name, not the cronjob name. So we first find the
  57. // list of jobs run by this cronjob, and then get the pods attached to that job.
  58. jobLabels := make([]kubernetes.Label, 0)
  59. for key, val := range selector.MatchLabels {
  60. jobLabels = append(jobLabels, kubernetes.Label{
  61. Key: key,
  62. Val: val,
  63. })
  64. }
  65. jobPods, err := getPodsForJobs(agent, helmRelease.Namespace, jobLabels)
  66. if err != nil {
  67. err = fmt.Errorf("error getting cronjob pods in namespace %s with labels %+v : %w", helmRelease.Namespace, jobLabels, err)
  68. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  69. return
  70. }
  71. pods = append(pods, jobPods...)
  72. continue
  73. } else if strings.ToLower(controller.Kind) == "job" {
  74. // in the case of jobs as the controller, we simply find the job matching the
  75. // pod name.
  76. selectors = append(selectors, "job-name="+controller.Name)
  77. } else {
  78. for key, val := range selector.MatchLabels {
  79. selectors = append(selectors, key+"="+val)
  80. }
  81. }
  82. podList, err := agent.GetPodsByLabel(strings.Join(selectors, ","), helmRelease.Namespace)
  83. if err != nil {
  84. err = fmt.Errorf("error getting pods in namespace %s with labels %+v : %w", helmRelease.Namespace, strings.Join(selectors, ","), err)
  85. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  86. return
  87. }
  88. pods = append(pods, podList.Items...)
  89. podList, err = agent.GetPodsByLabel(strings.Join(selectors, ","), "default")
  90. if err != nil {
  91. err = fmt.Errorf("error getting pods in namespace %s with labels %+v : %w", helmRelease.Namespace, strings.Join(selectors, ","), err)
  92. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  93. return
  94. }
  95. pods = append(pods, podList.Items...)
  96. }
  97. // we also check for jobs attached to this release
  98. labels := getJobLabels(helmRelease)
  99. labels = append(labels, kubernetes.Label{
  100. Key: "helm.sh/revision",
  101. Val: fmt.Sprintf("%d", helmRelease.Version),
  102. })
  103. jobPods, err := getPodsForJobs(agent, helmRelease.Namespace, labels)
  104. if err != nil {
  105. err = fmt.Errorf("error getting cronjob pods in namespace %s with labels %+v : %w", helmRelease.Namespace, labels, err)
  106. c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
  107. return
  108. }
  109. pods = append(pods, jobPods...)
  110. c.WriteResult(w, r, pods)
  111. }
  112. func getPodsForJobs(agent *kubernetes.Agent, namespace string, labels []kubernetes.Label) ([]v1.Pod, error) {
  113. pods := make([]v1.Pod, 0)
  114. jobs, err := agent.ListJobsByLabel(namespace, labels...)
  115. if err != nil {
  116. return nil, err
  117. }
  118. for _, job := range jobs {
  119. podList, err := agent.GetPodsByLabel("job-name="+job.Name, namespace)
  120. if err != nil {
  121. return nil, err
  122. }
  123. pods = append(pods, podList.Items...)
  124. }
  125. return pods, nil
  126. }