run.go 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023
  1. package cmd
  2. import (
  3. "context"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "os"
  8. "strings"
  9. "time"
  10. "github.com/fatih/color"
  11. api "github.com/porter-dev/porter/api/client"
  12. "github.com/porter-dev/porter/api/types"
  13. "github.com/porter-dev/porter/cli/cmd/config"
  14. "github.com/porter-dev/porter/cli/cmd/utils"
  15. "github.com/spf13/cobra"
  16. batchv1 "k8s.io/api/batch/v1"
  17. v1 "k8s.io/api/core/v1"
  18. rbacv1 "k8s.io/api/rbac/v1"
  19. "k8s.io/apimachinery/pkg/api/resource"
  20. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  21. "k8s.io/apimachinery/pkg/fields"
  22. "k8s.io/apimachinery/pkg/watch"
  23. "k8s.io/kubectl/pkg/util/term"
  24. "k8s.io/apimachinery/pkg/runtime"
  25. "k8s.io/apimachinery/pkg/runtime/schema"
  26. "k8s.io/client-go/kubernetes"
  27. "k8s.io/client-go/rest"
  28. "k8s.io/client-go/tools/clientcmd"
  29. "k8s.io/client-go/tools/remotecommand"
  30. )
// Flag-backed configuration shared by "porter run" and its subcommands.
var (
	// namespace is the Kubernetes namespace of the release to connect to.
	namespace string
	// verbose enables extra diagnostic output (errors, pod events).
	verbose bool
	// existingPod attaches to an already-running pod instead of creating
	// an ephemeral copy of it.
	existingPod bool
	// nonInteractive suppresses prompts; the first available pod is used.
	nonInteractive bool
	// containerName selects the container inside the pod to run in.
	containerName string
	// cpuMilli overrides the ephemeral pod's CPU request/limit in
	// millicores; 0 means "derive from the existing pod".
	cpuMilli int
	// memoryMi overrides the ephemeral pod's memory request/limit in Mi;
	// 0 means "derive from the existing pod".
	memoryMi int
)
  40. // runCmd represents the "porter run" base command when called
  41. // without any subcommands
  42. var runCmd = &cobra.Command{
  43. Use: "run [release] -- COMMAND [args...]",
  44. Args: cobra.MinimumNArgs(2),
  45. Short: "Runs a command inside a connected cluster container.",
  46. Run: func(cmd *cobra.Command, args []string) {
  47. err := checkLoginAndRun(cmd.Context(), args, run)
  48. if err != nil {
  49. os.Exit(1)
  50. }
  51. },
  52. }
  53. // cleanupCmd represents the "porter run cleanup" subcommand
  54. var cleanupCmd = &cobra.Command{
  55. Use: "cleanup",
  56. Args: cobra.NoArgs,
  57. Short: "Delete any lingering ephemeral pods that were created with \"porter run\".",
  58. Run: func(cmd *cobra.Command, args []string) {
  59. err := checkLoginAndRun(cmd.Context(), args, cleanup)
  60. if err != nil {
  61. os.Exit(1)
  62. }
  63. },
  64. }
  65. func init() {
  66. rootCmd.AddCommand(runCmd)
  67. runCmd.PersistentFlags().StringVar(
  68. &namespace,
  69. "namespace",
  70. "default",
  71. "namespace of release to connect to",
  72. )
  73. runCmd.PersistentFlags().BoolVarP(
  74. &existingPod,
  75. "existing_pod",
  76. "e",
  77. false,
  78. "whether to connect to an existing pod",
  79. )
  80. runCmd.PersistentFlags().BoolVarP(
  81. &verbose,
  82. "verbose",
  83. "v",
  84. false,
  85. "whether to print verbose output",
  86. )
  87. runCmd.PersistentFlags().BoolVar(
  88. &nonInteractive,
  89. "non-interactive",
  90. false,
  91. "whether to run in non-interactive mode",
  92. )
  93. runCmd.PersistentFlags().StringVarP(
  94. &containerName,
  95. "container",
  96. "c",
  97. "",
  98. "name of the container inside pod to run the command in",
  99. )
  100. runCmd.PersistentFlags().IntVarP(
  101. &cpuMilli,
  102. "cpu",
  103. "",
  104. 0,
  105. "cpu allocation in millicores (1000 millicores = 1 vCPU)",
  106. )
  107. runCmd.PersistentFlags().IntVarP(
  108. &memoryMi,
  109. "ram",
  110. "",
  111. 0,
  112. "ram allocation in Mi (1024 Mi = 1 GB)",
  113. )
  114. runCmd.AddCommand(cleanupCmd)
  115. }
// run implements "porter run [release] -- COMMAND [args...]". It resolves the
// target pod and container (prompting unless --non-interactive), then either
// execs into an existing pod (--existing_pod) or spins up an ephemeral copy
// of the pod to run the command in.
func run(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, args []string) error {
	// args[0] is the release name; everything after "--" is the command
	execArgs := args[1:]

	color.New(color.FgGreen).Println("Running", strings.Join(execArgs, " "), "for release", args[0])

	if nonInteractive {
		color.New(color.FgBlue).Println("Using non-interactive mode. The first available pod will be used to run the command.")
	}

	if len(execArgs) > 0 {
		release, err := client.GetRelease(
			ctx, cliConf.Project, cliConf.Cluster, namespace, args[0],
		)
		if err != nil {
			return fmt.Errorf("error fetching release %s: %w", args[0], err)
		}

		if release.BuildConfig != nil &&
			(strings.Contains(release.BuildConfig.Builder, "heroku") ||
				strings.Contains(release.BuildConfig.Builder, "paketo")) &&
			execArgs[0] != "/cnb/lifecycle/launcher" &&
			execArgs[0] != "launcher" {
			// this is a buildpacks release using a heroku builder, prepend the launcher
			execArgs = append([]string{"/cnb/lifecycle/launcher"}, execArgs...)
		}
	}

	podsSimple, err := getPods(ctx, client, cliConf, namespace, args[0])
	if err != nil {
		return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
	}

	// if length of pods is 0, throw error
	var selectedPod podSimple

	if len(podsSimple) == 0 {
		return fmt.Errorf("At least one pod must exist in this deployment.")
	} else if nonInteractive || len(podsSimple) == 1 {
		// non-interactive mode (or a single candidate) takes the first pod
		selectedPod = podsSimple[0]
	} else {
		podNames := make([]string, 0)

		for _, podSimple := range podsSimple {
			podNames = append(podNames, podSimple.Name)
		}

		selectedPodName, err := utils.PromptSelect("Select the pod:", podNames)
		if err != nil {
			return err
		}

		// find selected pod
		for _, podSimple := range podsSimple {
			if selectedPodName == podSimple.Name {
				selectedPod = podSimple
			}
		}
	}

	var selectedContainerName string

	if len(selectedPod.ContainerNames) == 0 {
		return fmt.Errorf("At least one container must exist in the selected pod.")
	} else if len(selectedPod.ContainerNames) == 1 {
		// single-container pod: --container, if given, must match it
		if containerName != "" && containerName != selectedPod.ContainerNames[0] {
			return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
		}

		selectedContainerName = selectedPod.ContainerNames[0]
	}

	if containerName != "" && selectedContainerName == "" {
		// check if provided container name exists in the pod
		for _, name := range selectedPod.ContainerNames {
			if name == containerName {
				selectedContainerName = name
				break
			}
		}

		if selectedContainerName == "" {
			return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
		}
	}

	if selectedContainerName == "" {
		// multiple containers and no --container flag: prompting is the only
		// way to choose, which non-interactive mode cannot do
		if nonInteractive {
			return fmt.Errorf("container name must be specified using the --container flag when using non-interactive mode")
		}

		selectedContainer, err := utils.PromptSelect("Select the container:", selectedPod.ContainerNames)
		if err != nil {
			return err
		}

		selectedContainerName = selectedContainer
	}

	config := &PorterRunSharedConfig{
		Client:    client,
		CLIConfig: cliConf,
	}

	err = config.setSharedConfig(ctx)
	if err != nil {
		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
	}

	if existingPod {
		return executeRun(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
	}

	return executeRunEphemeral(ctx, config, namespace, selectedPod.Name, selectedContainerName, execArgs)
}
  208. func cleanup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, _ []string) error {
  209. config := &PorterRunSharedConfig{
  210. Client: client,
  211. CLIConfig: cliConfig,
  212. }
  213. err := config.setSharedConfig(ctx)
  214. if err != nil {
  215. return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
  216. }
  217. proceed, err := utils.PromptSelect(
  218. fmt.Sprintf("You have chosen the '%s' namespace for cleanup. Do you want to proceed?", namespace),
  219. []string{"Yes", "No", "All namespaces"},
  220. )
  221. if err != nil {
  222. return err
  223. }
  224. if proceed == "No" {
  225. return nil
  226. }
  227. var podNames []string
  228. color.New(color.FgGreen).Println("Fetching ephemeral pods for cleanup")
  229. if proceed == "All namespaces" {
  230. namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
  231. if err != nil {
  232. return err
  233. }
  234. for _, namespace := range namespaces.Items {
  235. if pods, err := getEphemeralPods(ctx, namespace.Name, config.Clientset); err == nil {
  236. podNames = append(podNames, pods...)
  237. } else {
  238. return err
  239. }
  240. }
  241. } else {
  242. if pods, err := getEphemeralPods(ctx, namespace, config.Clientset); err == nil {
  243. podNames = append(podNames, pods...)
  244. } else {
  245. return err
  246. }
  247. }
  248. if len(podNames) == 0 {
  249. color.New(color.FgBlue).Println("No ephemeral pods to delete")
  250. return nil
  251. }
  252. selectedPods, err := utils.PromptMultiselect("Select ephemeral pods to delete", podNames)
  253. if err != nil {
  254. return err
  255. }
  256. for _, podName := range selectedPods {
  257. color.New(color.FgBlue).Printf("Deleting ephemeral pod: %s\n", podName)
  258. err = config.Clientset.CoreV1().Pods(namespace).Delete(
  259. ctx, podName, metav1.DeleteOptions{},
  260. )
  261. if err != nil {
  262. return err
  263. }
  264. }
  265. return nil
  266. }
  267. func getEphemeralPods(ctx context.Context, namespace string, clientset *kubernetes.Clientset) ([]string, error) {
  268. var podNames []string
  269. pods, err := clientset.CoreV1().Pods(namespace).List(
  270. ctx, metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
  271. )
  272. if err != nil {
  273. return nil, err
  274. }
  275. for _, pod := range pods.Items {
  276. podNames = append(podNames, pod.Name)
  277. }
  278. return podNames, nil
  279. }
// PorterRunSharedConfig bundles the Porter API client with the Kubernetes
// clients derived from the project's kubeconfig. Populate the kube fields by
// calling setSharedConfig before use.
type PorterRunSharedConfig struct {
	// Client is the Porter API client used to fetch the kubeconfig.
	Client api.Client
	// RestConf is the raw client-go REST configuration.
	RestConf *rest.Config
	// Clientset is the typed Kubernetes client built from RestConf.
	Clientset *kubernetes.Clientset
	// RestClient issues raw REST requests (pod exec/attach subresources).
	RestClient *rest.RESTClient
	// CLIConfig holds the active project/cluster selection.
	CLIConfig config.CLIConfig
}
// setSharedConfig fetches a kubeconfig for the configured project/cluster from
// the Porter API and initializes the three kube clients used by this file:
// RestConf (raw config), Clientset (typed client), and RestClient (used for
// exec/attach subresource requests).
func (p *PorterRunSharedConfig) setSharedConfig(ctx context.Context) error {
	pID := p.CLIConfig.Project
	cID := p.CLIConfig.Cluster

	kubeResp, err := p.Client.GetKubeconfig(ctx, pID, cID, p.CLIConfig.Kubeconfig)
	if err != nil {
		return err
	}

	kubeBytes := kubeResp.Kubeconfig

	cmdConf, err := clientcmd.NewClientConfigFromBytes(kubeBytes)
	if err != nil {
		return err
	}

	restConf, err := cmdConf.ClientConfig()
	if err != nil {
		return err
	}

	// GroupVersion and NegotiatedSerializer are required by rest.RESTClientFor
	// below. NOTE(review): an empty SerializerInfo is unusual — presumably the
	// RestClient is only used to build exec/attach URLs, not to decode
	// responses; confirm before changing.
	restConf.GroupVersion = &schema.GroupVersion{
		Group:   "api",
		Version: "v1",
	}

	restConf.NegotiatedSerializer = runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{})

	p.RestConf = restConf

	clientset, err := kubernetes.NewForConfig(restConf)
	if err != nil {
		return err
	}

	p.Clientset = clientset

	restClient, err := rest.RESTClientFor(restConf)
	if err != nil {
		return err
	}

	p.RestClient = restClient

	return nil
}
// podSimple is a minimal view of a running pod: its name and the names of
// its containers.
type podSimple struct {
	Name           string
	ContainerNames []string
}
  325. func getPods(ctx context.Context, client api.Client, cliConf config.CLIConfig, namespace, releaseName string) ([]podSimple, error) {
  326. pID := cliConf.Project
  327. cID := cliConf.Cluster
  328. resp, err := client.GetK8sAllPods(ctx, pID, cID, namespace, releaseName)
  329. if err != nil {
  330. return nil, err
  331. }
  332. pods := *resp
  333. res := make([]podSimple, 0)
  334. for _, pod := range pods {
  335. if pod.Status.Phase == v1.PodRunning {
  336. containerNames := make([]string, 0)
  337. for _, container := range pod.Spec.Containers {
  338. containerNames = append(containerNames, container.Name)
  339. }
  340. res = append(res, podSimple{
  341. Name: pod.ObjectMeta.Name,
  342. ContainerNames: containerNames,
  343. })
  344. }
  345. }
  346. return res, nil
  347. }
  348. func executeRun(config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  349. req := config.RestClient.Post().
  350. Resource("pods").
  351. Name(name).
  352. Namespace(namespace).
  353. SubResource("exec")
  354. for _, arg := range args {
  355. req.Param("command", arg)
  356. }
  357. req.Param("stdin", "true")
  358. req.Param("stdout", "true")
  359. req.Param("tty", "true")
  360. req.Param("container", container)
  361. t := term.TTY{
  362. In: os.Stdin,
  363. Out: os.Stdout,
  364. Raw: true,
  365. }
  366. size := t.GetSize()
  367. sizeQueue := t.MonitorSize(size)
  368. return t.Safe(func() error {
  369. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  370. if err != nil {
  371. return err
  372. }
  373. return exec.Stream(remotecommand.StreamOptions{
  374. Stdin: os.Stdin,
  375. Stdout: os.Stdout,
  376. Stderr: os.Stderr,
  377. Tty: true,
  378. TerminalSizeQueue: sizeQueue,
  379. })
  380. })
  381. }
  382. func executeRunEphemeral(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  383. existing, err := getExistingPod(ctx, config, name, namespace)
  384. if err != nil {
  385. return err
  386. }
  387. newPod, err := createEphemeralPodFromExisting(ctx, config, existing, container, args)
  388. if err != nil {
  389. return err
  390. }
  391. podName := newPod.ObjectMeta.Name
  392. // delete the ephemeral pod no matter what
  393. defer deletePod(ctx, config, podName, namespace) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  394. color.New(color.FgYellow).Printf("Waiting for pod %s to be ready...", podName)
  395. if err = waitForPod(ctx, config, newPod); err != nil {
  396. color.New(color.FgRed).Println("failed")
  397. return handlePodAttachError(ctx, err, config, namespace, podName, container)
  398. }
  399. err = checkForPodDeletionCronJob(ctx, config)
  400. if err != nil {
  401. return err
  402. }
  403. // refresh pod info for latest status
  404. newPod, err = config.Clientset.CoreV1().
  405. Pods(newPod.Namespace).
  406. Get(ctx, newPod.Name, metav1.GetOptions{})
  407. // pod exited while we were waiting. maybe an error maybe not.
  408. // we dont know if the user wanted an interactive shell or not.
  409. // if it was an error the logs hopefully say so.
  410. if isPodExited(newPod) {
  411. color.New(color.FgGreen).Println("complete!")
  412. var writtenBytes int64
  413. writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
  414. if verbose || writtenBytes == 0 {
  415. color.New(color.FgYellow).Println("Could not get logs. Pod events:")
  416. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  417. }
  418. return nil
  419. }
  420. color.New(color.FgGreen).Println("ready!")
  421. color.New(color.FgYellow).Println("Attempting connection to the container. If you don't see a command prompt, try pressing enter.")
  422. req := config.RestClient.Post().
  423. Resource("pods").
  424. Name(podName).
  425. Namespace(namespace).
  426. SubResource("attach")
  427. req.Param("stdin", "true")
  428. req.Param("stdout", "true")
  429. req.Param("tty", "true")
  430. req.Param("container", container)
  431. t := term.TTY{
  432. In: os.Stdin,
  433. Out: os.Stdout,
  434. Raw: true,
  435. }
  436. size := t.GetSize()
  437. sizeQueue := t.MonitorSize(size)
  438. if err = t.Safe(func() error {
  439. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  440. if err != nil {
  441. return err
  442. }
  443. return exec.Stream(remotecommand.StreamOptions{
  444. Stdin: os.Stdin,
  445. Stdout: os.Stdout,
  446. Stderr: os.Stderr,
  447. Tty: true,
  448. TerminalSizeQueue: sizeQueue,
  449. })
  450. }); err != nil {
  451. // ugly way to catch no TTY errors, such as when running command "echo \"hello\""
  452. return handlePodAttachError(ctx, err, config, namespace, podName, container)
  453. }
  454. if verbose {
  455. color.New(color.FgYellow).Println("Pod events:")
  456. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  457. }
  458. return err
  459. }
// checkForPodDeletionCronJob ensures that the hourly
// "porter-ephemeral-pod-deletion-cronjob" exists in the "default" namespace,
// first creating the service account, cluster role, and role binding it
// depends on. Stray copies of the cronjob found in any other namespace are
// deleted. If the cronjob already exists in "default", nothing else is done.
func checkForPodDeletionCronJob(ctx context.Context, config *PorterRunSharedConfig) error {
	// try and create the cron job and all of the other required resources as necessary,
	// starting with the service account, then role and then a role binding
	err := checkForServiceAccount(ctx, config)
	if err != nil {
		return err
	}

	err = checkForClusterRole(ctx, config)
	if err != nil {
		return err
	}

	err = checkForRoleBinding(ctx, config)
	if err != nil {
		return err
	}

	// scan every namespace for existing copies of the cronjob
	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, namespace := range namespaces.Items {
		cronJobs, err := config.Clientset.BatchV1().CronJobs(namespace.Name).List(
			ctx, metav1.ListOptions{},
		)
		if err != nil {
			return err
		}

		if namespace.Name == "default" {
			// already present in the canonical namespace: nothing to do
			for _, cronJob := range cronJobs.Items {
				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
					return nil
				}
			}
		} else {
			// the cronjob belongs only in "default"; delete it anywhere else
			for _, cronJob := range cronJobs.Items {
				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
					err = config.Clientset.BatchV1().CronJobs(namespace.Name).Delete(
						ctx, cronJob.Name, metav1.DeleteOptions{},
					)
					if err != nil {
						return err
					}
				}
			}
		}
	}

	// create the cronjob: runs at the top of every hour and deletes lingering
	// ephemeral pods via the porter-ephemeral-pods-manager image
	cronJob := &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name: "porter-ephemeral-pod-deletion-cronjob",
		},
		Spec: batchv1.CronJobSpec{
			Schedule: "0 * * * *",
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							ServiceAccountName: "porter-ephemeral-pod-deletion-service-account",
							RestartPolicy:      v1.RestartPolicyNever,
							Containers: []v1.Container{
								{
									Name:            "ephemeral-pods-manager",
									Image:           "public.ecr.aws/o1j4x7p4/porter-ephemeral-pods-manager:latest",
									ImagePullPolicy: v1.PullAlways,
									Args:            []string{"delete"},
								},
							},
						},
					},
				},
			},
		},
	}

	_, err = config.Clientset.BatchV1().CronJobs("default").Create(
		ctx, cronJob, metav1.CreateOptions{},
	)
	if err != nil {
		return err
	}

	return nil
}
  540. func checkForServiceAccount(ctx context.Context, config *PorterRunSharedConfig) error {
  541. namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
  542. if err != nil {
  543. return err
  544. }
  545. for _, namespace := range namespaces.Items {
  546. serviceAccounts, err := config.Clientset.CoreV1().ServiceAccounts(namespace.Name).List(
  547. ctx, metav1.ListOptions{},
  548. )
  549. if err != nil {
  550. return err
  551. }
  552. if namespace.Name == "default" {
  553. for _, svcAccount := range serviceAccounts.Items {
  554. if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
  555. return nil
  556. }
  557. }
  558. } else {
  559. for _, svcAccount := range serviceAccounts.Items {
  560. if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
  561. err = config.Clientset.CoreV1().ServiceAccounts(namespace.Name).Delete(
  562. ctx, svcAccount.Name, metav1.DeleteOptions{},
  563. )
  564. if err != nil {
  565. return err
  566. }
  567. }
  568. }
  569. }
  570. }
  571. serviceAccount := &v1.ServiceAccount{
  572. ObjectMeta: metav1.ObjectMeta{
  573. Name: "porter-ephemeral-pod-deletion-service-account",
  574. },
  575. }
  576. _, err = config.Clientset.CoreV1().ServiceAccounts("default").Create(
  577. ctx, serviceAccount, metav1.CreateOptions{},
  578. )
  579. if err != nil {
  580. return err
  581. }
  582. return nil
  583. }
  584. func checkForClusterRole(ctx context.Context, config *PorterRunSharedConfig) error {
  585. roles, err := config.Clientset.RbacV1().ClusterRoles().List(
  586. ctx, metav1.ListOptions{},
  587. )
  588. if err != nil {
  589. return err
  590. }
  591. for _, role := range roles.Items {
  592. if role.Name == "porter-ephemeral-pod-deletion-cluster-role" {
  593. return nil
  594. }
  595. }
  596. role := &rbacv1.ClusterRole{
  597. ObjectMeta: metav1.ObjectMeta{
  598. Name: "porter-ephemeral-pod-deletion-cluster-role",
  599. },
  600. Rules: []rbacv1.PolicyRule{
  601. {
  602. APIGroups: []string{""},
  603. Resources: []string{"pods"},
  604. Verbs: []string{"list", "delete"},
  605. },
  606. {
  607. APIGroups: []string{""},
  608. Resources: []string{"namespaces"},
  609. Verbs: []string{"list"},
  610. },
  611. },
  612. }
  613. _, err = config.Clientset.RbacV1().ClusterRoles().Create(
  614. ctx, role, metav1.CreateOptions{},
  615. )
  616. if err != nil {
  617. return err
  618. }
  619. return nil
  620. }
  621. func checkForRoleBinding(ctx context.Context, config *PorterRunSharedConfig) error {
  622. bindings, err := config.Clientset.RbacV1().ClusterRoleBindings().List(
  623. ctx, metav1.ListOptions{},
  624. )
  625. if err != nil {
  626. return err
  627. }
  628. for _, binding := range bindings.Items {
  629. if binding.Name == "porter-ephemeral-pod-deletion-cluster-rolebinding" {
  630. return nil
  631. }
  632. }
  633. binding := &rbacv1.ClusterRoleBinding{
  634. ObjectMeta: metav1.ObjectMeta{
  635. Name: "porter-ephemeral-pod-deletion-cluster-rolebinding",
  636. },
  637. RoleRef: rbacv1.RoleRef{
  638. APIGroup: "rbac.authorization.k8s.io",
  639. Kind: "ClusterRole",
  640. Name: "porter-ephemeral-pod-deletion-cluster-role",
  641. },
  642. Subjects: []rbacv1.Subject{
  643. {
  644. APIGroup: "",
  645. Kind: "ServiceAccount",
  646. Name: "porter-ephemeral-pod-deletion-service-account",
  647. Namespace: "default",
  648. },
  649. },
  650. }
  651. _, err = config.Clientset.RbacV1().ClusterRoleBindings().Create(
  652. ctx, binding, metav1.CreateOptions{},
  653. )
  654. if err != nil {
  655. return err
  656. }
  657. return nil
  658. }
  659. func waitForPod(ctx context.Context, config *PorterRunSharedConfig, pod *v1.Pod) error {
  660. var (
  661. w watch.Interface
  662. err error
  663. ok bool
  664. )
  665. // immediately after creating a pod, the API may return a 404. heuristically 1
  666. // second seems to be plenty.
  667. watchRetries := 3
  668. for i := 0; i < watchRetries; i++ {
  669. selector := fields.OneTermEqualSelector("metadata.name", pod.Name).String()
  670. w, err = config.Clientset.CoreV1().
  671. Pods(pod.Namespace).
  672. Watch(ctx, metav1.ListOptions{FieldSelector: selector})
  673. if err == nil {
  674. break
  675. }
  676. time.Sleep(time.Second)
  677. }
  678. if err != nil {
  679. return err
  680. }
  681. defer w.Stop()
  682. for {
  683. select {
  684. case <-time.Tick(time.Second):
  685. // poll every second in case we already missed the ready event while
  686. // creating the listener.
  687. pod, err = config.Clientset.CoreV1().
  688. Pods(pod.Namespace).
  689. Get(ctx, pod.Name, metav1.GetOptions{})
  690. if isPodReady(pod) || isPodExited(pod) {
  691. return nil
  692. }
  693. case evt := <-w.ResultChan():
  694. pod, ok = evt.Object.(*v1.Pod)
  695. if !ok {
  696. return fmt.Errorf("unexpected object type: %T", evt.Object)
  697. }
  698. if isPodReady(pod) || isPodExited(pod) {
  699. return nil
  700. }
  701. case <-time.After(time.Second * 10):
  702. return errors.New("timed out waiting for pod")
  703. }
  704. }
  705. }
  706. func isPodReady(pod *v1.Pod) bool {
  707. ready := false
  708. conditions := pod.Status.Conditions
  709. for i := range conditions {
  710. if conditions[i].Type == v1.PodReady {
  711. ready = pod.Status.Conditions[i].Status == v1.ConditionTrue
  712. }
  713. }
  714. return ready
  715. }
  716. func isPodExited(pod *v1.Pod) bool {
  717. return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
  718. }
  719. func handlePodAttachError(ctx context.Context, err error, config *PorterRunSharedConfig, namespace, podName, container string) error {
  720. if verbose {
  721. color.New(color.FgYellow).Fprintf(os.Stderr, "Error: %s\n", err)
  722. }
  723. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not open a shell to this container. Container logs:")
  724. var writtenBytes int64
  725. writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
  726. if verbose || writtenBytes == 0 {
  727. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not get logs. Pod events:")
  728. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  729. }
  730. return err
  731. }
  732. func pipePodLogsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
  733. podLogOpts := v1.PodLogOptions{
  734. Container: container,
  735. Follow: follow,
  736. }
  737. req := config.Clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
  738. podLogs, err := req.Stream(
  739. ctx,
  740. )
  741. if err != nil {
  742. return 0, err
  743. }
  744. defer podLogs.Close()
  745. return io.Copy(os.Stdout, podLogs)
  746. }
  747. func pipeEventsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, _ string, _ bool) error {
  748. // update the config in case the operation has taken longer than token expiry time
  749. config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  750. // creates the clientset
  751. resp, err := config.Clientset.CoreV1().Events(namespace).List(
  752. ctx,
  753. metav1.ListOptions{
  754. FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", name, namespace),
  755. },
  756. )
  757. if err != nil {
  758. return err
  759. }
  760. for _, event := range resp.Items {
  761. color.New(color.FgRed).Println(event.Message)
  762. }
  763. return nil
  764. }
  765. func getExistingPod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
  766. return config.Clientset.CoreV1().Pods(namespace).Get(
  767. ctx,
  768. name,
  769. metav1.GetOptions{},
  770. )
  771. }
  772. func deletePod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) error {
  773. // update the config in case the operation has taken longer than token expiry time
  774. config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  775. err := config.Clientset.CoreV1().Pods(namespace).Delete(
  776. ctx,
  777. name,
  778. metav1.DeleteOptions{},
  779. )
  780. if err != nil {
  781. color.New(color.FgRed).Fprintf(os.Stderr, "Could not delete ephemeral pod: %s\n", err.Error())
  782. return err
  783. }
  784. color.New(color.FgGreen).Println("Sucessfully deleted ephemeral pod")
  785. return nil
  786. }
  787. func createEphemeralPodFromExisting(
  788. ctx context.Context,
  789. config *PorterRunSharedConfig,
  790. existing *v1.Pod,
  791. container string,
  792. args []string,
  793. ) (*v1.Pod, error) {
  794. newPod := existing.DeepCopy()
  795. // only copy the pod spec, overwrite metadata
  796. newPod.ObjectMeta = metav1.ObjectMeta{
  797. Name: strings.ToLower(fmt.Sprintf("%s-copy-%s", existing.ObjectMeta.Name, utils.String(4))),
  798. Namespace: existing.ObjectMeta.Namespace,
  799. }
  800. newPod.Status = v1.PodStatus{}
  801. // set restart policy to never
  802. newPod.Spec.RestartPolicy = v1.RestartPolicyNever
  803. // change the command in the pod to the passed in pod command
  804. cmdRoot := args[0]
  805. cmdArgs := make([]string, 0)
  806. // annotate with the ephemeral pod tag
  807. newPod.Labels = make(map[string]string)
  808. newPod.Labels["porter/ephemeral-pod"] = "true"
  809. if len(args) > 1 {
  810. cmdArgs = args[1:]
  811. }
  812. for i := 0; i < len(newPod.Spec.Containers); i++ {
  813. if newPod.Spec.Containers[i].Name == container {
  814. newPod.Spec.Containers[i].Command = []string{cmdRoot}
  815. newPod.Spec.Containers[i].Args = cmdArgs
  816. newPod.Spec.Containers[i].TTY = true
  817. newPod.Spec.Containers[i].Stdin = true
  818. newPod.Spec.Containers[i].StdinOnce = true
  819. var newCpu int
  820. if cpuMilli != 0 {
  821. newCpu = cpuMilli
  822. } else if newPod.Spec.Containers[i].Resources.Requests.Cpu() != nil && newPod.Spec.Containers[i].Resources.Requests.Cpu().MilliValue() > 500 {
  823. newCpu = 500
  824. }
  825. if newCpu != 0 {
  826. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  827. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  828. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  829. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_CPU" {
  830. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dm", newCpu)
  831. break
  832. }
  833. }
  834. }
  835. var newMemory int
  836. if memoryMi != 0 {
  837. newMemory = memoryMi
  838. } else if newPod.Spec.Containers[i].Resources.Requests.Memory() != nil && newPod.Spec.Containers[i].Resources.Requests.Memory().Value() > 1000*1024*1024 {
  839. newMemory = 1000
  840. }
  841. if newMemory != 0 {
  842. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  843. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  844. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  845. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_RAM" {
  846. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dMi", newMemory)
  847. break
  848. }
  849. }
  850. }
  851. }
  852. // remove health checks and probes
  853. newPod.Spec.Containers[i].LivenessProbe = nil
  854. newPod.Spec.Containers[i].ReadinessProbe = nil
  855. newPod.Spec.Containers[i].StartupProbe = nil
  856. }
  857. newPod.Spec.NodeName = ""
  858. // create the pod and return it
  859. return config.Clientset.CoreV1().Pods(existing.ObjectMeta.Namespace).Create(
  860. ctx,
  861. newPod,
  862. metav1.CreateOptions{},
  863. )
  864. }