2
0

run.go 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020
  1. package commands
  2. import (
  3. "context"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "os"
  8. "strings"
  9. "time"
  10. "github.com/fatih/color"
  11. api "github.com/porter-dev/porter/api/client"
  12. "github.com/porter-dev/porter/api/types"
  13. "github.com/porter-dev/porter/cli/cmd/config"
  14. "github.com/porter-dev/porter/cli/cmd/utils"
  15. "github.com/spf13/cobra"
  16. batchv1 "k8s.io/api/batch/v1"
  17. v1 "k8s.io/api/core/v1"
  18. rbacv1 "k8s.io/api/rbac/v1"
  19. "k8s.io/apimachinery/pkg/api/resource"
  20. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  21. "k8s.io/apimachinery/pkg/fields"
  22. "k8s.io/apimachinery/pkg/watch"
  23. "k8s.io/kubectl/pkg/util/term"
  24. "k8s.io/apimachinery/pkg/runtime"
  25. "k8s.io/apimachinery/pkg/runtime/schema"
  26. "k8s.io/client-go/kubernetes"
  27. "k8s.io/client-go/rest"
  28. "k8s.io/client-go/tools/clientcmd"
  29. "k8s.io/client-go/tools/remotecommand"
  30. )
// Flag values shared by "porter run" and its subcommands; bound in
// registerCommand_Run below.
var (
	namespace      string // target namespace of the release (--namespace)
	verbose        bool   // print verbose output, including pod events (--verbose / -v)
	existingPod    bool   // exec into an existing pod instead of an ephemeral copy (--existing_pod / -e)
	nonInteractive bool   // skip interactive pod selection prompts (--non-interactive)
	containerName  string // container within the pod to run the command in (--container / -c)
	cpuMilli       int    // CPU override for the ephemeral pod, in millicores (--cpu)
	memoryMi       int    // memory override for the ephemeral pod, in Mi (--ram)
)
// registerCommand_Run builds the "porter run" command and its "cleanup"
// subcommand, wiring all flags to the package-level flag variables above.
func registerCommand_Run(cliConf config.CLIConfig) *cobra.Command {
	runCmd := &cobra.Command{
		Use: "run [release] -- COMMAND [args...]",
		// release name plus at least one command token after "--"
		Args:  cobra.MinimumNArgs(2),
		Short: "Runs a command inside a connected cluster container.",
		Run: func(cmd *cobra.Command, args []string) {
			err := checkLoginAndRunWithConfig(cmd, cliConf, args, run)
			if err != nil {
				os.Exit(1)
			}
		},
	}

	// cleanupCmd represents the "porter run cleanup" subcommand
	cleanupCmd := &cobra.Command{
		Use:   "cleanup",
		Args:  cobra.NoArgs,
		Short: "Delete any lingering ephemeral pods that were created with \"porter run\".",
		Run: func(cmd *cobra.Command, args []string) {
			err := checkLoginAndRunWithConfig(cmd, cliConf, args, cleanup)
			if err != nil {
				os.Exit(1)
			}
		},
	}

	// Flags are persistent so they also apply to the cleanup subcommand.
	runCmd.PersistentFlags().StringVar(
		&namespace,
		"namespace",
		"default",
		"namespace of release to connect to",
	)

	runCmd.PersistentFlags().BoolVarP(
		&existingPod,
		"existing_pod",
		"e",
		false,
		"whether to connect to an existing pod",
	)

	runCmd.PersistentFlags().BoolVarP(
		&verbose,
		"verbose",
		"v",
		false,
		"whether to print verbose output",
	)

	runCmd.PersistentFlags().BoolVar(
		&nonInteractive,
		"non-interactive",
		false,
		"whether to run in non-interactive mode",
	)

	runCmd.PersistentFlags().StringVarP(
		&containerName,
		"container",
		"c",
		"",
		"name of the container inside pod to run the command in",
	)

	runCmd.PersistentFlags().IntVarP(
		&cpuMilli,
		"cpu",
		"",
		0,
		"cpu allocation in millicores (1000 millicores = 1 vCPU)",
	)

	runCmd.PersistentFlags().IntVarP(
		&memoryMi,
		"ram",
		"",
		0,
		"ram allocation in Mi (1024 Mi = 1 GB)",
	)

	runCmd.AddCommand(cleanupCmd)

	return runCmd
}
// run implements "porter run [release] -- COMMAND [args...]". It resolves the
// release's running pods, selects a pod and container (interactively unless
// --non-interactive), and then either execs into the existing pod
// (--existing_pod) or runs the command in an ephemeral copy of the pod.
func run(ctx context.Context, user *types.GetAuthenticatedUserResponse, client api.Client, cliConf config.CLIConfig, featureFlags config.FeatureFlags, cmd *cobra.Command, args []string) error {
	execArgs := args[1:]

	color.New(color.FgGreen).Println("Running", strings.Join(execArgs, " "), "for release", args[0])

	if nonInteractive {
		color.New(color.FgBlue).Println("Using non-interactive mode. The first available pod will be used to run the command.")
	}

	if len(execArgs) > 0 {
		release, err := client.GetRelease(
			ctx, cliConf.Project, cliConf.Cluster, namespace, args[0],
		)
		if err != nil {
			return fmt.Errorf("error fetching release %s: %w", args[0], err)
		}

		// Heroku/Paketo buildpack images need the command wrapped in the CNB
		// launcher so the buildpack-provided environment is in effect.
		if release.BuildConfig != nil &&
			(strings.Contains(release.BuildConfig.Builder, "heroku") ||
				strings.Contains(release.BuildConfig.Builder, "paketo")) &&
			execArgs[0] != "/cnb/lifecycle/launcher" &&
			execArgs[0] != "launcher" {
			// this is a buildpacks release using a heroku builder, prepend the launcher
			execArgs = append([]string{"/cnb/lifecycle/launcher"}, execArgs...)
		}
	}

	podsSimple, err := getPods(ctx, client, cliConf, namespace, args[0])
	if err != nil {
		return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
	}

	// Pod selection: auto-pick when there is one pod or --non-interactive is
	// set; otherwise prompt.
	var selectedPod podSimple

	if len(podsSimple) == 0 {
		return fmt.Errorf("At least one pod must exist in this deployment.")
	} else if nonInteractive || len(podsSimple) == 1 {
		selectedPod = podsSimple[0]
	} else {
		podNames := make([]string, 0)

		for _, podSimple := range podsSimple {
			podNames = append(podNames, podSimple.Name)
		}

		selectedPodName, err := utils.PromptSelect("Select the pod:", podNames)
		if err != nil {
			return err
		}

		// find selected pod
		for _, podSimple := range podsSimple {
			if selectedPodName == podSimple.Name {
				selectedPod = podSimple
			}
		}
	}

	// Container selection: auto-pick a single container (validating --container
	// if given), otherwise honor --container or prompt. Non-interactive mode
	// requires --container when the pod has multiple containers.
	var selectedContainerName string

	if len(selectedPod.ContainerNames) == 0 {
		return fmt.Errorf("At least one container must exist in the selected pod.")
	} else if len(selectedPod.ContainerNames) == 1 {
		if containerName != "" && containerName != selectedPod.ContainerNames[0] {
			return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
		}

		selectedContainerName = selectedPod.ContainerNames[0]
	}

	if containerName != "" && selectedContainerName == "" {
		// check if provided container name exists in the pod
		for _, name := range selectedPod.ContainerNames {
			if name == containerName {
				selectedContainerName = name
				break
			}
		}

		if selectedContainerName == "" {
			return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
		}
	}

	if selectedContainerName == "" {
		if nonInteractive {
			return fmt.Errorf("container name must be specified using the --container flag when using non-interactive mode")
		}

		selectedContainer, err := utils.PromptSelect("Select the container:", selectedPod.ContainerNames)
		if err != nil {
			return err
		}

		selectedContainerName = selectedContainer
	}

	// NOTE: this local shadows the imported "config" package for the rest of
	// the function.
	config := &PorterRunSharedConfig{
		Client:    client,
		CLIConfig: cliConf,
	}

	err = config.setSharedConfig(ctx)
	if err != nil {
		return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
	}

	if existingPod {
		return executeRun(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
	}

	return executeRunEphemeral(ctx, config, namespace, selectedPod.Name, selectedContainerName, execArgs)
}
  206. func cleanup(ctx context.Context, _ *types.GetAuthenticatedUserResponse, client api.Client, cliConfig config.CLIConfig, _ config.FeatureFlags, _ *cobra.Command, _ []string) error {
  207. config := &PorterRunSharedConfig{
  208. Client: client,
  209. CLIConfig: cliConfig,
  210. }
  211. err := config.setSharedConfig(ctx)
  212. if err != nil {
  213. return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
  214. }
  215. proceed, err := utils.PromptSelect(
  216. fmt.Sprintf("You have chosen the '%s' namespace for cleanup. Do you want to proceed?", namespace),
  217. []string{"Yes", "No", "All namespaces"},
  218. )
  219. if err != nil {
  220. return err
  221. }
  222. if proceed == "No" {
  223. return nil
  224. }
  225. var podNames []string
  226. color.New(color.FgGreen).Println("Fetching ephemeral pods for cleanup")
  227. if proceed == "All namespaces" {
  228. namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
  229. if err != nil {
  230. return err
  231. }
  232. for _, namespace := range namespaces.Items {
  233. if pods, err := getEphemeralPods(ctx, namespace.Name, config.Clientset); err == nil {
  234. podNames = append(podNames, pods...)
  235. } else {
  236. return err
  237. }
  238. }
  239. } else {
  240. if pods, err := getEphemeralPods(ctx, namespace, config.Clientset); err == nil {
  241. podNames = append(podNames, pods...)
  242. } else {
  243. return err
  244. }
  245. }
  246. if len(podNames) == 0 {
  247. color.New(color.FgBlue).Println("No ephemeral pods to delete")
  248. return nil
  249. }
  250. selectedPods, err := utils.PromptMultiselect("Select ephemeral pods to delete", podNames)
  251. if err != nil {
  252. return err
  253. }
  254. for _, podName := range selectedPods {
  255. color.New(color.FgBlue).Printf("Deleting ephemeral pod: %s\n", podName)
  256. err = config.Clientset.CoreV1().Pods(namespace).Delete(
  257. ctx, podName, metav1.DeleteOptions{},
  258. )
  259. if err != nil {
  260. return err
  261. }
  262. }
  263. return nil
  264. }
  265. func getEphemeralPods(ctx context.Context, namespace string, clientset *kubernetes.Clientset) ([]string, error) {
  266. var podNames []string
  267. pods, err := clientset.CoreV1().Pods(namespace).List(
  268. ctx, metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
  269. )
  270. if err != nil {
  271. return nil, err
  272. }
  273. for _, pod := range pods.Items {
  274. podNames = append(podNames, pod.Name)
  275. }
  276. return podNames, nil
  277. }
// PorterRunSharedConfig bundles the Porter API client with the Kubernetes
// clients used by the run/cleanup subcommands. Populate the Kubernetes fields
// via setSharedConfig before use.
type PorterRunSharedConfig struct {
	Client     api.Client            // Porter API client used to fetch the kubeconfig
	RestConf   *rest.Config          // REST config built from the fetched kubeconfig
	Clientset  *kubernetes.Clientset // typed clientset for pod/namespace/RBAC operations
	RestClient *rest.RESTClient      // raw REST client used to build exec/attach requests
	CLIConfig  config.CLIConfig      // CLI config holding the project/cluster IDs
}
// setSharedConfig fetches a kubeconfig for the configured project/cluster from
// the Porter API and initializes RestConf, Clientset, and RestClient from it.
// It must be called before any of the Kubernetes fields are used, and may be
// called again to refresh credentials.
func (p *PorterRunSharedConfig) setSharedConfig(ctx context.Context) error {
	pID := p.CLIConfig.Project
	cID := p.CLIConfig.Cluster

	kubeResp, err := p.Client.GetKubeconfig(ctx, pID, cID, p.CLIConfig.Kubeconfig)
	if err != nil {
		return err
	}

	kubeBytes := kubeResp.Kubeconfig

	cmdConf, err := clientcmd.NewClientConfigFromBytes(kubeBytes)
	if err != nil {
		return err
	}

	restConf, err := cmdConf.ClientConfig()
	if err != nil {
		return err
	}

	// rest.RESTClientFor requires GroupVersion and NegotiatedSerializer to be
	// set; these are placeholder values since the raw REST client is only used
	// to build exec/attach request URLs.
	restConf.GroupVersion = &schema.GroupVersion{
		Group:   "api",
		Version: "v1",
	}

	restConf.NegotiatedSerializer = runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{})

	p.RestConf = restConf

	clientset, err := kubernetes.NewForConfig(restConf)
	if err != nil {
		return err
	}

	p.Clientset = clientset

	restClient, err := rest.RESTClientFor(restConf)
	if err != nil {
		return err
	}

	p.RestClient = restClient

	return nil
}
// podSimple is a minimal view of a running pod — its name and the names of its
// containers — used by the pod/container selection prompts.
type podSimple struct {
	Name           string
	ContainerNames []string
}
  323. func getPods(ctx context.Context, client api.Client, cliConf config.CLIConfig, namespace, releaseName string) ([]podSimple, error) {
  324. pID := cliConf.Project
  325. cID := cliConf.Cluster
  326. resp, err := client.GetK8sAllPods(ctx, pID, cID, namespace, releaseName)
  327. if err != nil {
  328. return nil, err
  329. }
  330. pods := *resp
  331. res := make([]podSimple, 0)
  332. for _, pod := range pods {
  333. if pod.Status.Phase == v1.PodRunning {
  334. containerNames := make([]string, 0)
  335. for _, container := range pod.Spec.Containers {
  336. containerNames = append(containerNames, container.Name)
  337. }
  338. res = append(res, podSimple{
  339. Name: pod.ObjectMeta.Name,
  340. ContainerNames: containerNames,
  341. })
  342. }
  343. }
  344. return res, nil
  345. }
  346. func executeRun(config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  347. req := config.RestClient.Post().
  348. Resource("pods").
  349. Name(name).
  350. Namespace(namespace).
  351. SubResource("exec")
  352. for _, arg := range args {
  353. req.Param("command", arg)
  354. }
  355. req.Param("stdin", "true")
  356. req.Param("stdout", "true")
  357. req.Param("tty", "true")
  358. req.Param("container", container)
  359. t := term.TTY{
  360. In: os.Stdin,
  361. Out: os.Stdout,
  362. Raw: true,
  363. }
  364. size := t.GetSize()
  365. sizeQueue := t.MonitorSize(size)
  366. return t.Safe(func() error {
  367. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  368. if err != nil {
  369. return err
  370. }
  371. return exec.Stream(remotecommand.StreamOptions{
  372. Stdin: os.Stdin,
  373. Stdout: os.Stdout,
  374. Stderr: os.Stderr,
  375. Tty: true,
  376. TerminalSizeQueue: sizeQueue,
  377. })
  378. })
  379. }
  380. func executeRunEphemeral(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  381. existing, err := getExistingPod(ctx, config, name, namespace)
  382. if err != nil {
  383. return err
  384. }
  385. newPod, err := createEphemeralPodFromExisting(ctx, config, existing, container, args)
  386. if err != nil {
  387. return err
  388. }
  389. podName := newPod.ObjectMeta.Name
  390. // delete the ephemeral pod no matter what
  391. defer deletePod(ctx, config, podName, namespace) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  392. color.New(color.FgYellow).Printf("Waiting for pod %s to be ready...", podName)
  393. if err = waitForPod(ctx, config, newPod); err != nil {
  394. color.New(color.FgRed).Println("failed")
  395. return handlePodAttachError(ctx, err, config, namespace, podName, container)
  396. }
  397. err = checkForPodDeletionCronJob(ctx, config)
  398. if err != nil {
  399. return err
  400. }
  401. // refresh pod info for latest status
  402. newPod, err = config.Clientset.CoreV1().
  403. Pods(newPod.Namespace).
  404. Get(ctx, newPod.Name, metav1.GetOptions{})
  405. // pod exited while we were waiting. maybe an error maybe not.
  406. // we dont know if the user wanted an interactive shell or not.
  407. // if it was an error the logs hopefully say so.
  408. if isPodExited(newPod) {
  409. color.New(color.FgGreen).Println("complete!")
  410. var writtenBytes int64
  411. writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
  412. if verbose || writtenBytes == 0 {
  413. color.New(color.FgYellow).Println("Could not get logs. Pod events:")
  414. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  415. }
  416. return nil
  417. }
  418. color.New(color.FgGreen).Println("ready!")
  419. color.New(color.FgYellow).Println("Attempting connection to the container. If you don't see a command prompt, try pressing enter.")
  420. req := config.RestClient.Post().
  421. Resource("pods").
  422. Name(podName).
  423. Namespace(namespace).
  424. SubResource("attach")
  425. req.Param("stdin", "true")
  426. req.Param("stdout", "true")
  427. req.Param("tty", "true")
  428. req.Param("container", container)
  429. t := term.TTY{
  430. In: os.Stdin,
  431. Out: os.Stdout,
  432. Raw: true,
  433. }
  434. size := t.GetSize()
  435. sizeQueue := t.MonitorSize(size)
  436. if err = t.Safe(func() error {
  437. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  438. if err != nil {
  439. return err
  440. }
  441. return exec.Stream(remotecommand.StreamOptions{
  442. Stdin: os.Stdin,
  443. Stdout: os.Stdout,
  444. Stderr: os.Stderr,
  445. Tty: true,
  446. TerminalSizeQueue: sizeQueue,
  447. })
  448. }); err != nil {
  449. // ugly way to catch no TTY errors, such as when running command "echo \"hello\""
  450. return handlePodAttachError(ctx, err, config, namespace, podName, container)
  451. }
  452. if verbose {
  453. color.New(color.FgYellow).Println("Pod events:")
  454. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  455. }
  456. return err
  457. }
// checkForPodDeletionCronJob ensures an hourly cron job exists in the
// "default" namespace that deletes lingering ephemeral pods cluster-wide. It
// first ensures the supporting RBAC objects exist (service account, cluster
// role, role binding), removes stray copies of the cron job found in other
// namespaces, and finally creates the cron job in "default" if missing.
func checkForPodDeletionCronJob(ctx context.Context, config *PorterRunSharedConfig) error {
	// try and create the cron job and all of the other required resources as necessary,
	// starting with the service account, then role and then a role binding
	err := checkForServiceAccount(ctx, config)
	if err != nil {
		return err
	}

	err = checkForClusterRole(ctx, config)
	if err != nil {
		return err
	}

	err = checkForRoleBinding(ctx, config)
	if err != nil {
		return err
	}

	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, namespace := range namespaces.Items {
		cronJobs, err := config.Clientset.BatchV1().CronJobs(namespace.Name).List(
			ctx, metav1.ListOptions{},
		)
		if err != nil {
			return err
		}

		if namespace.Name == "default" {
			// already exists in the canonical namespace — nothing to do
			for _, cronJob := range cronJobs.Items {
				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
					return nil
				}
			}
		} else {
			// delete copies that ended up in any other namespace
			for _, cronJob := range cronJobs.Items {
				if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
					err = config.Clientset.BatchV1().CronJobs(namespace.Name).Delete(
						ctx, cronJob.Name, metav1.DeleteOptions{},
					)
					if err != nil {
						return err
					}
				}
			}
		}
	}

	// create the cronjob
	cronJob := &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name: "porter-ephemeral-pod-deletion-cronjob",
		},
		Spec: batchv1.CronJobSpec{
			// top of every hour
			Schedule: "0 * * * *",
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							ServiceAccountName: "porter-ephemeral-pod-deletion-service-account",
							RestartPolicy:      v1.RestartPolicyNever,
							Containers: []v1.Container{
								{
									Name:            "ephemeral-pods-manager",
									Image:           "public.ecr.aws/o1j4x7p4/porter-ephemeral-pods-manager:latest",
									ImagePullPolicy: v1.PullAlways,
									Args:            []string{"delete"},
								},
							},
						},
					},
				},
			},
		},
	}

	_, err = config.Clientset.BatchV1().CronJobs("default").Create(
		ctx, cronJob, metav1.CreateOptions{},
	)
	if err != nil {
		return err
	}

	return nil
}
// checkForServiceAccount ensures the service account used by the
// ephemeral-pod deletion cron job exists in the "default" namespace, deleting
// stray copies found in any other namespace along the way.
func checkForServiceAccount(ctx context.Context, config *PorterRunSharedConfig) error {
	namespaces, err := config.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, namespace := range namespaces.Items {
		serviceAccounts, err := config.Clientset.CoreV1().ServiceAccounts(namespace.Name).List(
			ctx, metav1.ListOptions{},
		)
		if err != nil {
			return err
		}

		if namespace.Name == "default" {
			// already exists in the canonical namespace — nothing to do
			for _, svcAccount := range serviceAccounts.Items {
				if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
					return nil
				}
			}
		} else {
			// delete copies that ended up in any other namespace
			for _, svcAccount := range serviceAccounts.Items {
				if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
					err = config.Clientset.CoreV1().ServiceAccounts(namespace.Name).Delete(
						ctx, svcAccount.Name, metav1.DeleteOptions{},
					)
					if err != nil {
						return err
					}
				}
			}
		}
	}

	serviceAccount := &v1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name: "porter-ephemeral-pod-deletion-service-account",
		},
	}

	_, err = config.Clientset.CoreV1().ServiceAccounts("default").Create(
		ctx, serviceAccount, metav1.CreateOptions{},
	)
	if err != nil {
		return err
	}

	return nil
}
  582. func checkForClusterRole(ctx context.Context, config *PorterRunSharedConfig) error {
  583. roles, err := config.Clientset.RbacV1().ClusterRoles().List(
  584. ctx, metav1.ListOptions{},
  585. )
  586. if err != nil {
  587. return err
  588. }
  589. for _, role := range roles.Items {
  590. if role.Name == "porter-ephemeral-pod-deletion-cluster-role" {
  591. return nil
  592. }
  593. }
  594. role := &rbacv1.ClusterRole{
  595. ObjectMeta: metav1.ObjectMeta{
  596. Name: "porter-ephemeral-pod-deletion-cluster-role",
  597. },
  598. Rules: []rbacv1.PolicyRule{
  599. {
  600. APIGroups: []string{""},
  601. Resources: []string{"pods"},
  602. Verbs: []string{"list", "delete"},
  603. },
  604. {
  605. APIGroups: []string{""},
  606. Resources: []string{"namespaces"},
  607. Verbs: []string{"list"},
  608. },
  609. },
  610. }
  611. _, err = config.Clientset.RbacV1().ClusterRoles().Create(
  612. ctx, role, metav1.CreateOptions{},
  613. )
  614. if err != nil {
  615. return err
  616. }
  617. return nil
  618. }
  619. func checkForRoleBinding(ctx context.Context, config *PorterRunSharedConfig) error {
  620. bindings, err := config.Clientset.RbacV1().ClusterRoleBindings().List(
  621. ctx, metav1.ListOptions{},
  622. )
  623. if err != nil {
  624. return err
  625. }
  626. for _, binding := range bindings.Items {
  627. if binding.Name == "porter-ephemeral-pod-deletion-cluster-rolebinding" {
  628. return nil
  629. }
  630. }
  631. binding := &rbacv1.ClusterRoleBinding{
  632. ObjectMeta: metav1.ObjectMeta{
  633. Name: "porter-ephemeral-pod-deletion-cluster-rolebinding",
  634. },
  635. RoleRef: rbacv1.RoleRef{
  636. APIGroup: "rbac.authorization.k8s.io",
  637. Kind: "ClusterRole",
  638. Name: "porter-ephemeral-pod-deletion-cluster-role",
  639. },
  640. Subjects: []rbacv1.Subject{
  641. {
  642. APIGroup: "",
  643. Kind: "ServiceAccount",
  644. Name: "porter-ephemeral-pod-deletion-service-account",
  645. Namespace: "default",
  646. },
  647. },
  648. }
  649. _, err = config.Clientset.RbacV1().ClusterRoleBindings().Create(
  650. ctx, binding, metav1.CreateOptions{},
  651. )
  652. if err != nil {
  653. return err
  654. }
  655. return nil
  656. }
  657. func waitForPod(ctx context.Context, config *PorterRunSharedConfig, pod *v1.Pod) error {
  658. var (
  659. w watch.Interface
  660. err error
  661. ok bool
  662. )
  663. // immediately after creating a pod, the API may return a 404. heuristically 1
  664. // second seems to be plenty.
  665. watchRetries := 3
  666. for i := 0; i < watchRetries; i++ {
  667. selector := fields.OneTermEqualSelector("metadata.name", pod.Name).String()
  668. w, err = config.Clientset.CoreV1().
  669. Pods(pod.Namespace).
  670. Watch(ctx, metav1.ListOptions{FieldSelector: selector})
  671. if err == nil {
  672. break
  673. }
  674. time.Sleep(time.Second)
  675. }
  676. if err != nil {
  677. return err
  678. }
  679. defer w.Stop()
  680. for {
  681. select {
  682. case <-time.Tick(time.Second):
  683. // poll every second in case we already missed the ready event while
  684. // creating the listener.
  685. pod, err = config.Clientset.CoreV1().
  686. Pods(pod.Namespace).
  687. Get(ctx, pod.Name, metav1.GetOptions{})
  688. if isPodReady(pod) || isPodExited(pod) {
  689. return nil
  690. }
  691. case evt := <-w.ResultChan():
  692. pod, ok = evt.Object.(*v1.Pod)
  693. if !ok {
  694. return fmt.Errorf("unexpected object type: %T", evt.Object)
  695. }
  696. if isPodReady(pod) || isPodExited(pod) {
  697. return nil
  698. }
  699. case <-time.After(time.Second * 10):
  700. return errors.New("timed out waiting for pod")
  701. }
  702. }
  703. }
  704. func isPodReady(pod *v1.Pod) bool {
  705. ready := false
  706. conditions := pod.Status.Conditions
  707. for i := range conditions {
  708. if conditions[i].Type == v1.PodReady {
  709. ready = pod.Status.Conditions[i].Status == v1.ConditionTrue
  710. }
  711. }
  712. return ready
  713. }
  714. func isPodExited(pod *v1.Pod) bool {
  715. return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
  716. }
  717. func handlePodAttachError(ctx context.Context, err error, config *PorterRunSharedConfig, namespace, podName, container string) error {
  718. if verbose {
  719. color.New(color.FgYellow).Fprintf(os.Stderr, "Error: %s\n", err)
  720. }
  721. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not open a shell to this container. Container logs:")
  722. var writtenBytes int64
  723. writtenBytes, _ = pipePodLogsToStdout(ctx, config, namespace, podName, container, false)
  724. if verbose || writtenBytes == 0 {
  725. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not get logs. Pod events:")
  726. pipeEventsToStdout(ctx, config, namespace, podName, container, false) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  727. }
  728. return err
  729. }
  730. func pipePodLogsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
  731. podLogOpts := v1.PodLogOptions{
  732. Container: container,
  733. Follow: follow,
  734. }
  735. req := config.Clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
  736. podLogs, err := req.Stream(
  737. ctx,
  738. )
  739. if err != nil {
  740. return 0, err
  741. }
  742. defer podLogs.Close()
  743. return io.Copy(os.Stdout, podLogs)
  744. }
  745. func pipeEventsToStdout(ctx context.Context, config *PorterRunSharedConfig, namespace, name, _ string, _ bool) error {
  746. // update the config in case the operation has taken longer than token expiry time
  747. config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  748. // creates the clientset
  749. resp, err := config.Clientset.CoreV1().Events(namespace).List(
  750. ctx,
  751. metav1.ListOptions{
  752. FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", name, namespace),
  753. },
  754. )
  755. if err != nil {
  756. return err
  757. }
  758. for _, event := range resp.Items {
  759. color.New(color.FgRed).Println(event.Message)
  760. }
  761. return nil
  762. }
  763. func getExistingPod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
  764. return config.Clientset.CoreV1().Pods(namespace).Get(
  765. ctx,
  766. name,
  767. metav1.GetOptions{},
  768. )
  769. }
  770. func deletePod(ctx context.Context, config *PorterRunSharedConfig, name, namespace string) error {
  771. // update the config in case the operation has taken longer than token expiry time
  772. config.setSharedConfig(ctx) //nolint:errcheck,gosec // do not want to change logic of CLI. New linter error
  773. err := config.Clientset.CoreV1().Pods(namespace).Delete(
  774. ctx,
  775. name,
  776. metav1.DeleteOptions{},
  777. )
  778. if err != nil {
  779. color.New(color.FgRed).Fprintf(os.Stderr, "Could not delete ephemeral pod: %s\n", err.Error())
  780. return err
  781. }
  782. color.New(color.FgGreen).Println("Sucessfully deleted ephemeral pod")
  783. return nil
  784. }
  785. func createEphemeralPodFromExisting(
  786. ctx context.Context,
  787. config *PorterRunSharedConfig,
  788. existing *v1.Pod,
  789. container string,
  790. args []string,
  791. ) (*v1.Pod, error) {
  792. newPod := existing.DeepCopy()
  793. // only copy the pod spec, overwrite metadata
  794. newPod.ObjectMeta = metav1.ObjectMeta{
  795. Name: strings.ToLower(fmt.Sprintf("%s-copy-%s", existing.ObjectMeta.Name, utils.String(4))),
  796. Namespace: existing.ObjectMeta.Namespace,
  797. }
  798. newPod.Status = v1.PodStatus{}
  799. // set restart policy to never
  800. newPod.Spec.RestartPolicy = v1.RestartPolicyNever
  801. // change the command in the pod to the passed in pod command
  802. cmdRoot := args[0]
  803. cmdArgs := make([]string, 0)
  804. // annotate with the ephemeral pod tag
  805. newPod.Labels = make(map[string]string)
  806. newPod.Labels["porter/ephemeral-pod"] = "true"
  807. if len(args) > 1 {
  808. cmdArgs = args[1:]
  809. }
  810. for i := 0; i < len(newPod.Spec.Containers); i++ {
  811. if newPod.Spec.Containers[i].Name == container {
  812. newPod.Spec.Containers[i].Command = []string{cmdRoot}
  813. newPod.Spec.Containers[i].Args = cmdArgs
  814. newPod.Spec.Containers[i].TTY = true
  815. newPod.Spec.Containers[i].Stdin = true
  816. newPod.Spec.Containers[i].StdinOnce = true
  817. var newCpu int
  818. if cpuMilli != 0 {
  819. newCpu = cpuMilli
  820. } else if newPod.Spec.Containers[i].Resources.Requests.Cpu() != nil && newPod.Spec.Containers[i].Resources.Requests.Cpu().MilliValue() > 500 {
  821. newCpu = 500
  822. }
  823. if newCpu != 0 {
  824. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  825. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  826. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  827. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_CPU" {
  828. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dm", newCpu)
  829. break
  830. }
  831. }
  832. }
  833. var newMemory int
  834. if memoryMi != 0 {
  835. newMemory = memoryMi
  836. } else if newPod.Spec.Containers[i].Resources.Requests.Memory() != nil && newPod.Spec.Containers[i].Resources.Requests.Memory().Value() > 1000*1024*1024 {
  837. newMemory = 1000
  838. }
  839. if newMemory != 0 {
  840. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  841. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  842. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  843. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_RAM" {
  844. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dMi", newMemory)
  845. break
  846. }
  847. }
  848. }
  849. }
  850. // remove health checks and probes
  851. newPod.Spec.Containers[i].LivenessProbe = nil
  852. newPod.Spec.Containers[i].ReadinessProbe = nil
  853. newPod.Spec.Containers[i].StartupProbe = nil
  854. }
  855. newPod.Spec.NodeName = ""
  856. // create the pod and return it
  857. return config.Clientset.CoreV1().Pods(existing.ObjectMeta.Namespace).Create(
  858. ctx,
  859. newPod,
  860. metav1.CreateOptions{},
  861. )
  862. }