// run.go
package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/fatih/color"
	api "github.com/porter-dev/porter/api/client"
	"github.com/porter-dev/porter/api/types"
	"github.com/porter-dev/porter/cli/cmd/utils"
	"github.com/spf13/cobra"
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/util/term"
)
  30. var (
  31. namespace string
  32. verbose bool
  33. existingPod bool
  34. nonInteractive bool
  35. containerName string
  36. cpuMilli int
  37. memoryMi int
  38. )
  39. // runCmd represents the "porter run" base command when called
  40. // without any subcommands
  41. var runCmd = &cobra.Command{
  42. Use: "run [release] -- COMMAND [args...]",
  43. Args: cobra.MinimumNArgs(2),
  44. Short: "Runs a command inside a connected cluster container.",
  45. Run: func(cmd *cobra.Command, args []string) {
  46. err := checkLoginAndRun(args, run)
  47. if err != nil {
  48. os.Exit(1)
  49. }
  50. },
  51. }
  52. // cleanupCmd represents the "porter run cleanup" subcommand
  53. var cleanupCmd = &cobra.Command{
  54. Use: "cleanup",
  55. Args: cobra.NoArgs,
  56. Short: "Delete any lingering ephemeral pods that were created with \"porter run\".",
  57. Run: func(cmd *cobra.Command, args []string) {
  58. err := checkLoginAndRun(args, cleanup)
  59. if err != nil {
  60. os.Exit(1)
  61. }
  62. },
  63. }
  64. func init() {
  65. rootCmd.AddCommand(runCmd)
  66. runCmd.PersistentFlags().StringVar(
  67. &namespace,
  68. "namespace",
  69. "default",
  70. "namespace of release to connect to",
  71. )
  72. runCmd.PersistentFlags().BoolVarP(
  73. &existingPod,
  74. "existing_pod",
  75. "e",
  76. false,
  77. "whether to connect to an existing pod",
  78. )
  79. runCmd.PersistentFlags().BoolVarP(
  80. &verbose,
  81. "verbose",
  82. "v",
  83. false,
  84. "whether to print verbose output",
  85. )
  86. runCmd.PersistentFlags().BoolVar(
  87. &nonInteractive,
  88. "non-interactive",
  89. false,
  90. "whether to run in non-interactive mode",
  91. )
  92. runCmd.PersistentFlags().StringVarP(
  93. &containerName,
  94. "container",
  95. "c",
  96. "",
  97. "name of the container inside pod to run the command in",
  98. )
  99. runCmd.PersistentFlags().IntVarP(
  100. &cpuMilli,
  101. "cpu",
  102. "",
  103. 0,
  104. "cpu allocation in millicores (1000 millicores = 1 vCPU)",
  105. )
  106. runCmd.PersistentFlags().IntVarP(
  107. &memoryMi,
  108. "ram",
  109. "",
  110. 0,
  111. "ram allocation in Mi (1024 Mi = 1 GB)",
  112. )
  113. runCmd.AddCommand(cleanupCmd)
  114. }
  115. func run(_ *types.GetAuthenticatedUserResponse, client *api.Client, args []string) error {
  116. execArgs := args[1:]
  117. color.New(color.FgGreen).Println("Running", strings.Join(execArgs, " "), "for release", args[0])
  118. if nonInteractive {
  119. color.New(color.FgBlue).Println("Using non-interactive mode. The first available pod will be used to run the command.")
  120. }
  121. if len(execArgs) > 0 {
  122. release, err := client.GetRelease(
  123. context.Background(), cliConf.Project, cliConf.Cluster, namespace, args[0],
  124. )
  125. if err != nil {
  126. return fmt.Errorf("error fetching release %s: %w", args[0], err)
  127. }
  128. if release.BuildConfig != nil &&
  129. (strings.Contains(release.BuildConfig.Builder, "heroku") ||
  130. strings.Contains(release.BuildConfig.Builder, "paketo")) &&
  131. execArgs[0] != "/cnb/lifecycle/launcher" &&
  132. execArgs[0] != "launcher" {
  133. // this is a buildpacks release using a heroku builder, prepend the launcher
  134. execArgs = append([]string{"/cnb/lifecycle/launcher"}, execArgs...)
  135. }
  136. }
  137. podsSimple, err := getPods(client, namespace, args[0])
  138. if err != nil {
  139. return fmt.Errorf("Could not retrieve list of pods: %s", err.Error())
  140. }
  141. // if length of pods is 0, throw error
  142. var selectedPod podSimple
  143. if len(podsSimple) == 0 {
  144. return fmt.Errorf("At least one pod must exist in this deployment.")
  145. } else if nonInteractive || len(podsSimple) == 1 {
  146. selectedPod = podsSimple[0]
  147. } else {
  148. podNames := make([]string, 0)
  149. for _, podSimple := range podsSimple {
  150. podNames = append(podNames, podSimple.Name)
  151. }
  152. selectedPodName, err := utils.PromptSelect("Select the pod:", podNames)
  153. if err != nil {
  154. return err
  155. }
  156. // find selected pod
  157. for _, podSimple := range podsSimple {
  158. if selectedPodName == podSimple.Name {
  159. selectedPod = podSimple
  160. }
  161. }
  162. }
  163. var selectedContainerName string
  164. if len(selectedPod.ContainerNames) == 0 {
  165. return fmt.Errorf("At least one container must exist in the selected pod.")
  166. } else if len(selectedPod.ContainerNames) == 1 {
  167. if containerName != "" && containerName != selectedPod.ContainerNames[0] {
  168. return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
  169. }
  170. selectedContainerName = selectedPod.ContainerNames[0]
  171. }
  172. if containerName != "" && selectedContainerName == "" {
  173. // check if provided container name exists in the pod
  174. for _, name := range selectedPod.ContainerNames {
  175. if name == containerName {
  176. selectedContainerName = name
  177. break
  178. }
  179. }
  180. if selectedContainerName == "" {
  181. return fmt.Errorf("provided container %s does not exist in pod %s", containerName, selectedPod.Name)
  182. }
  183. }
  184. if selectedContainerName == "" {
  185. if nonInteractive {
  186. return fmt.Errorf("container name must be specified using the --container flag when using non-interactive mode")
  187. }
  188. selectedContainer, err := utils.PromptSelect("Select the container:", selectedPod.ContainerNames)
  189. if err != nil {
  190. return err
  191. }
  192. selectedContainerName = selectedContainer
  193. }
  194. config := &PorterRunSharedConfig{
  195. Client: client,
  196. }
  197. err = config.setSharedConfig()
  198. if err != nil {
  199. return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
  200. }
  201. if existingPod {
  202. return executeRun(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
  203. }
  204. return executeRunEphemeral(config, namespace, selectedPod.Name, selectedContainerName, execArgs)
  205. }
  206. func cleanup(_ *types.GetAuthenticatedUserResponse, client *api.Client, _ []string) error {
  207. config := &PorterRunSharedConfig{
  208. Client: client,
  209. }
  210. err := config.setSharedConfig()
  211. if err != nil {
  212. return fmt.Errorf("Could not retrieve kube credentials: %s", err.Error())
  213. }
  214. proceed, err := utils.PromptSelect(
  215. fmt.Sprintf("You have chosen the '%s' namespace for cleanup. Do you want to proceed?", namespace),
  216. []string{"Yes", "No", "All namespaces"},
  217. )
  218. if err != nil {
  219. return err
  220. }
  221. if proceed == "No" {
  222. return nil
  223. }
  224. var podNames []string
  225. color.New(color.FgGreen).Println("Fetching ephemeral pods for cleanup")
  226. if proceed == "All namespaces" {
  227. namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
  228. if err != nil {
  229. return err
  230. }
  231. for _, namespace := range namespaces.Items {
  232. if pods, err := getEphemeralPods(namespace.Name, config.Clientset); err == nil {
  233. podNames = append(podNames, pods...)
  234. } else {
  235. return err
  236. }
  237. }
  238. } else {
  239. if pods, err := getEphemeralPods(namespace, config.Clientset); err == nil {
  240. podNames = append(podNames, pods...)
  241. } else {
  242. return err
  243. }
  244. }
  245. if len(podNames) == 0 {
  246. color.New(color.FgBlue).Println("No ephemeral pods to delete")
  247. return nil
  248. }
  249. selectedPods, err := utils.PromptMultiselect("Select ephemeral pods to delete", podNames)
  250. if err != nil {
  251. return err
  252. }
  253. for _, podName := range selectedPods {
  254. color.New(color.FgBlue).Printf("Deleting ephemeral pod: %s\n", podName)
  255. err = config.Clientset.CoreV1().Pods(namespace).Delete(
  256. context.Background(), podName, metav1.DeleteOptions{},
  257. )
  258. if err != nil {
  259. return err
  260. }
  261. }
  262. return nil
  263. }
  264. func getEphemeralPods(namespace string, clientset *kubernetes.Clientset) ([]string, error) {
  265. var podNames []string
  266. pods, err := clientset.CoreV1().Pods(namespace).List(
  267. context.Background(), metav1.ListOptions{LabelSelector: "porter/ephemeral-pod"},
  268. )
  269. if err != nil {
  270. return nil, err
  271. }
  272. for _, pod := range pods.Items {
  273. podNames = append(podNames, pod.Name)
  274. }
  275. return podNames, nil
  276. }
  277. type PorterRunSharedConfig struct {
  278. Client *api.Client
  279. RestConf *rest.Config
  280. Clientset *kubernetes.Clientset
  281. RestClient *rest.RESTClient
  282. }
  283. func (p *PorterRunSharedConfig) setSharedConfig() error {
  284. pID := cliConf.Project
  285. cID := cliConf.Cluster
  286. kubeResp, err := p.Client.GetKubeconfig(context.Background(), pID, cID, cliConf.Kubeconfig)
  287. if err != nil {
  288. return err
  289. }
  290. kubeBytes := kubeResp.Kubeconfig
  291. cmdConf, err := clientcmd.NewClientConfigFromBytes(kubeBytes)
  292. if err != nil {
  293. return err
  294. }
  295. restConf, err := cmdConf.ClientConfig()
  296. if err != nil {
  297. return err
  298. }
  299. restConf.GroupVersion = &schema.GroupVersion{
  300. Group: "api",
  301. Version: "v1",
  302. }
  303. restConf.NegotiatedSerializer = runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{})
  304. p.RestConf = restConf
  305. clientset, err := kubernetes.NewForConfig(restConf)
  306. if err != nil {
  307. return err
  308. }
  309. p.Clientset = clientset
  310. restClient, err := rest.RESTClientFor(restConf)
  311. if err != nil {
  312. return err
  313. }
  314. p.RestClient = restClient
  315. return nil
  316. }
  317. type podSimple struct {
  318. Name string
  319. ContainerNames []string
  320. }
  321. func getPods(client *api.Client, namespace, releaseName string) ([]podSimple, error) {
  322. pID := cliConf.Project
  323. cID := cliConf.Cluster
  324. resp, err := client.GetK8sAllPods(context.TODO(), pID, cID, namespace, releaseName)
  325. if err != nil {
  326. return nil, err
  327. }
  328. pods := *resp
  329. res := make([]podSimple, 0)
  330. for _, pod := range pods {
  331. if pod.Status.Phase == v1.PodRunning {
  332. containerNames := make([]string, 0)
  333. for _, container := range pod.Spec.Containers {
  334. containerNames = append(containerNames, container.Name)
  335. }
  336. res = append(res, podSimple{
  337. Name: pod.ObjectMeta.Name,
  338. ContainerNames: containerNames,
  339. })
  340. }
  341. }
  342. return res, nil
  343. }
  344. func executeRun(config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  345. req := config.RestClient.Post().
  346. Resource("pods").
  347. Name(name).
  348. Namespace(namespace).
  349. SubResource("exec")
  350. for _, arg := range args {
  351. req.Param("command", arg)
  352. }
  353. req.Param("stdin", "true")
  354. req.Param("stdout", "true")
  355. req.Param("tty", "true")
  356. req.Param("container", container)
  357. t := term.TTY{
  358. In: os.Stdin,
  359. Out: os.Stdout,
  360. Raw: true,
  361. }
  362. size := t.GetSize()
  363. sizeQueue := t.MonitorSize(size)
  364. return t.Safe(func() error {
  365. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  366. if err != nil {
  367. return err
  368. }
  369. return exec.Stream(remotecommand.StreamOptions{
  370. Stdin: os.Stdin,
  371. Stdout: os.Stdout,
  372. Stderr: os.Stderr,
  373. Tty: true,
  374. TerminalSizeQueue: sizeQueue,
  375. })
  376. })
  377. }
  378. func executeRunEphemeral(config *PorterRunSharedConfig, namespace, name, container string, args []string) error {
  379. existing, err := getExistingPod(config, name, namespace)
  380. if err != nil {
  381. return err
  382. }
  383. newPod, err := createEphemeralPodFromExisting(config, existing, container, args)
  384. if err != nil {
  385. return err
  386. }
  387. podName := newPod.ObjectMeta.Name
  388. // delete the ephemeral pod no matter what
  389. defer deletePod(config, podName, namespace)
  390. color.New(color.FgYellow).Printf("Waiting for pod %s to be ready...", podName)
  391. if err = waitForPod(config, newPod); err != nil {
  392. color.New(color.FgRed).Println("failed")
  393. return handlePodAttachError(err, config, namespace, podName, container)
  394. }
  395. err = checkForPodDeletionCronJob(config)
  396. if err != nil {
  397. return err
  398. }
  399. // refresh pod info for latest status
  400. newPod, err = config.Clientset.CoreV1().
  401. Pods(newPod.Namespace).
  402. Get(context.Background(), newPod.Name, metav1.GetOptions{})
  403. // pod exited while we were waiting. maybe an error maybe not.
  404. // we dont know if the user wanted an interactive shell or not.
  405. // if it was an error the logs hopefully say so.
  406. if isPodExited(newPod) {
  407. color.New(color.FgGreen).Println("complete!")
  408. var writtenBytes int64
  409. writtenBytes, _ = pipePodLogsToStdout(config, namespace, podName, container, false)
  410. if verbose || writtenBytes == 0 {
  411. color.New(color.FgYellow).Println("Could not get logs. Pod events:")
  412. pipeEventsToStdout(config, namespace, podName, container, false)
  413. }
  414. return nil
  415. }
  416. color.New(color.FgGreen).Println("ready!")
  417. color.New(color.FgYellow).Println("Attempting connection to the container. If you don't see a command prompt, try pressing enter.")
  418. req := config.RestClient.Post().
  419. Resource("pods").
  420. Name(podName).
  421. Namespace(namespace).
  422. SubResource("attach")
  423. req.Param("stdin", "true")
  424. req.Param("stdout", "true")
  425. req.Param("tty", "true")
  426. req.Param("container", container)
  427. t := term.TTY{
  428. In: os.Stdin,
  429. Out: os.Stdout,
  430. Raw: true,
  431. }
  432. size := t.GetSize()
  433. sizeQueue := t.MonitorSize(size)
  434. if err = t.Safe(func() error {
  435. exec, err := remotecommand.NewSPDYExecutor(config.RestConf, "POST", req.URL())
  436. if err != nil {
  437. return err
  438. }
  439. return exec.Stream(remotecommand.StreamOptions{
  440. Stdin: os.Stdin,
  441. Stdout: os.Stdout,
  442. Stderr: os.Stderr,
  443. Tty: true,
  444. TerminalSizeQueue: sizeQueue,
  445. })
  446. }); err != nil {
  447. // ugly way to catch no TTY errors, such as when running command "echo \"hello\""
  448. return handlePodAttachError(err, config, namespace, podName, container)
  449. }
  450. if verbose {
  451. color.New(color.FgYellow).Println("Pod events:")
  452. pipeEventsToStdout(config, namespace, podName, container, false)
  453. }
  454. return err
  455. }
  456. func checkForPodDeletionCronJob(config *PorterRunSharedConfig) error {
  457. // try and create the cron job and all of the other required resources as necessary,
  458. // starting with the service account, then role and then a role binding
  459. err := checkForServiceAccount(config)
  460. if err != nil {
  461. return err
  462. }
  463. err = checkForClusterRole(config)
  464. if err != nil {
  465. return err
  466. }
  467. err = checkForRoleBinding(config)
  468. if err != nil {
  469. return err
  470. }
  471. namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
  472. if err != nil {
  473. return err
  474. }
  475. for _, namespace := range namespaces.Items {
  476. cronJobs, err := config.Clientset.BatchV1().CronJobs(namespace.Name).List(
  477. context.Background(), metav1.ListOptions{},
  478. )
  479. if err != nil {
  480. return err
  481. }
  482. if namespace.Name == "default" {
  483. for _, cronJob := range cronJobs.Items {
  484. if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
  485. return nil
  486. }
  487. }
  488. } else {
  489. for _, cronJob := range cronJobs.Items {
  490. if cronJob.Name == "porter-ephemeral-pod-deletion-cronjob" {
  491. err = config.Clientset.BatchV1().CronJobs(namespace.Name).Delete(
  492. context.Background(), cronJob.Name, metav1.DeleteOptions{},
  493. )
  494. if err != nil {
  495. return err
  496. }
  497. }
  498. }
  499. }
  500. }
  501. // create the cronjob
  502. cronJob := &batchv1.CronJob{
  503. ObjectMeta: metav1.ObjectMeta{
  504. Name: "porter-ephemeral-pod-deletion-cronjob",
  505. },
  506. Spec: batchv1.CronJobSpec{
  507. Schedule: "0 * * * *",
  508. JobTemplate: batchv1.JobTemplateSpec{
  509. Spec: batchv1.JobSpec{
  510. Template: v1.PodTemplateSpec{
  511. Spec: v1.PodSpec{
  512. ServiceAccountName: "porter-ephemeral-pod-deletion-service-account",
  513. RestartPolicy: v1.RestartPolicyNever,
  514. Containers: []v1.Container{
  515. {
  516. Name: "ephemeral-pods-manager",
  517. Image: "public.ecr.aws/o1j4x7p4/porter-ephemeral-pods-manager:latest",
  518. ImagePullPolicy: v1.PullAlways,
  519. Args: []string{"delete"},
  520. },
  521. },
  522. },
  523. },
  524. },
  525. },
  526. },
  527. }
  528. _, err = config.Clientset.BatchV1().CronJobs("default").Create(
  529. context.Background(), cronJob, metav1.CreateOptions{},
  530. )
  531. if err != nil {
  532. return err
  533. }
  534. return nil
  535. }
  536. func checkForServiceAccount(config *PorterRunSharedConfig) error {
  537. namespaces, err := config.Clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
  538. if err != nil {
  539. return err
  540. }
  541. for _, namespace := range namespaces.Items {
  542. serviceAccounts, err := config.Clientset.CoreV1().ServiceAccounts(namespace.Name).List(
  543. context.Background(), metav1.ListOptions{},
  544. )
  545. if err != nil {
  546. return err
  547. }
  548. if namespace.Name == "default" {
  549. for _, svcAccount := range serviceAccounts.Items {
  550. if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
  551. return nil
  552. }
  553. }
  554. } else {
  555. for _, svcAccount := range serviceAccounts.Items {
  556. if svcAccount.Name == "porter-ephemeral-pod-deletion-service-account" {
  557. err = config.Clientset.CoreV1().ServiceAccounts(namespace.Name).Delete(
  558. context.Background(), svcAccount.Name, metav1.DeleteOptions{},
  559. )
  560. if err != nil {
  561. return err
  562. }
  563. }
  564. }
  565. }
  566. }
  567. serviceAccount := &v1.ServiceAccount{
  568. ObjectMeta: metav1.ObjectMeta{
  569. Name: "porter-ephemeral-pod-deletion-service-account",
  570. },
  571. }
  572. _, err = config.Clientset.CoreV1().ServiceAccounts("default").Create(
  573. context.Background(), serviceAccount, metav1.CreateOptions{},
  574. )
  575. if err != nil {
  576. return err
  577. }
  578. return nil
  579. }
  580. func checkForClusterRole(config *PorterRunSharedConfig) error {
  581. roles, err := config.Clientset.RbacV1().ClusterRoles().List(
  582. context.Background(), metav1.ListOptions{},
  583. )
  584. if err != nil {
  585. return err
  586. }
  587. for _, role := range roles.Items {
  588. if role.Name == "porter-ephemeral-pod-deletion-cluster-role" {
  589. return nil
  590. }
  591. }
  592. role := &rbacv1.ClusterRole{
  593. ObjectMeta: metav1.ObjectMeta{
  594. Name: "porter-ephemeral-pod-deletion-cluster-role",
  595. },
  596. Rules: []rbacv1.PolicyRule{
  597. {
  598. APIGroups: []string{""},
  599. Resources: []string{"pods"},
  600. Verbs: []string{"list", "delete"},
  601. },
  602. {
  603. APIGroups: []string{""},
  604. Resources: []string{"namespaces"},
  605. Verbs: []string{"list"},
  606. },
  607. },
  608. }
  609. _, err = config.Clientset.RbacV1().ClusterRoles().Create(
  610. context.Background(), role, metav1.CreateOptions{},
  611. )
  612. if err != nil {
  613. return err
  614. }
  615. return nil
  616. }
  617. func checkForRoleBinding(config *PorterRunSharedConfig) error {
  618. bindings, err := config.Clientset.RbacV1().ClusterRoleBindings().List(
  619. context.Background(), metav1.ListOptions{},
  620. )
  621. if err != nil {
  622. return err
  623. }
  624. for _, binding := range bindings.Items {
  625. if binding.Name == "porter-ephemeral-pod-deletion-cluster-rolebinding" {
  626. return nil
  627. }
  628. }
  629. binding := &rbacv1.ClusterRoleBinding{
  630. ObjectMeta: metav1.ObjectMeta{
  631. Name: "porter-ephemeral-pod-deletion-cluster-rolebinding",
  632. },
  633. RoleRef: rbacv1.RoleRef{
  634. APIGroup: "rbac.authorization.k8s.io",
  635. Kind: "ClusterRole",
  636. Name: "porter-ephemeral-pod-deletion-cluster-role",
  637. },
  638. Subjects: []rbacv1.Subject{
  639. {
  640. APIGroup: "",
  641. Kind: "ServiceAccount",
  642. Name: "porter-ephemeral-pod-deletion-service-account",
  643. Namespace: "default",
  644. },
  645. },
  646. }
  647. _, err = config.Clientset.RbacV1().ClusterRoleBindings().Create(
  648. context.Background(), binding, metav1.CreateOptions{},
  649. )
  650. if err != nil {
  651. return err
  652. }
  653. return nil
  654. }
  655. func waitForPod(config *PorterRunSharedConfig, pod *v1.Pod) error {
  656. var (
  657. w watch.Interface
  658. err error
  659. ok bool
  660. )
  661. // immediately after creating a pod, the API may return a 404. heuristically 1
  662. // second seems to be plenty.
  663. watchRetries := 3
  664. for i := 0; i < watchRetries; i++ {
  665. selector := fields.OneTermEqualSelector("metadata.name", pod.Name).String()
  666. w, err = config.Clientset.CoreV1().
  667. Pods(pod.Namespace).
  668. Watch(context.Background(), metav1.ListOptions{FieldSelector: selector})
  669. if err == nil {
  670. break
  671. }
  672. time.Sleep(time.Second)
  673. }
  674. if err != nil {
  675. return err
  676. }
  677. defer w.Stop()
  678. for {
  679. select {
  680. case <-time.Tick(time.Second):
  681. // poll every second in case we already missed the ready event while
  682. // creating the listener.
  683. pod, err = config.Clientset.CoreV1().
  684. Pods(pod.Namespace).
  685. Get(context.Background(), pod.Name, metav1.GetOptions{})
  686. if isPodReady(pod) || isPodExited(pod) {
  687. return nil
  688. }
  689. case evt := <-w.ResultChan():
  690. pod, ok = evt.Object.(*v1.Pod)
  691. if !ok {
  692. return fmt.Errorf("unexpected object type: %T", evt.Object)
  693. }
  694. if isPodReady(pod) || isPodExited(pod) {
  695. return nil
  696. }
  697. case <-time.After(time.Second * 10):
  698. return errors.New("timed out waiting for pod")
  699. }
  700. }
  701. }
  702. func isPodReady(pod *v1.Pod) bool {
  703. ready := false
  704. conditions := pod.Status.Conditions
  705. for i := range conditions {
  706. if conditions[i].Type == v1.PodReady {
  707. ready = pod.Status.Conditions[i].Status == v1.ConditionTrue
  708. }
  709. }
  710. return ready
  711. }
  712. func isPodExited(pod *v1.Pod) bool {
  713. return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
  714. }
  715. func handlePodAttachError(err error, config *PorterRunSharedConfig, namespace, podName, container string) error {
  716. if verbose {
  717. color.New(color.FgYellow).Fprintf(os.Stderr, "Error: %s\n", err)
  718. }
  719. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not open a shell to this container. Container logs:")
  720. var writtenBytes int64
  721. writtenBytes, _ = pipePodLogsToStdout(config, namespace, podName, container, false)
  722. if verbose || writtenBytes == 0 {
  723. color.New(color.FgYellow).Fprintln(os.Stderr, "Could not get logs. Pod events:")
  724. pipeEventsToStdout(config, namespace, podName, container, false)
  725. }
  726. return err
  727. }
  728. func pipePodLogsToStdout(config *PorterRunSharedConfig, namespace, name, container string, follow bool) (int64, error) {
  729. podLogOpts := v1.PodLogOptions{
  730. Container: container,
  731. Follow: follow,
  732. }
  733. req := config.Clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts)
  734. podLogs, err := req.Stream(
  735. context.Background(),
  736. )
  737. if err != nil {
  738. return 0, err
  739. }
  740. defer podLogs.Close()
  741. return io.Copy(os.Stdout, podLogs)
  742. }
  743. func pipeEventsToStdout(config *PorterRunSharedConfig, namespace, name, container string, follow bool) error {
  744. // update the config in case the operation has taken longer than token expiry time
  745. config.setSharedConfig()
  746. // creates the clientset
  747. resp, err := config.Clientset.CoreV1().Events(namespace).List(
  748. context.TODO(),
  749. metav1.ListOptions{
  750. FieldSelector: fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", name, namespace),
  751. },
  752. )
  753. if err != nil {
  754. return err
  755. }
  756. for _, event := range resp.Items {
  757. color.New(color.FgRed).Println(event.Message)
  758. }
  759. return nil
  760. }
  761. func getExistingPod(config *PorterRunSharedConfig, name, namespace string) (*v1.Pod, error) {
  762. return config.Clientset.CoreV1().Pods(namespace).Get(
  763. context.Background(),
  764. name,
  765. metav1.GetOptions{},
  766. )
  767. }
  768. func deletePod(config *PorterRunSharedConfig, name, namespace string) error {
  769. // update the config in case the operation has taken longer than token expiry time
  770. config.setSharedConfig()
  771. err := config.Clientset.CoreV1().Pods(namespace).Delete(
  772. context.Background(),
  773. name,
  774. metav1.DeleteOptions{},
  775. )
  776. if err != nil {
  777. color.New(color.FgRed).Fprintf(os.Stderr, "Could not delete ephemeral pod: %s\n", err.Error())
  778. return err
  779. }
  780. color.New(color.FgGreen).Println("Sucessfully deleted ephemeral pod")
  781. return nil
  782. }
  783. func createEphemeralPodFromExisting(
  784. config *PorterRunSharedConfig,
  785. existing *v1.Pod,
  786. container string,
  787. args []string,
  788. ) (*v1.Pod, error) {
  789. newPod := existing.DeepCopy()
  790. // only copy the pod spec, overwrite metadata
  791. newPod.ObjectMeta = metav1.ObjectMeta{
  792. Name: strings.ToLower(fmt.Sprintf("%s-copy-%s", existing.ObjectMeta.Name, utils.String(4))),
  793. Namespace: existing.ObjectMeta.Namespace,
  794. }
  795. newPod.Status = v1.PodStatus{}
  796. // set restart policy to never
  797. newPod.Spec.RestartPolicy = v1.RestartPolicyNever
  798. // change the command in the pod to the passed in pod command
  799. cmdRoot := args[0]
  800. cmdArgs := make([]string, 0)
  801. // annotate with the ephemeral pod tag
  802. newPod.Labels = make(map[string]string)
  803. newPod.Labels["porter/ephemeral-pod"] = "true"
  804. if len(args) > 1 {
  805. cmdArgs = args[1:]
  806. }
  807. for i := 0; i < len(newPod.Spec.Containers); i++ {
  808. if newPod.Spec.Containers[i].Name == container {
  809. newPod.Spec.Containers[i].Command = []string{cmdRoot}
  810. newPod.Spec.Containers[i].Args = cmdArgs
  811. newPod.Spec.Containers[i].TTY = true
  812. newPod.Spec.Containers[i].Stdin = true
  813. newPod.Spec.Containers[i].StdinOnce = true
  814. var newCpu int
  815. if cpuMilli != 0 {
  816. newCpu = cpuMilli
  817. } else if newPod.Spec.Containers[i].Resources.Requests.Cpu() != nil && newPod.Spec.Containers[i].Resources.Requests.Cpu().MilliValue() > 500 {
  818. newCpu = 500
  819. }
  820. if newCpu != 0 {
  821. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  822. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceCPU] = resource.MustParse(fmt.Sprintf("%dm", newCpu))
  823. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  824. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_CPU" {
  825. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dm", newCpu)
  826. break
  827. }
  828. }
  829. }
  830. var newMemory int
  831. if memoryMi != 0 {
  832. newMemory = memoryMi
  833. } else if newPod.Spec.Containers[i].Resources.Requests.Memory() != nil && newPod.Spec.Containers[i].Resources.Requests.Memory().Value() > 1000*1024*1024 {
  834. newMemory = 1000
  835. }
  836. if newMemory != 0 {
  837. newPod.Spec.Containers[i].Resources.Limits[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  838. newPod.Spec.Containers[i].Resources.Requests[v1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", newMemory))
  839. for j := 0; j < len(newPod.Spec.Containers[i].Env); j++ {
  840. if newPod.Spec.Containers[i].Env[j].Name == "PORTER_RESOURCES_RAM" {
  841. newPod.Spec.Containers[i].Env[j].Value = fmt.Sprintf("%dMi", newMemory)
  842. break
  843. }
  844. }
  845. }
  846. }
  847. // remove health checks and probes
  848. newPod.Spec.Containers[i].LivenessProbe = nil
  849. newPod.Spec.Containers[i].ReadinessProbe = nil
  850. newPod.Spec.Containers[i].StartupProbe = nil
  851. }
  852. newPod.Spec.NodeName = ""
  853. // create the pod and return it
  854. return config.Clientset.CoreV1().Pods(existing.ObjectMeta.Namespace).Create(
  855. context.Background(),
  856. newPod,
  857. metav1.CreateOptions{},
  858. )
  859. }