  1. package datastore
  2. import (
  3. "context"
  4. "errors"
  5. "net/http"
  6. "strings"
  7. "connectrpc.com/connect"
  8. porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
  9. "github.com/porter-dev/porter/api/server/authz"
  10. "github.com/porter-dev/porter/api/server/handlers"
  11. "github.com/porter-dev/porter/api/server/handlers/release"
  12. "github.com/porter-dev/porter/api/server/shared"
  13. "github.com/porter-dev/porter/api/server/shared/apierrors"
  14. "github.com/porter-dev/porter/api/server/shared/config"
  15. "github.com/porter-dev/porter/api/types"
  16. "github.com/porter-dev/porter/internal/datastore"
  17. "github.com/porter-dev/porter/internal/helm"
  18. "github.com/porter-dev/porter/internal/kubernetes"
  19. "github.com/porter-dev/porter/internal/models"
  20. "github.com/porter-dev/porter/internal/telemetry"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. )
// UpdateDatastoreHandler is a struct for updating datastores.
// Currently, this is expected to be used once (on create) and then not again, however the 'update' terminology was proactively used
// so we can reuse this handler when we support updates in the future.
type UpdateDatastoreHandler struct {
	// PorterHandlerReadWriter supplies request decoding/validation, config access, and result writing.
	handlers.PorterHandlerReadWriter
	// KubernetesAgentGetter resolves k8s and helm agents for the target cluster.
	authz.KubernetesAgentGetter
}
  30. // NewUpdateDatastoreHandler constructs a datastore UpdateDatastoreHandler
  31. func NewUpdateDatastoreHandler(
  32. config *config.Config,
  33. decoderValidator shared.RequestDecoderValidator,
  34. writer shared.ResultWriter,
  35. ) *UpdateDatastoreHandler {
  36. return &UpdateDatastoreHandler{
  37. PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
  38. KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),
  39. }
  40. }
// UpdateDatastoreRequest is the expected format of the request body
type UpdateDatastoreRequest struct {
	// Name is the datastore name; it is also used as the helm release name on install.
	Name string `json:"name"`
	// Type is the datastore type, e.g. "RDS" or "ELASTICACHE".
	Type string `json:"type"`
	// Engine is the database engine, e.g. "POSTGRES", "AURORA-POSTGRES", or "REDIS".
	Engine string `json:"engine"`
	// Values are the helm values used when installing the datastore chart.
	Values map[string]interface{} `json:"values"`
}
  48. // ServeHTTP updates a datastore using the decoded values
  49. func (h *UpdateDatastoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  50. ctx, span := telemetry.NewSpan(r.Context(), "serve-update-datastore")
  51. defer span.End()
  52. project, _ := ctx.Value(types.ProjectScope).(*models.Project)
  53. cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
  54. request := &UpdateDatastoreRequest{}
  55. if ok := h.DecodeAndValidate(w, r, request); !ok {
  56. err := telemetry.Error(ctx, span, nil, "error decoding update datastore request")
  57. h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
  58. return
  59. }
  60. telemetry.WithAttributes(span,
  61. telemetry.AttributeKV{Key: "name", Value: request.Name},
  62. telemetry.AttributeKV{Key: "type", Value: request.Type},
  63. telemetry.AttributeKV{Key: "engine", Value: request.Engine},
  64. )
  65. // TODO: replace this with ccp call
  66. err := h.InstallDatastore(ctx, InstallDatastoreInput{
  67. Name: request.Name,
  68. Type: request.Type,
  69. Engine: request.Engine,
  70. Values: request.Values,
  71. Request: r,
  72. })
  73. if err != nil {
  74. err := telemetry.Error(ctx, span, err, "error installing datastore")
  75. h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
  76. return
  77. }
  78. record, err := datastore.CreateOrGetRecord(ctx, datastore.CreateOrGetRecordInput{
  79. ProjectID: project.ID,
  80. ClusterID: cluster.ID,
  81. Name: request.Name,
  82. Type: request.Type,
  83. Engine: request.Engine,
  84. DatastoreRepository: h.Repo().Datastore(),
  85. ClusterRepository: h.Repo().Cluster(),
  86. })
  87. if err != nil {
  88. err := telemetry.Error(ctx, span, err, "error retrieving datastore record")
  89. h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
  90. return
  91. }
  92. updateReq := connect.NewRequest(&porterv1.UpdateDatastoreRequest{
  93. ProjectId: int64(project.ID),
  94. DatastoreId: record.ID.String(),
  95. })
  96. _, err = h.Config().ClusterControlPlaneClient.UpdateDatastore(ctx, updateReq)
  97. if err != nil {
  98. err := telemetry.Error(ctx, span, err, "error calling ccp update datastore")
  99. h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
  100. return
  101. }
  102. // TODO: create an API-level representation of the db model rather than returning the model directly
  103. h.WriteResult(w, r, record)
  104. }
// InstallDatastoreInput is the input type for InstallDatastore
type InstallDatastoreInput struct {
	// Name is the datastore name, used as the helm release name.
	Name string
	// Type is the datastore type, e.g. "RDS" or "ELASTICACHE".
	Type string
	// Engine is the database engine, e.g. "POSTGRES" or "REDIS".
	Engine string
	// Values are the helm values for the chart install.
	Values map[string]interface{}
	// Request is the originating http request, used to resolve cluster agents.
	Request *http.Request
}
  113. // InstallDatastore installs a datastore by helm installing a template with the provided values
  114. func (h *UpdateDatastoreHandler) InstallDatastore(ctx context.Context, inp InstallDatastoreInput) error {
  115. ctx, span := telemetry.NewSpan(ctx, "datastore-install")
  116. defer span.End()
  117. proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
  118. cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
  119. telemetry.WithAttributes(span,
  120. telemetry.AttributeKV{Key: "name", Value: inp.Name},
  121. telemetry.AttributeKV{Key: "type", Value: inp.Type},
  122. telemetry.AttributeKV{Key: "engine", Value: inp.Engine},
  123. )
  124. templateName, err := templateNameFromDatastoreTypeAndEngine(inp.Type, inp.Engine)
  125. if err != nil {
  126. return telemetry.Error(ctx, span, err, "error getting template name from datastore type and engine")
  127. }
  128. telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "template-name", Value: templateName})
  129. helmAgent, err := h.GetHelmAgent(ctx, inp.Request, cluster, release.Namespace_ACKSystem)
  130. if err != nil {
  131. return telemetry.Error(ctx, span, err, "error creating helm agent")
  132. }
  133. chart, err := release.LoadChart(ctx, h.Config(), &release.LoadAddonChartOpts{
  134. ProjectID: proj.ID,
  135. RepoURL: h.Config().Metadata.DefaultAddonHelmRepoURL,
  136. TemplateName: templateName,
  137. })
  138. if err != nil {
  139. return telemetry.Error(ctx, span, nil, "error loading chart")
  140. }
  141. registries, err := h.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
  142. if err != nil {
  143. return telemetry.Error(ctx, span, err, "error retrieving project registry")
  144. }
  145. vpcConfig, err := h.getVPCConfig(ctx, templateName, proj, cluster)
  146. if err != nil {
  147. return telemetry.Error(ctx, span, err, "error retrieving vpc config")
  148. }
  149. if err := h.performAddonPreinstall(ctx, inp.Request, templateName, cluster); err != nil {
  150. return telemetry.Error(ctx, span, err, "error performing addon preinstall")
  151. }
  152. values := inp.Values
  153. values["vpcConfig"] = vpcConfig
  154. conf := &helm.InstallChartConfig{
  155. Chart: chart,
  156. Name: inp.Name,
  157. Namespace: release.Namespace_ACKSystem,
  158. Values: values,
  159. Cluster: cluster,
  160. Repo: h.Repo(),
  161. Registries: registries,
  162. }
  163. _, err = helmAgent.InstallChart(ctx, conf, h.Config().DOConf, h.Config().ServerConf.DisablePullSecretsInjection)
  164. if err != nil {
  165. return telemetry.Error(ctx, span, err, "error installing chart")
  166. }
  167. return nil
  168. }
  169. func (h *UpdateDatastoreHandler) getVPCConfig(ctx context.Context, templateName string, project *models.Project, cluster *models.Cluster) (map[string]any, error) {
  170. ctx, span := telemetry.NewSpan(ctx, "datastore-get-vpc-config")
  171. defer span.End()
  172. telemetry.WithAttributes(span,
  173. telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
  174. telemetry.AttributeKV{Key: "template-name", Value: templateName},
  175. )
  176. vpcConfig := map[string]any{}
  177. if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
  178. return vpcConfig, nil
  179. }
  180. awsTemplates := map[string]string{
  181. "elasticache-redis": "elasticache",
  182. "rds-postgresql": "rds",
  183. "rds-postgresql-aurora": "rds",
  184. }
  185. serviceType, ok := awsTemplates[templateName]
  186. if !ok {
  187. return vpcConfig, nil
  188. }
  189. req := connect.NewRequest(&porterv1.SharedNetworkSettingsRequest{
  190. ProjectId: int64(project.ID),
  191. ClusterId: int64(cluster.ID),
  192. ServiceType: serviceType,
  193. })
  194. resp, err := h.Config().ClusterControlPlaneClient.SharedNetworkSettings(ctx, req)
  195. if err != nil {
  196. return vpcConfig, telemetry.Error(ctx, span, err, "error fetching cluster network settings from ccp")
  197. }
  198. vpcConfig["cidrBlock"] = resp.Msg.CidrRange
  199. vpcConfig["subnetIDs"] = resp.Msg.SubnetIds
  200. switch resp.Msg.CloudProvider {
  201. case *porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS.Enum():
  202. vpcConfig["awsRegion"] = resp.Msg.Region
  203. vpcConfig["vpcID"] = resp.Msg.GetEksCloudProviderNetwork().Id
  204. telemetry.WithAttributes(span,
  205. telemetry.AttributeKV{Key: "aws-region", Value: resp.Msg.Region},
  206. telemetry.AttributeKV{Key: "vpc-id", Value: resp.Msg.GetEksCloudProviderNetwork().Id},
  207. )
  208. }
  209. telemetry.WithAttributes(span,
  210. telemetry.AttributeKV{Key: "cidr-block", Value: resp.Msg.CidrRange},
  211. telemetry.AttributeKV{Key: "subnet-ids", Value: strings.Join(resp.Msg.SubnetIds, ",")},
  212. )
  213. return vpcConfig, nil
  214. }
  215. func (h *UpdateDatastoreHandler) scaleAckChartDeployment(ctx context.Context, chart string, agent *kubernetes.Agent) error {
  216. ctx, span := telemetry.NewSpan(ctx, "scale-ack-chart")
  217. defer span.End()
  218. telemetry.WithAttributes(span,
  219. telemetry.AttributeKV{Key: "chart-name", Value: chart},
  220. )
  221. scale, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).GetScale(ctx, chart, metav1.GetOptions{})
  222. if err != nil {
  223. return telemetry.Error(ctx, span, err, "failed getting deployment")
  224. }
  225. if scale.Spec.Replicas > 0 {
  226. return nil
  227. }
  228. scale.Spec.Replicas = 1
  229. if _, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).UpdateScale(ctx, chart, scale, metav1.UpdateOptions{}); err != nil {
  230. return telemetry.Error(ctx, span, err, "failed scaling deployment up")
  231. }
  232. return nil
  233. }
  234. func (h *UpdateDatastoreHandler) performAddonPreinstall(ctx context.Context, r *http.Request, templateName string, cluster *models.Cluster) error {
  235. ctx, span := telemetry.NewSpan(ctx, "datastore-addon-preinstall")
  236. defer span.End()
  237. awsTemplates := map[string][]string{
  238. "elasticache-redis": {"ack-chart-ec2", "ack-chart-elasticache"},
  239. "rds-postgresql": {"ack-chart-ec2", "ack-chart-rds"},
  240. "rds-postgresql-aurora": {"ack-chart-ec2", "ack-chart-rds"},
  241. }
  242. telemetry.WithAttributes(span,
  243. telemetry.AttributeKV{Key: "template-name", Value: templateName},
  244. telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
  245. )
  246. if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
  247. return nil
  248. }
  249. if _, ok := awsTemplates[templateName]; !ok {
  250. return nil
  251. }
  252. agent, err := h.GetAgent(r, cluster, "")
  253. if err != nil {
  254. return telemetry.Error(ctx, span, err, "failed to get k8s agent")
  255. }
  256. if _, err = agent.GetNamespace(release.Namespace_EnvironmentGroups); err != nil {
  257. if _, err := agent.CreateNamespace(release.Namespace_EnvironmentGroups, map[string]string{}); err != nil {
  258. return telemetry.Error(ctx, span, err, "failed creating porter-env-group namespace")
  259. }
  260. }
  261. for _, chart := range awsTemplates[templateName] {
  262. if err := h.scaleAckChartDeployment(ctx, chart, agent); err != nil {
  263. return telemetry.Error(ctx, span, err, "failed scaling ack chart deployment")
  264. }
  265. }
  266. return nil
  267. }
  268. func templateNameFromDatastoreTypeAndEngine(databaseType string, databaseEngine string) (string, error) {
  269. switch databaseType {
  270. case "RDS":
  271. switch databaseEngine {
  272. case "POSTGRES":
  273. return "rds-postgresql", nil
  274. case "AURORA-POSTGRES":
  275. return "rds-postgresql-aurora", nil
  276. default:
  277. return "", errors.New("invalid database engine")
  278. }
  279. case "ELASTICACHE":
  280. switch databaseEngine {
  281. case "REDIS":
  282. return "elasticache-redis", nil
  283. default:
  284. return "", errors.New("invalid database engine")
  285. }
  286. default:
  287. return "", errors.New("invalid database type")
  288. }
  289. }