update.go 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. package datastore
  2. import (
  3. "context"
  4. "encoding/base64"
  5. "encoding/json"
  6. "errors"
  7. "net/http"
  8. "strings"
  9. "connectrpc.com/connect"
  10. "github.com/porter-dev/api-contracts/generated/go/helpers"
  11. porterv1 "github.com/porter-dev/api-contracts/generated/go/porter/v1"
  12. "github.com/porter-dev/porter/api/server/authz"
  13. "github.com/porter-dev/porter/api/server/handlers"
  14. "github.com/porter-dev/porter/api/server/handlers/release"
  15. "github.com/porter-dev/porter/api/server/shared"
  16. "github.com/porter-dev/porter/api/server/shared/apierrors"
  17. "github.com/porter-dev/porter/api/server/shared/config"
  18. "github.com/porter-dev/porter/api/types"
  19. "github.com/porter-dev/porter/internal/datastore"
  20. "github.com/porter-dev/porter/internal/helm"
  21. "github.com/porter-dev/porter/internal/kubernetes"
  22. "github.com/porter-dev/porter/internal/models"
  23. "github.com/porter-dev/porter/internal/repository"
  24. "github.com/porter-dev/porter/internal/telemetry"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. "k8s.io/utils/pointer"
  27. )
// UpdateDatastoreHandler is a struct for updating datastores.
// Currently, this is expected to be used once (on create) and then not again, however the 'update' terminology was proactively used
// so we can reuse this handler when we support updates in the future.
type UpdateDatastoreHandler struct {
	// PorterHandlerReadWriter supplies request decoding/validation, result writing, and config access.
	handlers.PorterHandlerReadWriter
	// KubernetesAgentGetter resolves kubernetes and helm agents for a given cluster.
	authz.KubernetesAgentGetter
}
  35. // NewUpdateDatastoreHandler constructs a datastore UpdateDatastoreHandler
  36. func NewUpdateDatastoreHandler(
  37. config *config.Config,
  38. decoderValidator shared.RequestDecoderValidator,
  39. writer shared.ResultWriter,
  40. ) *UpdateDatastoreHandler {
  41. return &UpdateDatastoreHandler{
  42. PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer),
  43. KubernetesAgentGetter: authz.NewOutOfClusterAgentGetter(config),
  44. }
  45. }
// UpdateDatastoreRequest is the expected format of the request body
type UpdateDatastoreRequest struct {
	// Name is the name of the datastore
	Name string `json:"name"`
	// Type is the datastore type; "RDS" and "ELASTICACHE" are the types handled below
	Type string `json:"type"`
	// Engine is the datastore engine; "POSTGRES", "AURORA-POSTGRES" and "REDIS" are handled below
	Engine string `json:"engine"`
	// Values holds the datastore configuration; in the legacy flow these are passed
	// directly as helm values, in the beta flow they are parsed for a "config" section
	Values map[string]interface{} `json:"values"`
}
// UpdateDatastoreResponse is the expected format of the response body.
// It is currently empty; success is conveyed by the HTTP status alone.
type UpdateDatastoreResponse struct{}
// ServeHTTP updates a datastore using the decoded values.
// Projects without the beta features flag take the legacy helm-install path;
// otherwise the request is translated into a ManagedDatastore proto and sent
// to the cluster control plane via PatchCloudContract.
func (h *UpdateDatastoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx, span := telemetry.NewSpan(r.Context(), "serve-update-datastore")
	defer span.End()

	// project and cluster are placed on the context by upstream scope middleware.
	project, _ := ctx.Value(types.ProjectScope).(*models.Project)
	cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)

	request := &UpdateDatastoreRequest{}
	if ok := h.DecodeAndValidate(w, r, request); !ok {
		err := telemetry.Error(ctx, span, nil, "error decoding update datastore request")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	betaFeaturesEnabled := project.GetFeatureFlag(models.BetaFeaturesEnabled, h.Config().LaunchDarklyClient)

	telemetry.WithAttributes(span,
		telemetry.AttributeKV{Key: "name", Value: request.Name},
		telemetry.AttributeKV{Key: "type", Value: request.Type},
		telemetry.AttributeKV{Key: "engine", Value: request.Engine},
		telemetry.AttributeKV{Key: "beta-features-enabled", Value: betaFeaturesEnabled},
	)

	// Legacy path: helm-install the datastore chart, then return early.
	if !betaFeaturesEnabled {
		err := h.legacy_DatastoreCreateFlow(ctx, request, project, cluster, r)
		if err != nil {
			err = telemetry.Error(ctx, span, err, "error creating datastore")
			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
			return
		}
		h.WriteResult(w, r, UpdateDatastoreResponse{})
		return
	}

	// Beta path: derive the region from the cluster's latest contract revision.
	region, err := h.getClusterRegion(ctx, project.ID, cluster.ID)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error getting cluster region")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	// assume we are creating for now; will add update support later
	datastoreProto := &porterv1.ManagedDatastore{
		CloudProvider:                     porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS,
		CloudProviderCredentialIdentifier: cluster.CloudProviderCredentialIdentifier,
		Region:                            region,
		ConnectedClusters: &porterv1.ConnectedClusters{
			ConnectedClusterIds: []int64{int64(cluster.ID)},
		},
	}

	// Round-trip request.Values through JSON to extract the typed "config" section.
	marshaledValues, err := json.Marshal(request.Values)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error marshaling values")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	var datastoreValues struct {
		Config struct {
			Name               string `json:"name"`
			DatabaseName       string `json:"databaseName"`
			MasterUsername     string `json:"masterUsername"`
			MasterUserPassword string `json:"masterUserPassword"`
			AllocatedStorage   int64  `json:"allocatedStorage"`
			InstanceClass      string `json:"instanceClass"`
			EngineVersion      string `json:"engineVersion"`
		} `json:"config"`
	}
	err = json.Unmarshal(marshaledValues, &datastoreValues)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error unmarshaling rds postgres values")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	if datastoreValues.Config.Name == "" {
		err = telemetry.Error(ctx, span, nil, "datastore name is required")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	datastoreProto.Name = datastoreValues.Config.Name

	// Translate the type/engine pair into the proto kind and kind-specific values.
	switch request.Type {
	case "RDS":
		var engine porterv1.EnumAwsRdsEngine
		switch request.Engine {
		case "POSTGRES":
			engine = porterv1.EnumAwsRdsEngine_ENUM_AWS_RDS_ENGINE_POSTGRESQL
		case "AURORA-POSTGRES":
			engine = porterv1.EnumAwsRdsEngine_ENUM_AWS_RDS_ENGINE_AURORA_POSTGRESQL
		default:
			err = telemetry.Error(ctx, span, nil, "invalid rds engine")
			h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
			return
		}
		datastoreProto.Kind = porterv1.EnumDatastoreKind_ENUM_DATASTORE_KIND_AWS_RDS
		datastoreProto.KindValues = &porterv1.ManagedDatastore_AwsRdsKind{
			AwsRdsKind: &porterv1.AwsRds{
				DatabaseName:              pointer.String(datastoreValues.Config.DatabaseName),
				MasterUsername:            pointer.String(datastoreValues.Config.MasterUsername),
				MasterUserPasswordLiteral: pointer.String(datastoreValues.Config.MasterUserPassword),
				AllocatedStorageGigabytes: pointer.Int64(datastoreValues.Config.AllocatedStorage),
				InstanceClass:             pointer.String(datastoreValues.Config.InstanceClass),
				Engine:                    engine,
				EngineVersion:             pointer.String(datastoreValues.Config.EngineVersion),
			},
		}
	case "ELASTICACHE":
		datastoreProto.Kind = porterv1.EnumDatastoreKind_ENUM_DATASTORE_KIND_AWS_ELASTICACHE
		datastoreProto.KindValues = &porterv1.ManagedDatastore_AwsElasticacheKind{
			AwsElasticacheKind: &porterv1.AwsElasticache{
				Engine:                    porterv1.EnumAwsElasticacheEngine_ENUM_AWS_ELASTICACHE_ENGINE_REDIS,
				InstanceClass:             pointer.String(datastoreValues.Config.InstanceClass),
				MasterUserPasswordLiteral: pointer.String(datastoreValues.Config.MasterUserPassword),
				EngineVersion:             pointer.String(datastoreValues.Config.EngineVersion),
			},
		}
	default:
		err = telemetry.Error(ctx, span, nil, "invalid datastore type")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusBadRequest))
		return
	}

	// Hand the datastore to the cluster control plane as a cloud-contract patch.
	req := connect.NewRequest(&porterv1.PatchCloudContractRequest{
		ProjectId:    int64(project.ID),
		Operation:    porterv1.EnumPatchCloudContractOperation_ENUM_PATCH_CLOUD_CONTRACT_OPERATION_UPDATE,
		ResourceType: porterv1.EnumPatchCloudContractType_ENUM_PATCH_CLOUD_CONTRACT_TYPE_DATASTORE,
		ResourceValues: &porterv1.PatchCloudContractRequest_Datastore{
			Datastore: datastoreProto,
		},
	})
	_, err = h.Config().ClusterControlPlaneClient.PatchCloudContract(ctx, req)
	if err != nil {
		err = telemetry.Error(ctx, span, err, "error patching cloud contract")
		h.HandleAPIError(w, r, apierrors.NewErrPassThroughToClient(err, http.StatusInternalServerError))
		return
	}

	h.WriteResult(w, r, UpdateDatastoreResponse{})
}
// legacy_DatastoreCreateFlow is the pre-beta-features create path: it
// helm-installs the datastore chart into the cluster, creates (or fetches) the
// datastore record in the porter database, then notifies the cluster control
// plane of the change via UpdateDatastore.
func (h *UpdateDatastoreHandler) legacy_DatastoreCreateFlow(
	ctx context.Context,
	request *UpdateDatastoreRequest,
	project *models.Project,
	cluster *models.Cluster,
	r *http.Request,
) error {
	ctx, span := telemetry.NewSpan(ctx, "legacy-datastore-create")
	defer span.End()

	err := h.InstallDatastore(ctx, InstallDatastoreInput{
		Name:    request.Name,
		Type:    request.Type,
		Engine:  request.Engine,
		Values:  request.Values,
		Request: r,
	})
	if err != nil {
		return telemetry.Error(ctx, span, err, "error installing datastore")
	}

	// Idempotently record the datastore so we have a stable ID to hand to CCP.
	record, err := datastore.CreateOrGetRecord(ctx, datastore.CreateOrGetRecordInput{
		ProjectID:           project.ID,
		ClusterID:           cluster.ID,
		Name:                request.Name,
		Type:                request.Type,
		Engine:              request.Engine,
		DatastoreRepository: h.Repo().Datastore(),
		ClusterRepository:   h.Repo().Cluster(),
	})
	if err != nil {
		return telemetry.Error(ctx, span, err, "error retrieving datastore record")
	}

	updateReq := connect.NewRequest(&porterv1.UpdateDatastoreRequest{
		ProjectId:   int64(project.ID),
		DatastoreId: record.ID.String(),
	})
	_, err = h.Config().ClusterControlPlaneClient.UpdateDatastore(ctx, updateReq)
	if err != nil {
		return telemetry.Error(ctx, span, err, "error calling ccp update datastore")
	}

	return nil
}
// InstallDatastoreInput is the input type for InstallDatastore
type InstallDatastoreInput struct {
	// Name is the helm release name used for the install
	Name string
	// Type is the datastore type (e.g. "RDS", "ELASTICACHE")
	Type string
	// Engine is the datastore engine (e.g. "POSTGRES", "AURORA-POSTGRES", "REDIS")
	Engine string
	// Values are the helm values for the chart install
	Values map[string]interface{}
	// Request is the originating HTTP request, used to build cluster agents
	Request *http.Request
}
  233. // InstallDatastore installs a datastore by helm installing a template with the provided values
  234. func (h *UpdateDatastoreHandler) InstallDatastore(ctx context.Context, inp InstallDatastoreInput) error {
  235. ctx, span := telemetry.NewSpan(ctx, "datastore-install")
  236. defer span.End()
  237. proj, _ := ctx.Value(types.ProjectScope).(*models.Project)
  238. cluster, _ := ctx.Value(types.ClusterScope).(*models.Cluster)
  239. telemetry.WithAttributes(span,
  240. telemetry.AttributeKV{Key: "name", Value: inp.Name},
  241. telemetry.AttributeKV{Key: "type", Value: inp.Type},
  242. telemetry.AttributeKV{Key: "engine", Value: inp.Engine},
  243. )
  244. templateName, err := templateNameFromDatastoreTypeAndEngine(inp.Type, inp.Engine)
  245. if err != nil {
  246. return telemetry.Error(ctx, span, err, "error getting template name from datastore type and engine")
  247. }
  248. telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "template-name", Value: templateName})
  249. helmAgent, err := h.GetHelmAgent(ctx, inp.Request, cluster, release.Namespace_ACKSystem)
  250. if err != nil {
  251. return telemetry.Error(ctx, span, err, "error creating helm agent")
  252. }
  253. chart, err := release.LoadChart(ctx, h.Config(), &release.LoadAddonChartOpts{
  254. ProjectID: proj.ID,
  255. RepoURL: h.Config().Metadata.DefaultAddonHelmRepoURL,
  256. TemplateName: templateName,
  257. })
  258. if err != nil {
  259. return telemetry.Error(ctx, span, nil, "error loading chart")
  260. }
  261. registries, err := h.Repo().Registry().ListRegistriesByProjectID(cluster.ProjectID)
  262. if err != nil {
  263. return telemetry.Error(ctx, span, err, "error retrieving project registry")
  264. }
  265. vpcConfig, err := h.getVPCConfig(ctx, templateName, proj, cluster)
  266. if err != nil {
  267. return telemetry.Error(ctx, span, err, "error retrieving vpc config")
  268. }
  269. if err := h.performAddonPreinstall(ctx, inp.Request, templateName, cluster); err != nil {
  270. return telemetry.Error(ctx, span, err, "error performing addon preinstall")
  271. }
  272. values := inp.Values
  273. values["vpcConfig"] = vpcConfig
  274. conf := &helm.InstallChartConfig{
  275. Chart: chart,
  276. Name: inp.Name,
  277. Namespace: release.Namespace_ACKSystem,
  278. Values: values,
  279. Cluster: cluster,
  280. Repo: h.Repo(),
  281. Registries: registries,
  282. }
  283. _, err = helmAgent.InstallChart(ctx, conf, h.Config().DOConf, h.Config().ServerConf.DisablePullSecretsInjection)
  284. if err != nil {
  285. return telemetry.Error(ctx, span, err, "error installing chart")
  286. }
  287. return nil
  288. }
// getVPCConfig returns the helm values describing the cluster's network
// (CIDR block, subnet IDs, and — for AWS — region and VPC ID) for templates
// that need them. It returns an empty map for non-AWS clusters and for
// templates that are not in the known AWS template set.
func (h *UpdateDatastoreHandler) getVPCConfig(ctx context.Context, templateName string, project *models.Project, cluster *models.Cluster) (map[string]any, error) {
	ctx, span := telemetry.NewSpan(ctx, "datastore-get-vpc-config")
	defer span.End()

	telemetry.WithAttributes(span,
		telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
		telemetry.AttributeKV{Key: "template-name", Value: templateName},
	)

	vpcConfig := map[string]any{}
	// Only AWS clusters need VPC configuration.
	if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
		return vpcConfig, nil
	}

	// Maps template name -> CCP service type used to look up network settings.
	awsTemplates := map[string]string{
		"elasticache-redis":     "elasticache",
		"rds-postgresql":        "rds",
		"rds-postgresql-aurora": "rds",
	}
	serviceType, ok := awsTemplates[templateName]
	if !ok {
		return vpcConfig, nil
	}

	req := connect.NewRequest(&porterv1.SharedNetworkSettingsRequest{
		ProjectId:   int64(project.ID),
		ClusterId:   int64(cluster.ID),
		ServiceType: serviceType,
	})
	resp, err := h.Config().ClusterControlPlaneClient.SharedNetworkSettings(ctx, req)
	if err != nil {
		return vpcConfig, telemetry.Error(ctx, span, err, "error fetching cluster network settings from ccp")
	}

	vpcConfig["cidrBlock"] = resp.Msg.CidrRange
	vpcConfig["subnetIDs"] = resp.Msg.SubnetIds

	// Note: the case expression dereferences the enum pointer returned by
	// Enum() so the comparison is against the enum value itself.
	switch resp.Msg.CloudProvider {
	case *porterv1.EnumCloudProvider_ENUM_CLOUD_PROVIDER_AWS.Enum():
		vpcConfig["awsRegion"] = resp.Msg.Region
		vpcConfig["vpcID"] = resp.Msg.GetEksCloudProviderNetwork().Id
		telemetry.WithAttributes(span,
			telemetry.AttributeKV{Key: "aws-region", Value: resp.Msg.Region},
			telemetry.AttributeKV{Key: "vpc-id", Value: resp.Msg.GetEksCloudProviderNetwork().Id},
		)
	}

	telemetry.WithAttributes(span,
		telemetry.AttributeKV{Key: "cidr-block", Value: resp.Msg.CidrRange},
		telemetry.AttributeKV{Key: "subnet-ids", Value: strings.Join(resp.Msg.SubnetIds, ",")},
	)

	return vpcConfig, nil
}
  335. func (h *UpdateDatastoreHandler) scaleAckChartDeployment(ctx context.Context, chart string, agent *kubernetes.Agent) error {
  336. ctx, span := telemetry.NewSpan(ctx, "scale-ack-chart")
  337. defer span.End()
  338. telemetry.WithAttributes(span,
  339. telemetry.AttributeKV{Key: "chart-name", Value: chart},
  340. )
  341. scale, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).GetScale(ctx, chart, metav1.GetOptions{})
  342. if err != nil {
  343. return telemetry.Error(ctx, span, err, "failed getting deployment")
  344. }
  345. if scale.Spec.Replicas > 0 {
  346. return nil
  347. }
  348. scale.Spec.Replicas = 1
  349. if _, err := agent.Clientset.AppsV1().Deployments(release.Namespace_ACKSystem).UpdateScale(ctx, chart, scale, metav1.UpdateOptions{}); err != nil {
  350. return telemetry.Error(ctx, span, err, "failed scaling deployment up")
  351. }
  352. return nil
  353. }
  354. func (h *UpdateDatastoreHandler) performAddonPreinstall(ctx context.Context, r *http.Request, templateName string, cluster *models.Cluster) error {
  355. ctx, span := telemetry.NewSpan(ctx, "datastore-addon-preinstall")
  356. defer span.End()
  357. awsTemplates := map[string][]string{
  358. "elasticache-redis": {"ack-chart-ec2", "ack-chart-elasticache"},
  359. "rds-postgresql": {"ack-chart-ec2", "ack-chart-rds"},
  360. "rds-postgresql-aurora": {"ack-chart-ec2", "ack-chart-rds"},
  361. }
  362. telemetry.WithAttributes(span,
  363. telemetry.AttributeKV{Key: "template-name", Value: templateName},
  364. telemetry.AttributeKV{Key: "cloud-provider", Value: cluster.CloudProvider},
  365. )
  366. if cluster.CloudProvider != SupportedDatastoreCloudProvider_AWS {
  367. return nil
  368. }
  369. if _, ok := awsTemplates[templateName]; !ok {
  370. return nil
  371. }
  372. agent, err := h.GetAgent(r, cluster, "")
  373. if err != nil {
  374. return telemetry.Error(ctx, span, err, "failed to get k8s agent")
  375. }
  376. if _, err = agent.GetNamespace(release.Namespace_EnvironmentGroups); err != nil {
  377. if _, err := agent.CreateNamespace(release.Namespace_EnvironmentGroups, map[string]string{}); err != nil {
  378. return telemetry.Error(ctx, span, err, "failed creating porter-env-group namespace")
  379. }
  380. }
  381. for _, chart := range awsTemplates[templateName] {
  382. if err := h.scaleAckChartDeployment(ctx, chart, agent); err != nil {
  383. return telemetry.Error(ctx, span, err, "failed scaling ack chart deployment")
  384. }
  385. }
  386. return nil
  387. }
  388. func templateNameFromDatastoreTypeAndEngine(databaseType string, databaseEngine string) (string, error) {
  389. switch databaseType {
  390. case "RDS":
  391. switch databaseEngine {
  392. case "POSTGRES":
  393. return "rds-postgresql", nil
  394. case "AURORA-POSTGRES":
  395. return "rds-postgresql-aurora", nil
  396. default:
  397. return "", errors.New("invalid database engine")
  398. }
  399. case "ELASTICACHE":
  400. switch databaseEngine {
  401. case "REDIS":
  402. return "elasticache-redis", nil
  403. default:
  404. return "", errors.New("invalid database engine")
  405. }
  406. default:
  407. return "", errors.New("invalid database type")
  408. }
  409. }
// getClusterRegion is a very hacky way of getting the region of the cluster; this will be replaced once we allow the user to specify region from the frontend.
// It fetches the latest API contract revision for the cluster, decodes the
// base64 contract proto, and reads the region from the EKS kind values —
// meaning only EKS-backed clusters are supported here.
func (h *UpdateDatastoreHandler) getClusterRegion(
	ctx context.Context,
	projectId uint,
	clusterId uint,
) (string, error) {
	ctx, span := telemetry.NewSpan(ctx, "get-cluster-region")
	defer span.End()

	telemetry.WithAttributes(span,
		telemetry.AttributeKV{Key: "project-id", Value: projectId},
		telemetry.AttributeKV{Key: "cluster-id", Value: clusterId},
	)

	var region string
	var clusterContractRecord *models.APIContractRevision
	// Only the latest revision for this cluster is requested.
	clusterContractRevisions, err := h.Config().Repo.APIContractRevisioner().List(ctx, projectId, repository.WithClusterID(clusterId), repository.WithLatest(true))
	if err != nil {
		return region, telemetry.Error(ctx, span, err, "error getting latest cluster contract revisions")
	}
	if len(clusterContractRevisions) == 0 {
		return region, telemetry.Error(ctx, span, nil, "no cluster contract revisions found")
	}
	clusterContractRecord = clusterContractRevisions[0]

	// The contract is stored base64-encoded; decode then unmarshal the proto.
	var clusterContractProto porterv1.Contract
	decoded, err := base64.StdEncoding.DecodeString(clusterContractRecord.Base64Contract)
	if err != nil {
		return region, telemetry.Error(ctx, span, err, "error decoding cluster contract")
	}
	err = helpers.UnmarshalContractObject(decoded, &clusterContractProto)
	if err != nil {
		return region, telemetry.Error(ctx, span, err, "error unmarshalling cluster contract")
	}

	clusterProto := clusterContractProto.Cluster
	if clusterProto == nil {
		return region, telemetry.Error(ctx, span, nil, "cluster contract proto is nil")
	}
	eksKindValues := clusterProto.GetEksKind()
	if eksKindValues == nil {
		return region, telemetry.Error(ctx, span, nil, "eks kind values are nil")
	}

	region = eksKindValues.Region
	telemetry.WithAttributes(span, telemetry.AttributeKV{Key: "region", Value: region})
	return region, nil
}