Преглед изворни кода

add cluster destroyed track

Alexander Belanger пре 4 година
родитељ
комит
cdc8fe3fca

+ 11 - 0
api/server/handlers/infra/delete.go

@@ -8,6 +8,7 @@ import (
 	"github.com/porter-dev/porter/api/server/shared/apierrors"
 	"github.com/porter-dev/porter/api/server/shared/config"
 	"github.com/porter-dev/porter/api/types"
+	"github.com/porter-dev/porter/internal/analytics"
 	"github.com/porter-dev/porter/internal/kubernetes"
 	"github.com/porter-dev/porter/internal/kubernetes/provisioner"
 	"github.com/porter-dev/porter/internal/models"
@@ -37,6 +38,16 @@ func (c *InfraDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if infra.Kind == types.InfraDOKS || infra.Kind == types.InfraGKE || infra.Kind == types.InfraEKS {
+		c.Config().AnalyticsClient.Track(analytics.ClusterDestroyingStartTrack(
+			&analytics.ClusterDestroyingStartTrackOpts{
+				ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, 0),
+				ClusterType:            infra.Kind,
+				InfraID:                infra.ID,
+			},
+		))
+	}
+
 	infra.Status = types.StatusDestroying
 	infra, err := c.Repo().Infra().UpdateInfra(infra)
 

+ 4 - 0
internal/analytics/track_events.go

@@ -30,4 +30,8 @@ const (
 	ApplicationLaunchSuccess SegmentEvent = "Application Launch Success"
 
 	ApplicationDeploymentWebhook SegmentEvent = "Triggered Re-deploy via Webhook"
+
+	// delete events
+	ClusterDestroyingStart   SegmentEvent = "Cluster Destroying Start"
+	ClusterDestroyingSuccess SegmentEvent = "Cluster Destroying Success"
 )

+ 44 - 0
internal/analytics/tracks.go

@@ -442,3 +442,47 @@ func RegistryProvisioningSuccessTrack(opts *RegistryProvisioningSuccessTrackOpts
 		getDefaultSegmentTrack(additionalProps, RegistryProvisioningSuccess),
 	)
 }
+
+// ClusterDestroyingStartTrackOpts are the options for creating a track when a cluster
+// has started destroying
+type ClusterDestroyingStartTrackOpts struct {
+	*ClusterScopedTrackOpts
+
+	ClusterType types.InfraKind
+	InfraID     uint
+}
+
+// ClusterDestroyingStartTrack returns a track for when a cluster
+// has started destroying
+func ClusterDestroyingStartTrack(opts *ClusterDestroyingStartTrackOpts) segmentTrack {
+	additionalProps := make(map[string]interface{})
+	additionalProps["cluster_type"] = opts.ClusterType
+	additionalProps["infra_id"] = opts.InfraID
+
+	return getSegmentClusterTrack(
+		opts.ClusterScopedTrackOpts,
+		getDefaultSegmentTrack(additionalProps, ClusterDestroyingStart),
+	)
+}
+
+// ClusterDestroyingSuccessTrackOpts are the options for creating a track when a cluster
+// has been successfully destroyed
+type ClusterDestroyingSuccessTrackOpts struct {
+	*ClusterScopedTrackOpts
+
+	ClusterType types.InfraKind
+	InfraID     uint
+}
+
+// ClusterDestroyingSuccessTrack returns a new track for when a cluster
+// has been successfully destroyed
+func ClusterDestroyingSuccessTrack(opts *ClusterDestroyingSuccessTrackOpts) segmentTrack {
+	additionalProps := make(map[string]interface{})
+	additionalProps["cluster_type"] = opts.ClusterType
+	additionalProps["infra_id"] = opts.InfraID
+
+	return getSegmentClusterTrack(
+		opts.ClusterScopedTrackOpts,
+		getDefaultSegmentTrack(additionalProps, ClusterDestroyingSuccess),
+	)
+}

+ 10 - 0
internal/kubernetes/provisioner/global_stream.go

@@ -402,6 +402,16 @@ func GlobalStreamListener(
 				if err != nil {
 					continue
 				}
+
+				if infra.Kind == types.InfraDOKS || infra.Kind == types.InfraGKE || infra.Kind == types.InfraEKS {
+					analyticsClient.Track(analytics.ClusterDestroyingSuccessTrack(
+						&analytics.ClusterDestroyingSuccessTrackOpts{
+							ClusterScopedTrackOpts: analytics.GetClusterScopedTrackOpts(infra.CreatedByUserID, infra.ProjectID, 0),
+							ClusterType:            infra.Kind,
+							InfraID:                infra.ID,
+						},
+					))
+				}
 			}
 
 			// acknowledge the message as read