mem_test.go 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158
// don't run this test by default
//go:build ignore

package memtest
import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"runtime"
	"sync/atomic"
	"testing"
	"time"

	"github.com/opencost/opencost/pkg/cmd"
	"github.com/stretchr/testify/require"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)
  19. func RunOpencost(t *testing.T, promEndpoint string) {
  20. t.Setenv("KUBECONFIG", kubeConfigPath())
  21. t.Setenv("PROMETHEUS_SERVER_ENDPOINT", fmt.Sprintf("http://%s", promEndpoint))
  22. t.Setenv("KUBERNETES_PORT", "443")
  23. t.Setenv("PPROF_ENABLED", "true")
  24. t.Setenv("CACHE_WARMING_ENABLED", "false")
  25. t.Setenv("DISABLE_AGGREGATE_COST_MODEL_CACHE", "true")
  26. t.Setenv("MAX_QUERY_CONCURRENCY", "5")
  27. t.Setenv("CONFIG_PATH", "../../configs")
  28. go func() {
  29. err := cmd.Execute(nil)
  30. require.NoError(t, err)
  31. }()
  32. maxUsage := uint64(0)
  33. go trackMaxMemoryUsage(context.TODO(), 100*time.Microsecond, &maxUsage)
  34. time.Sleep(10 * time.Second) // wait for opencost to start
  35. generateLoad(t)
  36. t.Log("Max memory usage, KiB:", maxUsage/1024)
  37. // uncomment to pause execution and explore the prometheus UI
  38. //t.Logf("Execution complete. Analyze http://%s for more results", promEndpoint)
  39. //select {} // block execution
  40. }
  41. func generateLoad(t *testing.T) {
  42. testFetch(t, fmt.Sprintf("http://localhost:9003/metrics"))
  43. //end := time.Now()
  44. //start := end.Add(-time.Hour * 24)
  45. //testFetch(t, fmt.Sprintf("http://localhost:9003/allocation/compute?aggregate=namespace,controllerKind,controller,label:app,label:team,label:pod_template_hash&idleByNode=true&includeIdle=true&includeProportionalAssetResourceCosts=true&step=window&window=%s,%s", start.Format("2006-01-02T15:04:05Z"), end.Format("2006-01-02T15:04:05Z")))
  46. //testFetch(t, fmt.Sprintf("http://localhost:9003/assets?window=%s,%s", start.Format("2006-01-02T15:04:05Z"), end.Format("2006-01-02T15:04:05Z")))
  47. }
  48. func testFetch(t *testing.T, url string) {
  49. startTime := time.Now()
  50. resp, err := http.Get(url)
  51. require.NoError(t, err)
  52. require.Less(t, resp.StatusCode, 300)
  53. require.GreaterOrEqual(t, resp.StatusCode, 200)
  54. data, err := io.ReadAll(resp.Body)
  55. require.NoError(t, err)
  56. t.Logf("%s, %v MiB, %s", time.Since(startTime), len(data)/1024/1024, url)
  57. }
  58. func trackMaxMemoryUsage(ctx context.Context, interval time.Duration, maxUsage *uint64) {
  59. var memStats runtime.MemStats
  60. t := time.NewTicker(interval)
  61. for {
  62. select {
  63. case <-ctx.Done():
  64. return
  65. case <-t.C:
  66. runtime.ReadMemStats(&memStats)
  67. // Update maxUsage if the current HeapAlloc is greater
  68. if memStats.HeapAlloc > *maxUsage {
  69. *maxUsage = memStats.HeapAlloc
  70. }
  71. }
  72. }
  73. }
  74. func TestMemoryUsage(t *testing.T) {
  75. LaunchKubeProxy(t)
  76. endpoint := LaunchVictoriaMetrics(t)
  77. RunOpencost(t, endpoint)
  78. }
  79. func LaunchKubeProxy(t *testing.T) {
  80. t.Helper()
  81. cmd := exec.CommandContext(context.Background(), "kubectl", "proxy", "--accept-hosts", `^localhost$,^127\.0\.0\.1$,^\[::1\]$,^host.testcontainers.internal$`)
  82. t.Cleanup(func() {
  83. err := cmd.Cancel()
  84. require.NoError(t, err)
  85. })
  86. cmd.Stdout = os.Stdout
  87. err := cmd.Start()
  88. require.NoError(t, err)
  89. }
// LaunchVictoriaMetrics starts a VictoriaMetrics container configured to
// scrape the host (kube proxy on 8001, opencost on 9003) and returns its
// host-reachable endpoint ("host:port"). The container is terminated when
// the test finishes.
func LaunchVictoriaMetrics(t *testing.T) string {
	t.Helper()
	ctx := context.Background()
	vm, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:        "victoriametrics/victoria-metrics:latest",
			ExposedPorts: []string{"8428/tcp"},
			WaitingFor:   wait.ForListeningPort("8428/tcp"), // note, container isn't accessible on this port, a random free port is used instead. Check logs for the actual port.
			// Host ports the container must be able to reach back through
			// the testcontainers host gateway.
			HostAccessPorts: []int{
				8001, // kube proxy
				9003, // opencost
			},
			// Scrape config plus the kubeconfig it references.
			Files: []testcontainers.ContainerFile{
				{
					HostFilePath:      "prometheus_config.yaml",
					ContainerFilePath: "/prometheus_config.yaml",
				},
				{
					ContainerFilePath: "/.kube/config",
					HostFilePath:      kubeConfigPath(),
				},
			},
			Cmd: []string{"-promscrape.config=/prometheus_config.yaml"},
			// Mirror container logs to the test's stdout.
			LogConsumerCfg: &testcontainers.LogConsumerConfig{
				Consumers: []testcontainers.LogConsumer{
					&testcontainers.StdoutLogConsumer{},
				},
			},
		},
		Started: true,
	})
	require.NoError(t, err)
	t.Cleanup(func() {
		err := vm.Terminate(ctx)
		require.NoError(t, err)
	})
	// Empty port selects the first (only) exposed port's host mapping.
	endpoint, err := vm.Endpoint(ctx, "")
	require.NoError(t, err)
	t.Logf("VictoriaMetrics endpoint: %s", endpoint)
	return endpoint
}
  131. func kubeConfigPath() string {
  132. env := os.Getenv("KUBECONFIG")
  133. if env != "" {
  134. return env
  135. }
  136. return os.ExpandEnv("$HOME/.kube/config")
  137. }