From 765baba47d1f92d83094315a0e349880fdf08eed Mon Sep 17 00:00:00 2001
From: Michael Martin
Date: Thu, 23 May 2024 10:23:17 -0700
Subject: [PATCH] tests(wasm): add start/restart test for wasmtime cache

---
 .../20-wasm/10-wasmtime_spec.lua              | 107 +++++++++++++++++-
 1 file changed, 106 insertions(+), 1 deletion(-)

diff --git a/spec/02-integration/20-wasm/10-wasmtime_spec.lua b/spec/02-integration/20-wasm/10-wasmtime_spec.lua
index be39cb867db8..7a1ba07c185c 100644
--- a/spec/02-integration/20-wasm/10-wasmtime_spec.lua
+++ b/spec/02-integration/20-wasm/10-wasmtime_spec.lua
@@ -59,6 +59,111 @@ describe("#wasm wasmtime (role: " .. role .. ")", function()
           "expected cache config file to reference the cache directory")
       end)
     end
-  end) -- cache_config
+  end) -- kong prepare
+
+  describe("kong stop/start/restart", function()
+    local conf
+    local prefix = "./wasm"
+    local log = prefix .. "/logs/error.log"
+    local status_port
+    local client
+    local cp_prefix = "./wasm-cp"
+
+    lazy_setup(function()
+      if role == "traditional" then
+        helpers.get_db_utils("postgres")
+      end
+
+      helpers.clean_prefix(prefix)
+      status_port = helpers.get_available_port()
+
+      assert(helpers.kong_exec("prepare", {
+        database = role == "data_plane" and "off" or "postgres",
+        nginx_conf = "spec/fixtures/custom_nginx.template",
+        wasm = true,
+        prefix = prefix,
+        role = role,
+        --wasm_filters_path = helpers.test_conf.wasm_filters_path,
+        wasm_filters = "tests,response_transformer",
+        cluster_cert = "spec/fixtures/kong_clustering.crt",
+        cluster_cert_key = "spec/fixtures/kong_clustering.key",
+
+        status_listen = "127.0.0.1:" .. status_port,
+        nginx_main_worker_processes = 2,
+      }))
+
+      conf = assert(helpers.get_running_conf(prefix))
+
+      -- we need to briefly spin up a control plane, or else we will get
+      -- error.log entries when our data plane tries to connect
+      if role == "data_plane" then
+        helpers.get_db_utils("postgres")
+
+        assert(helpers.start_kong({
+          database = "postgres",
+          nginx_conf = "spec/fixtures/custom_nginx.template",
+          wasm = true,
+          prefix = cp_prefix,
+          role = "control_plane",
+          wasm_filters = "tests,response_transformer",
+          cluster_cert = "spec/fixtures/kong_clustering.crt",
+          cluster_cert_key = "spec/fixtures/kong_clustering.key",
+          status_listen = "off",
+          nginx_main_worker_processes = 2,
+        }))
+      end
+    end)
+
+    lazy_teardown(function()
+      if client then
+        client:close()
+      end
+
+      helpers.stop_kong(prefix)
+
+      if role == "data_plane" then
+        helpers.stop_kong(cp_prefix)
+      end
+    end)
+
+    it("does not introduce any errors", function()
+      local function assert_no_errors()
+        assert.logfile(log).has.no.line("[error]", true, 0)
+        assert.logfile(log).has.no.line("[alert]", true, 0)
+        assert.logfile(log).has.no.line("[emerg]", true, 0)
+        assert.logfile(log).has.no.line("[crit]", true, 0)
+      end
+
+      local function assert_kong_status(context)
+        if not client then
+          client = helpers.proxy_client(1000, status_port)
+          client.reopen = true
+        end
+
+        assert.eventually(function()
+          local res, err = client:send({ path = "/status", method = "GET" })
+          if res and res.status == 200 then
+            return true
+          end
+
+          return nil, err or "non-200 status"
+        end)
+        .is_truthy("failed waiting for kong status " .. context)
+      end
+
+      assert(helpers.start_kong(conf, nil, true))
+      assert_no_errors()
+
+      assert_kong_status("after fresh startup")
+      assert_no_errors()
+
+      assert(helpers.restart_kong(conf))
+      assert_no_errors()
+
+      assert_kong_status("after restart")
+      assert_no_errors()
+    end)
+  end) -- kong stop/start/restart
+
 end) -- wasmtime
 end -- each role