From cd85513f58260d81bee66a0ae6994e60d46e537c Mon Sep 17 00:00:00 2001
From: Antoine Jacquemin
Date: Mon, 17 Jun 2024 22:04:25 +0200
Subject: [PATCH] edit cost name

---
 kong/llm/drivers/shared.lua                                | 6 +++---
 kong/plugins/prometheus/exporter.lua                       | 4 ++--
 spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua | 2 +-
 .../39-ai-request-transformer/02-integration_spec.lua      | 2 +-
 .../40-ai-response-transformer/02-integration_spec.lua     | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/kong/llm/drivers/shared.lua b/kong/llm/drivers/shared.lua
index 79393d491c72..6eadc9daaf1e 100644
--- a/kong/llm/drivers/shared.lua
+++ b/kong/llm/drivers/shared.lua
@@ -41,7 +41,7 @@ local log_entry_keys = {
   PROMPT_TOKENS = "prompt_tokens",
   COMPLETION_TOKENS = "completion_tokens",
   TOTAL_TOKENS = "total_tokens",
-  COST_REQUEST = "cost_request",
+  COST = "cost",
 
   -- cache keys
   VECTOR_DB = "vector_db",
@@ -486,7 +486,7 @@ function _M.post_request(conf, response_object)
       [log_entry_keys.PROMPT_TOKENS] = 0,
       [log_entry_keys.COMPLETION_TOKENS] = 0,
       [log_entry_keys.TOTAL_TOKENS] = 0,
-      [log_entry_keys.COST_REQUEST] = 0,
+      [log_entry_keys.COST] = 0,
     },
     [log_entry_keys.CACHE_CONTAINER] = {
       [log_entry_keys.VECTOR_DB] = "",
@@ -523,7 +523,7 @@ function _M.post_request(conf, response_object)
     if response_object.usage.prompt_tokens and response_object.usage.completion_tokens
         and conf.model.options.input_cost and conf.model.options.output_cost then
-      request_analytics_plugin[log_entry_keys.TOKENS_CONTAINER][log_entry_keys.COST_REQUEST] =
+      request_analytics_plugin[log_entry_keys.TOKENS_CONTAINER][log_entry_keys.COST] =
         (response_object.usage.prompt_tokens * conf.model.options.input_cost
         + response_object.usage.completion_tokens * conf.model.options.output_cost) / 1000000 -- 1 million
     end
diff --git a/kong/plugins/prometheus/exporter.lua b/kong/plugins/prometheus/exporter.lua
index 091b6685dc69..f06e03a6c41d 100644
--- a/kong/plugins/prometheus/exporter.lua
+++ b/kong/plugins/prometheus/exporter.lua
@@ -365,8 +365,8 @@ local function log(message, serialized)
     labels_table_ai_llm_status[7] = workspace
     metrics.ai_llm_requests:inc(1, labels_table_ai_llm_status)
 
-    if ai_plugin.usage.cost_request and ai_plugin.usage.cost_request > 0 then
-      metrics.ai_llm_cost:inc(ai_plugin.usage.cost_request, labels_table_ai_llm_status)
+    if ai_plugin.usage.cost and ai_plugin.usage.cost > 0 then
+      metrics.ai_llm_cost:inc(ai_plugin.usage.cost, labels_table_ai_llm_status)
     end
 
     labels_table_ai_llm_tokens[1] = ai_plugin.meta.provider_name
diff --git a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
index a3df462b3d8d..e1c85368e31d 100644
--- a/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
+++ b/spec/03-plugins/38-ai-proxy/02-openai_integration_spec.lua
@@ -47,7 +47,7 @@ local _EXPECTED_CHAT_STATS = {
   },
   usage = {
     completion_tokens = 12,
-    cost_request = 0.00037,
+    cost = 0.00037,
     prompt_tokens = 25,
     total_tokens = 37,
   },
diff --git a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
index d846b4f5c55a..2bc3e9951f79 100644
--- a/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
+++ b/spec/03-plugins/39-ai-request-transformer/02-integration_spec.lua
@@ -127,7 +127,7 @@ local _EXPECTED_CHAT_STATS = {
   },
   usage = {
     completion_tokens = 12,
-    cost_request = 0.00037,
+    cost = 0.00037,
     prompt_tokens = 25,
     total_tokens = 37,
   },
diff --git a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
index d5e4a4ecb611..711c6f110eae 100644
--- a/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
+++ b/spec/03-plugins/40-ai-response-transformer/02-integration_spec.lua
@@ -184,7 +184,7 @@ local _EXPECTED_CHAT_STATS = {
   },
   usage = {
     completion_tokens = 12,
-    cost_request = 0.00037,
+    cost = 0.00037,
     prompt_tokens = 25,
     total_tokens = 37,
   },
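Note (not part of the patch): the field renamed here from "cost_request" to "cost" holds
the per-request spend computed in shared.lua from token usage, where input_cost and
output_cost are prices per one million tokens. A minimal standalone Lua sketch of that
computation follows; compute_cost is a hypothetical helper for illustration, not Kong's
actual API.

  -- Sketch of the cost formula from kong/llm/drivers/shared.lua.
  -- Assumption: prices are expressed per one million tokens, hence
  -- the division by 1000000 (matching the "-- 1 million" comment above).
  local function compute_cost(usage, options)
    if not (usage.prompt_tokens and usage.completion_tokens
            and options.input_cost and options.output_cost) then
      return nil -- cost is only computed when all four inputs are present
    end
    return (usage.prompt_tokens * options.input_cost
            + usage.completion_tokens * options.output_cost) / 1000000
  end

  -- Reproduces the value expected by the spec fixtures above:
  -- (25 * 10 + 12 * 10) / 1000000 = 0.00037
  print(compute_cost(
    { prompt_tokens = 25, completion_tokens = 12 },
    { input_cost = 10, output_cost = 10 }
  )) --> 0.00037

The example prices (10 per million tokens for both input and output) are an assumption
chosen so the result matches the cost = 0.00037 asserted in the three updated spec files.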