Commit

fix: file structure for callbackhanders

csgulati09 committed Jun 4, 2024
1 parent 4ff1b08 commit ded6324
Showing 4 changed files with 13 additions and 4 deletions.
3 changes: 2 additions & 1 deletion portkey_ai/api_resources/global_constants.py
@@ -29,7 +29,8 @@
 VERSION = "0.1.0"
 DEFAULT_TIMEOUT = 60
 PORTKEY_HEADER_PREFIX = "x-portkey-"
-PORTKEY_BASE_URL = "https://api.portkey.ai/v1"
+# PORTKEY_BASE_URL = "https://api.portkey.ai/v1"
+PORTKEY_BASE_URL = "https://api.portkeydev.com/v1"
 PORTKEY_GATEWAY_URL = PORTKEY_BASE_URL
 PORTKEY_API_KEY_ENV = "PORTKEY_API_KEY"
 PORTKEY_PROXY_ENV = "PORTKEY_PROXY"
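The hunk above points the SDK's default gateway at a dev host. If a consumer needs the production gateway while this constant is in flux, the URL can be overridden per client. A minimal sketch, assuming the Portkey client accepts an OpenAI-style base_url keyword (not confirmed by this diff):

```python
# Minimal sketch: override the gateway URL per client instead of relying
# on PORTKEY_BASE_URL from global_constants.py. The base_url keyword is
# an assumption modeled on OpenAI-style clients, not confirmed here.
from portkey_ai import Portkey

client = Portkey(
    api_key="YOUR_PORTKEY_API_KEY",        # placeholder credential
    base_url="https://api.portkey.ai/v1",  # production gateway
)
```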
4 changes: 4 additions & 0 deletions portkey_ai/llms/callback/__init__.py
@@ -0,0 +1,4 @@
+from .portkey_langchain_callback import PortkeyLangchain
+from .portkey_llama_callback import PortkeyLlamaindex
+
+__all__ = ["PortkeyLangchain", "PortkeyLlamaindex"]
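This new __init__.py gives both handlers a stable import path under portkey_ai.llms.callback; code still importing PortkeyCallbackHandler from the old module locations will break with the renames below. A minimal sketch of the resulting import surface:

```python
# Both handlers now resolve through the package, per __all__ above.
from portkey_ai.llms.callback import PortkeyLangchain, PortkeyLlamaindex
```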
portkey_ai/llms/callback/portkey_langchain_callback.py (file header lost in extraction; path inferred from the __init__.py imports above)
@@ -8,7 +8,7 @@
 from portkey_ai.api_resources.apis.logger import Logger
 
 
-class PortkeyCallbackHandler(BaseCallbackHandler):
+class PortkeyLangchain(BaseCallbackHandler):
     def __init__(
         self,
         api_key: str,
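Because PortkeyLangchain subclasses BaseCallbackHandler, it can be passed wherever LangChain accepts a callbacks list. A hedged sketch of the wiring; the api_key parameter is visible in the diff, while the ChatOpenAI setup is illustrative and not part of this commit:

```python
# Sketch: attach the renamed handler to a LangChain chat model.
# PortkeyLangchain's api_key parameter appears in the diff above;
# the model choice and invoke call are illustrative assumptions.
from langchain_openai import ChatOpenAI
from portkey_ai.llms.callback import PortkeyLangchain

handler = PortkeyLangchain(api_key="YOUR_PORTKEY_API_KEY")
llm = ChatOpenAI(model="gpt-4o-mini", callbacks=[handler])
llm.invoke("Hello!")
```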
portkey_ai/llms/callback/portkey_llama_callback.py (file header lost in extraction; path inferred from the __init__.py imports above)
@@ -10,7 +10,7 @@
 from llama_index.core.utilities.token_counting import TokenCounter
 
 
-class PortkeyCallbackHandler(LlamaIndexBaseCallbackHandler):
+class PortkeyLlamaindex(LlamaIndexBaseCallbackHandler):
     startTimestamp: int = 0
     endTimestamp: float = 0
 
@@ -113,8 +113,10 @@ def llm_event_stop(self, payload: Any, event_id) -> None:
         data = payload.get(EventPayload.RESPONSE, {})
 
         chunks = payload.get(EventPayload.MESSAGES, {})
-        self.token_llm = self._token_counter.estimate_tokens_in_messages(chunks)
+        print("chunks", chunks)
 
+        self.token_llm = self._token_counter.estimate_tokens_in_messages(chunks)
+        print("token_llm", self.token_llm)
         self.response["status"] = 200
         self.response["body"] = {
             "choices": [
@@ -137,6 +139,8 @@ def llm_event_stop(self, payload: Any, event_id) -> None:
         self.response["headers"] = {}
         self.response["streamingMode"] = self.streamingMode
 
+        print("response", self.response)
+
         self.log_object.update(
             {
                 "request": self.request,
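PortkeyLlamaindex extends LlamaIndex's base callback handler, so it is registered through a CallbackManager; llm_event_stop then runs when an LLM call finishes, estimates tokens from the MESSAGES payload via TokenCounter, and assembles the request/response log. A hedged sketch of the registration; the CallbackManager wiring is standard LlamaIndex, while the api_key argument mirrors the LangChain handler and is an assumption here:

```python
# Sketch: register the renamed handler globally so LlamaIndex events
# (including llm_event_stop above) reach Portkey's logger.
# The api_key argument is an assumption mirroring the LangChain handler.
from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager
from portkey_ai.llms.callback import PortkeyLlamaindex

Settings.callback_manager = CallbackManager(
    handlers=[PortkeyLlamaindex(api_key="YOUR_PORTKEY_API_KEY")]
)
```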
