This repository was archived by the owner on Jan 21, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcallback.py
More file actions
59 lines (48 loc) · 2.11 KB
/
callback.py
File metadata and controls
59 lines (48 loc) · 2.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
"""Callback handlers used in the app."""
import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.base import AsyncCallbackHandler
from schemas import ChatResponse
from uuid import UUID
from langchain.schema import LLMResult
class StreamingLLMCallbackHandler(AsyncCallbackHandler):
    """Callback handler that streams LLM tokens to a websocket client.

    Each generated token is forwarded to the client as a ``stream`` chat
    message; prompt and final response are logged, tagged with the client id.
    """

    def __init__(self, websocket, client_id):
        """Store the target websocket and the client id used to tag log lines."""
        self.websocket = websocket
        self.client_id = client_id

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Send each newly generated token to the client as it arrives."""
        resp = ChatResponse(sender="bot", message=token, type="stream")
        await self.websocket.send_json(resp.dict())

    async def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        """Run when LLM starts running."""
        # Lazy %-args: the message is only formatted if INFO logging is enabled.
        logging.info("[%s] [stream-tutor] [PROMPT] - %s", self.client_id, prompts)

    async def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        """Log the full LLM result once generation has finished."""
        logging.info("[%s] [stream-tutor] [RESPONSE] - %s", self.client_id, response)
class QuestionGenCallbackHandler(AsyncCallbackHandler):
    """Callback handler for question generation.

    Notifies the websocket client that exercise generation has started and
    logs the prompts and final response, tagged with the client id.
    """

    def __init__(self, websocket, client_id):
        """Store the target websocket and the client id used to tag log lines."""
        self.websocket = websocket
        self.client_id = client_id

    async def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        """Run when LLM starts running."""
        resp = ChatResponse(sender="bot", message="Exercise generation...", type="info")
        # Lazy %-args: the message is only formatted if INFO logging is enabled.
        logging.info("[%s] - Question generation started...", self.client_id)
        logging.info("[%s] - %s", self.client_id, prompts)
        await self.websocket.send_json(resp.dict())

    async def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        """Log the full LLM result once question generation has finished."""
        logging.info("[%s] - Response: %s", self.client_id, response)
        logging.info("[%s] - Question generation end...", self.client_id)