import inspect
import os
from enum import Enum
from logging import getLogger
from typing import Any, Callable, List

from chatfaq_sdk import ChatFAQSDK
from chatfaq_sdk.clients import query_prompt, llm_request
from chatfaq_sdk.layers import Message, ToolUse, ToolResult, StreamingMessage, Layer

logger = getLogger(__name__)


class MessageSender(Enum):
    system = "system"
    assistant = "assistant"
    user = "user"


class StreamingMessageWithReferences(Layer):
    """
    Layer that streams a message with references to the user.
    Unlike StreamingMessage, the references arrive in the last chunk instead of
    being provided when the layer is initialized.
    """
    _type = "message_chunk"
    _streaming = True

    def __init__(
        self,
        generator,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.generator = generator

    async def build_payloads(self, ctx, data):
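        """
        Forward each chunk from the wrapped generator as a payload. A chunk
        that carries references is sent as the final chunk of the stream.
        """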
        async for chunk in self.generator:
            references = chunk.get("references", {})
            if references:  # references are only sent with the final chunk
                payload = {
                    "payload": {
                        "content": chunk.get("content"),
                        "references": references,
                        "tool_calls": [],
                    }
                }
                yield (
                    [payload],
                    True,  # last_chunk
                )
            else:
                yield (
                    [
                        {
                            "payload": {
                                "content": chunk.get("content"),
                                "tool_calls": [],
                            }
                        }
                    ],
                    False,  # last_chunk
                )


class AgentAbs:
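    """
    Base class for conversational agents. It keeps the conversation history,
    serializes and restores agent state, loads prompts from the back end, and
    runs the LLM tool-use loop. Subclasses implement _async_init(),
    conversation_loop() and solved().
    """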
    intro_msg = ""

    def __init__(self):
        self.conversation = []
        self.prompt = None
        self.add_assistant_message(self.intro_msg)

    def add_message(self, sender: MessageSender, message: Any):
        self.conversation.append((sender, message))

    def add_user_message(self, message: Any):
        self.add_message(MessageSender.user, message)

    def add_assistant_message(self, message: Any):
        self.add_message(MessageSender.assistant, message)

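    # Hooks for subclasses: persist and restore extra state, and perform async setup.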
    def _from_serialized(self, data):
        pass

    def _serialize(self):
        return {}

    async def _async_init(self, sdk: ChatFAQSDK, *args, **kwargs):
        raise NotImplementedError

    @classmethod
    def from_serialized(cls, data):
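        """Rebuild an agent instance from the dict produced by serialize()."""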
        instance = cls()
        instance.conversation = []

        for s, m in data["conversation"]:
            instance.add_message(MessageSender(s), m)

        instance._from_serialized(data)

        return instance

    def serialize(self):
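        """Return a serializable snapshot of the agent state."""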
        return {
            "conversation": [(s.value, m) for s, m in self.conversation],
            **self._serialize(),
        }

    @classmethod
    async def async_init(cls, sdk: ChatFAQSDK, *args, **kwargs):
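        """Create an agent instance and run its asynchronous setup."""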
        instance = cls()
        await instance._async_init(sdk, *args, **kwargs)
        return instance

    def format_conversation(self):
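        """
        Render the prompt and the conversation as a list of chat messages.
        The prompt is sent with the system role once conversation messages
        exist, and with the user role while the conversation is empty.
        """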
        res = []
        for sender, message in self.conversation:
            res.append(
                {
                    "role": sender.value,
                    "content": message,
                }
            )
        return [
            {
                "role": MessageSender.system.value if self.conversation else MessageSender.user.value,
                "content": self.prompt,
            },
            *res,
        ]

    @staticmethod
    async def request_prompt(
        sdk: ChatFAQSDK, prompt_name: str, default_prompt: str
    ):
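        """Fetch the named prompt from the back end, falling back to the default."""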
        if prompt := await query_prompt(sdk, prompt_name):
            logger.info(f"{prompt_name} found")
            return prompt
        else:
            logger.warning(f"{prompt_name} not found, using default")
            return default_prompt

    async def set_prompt(
        self, sdk: ChatFAQSDK, prompt_name: str, default_prompt: str
    ):
        self.prompt = await self.request_prompt(sdk, prompt_name, default_prompt)

    async def tool_use_loop(self, sdk: ChatFAQSDK, ctx: dict, tools: List[Callable], logging=False):
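        """
        Query the LLM with the given tools, execute the tool calls it requests
        and feed the results back, repeating until the model stops calling
        tools. Yields Message, ToolUse, ToolResult and streaming layers.
        """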
        if logging:
            logger.info("\n" + "-" * 50 + " TOOL USE LOOP \n")
            logger.info("\033[42m" + "\033[30m tools \033[0m")
            logger.info("\033[92m" + str(tools) + "\033[0m")

        while True:
            messages = self.format_conversation()
            if logging:
                logger.info("\n" + "-" * 50 + " PROMPT \n")
                logger.info("\033[43m" + "\033[30m prompt \033[0m")
                logger.info("\033[93m" + messages[0]["content"] + "\033[0m")

            response = await llm_request(
                sdk,
                os.getenv("LLM"),
                use_conversation_context=False,
                conversation_id=ctx["conversation_id"],
                bot_channel_name=ctx["bot_channel_name"],
                messages=messages,
                tools=tools,
                tool_choice="auto",
                stream=False,
            )
            if logging:
                logger.info("\n" + "-" * 50 + " RESPONSE \n")
                logger.info("\033[45m" + "\033[30m response \033[0m")
                logger.info("\033[95m" + str(response) + "\033[0m")

            tool_results = []
            for content in response["content"]:
                if content["type"] == "text":
                    self.add_assistant_message(content["text"])
                    yield Message(content["text"])
                elif content["type"] == "tool_use":
                    tool_use = content["tool_use"]
                    # Find the corresponding tool by name
                    tool = next((t for t in tools if t.__name__ == tool_use["name"]), None)
                    if not tool:
                        raise ValueError(f"Tool {tool_use['name']} not found")
                    # Execute the tool, passing the SDK, context and agent as keyword arguments
                    try:
                        if inspect.iscoroutinefunction(tool):
                            result = await tool(**tool_use["args"], sdk=sdk, ctx=ctx, agent=self)
                        else:
                            result = tool(**tool_use["args"], sdk=sdk, ctx=ctx, agent=self)
                    except Exception as e:
                        result = f"Error executing tool {tool_use['name']}: {e}"

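                    # A tool may return an async generator: stream it to the user
                    # and record a placeholder string as the tool result.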
                    if inspect.isasyncgen(result):
                        yield StreamingMessageWithReferences(result)
                        result = "submitted"

                    yield ToolUse(name=tool_use["name"], id=tool_use["id"], args=tool_use["args"])
                    yield ToolResult(
                        id=tool_use["id"], name=tool_use["name"], result=result
                    )
                    tool_results.append({
                        "id": tool_use["id"],
                        "name": tool_use["name"],
                        "result": result,
                    })
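            # No tool calls in this turn means the model produced its final answer.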
            if not tool_results:
                break
            # Append assistant and user messages for the next iteration
            self.add_assistant_message(response["content"])
            self.add_user_message([
                {"type": "tool_result", "tool_result": tr} for tr in tool_results
            ])

    async def conversation_loop(self, sdk, ctx):
        raise NotImplementedError

    def solved(self):
        raise NotImplementedError