Skip to content

Commit b94979e

Browse files
committed
Add Agent class for streamlined LLM tool interaction
1 parent f2f26a3 commit b94979e

3 files changed

Lines changed: 81 additions & 71 deletions

File tree

sdk/chatfaq_sdk/clients/__init__.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
from logging import getLogger
22
from typing import Callable, Dict, List, Optional, Union
33

4-
from pydantic import BaseModel
5-
64
from chatfaq_sdk import ChatFAQSDK
75
from chatfaq_sdk.types import CacheConfig
86
from chatfaq_sdk.utils import function_to_json

sdk/chatfaq_sdk/clients/agent.py

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import inspect
2+
3+
from chatfaq_sdk import ChatFAQSDK
4+
from chatfaq_sdk.clients import llm_request
5+
from chatfaq_sdk.layers import Message, ToolUse, ToolResult
6+
from chatfaq_sdk.utils import convert_mml_to_llm_format
7+
8+
9+
class Agent:
    """
    An agent that uses the ChatFAQ SDK to interact with an LLM.

    The agent takes a list of tools and an optional system instruction, and
    uses them to generate responses to user messages. :meth:`run` is an async
    generator that streams ``Message``, ``ToolUse`` and ``ToolResult`` layers,
    looping until the model answers without requesting any tool call.
    """

    def __init__(self, sdk: ChatFAQSDK, model_name: str, tools: list, system_instruction: str = None):
        """
        :param sdk: Connected ChatFAQSDK instance used to issue LLM requests.
        :param model_name: Name of the LLM model to query.
        :param tools: Callables (sync or async) exposed to the model; they are
            matched by ``__name__`` against the tool names the model emits.
        :param system_instruction: Optional system prompt inserted at the start
            of the conversation.
        """
        self.sdk = sdk
        self.tools = tools
        self.system_instruction = system_instruction
        self.model_name = model_name

    async def run(self, ctx: dict):
        """
        Drive the tool-use loop for the current conversation.

        :param ctx: FSM context; ``conv_mml``, ``conversation_id`` and
            ``bot_channel_name`` are read from it.
        :yields: ``Message`` layers for text content, plus a ``ToolUse`` /
            ``ToolResult`` pair for every tool call the model makes.
        """
        messages = convert_mml_to_llm_format(ctx["conv_mml"][1:])  # Skip the greeting message
        if self.system_instruction:
            messages.insert(0, {"role": "system", "content": self.system_instruction})

        while True:
            response = await llm_request(
                self.sdk,
                self.model_name,
                use_conversation_context=False,
                conversation_id=ctx["conversation_id"],
                bot_channel_name=ctx["bot_channel_name"],
                messages=messages,
                tools=self.tools,
                tool_choice="auto",
                stream=False,
            )
            tool_results = []
            for content in response["content"]:
                if content["type"] == "text":
                    yield Message(content["text"])
                elif content["type"] == "tool_use":
                    tool_use = content["tool_use"]
                    yield ToolUse(name=tool_use["name"], id=tool_use["id"], args=tool_use["args"])
                    result = await self._execute_tool(tool_use)
                    yield ToolResult(id=tool_use["id"], name=tool_use["name"], result=result)
                    tool_results.append({
                        "id": tool_use["id"],
                        "name": tool_use["name"],
                        "result": result
                    })
            if not tool_results:
                # The model answered without calling any tool: we are done.
                break
            # Append the model's tool calls and their results so the next
            # iteration lets the model continue from them.
            messages.append({
                "role": "assistant",
                "content": response["content"]
            })
            messages.append({
                "role": "user",
                "content": [{"type": "tool_result", "tool_result": tr} for tr in tool_results]
            })

    async def _execute_tool(self, tool_use: dict):
        """
        Look up and invoke the tool named in *tool_use*, returning its result.

        Unknown tool names and tool exceptions are converted into error
        strings (instead of raising) so the failure is reported back to the
        model. NOTE: the previous unguarded ``next(...)`` sat outside the
        ``try`` block; a hallucinated tool name raised ``StopIteration``,
        which PEP 479 turns into ``RuntimeError`` inside this async
        generator's caller.
        """
        tool = next((t for t in self.tools if t.__name__ == tool_use["name"]), None)
        if tool is None:
            return f"Error executing tool {tool_use['name']}: unknown tool"
        try:
            if inspect.iscoroutinefunction(tool):
                return await tool(**tool_use["args"])
            return tool(**tool_use["args"])
        except Exception as e:
            return f"Error executing tool {tool_use['name']}: {str(e)}"

sdk/examples/agent_example/fsm_definition.py

Lines changed: 9 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
from chatfaq_sdk import ChatFAQSDK
2-
from chatfaq_sdk.clients import llm_request
2+
from chatfaq_sdk.clients.agent import Agent
33
from chatfaq_sdk.fsm import FSMDefinition, State, Transition
4-
from chatfaq_sdk.layers import Message, StreamingMessage, ToolUse, ToolResult
5-
from chatfaq_sdk.utils import convert_mml_to_llm_format
6-
4+
from chatfaq_sdk.layers import Message
75

86
MODEL_NAME = "gemini-2.0-flash"
97

@@ -21,77 +19,19 @@ async def send_greeting(sdk: ChatFAQSDK, ctx: dict):
2119

2220

2321
async def send_answer(sdk: ChatFAQSDK, ctx: dict):
    """
    Answer the user's message by delegating to an Agent equipped with the
    get_weather tool, re-yielding every layer the agent streams.

    :param sdk: Connected ChatFAQSDK instance.
    :param ctx: FSM context dict forwarded to ``Agent.run``.
    """
    agent = Agent(
        sdk=sdk,
        # model_name is a required Agent.__init__ parameter; omitting it
        # raised TypeError before any request was sent.
        model_name=MODEL_NAME,
        tools=[get_weather],
        system_instruction="You are a knowledgeable weather assistant. Use provided tools when necessary."
    )
    async for item in agent.run(ctx):
        yield item
8329

8430

8531
greeting_state = State(name="Greeting", events=[send_greeting], initial=True)
32+
answering_state = State(name="Answering", events=[send_answer])
8633

87-
answering_state = State(
88-
name="Answering",
89-
events=[send_answer],
90-
)
91-
92-
_to_answer = Transition(
93-
dest=answering_state,
94-
)
34+
_to_answer = Transition(dest=answering_state)
9535

9636
fsm_definition = FSMDefinition(
9737
states=[greeting_state, answering_state],

0 commit comments

Comments
 (0)