# haystack.py — Chainlit app wrapping a Haystack conversational agent (seven-wonders demo).
import os
from datasets import load_dataset
from haystack.agents.base import Tool
from haystack.agents.conversational import ConversationalAgent
from haystack.agents.memory import ConversationSummaryMemory
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import BM25Retriever, PromptNode
from haystack.pipelines import DocumentSearchPipeline
import chainlit as cl
# Fail fast at import time when the OpenAI credential is absent or empty.
if not (openai_api_key := os.environ.get("OPENAI_API_KEY")):
    raise ValueError("Please set the OPENAI_API_KEY environment variable")
@cl.cache
def get_retriever():
    """Build a BM25 retriever over the seven-wonders dataset held in an in-memory store.

    Cached by Chainlit so the dataset is downloaded and indexed only once
    per process.
    """
    store = InMemoryDocumentStore(use_bm25=True)
    seven_wonders_docs = load_dataset("bilgeyucel/seven-wonders", split="train")
    store.write_documents(seven_wonders_docs)
    return BM25Retriever(store)
@cl.cache
def get_agent(retriever):
    """Assemble the conversational agent around the given retriever.

    Wires together: a document-search tool over ``retriever``, a GPT-3.5
    prompt node (stopping at "Observation:" so tool output is injected by
    the agent loop), and a summarizing conversation memory. Cached by
    Chainlit so the agent is built once per process.
    """
    # LLM backend shared by the agent and the memory summarizer.
    prompt_node = PromptNode(
        "gpt-3.5-turbo",
        api_key=openai_api_key,
        max_length=256,
        stop_words=["Observation:"],
    )

    # Summarize the running conversation every few exchanges instead of
    # keeping the full transcript.
    memory = ConversationSummaryMemory(
        prompt_node,
        prompt_template="deepset/conversational-summary",
        summary_frequency=3,
    )

    # BM25 document search exposed to the agent as its single tool.
    search_tool = Tool(
        name="seven_wonders_search",
        pipeline_or_node=DocumentSearchPipeline(retriever),
        description="useful for when you need to answer questions about the seven wonders of the world: Colossus of Rhodes, Statue of Zeus, Great Pyramid of Giza, Mausoleum at Halicarnassus, Temple of Artemis, Lighthouse of Alexandria and Hanging Gardens of Babylon",
        output_variable="documents",
    )

    # ReAct-style prompt; the {tool_names_with_descriptions}, {memory},
    # {query} and {transcript} slots are filled in by the agent at runtime.
    agent_prompt = """
In the following conversation, a human user interacts with an AI Agent. The human user poses questions, and the AI Agent goes through several steps to provide well-informed answers.
The AI Agent must use the available tools to find the up-to-date information. The final answer to the question should be truthfully based solely on the output of the tools. The AI Agent should ignore its knowledge when answering the questions.
The AI Agent has access to these tools:
{tool_names_with_descriptions}
The following is the previous conversation between a human and The AI Agent:
{memory}
AI Agent responses must start with one of the following:
Thought: [the AI Agent's reasoning process]
Tool: [tool names] (on a new line) Tool Input: [input as a question for the selected tool WITHOUT quotation marks and on a new line] (These must always be provided together and on separate lines.)
Observation: [tool's result]
Final Answer: [final answer to the human user's question]
When selecting a tool, the AI Agent must provide both the "Tool:" and "Tool Input:" pair in the same response, but on separate lines.
The AI Agent should not ask the human user for additional information, clarification, or context.
If the AI Agent cannot find a specific answer after exhausting available tools and approaches, it answers with Final Answer: inconclusive
Question: {query}
Thought:
{transcript}
"""
    return ConversationalAgent(
        prompt_node=prompt_node,
        memory=memory,
        prompt_template=agent_prompt,
        tools=[search_tool],
    )
# Build the retriever and agent once at import time (both factories are
# memoized via @cl.cache, so repeated imports/reloads reuse the same objects).
retriever = get_retriever()
agent = get_agent(retriever)
# Attach Chainlit's Haystack callback handler so the agent's intermediate
# steps are surfaced in the chat UI (presumably streamed per step — confirm
# against the Chainlit Haystack integration docs).
cl.HaystackAgentCallbackHandler(agent)
@cl.author_rename
def rename(orig_author: str):
    """Map Haystack's internal author label to a friendlier display name."""
    if orig_author == "custom-at-query-time":
        return "Agent Step"
    return orig_author
@cl.on_chat_start
async def init():
    """Open each chat with a canned demo question and the agent's reply."""
    question = "What did Rhodes Statue look like?"
    await cl.Message(author="User", content=question).send()
    # agent.run is blocking; make_async keeps the Chainlit event loop free.
    result = await cl.make_async(agent.run)(question)
    reply = result["answers"][0].answer
    await cl.Message(author="Agent", content=reply).send()
    # Follow-ups work because the agent keeps conversational memory, e.g.:
    # question = "When did it collapse?"
    # question = "How tall was it?"
@cl.on_message
async def answer(message: cl.Message):
    """Forward each user message to the agent and relay its first answer."""
    # Run the blocking agent call off the event loop thread.
    result = await cl.make_async(agent.run)(message.content)
    reply = result["answers"][0].answer
    await cl.Message(author="Agent", content=reply).send()