errchh
committed on
Commit
·
9461725
1
Parent(s):
6bbce09
fix edges
Browse files
agent.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# import libraries for langgraph, huggingface
|
| 2 |
import os
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
from typing import TypedDict, List, Dict, Any, Optional, Annotated
|
|
@@ -106,7 +105,7 @@ def search_web(query: str) -> Dict[str, str]:
|
|
| 106 |
docs = SerpAPIWrapper()
|
| 107 |
docs.run(query)
|
| 108 |
formatted_result = f'<Document source="{docs.metadata["source"]}" page="{docs.metadata.get("page", "")}"/>\n{docs.page_content}\n</Document>'
|
| 109 |
-
return {"
|
| 110 |
|
| 111 |
|
| 112 |
# ArXiv search tool
|
|
@@ -119,7 +118,7 @@ def search_arxiv(query: str) -> Dict[str, str]:
|
|
| 119 |
arxiv = ArxivAPIWrapper()
|
| 120 |
docs = arxiv.run(query)
|
| 121 |
formatted_result = f'<Document source="{docs.metadata["source"]}" page="{docs.metadata.get("page", "")}"/>\n{docs.page_content}\n</Document>'
|
| 122 |
-
return {"
|
| 123 |
|
| 124 |
|
| 125 |
# build retriever
|
|
@@ -180,14 +179,16 @@ def build_graph():
|
|
| 180 |
builder.add_edge(START, "assistant")
|
| 181 |
builder.add_conditional_edges(
|
| 182 |
"assistant",
|
| 183 |
-
# If the latest message requires a tool, route to tools
|
| 184 |
-
# Otherwise, provide a direct response
|
| 185 |
tools_condition,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
)
|
| 187 |
builder.add_edge("tools", "assistant")
|
| 188 |
|
| 189 |
-
return builder.compile()
|
| 190 |
-
|
| 191 |
|
| 192 |
if __name__ == "__main__":
|
| 193 |
question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
from dotenv import load_dotenv
|
| 3 |
from typing import TypedDict, List, Dict, Any, Optional, Annotated
|
|
|
|
| 105 |
docs = SerpAPIWrapper()
|
| 106 |
docs.run(query)
|
| 107 |
formatted_result = f'<Document source="{docs.metadata["source"]}" page="{docs.metadata.get("page", "")}"/>\n{docs.page_content}\n</Document>'
|
| 108 |
+
return {"web_results": formatted_result}
|
| 109 |
|
| 110 |
|
| 111 |
# ArXiv search tool
|
|
|
|
| 118 |
arxiv = ArxivAPIWrapper()
|
| 119 |
docs = arxiv.run(query)
|
| 120 |
formatted_result = f'<Document source="{docs.metadata["source"]}" page="{docs.metadata.get("page", "")}"/>\n{docs.page_content}\n</Document>'
|
| 121 |
+
return {"arxiv_results": formatted_result}
|
| 122 |
|
| 123 |
|
| 124 |
# build retriever
|
|
|
|
| 179 |
builder.add_edge(START, "assistant")
|
| 180 |
builder.add_conditional_edges(
|
| 181 |
"assistant",
|
|
|
|
|
|
|
| 182 |
tools_condition,
|
| 183 |
+
{
|
| 184 |
+
# If the latest message requires a tool, route to tools
|
| 185 |
+
"tools": "tools",
|
| 186 |
+
# Otherwise, provide a direct response
|
| 187 |
+
END: END,
|
| 188 |
+
}
|
| 189 |
)
|
| 190 |
builder.add_edge("tools", "assistant")
|
| 191 |
|
|
|
|
|
|
|
| 192 |
|
| 193 |
if __name__ == "__main__":
|
| 194 |
question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
|
app.py
CHANGED
|
@@ -23,30 +23,9 @@ class BasicAgent:
|
|
| 23 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 24 |
# Wrap the question in a HumanMessage from langchain_core
|
| 25 |
messages = [HumanMessage(content=question)]
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
# Extract the final answer message from the state
|
| 30 |
-
# The final message should be the content of the last AIMessage with content
|
| 31 |
-
submitted_answer = "Could not get answer from agent output." # Default if extraction fails
|
| 32 |
-
|
| 33 |
-
# Check if the result is a dictionary and contains a list of messages
|
| 34 |
-
if isinstance(result, dict) and "messages" in result and isinstance(result["messages"], list):
|
| 35 |
-
# Iterate through messages in reverse to find the last one from the AI
|
| 36 |
-
for msg in reversed(result["messages"]):
|
| 37 |
-
# Check if the message is an AIMessage and has content (non-empty string or non-empty list/dict representation)
|
| 38 |
-
# The 'content' attribute exists on AIMessage, so hasattr check is redundant.
|
| 39 |
-
if isinstance(msg, AIMessage) and msg.content is not None:
|
| 40 |
-
# Ensure the content is treated as a string for submission.
|
| 41 |
-
# LangChain AIMessage content can be str or list[str | dict].
|
| 42 |
-
# str() handles conversion for both cases.
|
| 43 |
-
submitted_answer = str(msg.content)
|
| 44 |
-
break # Found the last AI message with content, stop searching
|
| 45 |
-
|
| 46 |
-
# If the loop finishes without finding an AIMessage with content,
|
| 47 |
-
# submitted_answer remains the default value.
|
| 48 |
-
|
| 49 |
-
return submitted_answer
|
| 50 |
|
| 51 |
|
| 52 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
|
|
| 23 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
| 24 |
# Wrap the question in a HumanMessage from langchain_core
|
| 25 |
messages = [HumanMessage(content=question)]
|
| 26 |
+
messages = self.graph.invoke({"messages": messages})
|
| 27 |
+
answer = messages['messages'][-1].content
|
| 28 |
+
return answer[14:]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
|
| 31 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|