import logging
import os
import shutil
import sys
from pathlib import Path
import gradio as gr
from mcp import StdioServerParameters
from smolagents import InferenceClientModel
from smolagents.mcp_client import MCPClient
from src.upgrade_advisor.config import ( # noqa: E402
AGENT_MODEL,
CHAT_HISTORY_TURNS_CUTOFF,
CHAT_HISTORY_WORD_CUTOFF,
GITHUB_PAT,
GITHUB_READ_ONLY,
GITHUB_TOOLSETS,
)
from src.upgrade_advisor.agents.package import PackageDiscoveryAgent # noqa: E402
from src.upgrade_advisor.chat.chat import ( # noqa: E402
qn_rewriter,
run_document_qa,
summarize_chat_history,
)
from src.upgrade_advisor.misc import ( # noqa: E402
_monkeypatch_gradio_save_history,
get_example_questions,
)
from src.upgrade_advisor.theme import Christmas # noqa: E402
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# local uploads directory, also passed to the gradio upload-mcp server for file uploads
uploads_dir = Path("uploads")
uploads_dir.mkdir(exist_ok=True)
uploads_dir = uploads_dir.resolve()
_monkeypatch_gradio_save_history()
def get_agent_model(model_name: str, oauth_token: gr.OAuthToken = None):
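    """Build an InferenceClientModel for `model_name`, preferring the HF_TOKEN env var over the OAuth token."""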
    token = os.getenv("HF_TOKEN", None) or (oauth_token.token if oauth_token else None)
# provider = os.getenv("HF_INFERENCE_PROVIDER", "together")
model = InferenceClientModel(
model_id=model_name,
token=token,
timeout=1000,
)
return model
async def chat_fn(
message,
history,
persisted_attachments=None,
profile: gr.OAuthProfile = None,
oauth_token: gr.OAuthToken = None,
):
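    """Handle one chat turn: summarize the history, rewrite the question,
    run the package discovery agent over the MCP tools, and answer with a
    document QA pass over the gathered context.
    """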
    # the incoming history is a list of dicts with 'role' and 'content' keys
from datetime import datetime
    token = os.getenv("HF_TOKEN", None) or (oauth_token.token if oauth_token else None)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
logger.info(f"Received message: {message}")
logger.info(f"History: {history}")
if len(history) > 0:
summarized_history = await summarize_chat_history(
history,
turns_cutoff=CHAT_HISTORY_TURNS_CUTOFF,
word_cutoff=CHAT_HISTORY_WORD_CUTOFF,
token=token,
)
else:
summarized_history = ""
incoming_attachments = message.get("files", []) if isinstance(message, dict) else []
persisted_attachments = persisted_attachments or []
# If no new attachments are provided, keep using the previously persisted ones.
attachments = incoming_attachments or persisted_attachments
    latest_attachment = attachments[-1] if attachments else None
logger.info(f"Summarized chat history:\n{summarized_history}")
logger.info(f"With attachments: {attachments} (incoming: {incoming_attachments})")
logger.info(f"Latest attachment: {latest_attachment}")
logger.info(f"Persisted attachments: {persisted_attachments}")
    # if attachments are present, message is a dict with 'text' and 'files' keys;
    # keep only the stripped text content
    message = message.get("text", "") if isinstance(message, dict) else message
    message = message.strip()
rewritten_message, is_rewritten_good = await qn_rewriter(
message,
summarized_history,
token=token,
)
if is_rewritten_good:
logger.info(f"Rewritten question: {rewritten_message}")
else:
logger.info(f"Using original question: {message}")
rewritten_message = None
    # prepend the chat summary so the agent sees prior context alongside the question
message = f"""
CHAT SUMMARY SO FAR:
{summarized_history}
CURRENT QUESTION FROM USER:
{message}
"""
if len(attachments) > 0:
message += """Attached FILE:\n"""
        # only the most recent file is used; it is expected to be a
        # pyproject.toml or requirements.txt and is copied into the uploads directory
if latest_attachment:
# take the last uploaded file
source_file = latest_attachment
file_name = f"{timestamp}_{Path(latest_attachment).name}"
elif len(persisted_attachments) > 0:
# take the last persisted file if no new uploads
source_file = persisted_attachments[-1]
file_name = f"{timestamp}_{Path(persisted_attachments[-1]).name}"
else:
source_file = None
file_name = None
logger.info(f"Copying uploaded file {source_file} to {uploads_dir}")
shutil.copy(source_file, uploads_dir / file_name)
message += f"""
FILE PATH: {uploads_dir / file_name}\n
"""
logger.info(f"Final message to agent:\n{message}")
# Run the package discovery agent to build context
context = agent.discover_package_info(
user_input=message,
reframed_question=rewritten_message,
)
    # Log the context assembled from the agent's tool outputs
logger.info(f"Built context of length {len(context)}")
logger.info(f"Context content:\n{context}")
# Run a document QA pass using the user's question
qa_answer = await run_document_qa(
question=message,
context=context,
rewritten_question=rewritten_message,
token=token,
)
logger.info(f"QA answer: {qa_answer}")
yield (
{
"role": "assistant",
"content": qa_answer,
},
attachments,
)
def main():
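    """Connect to the MCP servers, build the discovery agent, and launch the Gradio app."""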
logger.info("Starting MCP client...")
try:
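        # GitHub MCP server: remote streamable-http endpoint, scoped by the
        # toolset and read-only headers from config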
gh_mcp_params = dict(
url="https://api.githubcopilot.com/mcp/",
transport="streamable-http",
headers={
"Authorization": f"Bearer {GITHUB_PAT}",
"X-MCP-Toolsets": GITHUB_TOOLSETS,
"X-MCP-Readonly": GITHUB_READ_ONLY,
},
)
# pypi_mcp_params = dict(
# # url="https://mcp-1st-birthday-pypi-mcp.hf.space/gradio_api/mcp/",
# url="https://mcp-1st-birthday-uv-pypi-mcp.hf.space/gradio_api/mcp/",
# transport="streamable-http",
# )
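        # local stdio MCP server that uploads files to the remote Gradio Space
        # via `gradio upload-mcp`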
upload_mcp_params = StdioServerParameters(
command="uvx",
args=[
"--from",
"gradio[mcp]",
"gradio",
"upload-mcp",
                # The base URL must be the Gradio root; upload-mcp appends
                # /gradio_api/upload itself (the docs misleadingly show
                # gradio_api/upload as the base).
"https://mcp-1st-birthday-uv-pypi-mcp.hf.space/",
uploads_dir.as_posix(),
],
)
# Gradio chat interface state to persist uploaded files
files_state = gr.State([])
example_questions = get_example_questions(n=4)
christmas = Christmas()
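        # connect to both MCP servers; the combined toolset is handed to the agent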
with MCPClient(
server_parameters=[
gh_mcp_params,
upload_mcp_params,
],
structured_output=True,
) as toolset:
logger.info("MCP clients connected successfully")
global agent
model = get_agent_model(model_name=AGENT_MODEL)
agent = PackageDiscoveryAgent(
model=model,
tools=toolset,
)
            # Build the UI with Blocks so the login button and docs sit above the chat
            with gr.Blocks(theme=christmas) as demo:
gr.LoginButton()
gr.Markdown(f"""
                # 💻 FixMyEnv: Package Upgrade Advisor 🐍🔧📈📦⚙️
Welcome to the Package Upgrade Advisor!
This AI-powered assistant helps you identify and resolve
outdated or vulnerable packages in your Python projects.
Simply ask a question about package upgrades, and if you
have a `pyproject.toml` or `requirements.txt` file, feel free
to attach it for more tailored advice.
""",
container=True,
)
with gr.Accordion("How to Use", open=False):
gr.Markdown(f"""
1. Type your question in the chat box below.
2. (Optional) Attach your `pyproject.toml` or `requirements.txt`
                    file using the upload button. Uploaded files are
                    removed once the session ends.
3. Click "Submit" and wait for the AI to analyze your query
and provide recommendations.
                    Note: The assistant uses the Hugging Face Inference API for
                    [{AGENT_MODEL}](https://huggingface.co/{AGENT_MODEL}) LLM
                    capabilities, with smolagents tool calling and GitHub MCP
                    for package data retrieval. Hugging Face login is therefore
                    required to use the app. This Gradio app also serves as an
                    MCP server!
""",
)
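                # files_state round-trips through additional inputs/outputs so
                # uploaded attachments persist across chat turns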
gr.ChatInterface(
fn=chat_fn,
chatbot=gr.Chatbot(
height=600,
),
textbox=gr.MultimodalTextbox(
label="pyproject.toml or requirements.txt file can be attached",
file_types=[".toml", ".txt"],
file_count="single",
min_width=100,
sources="upload",
),
additional_inputs=[files_state],
additional_outputs=[files_state],
save_history=True,
examples=example_questions,
stop_btn=True,
cache_examples=False,
)
            demo.launch(mcp_server=True, share=False)
finally:
logger.info("Cleaning up MCP client resources")
# remove contents of uploads_dir
for f in uploads_dir.iterdir():
if f.is_dir():
try:
shutil.rmtree(f)
except Exception:
logger.exception(f"Failed to delete uploaded directory: {f}")
else:
try:
f.unlink()
except Exception:
logger.exception(f"Failed to delete uploaded file: {f}")
logger.info("Shutdown complete.")
if __name__ == "__main__":
main()