tayyab-077 committed on
Commit a6c748c · 1 Parent(s): 8c9ab96
Files changed (2)
  1. app.py +17 -77
  2. src/model_loader.py +6 -7
app.py CHANGED
@@ -1,18 +1,14 @@
 import gradio as gr
-import os
-import tempfile
-import textwrap
 from datetime import datetime
-from typing import List, Dict, Any, Optional
-
+from pathlib import Path
 from src.model_loader import load_local_model
 from src.conversation import ConversationMemory
 from src.chatbot import LocalChatbot
 
-# ----------------------
-# Model setup
-# ----------------------
-MODEL_PATH = "models/gemma-2-2b-it-Q4_K_M" # quantized 2B
+# ----------------------------
+# Model & Memory
+# ----------------------------
+MODEL_PATH = "togethercomputer/Gemini-2-2B" # public HF model
 llm = load_local_model(MODEL_PATH, device=-1) # CPU
 memory = ConversationMemory(max_len=60)
 bot = LocalChatbot(llm, memory)
@@ -27,49 +23,10 @@ INTENT_TEMPLATES = {
 def now_ts():
     return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
 
-# ----------------------
-# Export TXT/PDF
-# ----------------------
-def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
-    tmpdir = tempfile.gettempdir()
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
-
-    with open(txt_path, "w", encoding="utf-8") as f:
-        for msg in history:
-            content = msg.get("content", "")
-            lines = content.splitlines()
-            lines = [l.replace("USER:", "").replace("ASSISTANT:", "").strip() for l in lines]
-            f.write("\n".join(lines).strip() + "\n")
-            f.write("-" * 60 + "\n")
-
-    pdf_path = None
-    try:
-        from reportlab.lib.pagesizes import A4
-        from reportlab.pdfgen import canvas
-
-        pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
-        c = canvas.Canvas(pdf_path, pagesize=A4)
-        width, height = A4
-        margin = 40
-        textobject = c.beginText(margin, height - margin)
-        textobject.setFont("Helvetica", 10)
-        with open(txt_path, "r", encoding="utf-8") as fh:
-            for line in fh:
-                for wrapped in textwrap.wrap(line.rstrip(), 100):
-                    textobject.textLine(wrapped)
-        c.drawText(textobject)
-        c.showPage()
-        c.save()
-    except:
-        pdf_path = None
-
-    return {"txt": txt_path, "pdf": pdf_path}
-
-# ----------------------
-# Core chat function
-# ----------------------
-def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
+# ----------------------------
+# Chat Function
+# ----------------------------
+def generate_reply(user_msg, history=None):
     if history is None:
         history = []
 
@@ -93,6 +50,7 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
 
     history.append({"role": "user", "content": user_msg})
     history.append({"role": "assistant", "content": bot_reply_ts})
+
     try:
         memory.add(user_msg, bot_reply)
     except:
@@ -100,26 +58,16 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
 
     return history
 
-# ----------------------
+# ----------------------------
 # Gradio UI
-# ----------------------
+# ----------------------------
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    with gr.Row():
-        with gr.Column(scale=1, min_width=220):
-            gr.Markdown("### ⚡ Tools & Export")
-            new_chat_btn = gr.Button("➕ New Chat")
-            export_btn = gr.Button("📥 Export TXT/PDF")
-
-        with gr.Column(scale=3):
-            gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
-            chatbot = gr.Chatbot(height=480, type="messages")
+    gr.Markdown("## ⚡ Smart Learning Assistant - Tayyab")
 
-    with gr.Row():
-        msg = gr.Textbox(placeholder="Type a message", show_label=False, lines=3)
-        send_btn = gr.Button("Send")
-
-    file_txt = gr.File(visible=False)
-    file_pdf = gr.File(visible=False)
+    chatbot = gr.Chatbot(height=480)
+    msg = gr.Textbox(placeholder="Type your message...", lines=3)
+    send_btn = gr.Button("Send")
+    new_chat_btn = gr.Button("➕ New Chat")
 
     send_btn.click(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
     msg.submit(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
@@ -129,13 +77,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         return []
     new_chat_btn.click(new_chat, outputs=[chatbot])
 
-    def export_handler(history):
-        files = export_chat_files(history or [])
-        return (
-            gr.update(value=files.get("txt"), visible=True),
-            gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
-        )
-    export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
-
 if __name__ == "__main__":
     demo.launch()
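Note on the history format: generate_reply appends role/content dicts, which is the messages format consumed by gr.Chatbot(type="messages"); depending on the Gradio version, omitting type="messages" (as the new gr.Chatbot(height=480) call does) may fall back to the older pair format. A minimal sketch of the contract, with a stubbed echo reply standing in for the real bot call, which this diff does not show:

# Sketch of the messages-format history that generate_reply builds.
# fake_generate_reply is hypothetical; the echo string is a stand-in
# for the actual model generation.
def fake_generate_reply(user_msg, history=None):
    if history is None:
        history = []
    bot_reply = f"echo: {user_msg}"  # stand-in for bot's generation
    history.append({"role": "user", "content": user_msg})
    history.append({"role": "assistant", "content": bot_reply})
    return history

history = fake_generate_reply("What is overfitting?")
history = fake_generate_reply("Give an example.", history)
assert [m["role"] for m in history] == ["user", "assistant", "user", "assistant"]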
src/model_loader.py CHANGED
@@ -1,18 +1,17 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-def load_local_model(model_path, device=0):
+def load_local_model(model_path: str, device: int = -1):
     """
-    Loads a local quantized model for CPU or GPU.
-    device=-1 => CPU, device>=0 => GPU
+    Loads a Hugging Face model on CPU/GPU.
+    device=-1 → CPU, device=0 → first GPU
     """
     tokenizer = AutoTokenizer.from_pretrained(model_path)
     model = AutoModelForCausalLM.from_pretrained(model_path)
 
-    # Use pipeline for text generation
     generator = pipeline(
         "text-generation",
         model=model,
         tokenizer=tokenizer,
-        device=device
+        device=device  # -1=CPU
     )
     return generator
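
For a quick smoke test of the loader, a minimal sketch; the tiny checkpoint below is an illustrative stand-in for MODEL_PATH, not the model this Space serves, and max_new_tokens is a standard generation argument passed through the pipeline:

from src.model_loader import load_local_model

# Illustrative tiny checkpoint for a fast CPU check; substitute the
# repo id or local path you actually deploy as MODEL_PATH.
generator = load_local_model("sshleifer/tiny-gpt2", device=-1)

out = generator("Explain overfitting in one sentence.", max_new_tokens=40)
print(out[0]["generated_text"])  # text-generation pipelines return a list of dicts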