LMLK committed on
Commit
ce6a691
1 Parent(s): 71ff2e0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +68 -7
README.md CHANGED
@@ -14,6 +14,9 @@ AMD-Llama-135m is a language model trained on AMD MI250 GPUs. Based on LLaMA2 mo
14
  ## Quickstart
15
  AMD-Llama-135m-code-GGUF can be loaded and used via Llama.cpp; here is a program with a GUI.
16
 
 
 
 
17
 
18
  ```python
19
  import sys
@@ -23,11 +26,11 @@ from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QPushButton,
23
  QFileDialog, QProgressBar, QMessageBox, QMenu)
24
  from PyQt5.QtCore import Qt, QThread, pyqtSignal
25
  from llama_cpp import Llama
26
-
27
 
28
  class Worker(QThread):
29
  finished = pyqtSignal(str)
30
- progress = pyqtSignal(int, int) # Pass total tokens as well
31
 
32
  def __init__(self, model, messages, max_tokens):
33
  super().__init__()
@@ -50,13 +53,12 @@ class Worker(QThread):
50
  if "choices" in chunk:
51
  content = chunk["choices"][0]["delta"].get("content", "")
52
  full_response += content
53
- total_tokens += 1 # Assume each chunk is 1 token (adjust if needed)
54
  self.progress.emit(total_tokens, self.max_tokens)
55
  self.finished.emit(full_response)
56
  except Exception as e:
57
  self.finished.emit(f"Error generating response: {str(e)}")
58
 
59
-
60
  class ChatbotGUI(QWidget):
61
  def __init__(self):
62
  super().__init__()
@@ -67,6 +69,8 @@ class ChatbotGUI(QWidget):
67
  self.messages = [
68
  {"role": "system", "content": "You are a helpful AI assistant."}
69
  ]
 
 
70
 
71
  self.initUI()
72
 
@@ -80,6 +84,27 @@ class ChatbotGUI(QWidget):
80
  model_layout.addWidget(model_label)
81
  model_layout.addWidget(load_button)
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  # Chat display
84
  self.chat_display = QTextEdit()
85
  self.chat_display.setReadOnly(True)
@@ -107,6 +132,8 @@ class ChatbotGUI(QWidget):
107
  # Main layout
108
  main_layout = QVBoxLayout()
109
  main_layout.addLayout(model_layout)
 
 
110
  main_layout.addWidget(self.chat_display)
111
  main_layout.addWidget(self.progress_bar)
112
  main_layout.addLayout(input_layout)
@@ -118,7 +145,7 @@ class ChatbotGUI(QWidget):
118
  model_path, _ = QFileDialog.getOpenFileName(self, "Load GGUF Model", "", "GGUF Files (*.gguf)")
119
  if model_path:
120
  try:
121
- self.model = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=-1)
122
  model_name = os.path.basename(model_path)
123
  self.layout().itemAt(0).itemAt(0).widget().setText(f"Model: {model_name}")
124
  QMessageBox.information(self, "Success", "Model loaded successfully!")
@@ -126,6 +153,34 @@ class ChatbotGUI(QWidget):
126
  error_message = f"Error loading model: {str(e)}"
127
  QMessageBox.critical(self, "Error", error_message)
128
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  def send_message(self):
130
  user_message = self.user_input.text()
131
  if user_message and self.model:
@@ -133,11 +188,15 @@ class ChatbotGUI(QWidget):
133
  self.update_chat_display(f"You: {user_message}")
134
  self.user_input.clear()
135
 
136
- max_tokens = 500 # Set your desired max tokens here
137
  self.progress_bar.show()
138
  self.progress_bar.setRange(0, max_tokens)
139
  self.progress_bar.setValue(0)
140
 
 
 
 
 
141
  self.worker = Worker(self.model, self.messages, max_tokens)
142
  self.worker.finished.connect(self.on_response_finished)
143
  self.worker.progress.connect(self.on_response_progress)
@@ -148,7 +207,7 @@ class ChatbotGUI(QWidget):
148
  self.messages.append({"role": "assistant", "content": assistant_message})
149
  self.update_chat_display(f"Assistant: {assistant_message}")
150
 
151
- # Python Code Download (Check for triple backticks)
152
  if assistant_message.startswith("```python") and assistant_message.endswith("```"):
153
  self.offer_code_download(assistant_message)
154
 
@@ -178,6 +237,8 @@ class ChatbotGUI(QWidget):
178
  {"role": "system", "content": "You are a helpful AI assistant."}
179
  ]
180
  self.chat_display.clear()
 
 
181
 
182
  def show_context_menu(self, point):
183
  menu = QMenu(self)
 
14
  ## Quickstart
15
  AMD-Llama-135m-code-GGUF can be loaded and used via Llama.cpp; here is a program with a GUI.
16
 
17
+ ```bash
18
+ pip install PyQt5 llama-cpp-python pymupdf
19
+ ```
20
 
21
  ```python
22
  import sys
 
26
  QFileDialog, QProgressBar, QMessageBox, QMenu)
27
  from PyQt5.QtCore import Qt, QThread, pyqtSignal
28
  from llama_cpp import Llama
29
+ import fitz # For PDF processing
30
 
31
  class Worker(QThread):
32
  finished = pyqtSignal(str)
33
+ progress = pyqtSignal(int, int)
34
 
35
  def __init__(self, model, messages, max_tokens):
36
  super().__init__()
 
53
  if "choices" in chunk:
54
  content = chunk["choices"][0]["delta"].get("content", "")
55
  full_response += content
56
+ total_tokens += 1
57
  self.progress.emit(total_tokens, self.max_tokens)
58
  self.finished.emit(full_response)
59
  except Exception as e:
60
  self.finished.emit(f"Error generating response: {str(e)}")
61
 
 
62
  class ChatbotGUI(QWidget):
63
  def __init__(self):
64
  super().__init__()
 
69
  self.messages = [
70
  {"role": "system", "content": "You are a helpful AI assistant."}
71
  ]
72
+ self.thread_count = 12
73
+ self.pdf_content = ""
74
 
75
  self.initUI()
76
 
 
84
  model_layout.addWidget(model_label)
85
  model_layout.addWidget(load_button)
86
 
87
+ # PDF upload section
88
+ pdf_label = QLabel("PDF: No PDF loaded")
89
+ upload_pdf_button = QPushButton("Upload PDF")
90
+ upload_pdf_button.clicked.connect(self.upload_pdf)
91
+
92
+ pdf_layout = QHBoxLayout()
93
+ pdf_layout.addWidget(pdf_label)
94
+ pdf_layout.addWidget(upload_pdf_button)
95
+
96
+ # Thread count section
97
+ thread_label = QLabel(f"Thread Count: {self.thread_count}")
98
+ self.thread_input = QLineEdit()
99
+ self.thread_input.setPlaceholderText("Enter new thread count")
100
+ update_thread_button = QPushButton("Update Threads")
101
+ update_thread_button.clicked.connect(self.update_thread_count)
102
+
103
+ thread_layout = QHBoxLayout()
104
+ thread_layout.addWidget(thread_label)
105
+ thread_layout.addWidget(self.thread_input)
106
+ thread_layout.addWidget(update_thread_button)
107
+
108
  # Chat display
109
  self.chat_display = QTextEdit()
110
  self.chat_display.setReadOnly(True)
 
132
  # Main layout
133
  main_layout = QVBoxLayout()
134
  main_layout.addLayout(model_layout)
135
+ main_layout.addLayout(pdf_layout) # PDF before threads
136
+ main_layout.addLayout(thread_layout)
137
  main_layout.addWidget(self.chat_display)
138
  main_layout.addWidget(self.progress_bar)
139
  main_layout.addLayout(input_layout)
 
145
  model_path, _ = QFileDialog.getOpenFileName(self, "Load GGUF Model", "", "GGUF Files (*.gguf)")
146
  if model_path:
147
  try:
148
+ self.model = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=-1, n_threads=self.thread_count)
149
  model_name = os.path.basename(model_path)
150
  self.layout().itemAt(0).itemAt(0).widget().setText(f"Model: {model_name}")
151
  QMessageBox.information(self, "Success", "Model loaded successfully!")
 
153
  error_message = f"Error loading model: {str(e)}"
154
  QMessageBox.critical(self, "Error", error_message)
155
 
156
+ def update_thread_count(self):
157
+ try:
158
+ new_thread_count = int(self.thread_input.text())
159
+ if new_thread_count > 0:
160
+ self.thread_count = new_thread_count
161
+ self.layout().itemAt(2).itemAt(0).widget().setText(f"Thread Count: {self.thread_count}") # Updated index
162
+ self.thread_input.clear()
163
+ if self.model:
164
+ self.model.set_thread_count(self.thread_count)
165
+ QMessageBox.information(self, "Success", f"Thread count updated to {self.thread_count}")
166
+ else:
167
+ raise ValueError("Thread count must be a positive integer")
168
+ except ValueError as e:
169
+ QMessageBox.warning(self, "Invalid Input", str(e))
170
+
171
+ def upload_pdf(self):
172
+ pdf_path, _ = QFileDialog.getOpenFileName(self, "Upload PDF", "", "PDF Files (*.pdf)")
173
+ if pdf_path:
174
+ try:
175
+ doc = fitz.open(pdf_path)
176
+ self.pdf_content = ""
177
+ for page in doc:
178
+ self.pdf_content += page.get_text()
179
+ self.layout().itemAt(1).itemAt(0).widget().setText(f"PDF: {os.path.basename(pdf_path)}") # Updated index
180
+ QMessageBox.information(self, "Success", "PDF loaded successfully!")
181
+ except Exception as e:
182
+ QMessageBox.critical(self, "Error", f"Error loading PDF: {str(e)}")
183
+
184
  def send_message(self):
185
  user_message = self.user_input.text()
186
  if user_message and self.model:
 
188
  self.update_chat_display(f"You: {user_message}")
189
  self.user_input.clear()
190
 
191
+ max_tokens = 1000
192
  self.progress_bar.show()
193
  self.progress_bar.setRange(0, max_tokens)
194
  self.progress_bar.setValue(0)
195
 
196
+ # Add PDF content if available
197
+ if self.pdf_content:
198
+ self.messages.append({"role": "user", "content": self.pdf_content})
199
+
200
  self.worker = Worker(self.model, self.messages, max_tokens)
201
  self.worker.finished.connect(self.on_response_finished)
202
  self.worker.progress.connect(self.on_response_progress)
 
207
  self.messages.append({"role": "assistant", "content": assistant_message})
208
  self.update_chat_display(f"Assistant: {assistant_message}")
209
 
210
+ # Python Code Download
211
  if assistant_message.startswith("```python") and assistant_message.endswith("```"):
212
  self.offer_code_download(assistant_message)
213
 
 
237
  {"role": "system", "content": "You are a helpful AI assistant."}
238
  ]
239
  self.chat_display.clear()
240
+ self.pdf_content = "" # Clear PDF content
241
+ self.layout().itemAt(1).itemAt(0).widget().setText("PDF: No PDF loaded") # Updated index
242
 
243
  def show_context_menu(self, point):
244
  menu = QMenu(self)