# llama_chat_test / app.py
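"""Minimal Gradio demo that streams completions from a GGUF build of
Qwen2.5-1.5B-Instruct via llama-cpp-python. The model file is downloaded
from the Hugging Face Hub at startup and run on CPU."""
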
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

repo_id = "bartowski/Qwen2.5-1.5B-Instruct-GGUF"
filename = "Qwen2.5-1.5B-Instruct-Q8_0.gguf"

CONTEXT_SIZE = 2048
N_THREADS = 2  # the free CPU tier on Hugging Face Spaces provides 2 vCPUs

llm = None
model_loaded = False


def load_model(progress=gr.Progress()):
    """Download the GGUF file from the Hub and load it into llama.cpp."""
    global llm, model_loaded
    progress(0, desc="Starting model download")
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    progress(0.5, desc="Loading model into memory")
    llm = Llama(
        model_path=model_path,
        n_threads=N_THREADS,
        n_batch=32,
        verbose=False,
        n_ctx=CONTEXT_SIZE,
    )
    progress(1, desc="Model loading finished")
    model_loaded = True
    return "Model loading finished."


def get_llama_response(prompt):
    """Return an iterable of llama.cpp completion chunks for ``prompt``."""
    global llm, model_loaded
    if not model_loaded:
        return [{"choices": [{"text": "The model is still loading. Please wait..."}]}]
    try:
        # stream=True makes llm(...) yield partial completions chunk by chunk.
        return llm(prompt, max_tokens=1024, temperature=0.7, top_p=0.95, repeat_penalty=1.1, stream=True)
    except Exception as e:
        return [{"choices": [{"text": f"An error occurred: {str(e)}"}]}]


def greet(prompt, intensity):
    """Stream the growing response to the UI. Since this is a generator,
    status messages must be yielded rather than returned."""
    global model_loaded
    if not model_loaded:
        yield "The model is still loading. Please wait..."
        return
    full_response = ""
    for output in get_llama_response(prompt):
        if len(output['choices']) > 0:
            text_chunk = output['choices'][0]['text']
            full_response += text_chunk
            yield full_response
    # Yield the final value as well: a generator's return value is discarded by Gradio.
    yield full_response + "!" * int(intensity)


with gr.Blocks() as demo:
    gr.Markdown("# Llama.cpp-python-sample (Streaming)")
    gr.Markdown(f"MODEL: {filename} from {repo_id}")
    loading_status = gr.Textbox(label="Loading Status")
    with gr.Row():
        input_text = gr.Textbox(label="Enter your prompt")
        intensity = gr.Slider(minimum=0, maximum=10, step=1, label="Intensity")
    output_text = gr.Textbox(label="Generated Response")
    submit_button = gr.Button("Submit")
    submit_button.click(fn=greet, inputs=[input_text, intensity], outputs=output_text)
    demo.load(fn=load_model, outputs=loading_status)  # kick off model loading when the page opens

demo.queue()
demo.launch()
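
# To try this outside the Space (a sketch; the Space's own requirements file is not
# shown here): install the three imported packages and start the script. Gradio
# serves on http://127.0.0.1:7860 by default.
#
#   pip install gradio huggingface_hub llama-cpp-python
#   python app.py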