|
import gradio as gr
from langchain_community.llms import LlamaCpp

# Load the GGUF model once at startup; it is bound to the shared `llm`
# variable inside the Blocks context below.
import_llm = LlamaCpp(
    model_path="./model/model.gguf",
    n_ctx=4096,        # context window size
    max_tokens=4096,   # generation cap
    temperature=0.0,   # greedy, deterministic decoding
    top_p=0.95,
    top_k=50,
    verbose=False,
    streaming=True,    # required for token-by-token output via llm.stream()
)
|
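# Prompt templates (kept in Indonesian, the app's target language). PROMPT
# covers the first turn: "You are a virtual assistant; your task is to answer
# questions from your general knowledge." PROMPT_HISTORY additionally wraps
# the prior exchange in [RIWAYAT] ("history") tags and asks for an answer to
# the follow-up question ("Pertanyaan Lanjutan").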
PROMPT = """Anda adalah seorang asisten virtual, tugas Anda adalah menjawab pertanyaan sesuai pengetahuan umum Anda.

Pertanyaan Pengguna: {}

Jawaban Anda:"""
|
PROMPT_HISTORY = """Anda adalah seorang asisten virtual, tugas Anda adalah menjawab pertanyaan sesuai pengetahuan umum Anda.

Berikut adalah riwayat percakapan Anda:
[RIWAYAT]
{}
[/RIWAYAT]

Pertanyaan Lanjutan: {}

Jawaban Anda:"""
|
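# Shared model handle plus the Gemma-2 chat-format markers. Gemma-2
# instruction-tuned checkpoints expect <start_of_turn>/<end_of_turn> turn
# markers; llama.cpp receives a plain string prompt here, so the chat
# template is reproduced manually instead of being applied by the library.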
llm = None  # populated inside the Blocks context below

chat_template = {
    "user": "<start_of_turn>user\n{}<end_of_turn>\n",
    "assistant": "<start_of_turn>model\n{}<end_of_turn>\n",
    "generation_prompt": "<start_of_turn>model\n",
}
|
# NOTE: both versions are read from the same file, so they always match;
# point one of these at a separate file if the API and model are versioned
# independently. Context managers ensure the file handles are closed.
with open("./versions.txt", "r") as version_file:
    api_version = version_file.read().strip()
with open("./versions.txt", "r") as version_file:
    model_version = version_file.read().strip()
|
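# Gradio UI: a version banner, a chat window, a single-line input box, and a
# clear button.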
with gr.Blocks(title="Gemma2-2b-it-GGUF") as nlp:
    gr.HTML(
        "<h2 style='font-size: 3rem'>Gemma2-2b-it-GGUF</h2>"
        "<div style='display: flex; flex-direction: row; align-items: center; gap: 0 5px;'>"
        "<div><span style='font-size: 16px; background-color: gray; padding: 3px 5px; border-radius: 5px;'>API Version: {}</span></div>"
        "<div><span style='font-size: 16px; background-color: gray; padding: 3px 5px; margin: 2px 0; border-radius: 5px;'>Model Version: {}</span></div>"
        "</div>".format(api_version, model_version)
    )
    bot = gr.Chatbot(label="Gemma2-2b-it")
    msg = gr.Textbox(
        placeholder='Tanya sesuatu! (Tekan "enter" untuk mengirim)',  # "Ask something! (Press Enter to send)"
        label="",
        elem_id="inputTextBox",
    )
    clear = gr.Button("Clear", variant="stop")
|
    # Bind the preloaded model to the shared handle on first construction.
    if llm is None:
        llm = import_llm
|
    def user(user_message, history):
        # Append the new message (reply still pending) and clear the textbox.
        return "", history + [[user_message, None]]
|
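    # predict() rebuilds the full prompt on every turn: the first turn goes
    # through PROMPT, later turns serialize the earlier question/answer pairs
    # into PROMPT_HISTORY before wrapping everything in Gemma turn markers.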
    def predict(history):
        if len(history) == 1:
            # First turn: no history to include.
            user_input = PROMPT.format(history[0][0])
        else:
            # Serialize all earlier turns as question/answer lines.
            chat_history = ""
            for user_turn, bot_turn in history[:-1]:
                if user_turn is not None:
                    chat_history += f"Pertanyaan: {user_turn}\n"  # "Question"
                if bot_turn is not None:
                    chat_history += f"Jawaban: {bot_turn}\n"      # "Answer"
            user_input = PROMPT_HISTORY.format(
                chat_history, history[-1][0] if history[-1][0] is not None else ""
            )

        # Wrap the filled template in Gemma turn markers and open the model turn.
        prompt = chat_template["user"].format(user_input)
        prompt += chat_template["generation_prompt"]

        # Stream tokens into the last history slot, yielding after each token
        # so the Chatbot component updates incrementally.
        history[-1][1] = ""
        for token in llm.stream(prompt):
            history[-1][1] += token
            yield history
|
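    # Event wiring: user() runs synchronously (queue=False) so the message
    # appears immediately, then predict() streams the model's reply.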
    msg.submit(user, [msg, bot], [msg, bot], queue=False).then(
        predict, bot, bot
    )
    clear.click(lambda: None, None, bot, queue=False)  # reset the chat window
|
# max_threads=1 caps Gradio's worker pool so only one generation runs at a
# time against the single llama.cpp instance.
nlp.launch(max_threads=1)