from typing import Iterator

from llama_cpp import Llama
from huggingface_hub import hf_hub_download

from conversation import get_default_conv_template

model_repo = "audreyt/Taiwan-LLaMa-v1.0-GGML"
model_filename = "Taiwan-LLaMa-13b-1.0.ggmlv3.q4_K_S.bin"
# model_filename = "Chinese-Llama-2-7b.ggmlv3.q8_0.bin"


def download_model() -> str:
    # See https://github.com/OpenAccess-AI-Collective/ggml-webui/blob/main/tabbed.py
    # and https://huggingface.co/spaces/kat33/llama.cpp/blob/main/app.py
    print(f"Downloading model: {model_repo}/{model_filename}")
    file = hf_hub_download(repo_id=model_repo, filename=model_filename)
    print(f"Downloaded {file}")
    return file


model_path = download_model()

# Load the Llama-2-based GGML model.
llm = Llama(model_path=model_path, n_ctx=4000, verbose=False)
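# Note: n_ctx bounds the prompt and the generated tokens together, so with
# run()'s default max_new_tokens=1024 below, input prompts should stay under
# roughly 3000 tokens; get_input_token_length() below can measure this.
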
def get_prompt(message: str, chat_history: list[tuple[str, str]],
               system_prompt: str) -> str:
    # Modified from
    # https://huggingface.co/spaces/yentinglin/Taiwan-LLaMa2/blob/main/app.py
    conv = get_default_conv_template("vicuna").copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}  # map human to USER and gpt to ASSISTANT
    conv.system = system_prompt
    for user, bot in chat_history:
        conv.append_message(roles["human"], user)
        conv.append_message(roles["gpt"], bot)
    conv.append_message(roles["human"], message)
    prompt = conv.get_prompt()
    # print(f"get_prompt: {prompt!r}")  # debug
    return prompt
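# For reference, the Vicuna-style template above typically renders along these
# lines (the exact separators depend on the bundled conversation module, so
# treat this as an illustrative sketch rather than the guaranteed format):
#
#   {system_prompt} USER: {user_1} ASSISTANT: {bot_1}</s>USER: {message}
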
def generate(prompt, max_new_tokens, temperature, top_p, top_k):
    # Single-shot (non-streaming) completion; generation halts at the
    # end-of-sequence tag.
    return llm(prompt,
               max_tokens=max_new_tokens,
               stop=["</s>"],
               temperature=temperature,
               top_p=top_p,
               top_k=top_k,
               stream=False)
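# A hedged streaming counterpart to generate(), using llama-cpp-python's
# stream=True mode, which yields completion chunks instead of one response.
# generate_stream is a name introduced here for illustration; it is not part
# of the original Space.
def generate_stream(prompt, max_new_tokens, temperature, top_p, top_k):
    # Each yielded chunk carries only the newly generated text fragment in
    # chunk['choices'][0]['text'].
    return llm(prompt,
               max_tokens=max_new_tokens,
               stop=["</s>"],
               temperature=temperature,
               top_p=top_p,
               top_k=top_k,
               stream=True)
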
def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
    prompt = get_prompt(message, chat_history, system_prompt)
    input_ids = llm.tokenize(prompt.encode('utf-8'))
    return len(input_ids)
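# A minimal sketch of using the token count to guard the context window before
# generating. MAX_INPUT_TOKEN_LENGTH is a hypothetical limit chosen here for
# illustration (n_ctx=4000 minus headroom for generation); it is not defined
# anywhere else in this file.
#
# MAX_INPUT_TOKEN_LENGTH = 3000
# if get_input_token_length(message, chat_history, system_prompt) > MAX_INPUT_TOKEN_LENGTH:
#     raise ValueError("Prompt is too long for the model's context window.")
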
def run(message: str,
        chat_history: list[tuple[str, str]],
        system_prompt: str,
        max_new_tokens: int = 1024,
        temperature: float = 0.8,
        top_p: float = 0.95,
        top_k: int = 50) -> Iterator[str]:
    prompt = get_prompt(message, chat_history, system_prompt)
    output = generate(prompt, max_new_tokens, temperature, top_p, top_k)
    # Non-streaming: the full completion is yielded in one chunk.
    yield output['choices'][0]['text']
    # Streaming variant, kept for reference; it pairs with a stream=True call
    # such as the generate_stream sketch above (streamer = generate_stream(...)):
    # outputs = []
    # for resp in streamer:
    #     outputs.append(resp['choices'][0]['text'])
    #     yield ''.join(outputs)
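
# A minimal usage sketch, assuming the module is run directly; the example
# message and system prompt are illustrative, not taken from the Space.
if __name__ == "__main__":
    history: list[tuple[str, str]] = []
    for chunk in run("你好，請自我介紹。", history, "You are a helpful assistant."):
        print(chunk, end="", flush=True)
    print()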