import copy

import gradio as gr
from huggingface_hub import hf_hub_download  # fetch the model weights from the Hugging Face Hub
from llama_cpp import Llama

# Download the quantized GGML model from the Hub and load it on CPU; n_ctx=2048 for a high context length.
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/OpenAssistant-Llama2-13B-Orca-v2-8K-3166-GGML",
        filename="openassistant-llama2-13b-orca-v2-8k-3166.ggmlv3.q6_K.bin",
    ),
    n_ctx=2048,
)
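# A minimal sketch of what stream=True yields from llama-cpp-python (assuming the
# model above has loaded): each chunk is a dict shaped like
#   {"choices": [{"text": "...", "finish_reason": None}], ...}
# so a completion can be printed incrementally like this:
#
#   for chunk in llm("Q: Hi\n A:", max_tokens=16, stream=True):
#       print(chunk["choices"][0]["text"], end="", flush=True)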
# Preamble prepended on the first turn of a conversation.
pre_prompt = " The user and the AI are having a conversation : "
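# With that preamble, the first prompt sent to the model looks roughly like:
#
#   The user and the AI are having a conversation :
#   Q: <user message>
#    A:
#
# and the model completes the text after "A:".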
def generate_text(input_text, history):
    print("history", history)
    print("input", input_text)
    temp = ""
    # First turn: seed the prompt with the preamble; later turns continue
    # from the last assistant reply stored in the chat history.
    if history == []:
        input_text_with_history = f"{pre_prompt}\nQ: {input_text}\n A:"
    else:
        input_text_with_history = f"{history[-1][1]}\n"
        input_text_with_history += f"Q: {input_text}\n A:"
    print("new input", input_text_with_history)
    # Stop on "Q:" or a newline so the model does not start answering for the user.
    output = llm(input_text_with_history, max_tokens=1024, stop=["Q:", "\n"], stream=True)
    for out in output:
        stream = copy.deepcopy(out)
        print(stream["choices"][0]["text"])
        temp += stream["choices"][0]["text"]
        # Yield the accumulated text so gr.ChatInterface renders it as a live stream.
        yield temp
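# Quick sanity check outside Gradio (a sketch; in the app, gr.ChatInterface
# drives this generator and passes history as [[user, assistant], ...] pairs):
#
#   for partial in generate_text("Are tomatoes vegetables?", []):
#       print(partial)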
demo = gr.ChatInterface(
    generate_text,
    title="LLM on CPU",
    description="Running an LLM on CPU with https://github.com/abetlen/llama-cpp-python. By the way, text streaming was the hardest part to implement.",
    examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)
# Serialize generations: one request at a time, with up to five queued.
demo.queue(concurrency_count=1, max_size=5)
demo.launch()
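# To run this Space locally (assuming the package names match the imports):
#   pip install gradio llama-cpp-python huggingface_hub
#   python app.py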