import torch
import gradio as gr
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

BASE_MODEL = "TheBloke/stable-vicuna-13B-HF"

# device_map="auto" lets Accelerate spread the model's layers across the
# available GPU(s) and CPU according to their memory capacity; weights that do
# not fit can be offloaded to offload_folder. A hand-written map could be
# passed instead (see the commented sketch after the load call below).

model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    load_in_8bit=True,
    device_map="auto",
    offload_folder="./cache",
)
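
# Illustrative sketch of a hand-written device map (assumed module names for a
# Hugging Face LLaMA model; not part of the original script). Device maps go
# from module names to devices, e.g.:
#
#   custom_device_map = {
#       "model.embed_tokens": 0,
#       "model.layers.0": 0,
#       # ... remaining decoder layers split between 0 and 1 ...
#       "model.norm": 1,
#       "lm_head": 1,
#   }
#
# and would be passed as device_map=custom_device_map instead of "auto".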

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
tokenizer.pad_token_id = 0  # LLaMA has no pad token; reuse id 0 (<unk>)
tokenizer.padding_side = "left"  # left-pad so generation continues from the end of the prompt

def format_prompt(prompt: str) -> str:
    # StableVicuna expects "### Human: ... ### Assistant:" style turns
    return f"### Human: {prompt}\n### Assistant:"

generation_config = GenerationConfig(
    max_new_tokens=128,
    do_sample=True,  # sampling must be enabled for temperature to have an effect
    temperature=0.2,
    repetition_penalty=1.0,
)

def generate_text(prompt: str):
    formatted_prompt = format_prompt(prompt)

    inputs = tokenizer(
        formatted_prompt, 
        padding=False, 
        add_special_tokens=False, 
        return_tensors="pt"
    ).to(model.device)

    with torch.inference_mode():
        tokens = model.generate(**inputs, generation_config=generation_config)

    response = tokenizer.decode(tokens[0], skip_special_tokens=True)
    # The decoded text still contains the prompt; return only the part after
    # the "### Assistant:" marker.
    assistant_index = response.find("### Assistant:") + len("### Assistant:")
    return response[assistant_index:].strip()
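
# Quick manual check from Python (illustrative example, not in the original):
#   print(generate_text("What is the capital of France?"))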

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
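# Launch with defaults; as a hedged note, gr.Interface.launch() also accepts
# e.g. share=True for a temporary public URL or server_name="0.0.0.0" to
# listen on all network interfaces.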
iface.launch()