# Loewolf-Chat / app.py
# Provenance: Hugging Face Space "Loewolf-Chat", commit 80dac4b ("Update app.py").
# NOTE: the original lines here ("raw / history blame / 1.75 kB") were page-scrape
# residue from the HF file viewer, not code; they are preserved as this comment so
# the file parses as valid Python.
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
# Initialize the tokenizer and model once at module import time.
# Both are loaded from the Hugging Face Hub repo "Loewolf/GPT_1.2"
# (a GPT-2-architecture checkpoint) and shared by generate_text().
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1.2")
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1.2")
def generate_text(prompt):
    """Generate a continuation of *prompt* with the module-level GPT-2 model.

    Encodes the prompt, runs sampled beam search, and returns the decoded
    text (the prompt is included in the output, since the full sequence is
    decoded).

    Parameters
    ----------
    prompt : str
        The user message to continue.

    Returns
    -------
    str
        Decoded generation with special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # No padding is applied to a single prompt, so every position is real
    # input and the mask is all ones.
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
    # Allow up to 50 new tokens, but never exceed the model's context window.
    # Bug fix: the original expression (`n_positions if len > n_positions
    # else len + 50`) could yield len + 50 > n_positions for prompts within
    # 50 tokens of the window, overflowing the model's positional capacity.
    max_length = min(len(input_ids[0]) + 50, model.config.n_positions)
    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=4,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.7,
        top_p=0.95,
        top_k=70,
        length_penalty=2.0,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        # GPT-2 defines no pad token; reuse EOS so generate() can pad batches.
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode the best beam only.
    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
# Markdown blurb for the UI. Bug fix: "#Löwolf" lacked the space after "#"
# that Markdown requires for it to render as a level-1 heading.
DESCRIPTION = """\
# Löwolf GPT1 Chat
Es wird neues Löwolf GPT 1.2 verwendet.
Löwolf Chat verwendet immer das aktuelle GPT Modell von Löwolf Community!
"""
css = """
h1 {
text-align: center;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
.contain {
max-width: 900px;
margin: auto;
padding-top: 1.5rem;
}
"""
# Build and launch the single-turn chat UI.
# Bug fix: DESCRIPTION was defined above but never used; pass it to the
# interface so the Markdown blurb actually appears in the app.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Type a message...", label="Your Message"),
    outputs=gr.Textbox(label="Löwolf Chat Responses", placeholder="Responses will appear here...", interactive=False, lines=10),
    description=DESCRIPTION,
    css=css,
)

# Blocking call: serves the app until the process exits.
iface.launch()