import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Initialize the GPT-2 model and tokenizer from the Hugging Face Hub
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1.2")
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1.2")

def generate_text(prompt):
    # Encode the prompt and build a matching attention mask
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)

    # Allow up to 50 new tokens, but never exceed the model's context window
    max_length = min(len(input_ids[0]) + 50, model.config.n_positions)

    # Beam-search sampling with repetition and length controls
    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=4,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.7,
        top_p=0.95,
        top_k=70,
        length_penalty=2.0,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id 
    )
    
    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
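# A minimal sketch (not part of the original app) of calling generate_text
# directly for a quick local sanity check; the prompt below is illustrative only:
#
#   if __name__ == "__main__":
#       print(generate_text("Hallo, wie geht es dir?"))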

DESCRIPTION = """\
# Löwolf GPT1 Chat

The new Löwolf GPT 1.2 model is used.

Löwolf Chat always uses the latest GPT model from the Löwolf Community!
"""
css = """
h1 {
  text-align: center;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}

.contain {
  max-width: 900px;
  margin: auto;
  padding-top: 1.5rem;
}

"""

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Type a message...", label="Your Message"),
    outputs=gr.Textbox(label="Löwolf Chat Responses", placeholder="Responses will appear here...", interactive=False, lines=10),
    description=DESCRIPTION,
    css=css,
)

iface.launch()
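# Optional (assumption, standard Gradio parameter): launch with a temporary
# public share link instead, e.g.
#   iface.launch(share=True)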