Loewolf committed on
Commit
248d772
1 Parent(s): 06ea162

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -64
app.py CHANGED
@@ -1,73 +1,51 @@
1
- import os
2
- from threading import Thread
3
- from typing import Iterator
4
-
5
  import gradio as gr
6
- import spaces
7
- from transformers import AutoModelForCausalLM, AutoTokenizer
8
-
9
- # Konfigurationsparameter
10
- MAX_MAX_NEW_TOKENS = 100
11
- DEFAULT_MAX_NEW_TOKENS = 20
12
- MAX_INPUT_TOKEN_LENGTH = 400 # Begrenzung auf 400 Tokens
13
-
14
- # Modell und Tokenizer laden
15
- model_id = "Loewolf/GPT_1"
16
- model = AutoModelForCausalLM.from_pretrained(model_id)
17
- tokenizer = AutoTokenizer.from_pretrained(model_id)
18
-
19
- # Gradio Chat Interface Funktion
20
- def generate(
21
- message: str,
22
- chat_history: list[tuple[str, str]],
23
- system_prompt: str,
24
- max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
25
- temperature: float = 0.6,
26
- top_p: float = 0.9,
27
- top_k: int = 50,
28
- repetition_penalty: float = 1.2,
29
- ) -> str:
30
- conversation = []
31
- if system_prompt:
32
- conversation.append({"role": "system", "content": system_prompt})
33
- for user, assistant in chat_history:
34
- conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
35
- conversation.append({"role": "user", "content": message})
36
-
37
- input_ids = tokenizer(conversation, return_tensors="pt", truncation=True, max_length=MAX_INPUT_TOKEN_LENGTH)
38
- generate_kwargs = dict(
39
- input_ids=input_ids["input_ids"],
40
- max_length=input_ids["input_ids"].shape[1] + max_new_tokens,
41
- temperature=temperature,
42
- top_p=top_p,
43
- top_k=top_k,
44
- repetition_penalty=repetition_penalty,
45
- pad_token_id=tokenizer.eos_token_id
46
  )
47
 
48
- outputs = model.generate(**generate_kwargs)
49
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
50
 
51
- # Gradio Interface
52
- chat_interface = gr.Interface(
53
- fn=generate,
54
- inputs=[
55
- gr.Textbox(label="Message"),
56
- gr.JSON(label="Chat History"),
57
- gr.Textbox(label="System Prompt", lines=2),
58
- gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),
59
- gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.6),
60
- gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
61
- gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50),
62
- gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
63
- ],
64
  outputs="text",
65
- live=True
 
66
  )
67
 
68
- # Starten des Gradio-Servers
69
- if __name__ == "__main__":
70
- chat_interface.launch()
71
-
72
 
73
 
 
 
 
 
 
1
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Hugging Face Hub checkpoint served by this Space.
MODEL_ID = "Loewolf/GPT_1"

# Load the tokenizer and the language model once at import time so every
# request reuses the same in-memory weights.
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_ID)
model = GPT2LMHeadModel.from_pretrained(MODEL_ID)
9
def generate_text(prompt):
    """Generate a chat reply for *prompt* with the Loewolf GPT-2 model.

    Args:
        prompt: Raw user input text.

    Returns:
        The decoded model output with special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Attend to every input token; no padding is ever present because we
    # encode a single prompt at a time. transformers expects an integer mask.
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)

    # Allow up to 20 freshly generated tokens, but never let the total
    # sequence exceed the model's context window. The previous expression
    # clamped to n_positions only when the input ALREADY exceeded the
    # window, so a prompt of length n_positions - 5 produced
    # max_length > n_positions and broke generation.
    max_length = min(input_ids.shape[1] + 20, model.config.n_positions)

    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=4,  # force at least a short reply
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.9,
        top_p=0.90,
        top_k=50,
        length_penalty=2.0,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
    )

    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
38
 
39
# Build the "Löwolf Chat" web UI: one text box in, generated text out.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Schreibe hier...", placeholder="Stelle deine Frage..."),
    outputs="text",
    title="Löwolf Chat",
    description="Willkommen beim Löwolf Chat. Stelle deine Fragen und erhalte Antworten vom KI-Chatbot.",
)

# Start the Gradio server for the chatbot interface.
iface.launch()
 
 
50
 
51