upload gradio app
- README.md +4 -4
- __pycache__/gr_app.cpython-310.pyc +0 -0
- flagged/log.csv +3 -0
- gr_app.py +108 -0
- app.py → st_app.py +1 -1
README.md
CHANGED
@@ -1,11 +1,11 @@
 ---
-title:
+title: Zitatgenerator
 emoji: 🏃
 colorFrom: green
 colorTo: indigo
-sdk:
-sdk_version:
-app_file:
+sdk: gradio
+sdk_version: 4.7.1
+app_file: gr_app.py
 pinned: false
 ---
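On Spaces, these front-matter fields drive the build: sdk: gradio together with sdk_version: 4.7.1 pins the runtime SDK, and app_file: gr_app.py names the script the Space executes, which is why the Streamlit entry point can be renamed to st_app.py below without being picked up as the app.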
__pycache__/gr_app.cpython-310.pyc
ADDED
Binary file (2.68 kB).
flagged/log.csv
ADDED
@@ -0,0 +1,3 @@
+Zitat generieren für,Modus,output,flag,username,timestamp
+Christoph,Ausgeglichen,"""Das Böse ist eine Verirrung, ein Irrtum."" - Christoph",,,2023-12-01 22:36:37.119252
+,Ausgeglichen,,,,2023-12-01 22:41:25.516168
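The column layout of this log matches what Gradio's built-in flagging (its default CSVLogger) writes: one column per labelled input, then output, flag, username, timestamp. The gr_app.py in this commit uses gr.Blocks with no flag button, so the file presumably stems from an earlier gr.Interface variant of the app. A minimal sketch under that assumption (the quote stub is a placeholder, not the real model call):

import gradio as gr

def quote(author: str, mode: str) -> str:
    return f'"..." - {author}'  # placeholder for the real model call

demo = gr.Interface(
    fn=quote,
    inputs=[
        gr.Textbox(label="Zitat generieren für"),
        gr.Dropdown(["Authentisch", "Ausgeglichen", "Chaotisch"], label="Modus"),
    ],
    outputs=gr.Textbox(label="output"),
    allow_flagging="manual",  # adds a "Flag" button; each click appends a row to flagged/log.csv
)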
gr_app.py
ADDED
@@ -0,0 +1,108 @@
+import gradio as gr
+from ctransformers import AutoModelForCausalLM
+import random
+
+system_prompt = """Dies ist eine Unterhaltung zwischen \
+einem intelligenten, hilfsbereitem \
+KI-Assistenten und einem Nutzer.
+Der Assistent gibt Antworten in Form von Zitaten."""
+
+prompt_format = "<|im_start|>system\n{system_prompt}\
+<|im_end|>\n<|im_start|>user\nZitiere {prompt}\
+<|im_end|>\n<|im_start|>assistant\n"
+
+modes = {
+    "Authentisch": {"temperature": 0.05, "top_k": 10},
+    "Ausgeglichen": {"temperature": 0.5, "top_p": 0.9},
+    "Chaotisch": {"temperature": 0.9},
+}
+
+authors = [
+    "Johann Wolfgang von Goethe",
+    "Friedrich Schiller",
+    "Immanuel Kant",
+    "Oscar Wilde",
+    "Lü Bu We",
+    "Wilhelm Busch",
+    "Friedrich Nietzsche",
+    "Karl Marx",
+    "William Shakespeare",
+    "Kurt Tucholsky",
+    "Georg Christoph Lichtenberg",
+    "Arthur Schopenhauer",
+    "Seneca der Jüngere",
+    "Martin Luther",
+    "Mark Twain",
+    "Cicero",
+    "Marie von Ebner-Eschenbach",
+    "Novalis",
+    "Franz Kafka",
+    "Jean-Jacques Rousseau",
+    "Heinrich Heine",
+    "Honoré de Balzac",
+    "Georg Büchner",
+    "Gotthold Ephraim Lessing",
+    "Markus M. Ronner",
+    "Gerhard Uhlenbruck",
+    "Theodor Fontane",
+    "Jean Paul",
+    "Leo Tolstoi",
+    "Friedrich Hebbel",
+    "Horaz",
+    "Albert Einstein",
+    "Jesus von Nazareth",
+    "Angela Merkel",
+    "Ambrose Bierce",
+    "Christian Morgenstern",
+    "Friedrich Hölderlin",
+    "Joseph Joubert",
+    "François de La Rochefoucauld",
+    "Otto von Bismarck",
+    "Fjodor Dostojewski",
+    "Ovid",
+    "Rudolf Steiner",
+    "Ludwig Börne",
+    "Hugo von Hofmannsthal",
+    "Laotse",
+    "Thomas von Aquin",
+    "Ludwig Wittgenstein",
+    "Friedrich Engels",
+    "Charles de Montesquieu",
+]
+
+model = AutoModelForCausalLM.from_pretrained(
+    "caretech-owl/leo-hessionai-7B-quotes-gguf", model_type="Llama"
+)
+
+
+def quote(author: str = "", mode: str = "") -> str:
+    author = author or random.choice(authors)
+    mode = mode or "Authentisch"
+    query = prompt_format.format(
+        system_prompt=system_prompt,
+        prompt=author,
+    )
+    print("=" * 20)
+    print(query)
+    output = model(query, stop="<|im_end|>", max_new_tokens=200, **modes[mode])
+    print("-" * 20)
+    print(output)
+    return output
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Zitatgenerator")
+    with gr.Row():
+        author = gr.Textbox(
+            label="Zitat generieren für", lines=1, placeholder="Autornamen..."
+        )
+        mode = gr.Dropdown(
+            choices=["Authentisch", "Ausgeglichen", "Chaotisch"],
+            label="Modus",
+            value="Ausgeglichen",
+        )
+    output = gr.Textbox(label="Zitat")
+    quote_btn = gr.Button("Generiere Zitat")
+    quote_btn.click(fn=quote, inputs=[author, mode], outputs=output)
+
+demo.launch()
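For reference, quote() fills prompt_format with the system prompt and the chosen author, producing a ChatML-style query with the line continuations collapsed. A sketch of the rendered string for a hypothetical call with author "Goethe", derived purely from the string literals above:

query = prompt_format.format(system_prompt=system_prompt, prompt="Goethe")
# query now contains:
# <|im_start|>system
# Dies ist eine Unterhaltung zwischen einem intelligenten, hilfsbereitem KI-Assistenten und einem Nutzer.
# Der Assistent gibt Antworten in Form von Zitaten.<|im_end|>
# <|im_start|>user
# Zitiere Goethe<|im_end|>
# <|im_start|>assistant

Generation then stops at the next <|im_end|> via the stop argument, so only the assistant's quote is returned.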
app.py → st_app.py
RENAMED
@@ -111,7 +111,7 @@ if generate:
     )
     st.session_state["author"] = question
 
-    with st.spinner("
+    with st.spinner("Denke über Zitat nach (das kann etwas dauern)..."):
         query = prompt_format.format(
             system_prompt=system_prompt,
             prompt=st.session_state["author"],
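One note on the changed line: st.spinner is a context manager, so the message stays on screen only while its body runs; the slow model call therefore has to sit inside the with block, as it does here. A minimal standalone sketch, with time.sleep standing in for the model call:

import streamlit as st
import time

with st.spinner("Denke über Zitat nach (das kann etwas dauern)..."):
    time.sleep(5)  # stand-in for the slow ctransformers call
st.success("Fertig")  # rendered once the spinner's block completes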