Revert fix5
app.py (CHANGED)
@@ -13,7 +13,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
 # Nekochu/Luminia-13B-v3
-This Space demonstrates model Nekochu/Luminia-13B-v3 by Nekochu, a Llama 2 model with 13B parameters fine-tuned for SD gen prompt
+This Space demonstrates model [Nekochu/Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3) by Nekochu, a Llama 2 model with 13B parameters fine-tuned for SD gen prompt
 """
 
 LICENSE = """
@@ -21,18 +21,14 @@ LICENSE = """
 ---.
 """
 
-def load_model(model_id):
-    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    tokenizer.use_default_system_prompt = False
-    return model, tokenizer
-
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
+# Define the available models
 MODELS = [
-    "Nekochu/Luminia-13B-v3",
-    "Nekochu/Llama-2-13B-German-ORPO",
+    {"name": "Nekochu/Luminia-13B-v3", "id": "Nekochu/Luminia-13B-v3"},
+    {"name": "Nekochu/Llama-2-13B-German-ORPO", "id": "Nekochu/Llama-2-13B-German-ORPO"},
+    # Add more models here in the future
 ]
 
 @spaces.GPU(duration=120)
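Note on the new MODELS shape: each entry now carries a display name and a repo ID, so a selected dropdown name has to be resolved back to an ID before loading. A minimal lookup sketch (get_model_id is hypothetical, not part of this commit; the two fields are identical today, but the indirection leaves room for friendlier labels later):

# Hypothetical helper, not in this commit: resolve a dropdown display
# name to the Hugging Face repo ID registered in MODELS.
def get_model_id(name: str) -> str:
    for entry in MODELS:
        if entry["name"] == name:
            return entry["id"]
    raise ValueError(f"Unknown model: {name!r}")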
@@ -47,7 +43,10 @@ def generate(
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
-    model, tokenizer = load_model(model_id)
+    # Load the model and tokenizer based on the selected model ID
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
 
     conversation = []
     if system_prompt:
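Because the load is now inlined, from_pretrained runs on every generate() call. A caching sketch under the same assumptions as this file (transformers with bitsandbytes for 4-bit loading; load_model_cached is illustrative, not part of the commit, and newer transformers releases spell the 4-bit option as quantization_config=BitsAndBytesConfig(load_in_4bit=True)):

from functools import lru_cache

from transformers import AutoModelForCausalLM, AutoTokenizer

# Sketch only: memoize by model_id so switching back to an already
# loaded model does not re-download and re-shard the 13B weights.
@lru_cache(maxsize=1)  # one 4-bit 13B model resident at a time
def load_model_cached(model_id: str):
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", load_in_4bit=True
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
    return model, tokenizer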
@@ -82,10 +81,17 @@ def generate(
         outputs.append(text)
         yield "".join(outputs)
 
+# Add a dropdown for model selection
+model_dropdown = gr.Dropdown(
+    label="Select Model",
+    choices=[model["name"] for model in MODELS],
+    value=MODELS[0]["name"],  # Default to the first model
+)
+
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-
+        model_dropdown,
         gr.Textbox(label="System prompt", lines=6),
         gr.Slider(
             label="Max new tokens",
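gr.ChatInterface passes additional_inputs to fn positionally after (message, history), so placing model_dropdown first means generate receives the selected model name as its first extra argument. A self-contained sketch of that wiring (echo stands in for the real generate):

import gradio as gr

MODEL_NAMES = ["Nekochu/Luminia-13B-v3", "Nekochu/Llama-2-13B-German-ORPO"]

def echo(message, history, model_name, system_prompt):
    # additional_inputs arrive positionally after (message, history),
    # so the dropdown's value lands in model_name.
    return f"[{model_name}] {message}"

demo = gr.ChatInterface(
    fn=echo,
    additional_inputs=[
        gr.Dropdown(label="Select Model", choices=MODEL_NAMES, value=MODEL_NAMES[0]),
        gr.Textbox(label="System prompt", lines=6),
    ],
)

if __name__ == "__main__":
    demo.launch()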
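For reference, the outputs.append / yield "".join(outputs) loop in the unchanged part of generate is the standard transformers streaming idiom. A self-contained sketch (stream_reply is illustrative; the real function also builds the conversation with the chat template and applies the sampling parameters):

from threading import Thread

from transformers import TextIteratorStreamer

def stream_reply(model, tokenizer, prompt: str, max_new_tokens: int = 256):
    # Run generate() in a worker thread; the streamer yields decoded
    # text pieces as they are produced.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    ).start()
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)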