Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
 # L-MChat
-This Space demonstrates [L-MChat](https://huggingface.co/collections/Artples/l-mchat-663265a8351231c428318a8f) by L-AI. <br> To select the Model that you want to use please go to the Adavanced Inputs, the
+This Space demonstrates [L-MChat](https://huggingface.co/collections/Artples/l-mchat-663265a8351231c428318a8f) by L-AI. <br> To select the Model that you want to use please go to the Adavanced Inputs, the Quality-Model (L-MChat-7b) is activated by default.
 """
 
 if not torch.cuda.is_available():
@@ -79,7 +79,7 @@ chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
         gr.Textbox(label="System prompt", lines=6),
-        gr.Radio(["Fast-Model", "Quality-Model"], label="Model", value="
+        gr.Radio(["Fast-Model", "Quality-Model"], label="Model", value="Quality-Model"),
         gr.Slider(
             label="Max new tokens",
             minimum=1,
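For context, the changed gr.Radio line matters because gr.ChatInterface passes each entry of additional_inputs positionally to fn after message and history, so the radio's value string is what generate receives as the model selector; without a default, the first request would arrive with no usable choice. A minimal runnable sketch of that pattern follows. The checkpoint mapping and the generate body are assumptions for illustration: only L-MChat-7b is named in this diff, and the Fast-Model id is hypothetical.

```python
import gradio as gr

# Hypothetical mapping from radio label to checkpoint id; the diff only
# names L-MChat-7b as the Quality-Model, the other id is a placeholder.
MODEL_IDS = {
    "Fast-Model": "Artples/L-MChat-Small",  # assumption, not from the diff
    "Quality-Model": "Artples/L-MChat-7b",  # named in the new DESCRIPTION
}

def generate(message, history, system_prompt, model_choice, max_new_tokens):
    # model_choice is the gr.Radio value string; the default set via
    # value="Quality-Model" is what arrives here on a fresh session.
    model_id = MODEL_IDS[model_choice]
    return f"[{model_id}] would answer: {message!r}"

demo = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Radio(["Fast-Model", "Quality-Model"], label="Model",
                 value="Quality-Model"),  # the default this commit sets
        gr.Slider(label="Max new tokens", minimum=1, maximum=2048, value=1024),
    ],
)

if __name__ == "__main__":
    demo.launch()
```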