Update app.py
app.py
@@ -113,7 +113,7 @@ with gr.Blocks(title="Axolotl Launcher") as demo:
         resize_token_embeddings_to_32x = gr.Checkbox(
             label="Resize Token Embeddings to 32x", value=False)
     with gr.Accordion("Adv. Config", open=False):
-        with gr.Accordion("Model Derivation & Configuration Overrides"):
+        with gr.Accordion("Model Derivation & Configuration Overrides", open=False):
             with gr.Column():
                 is_falcon_derived_model = gr.Checkbox(
                     label="Is Falcon Derived Model", value=False)
@@ -128,7 +128,7 @@ with gr.Blocks(title="Axolotl Launcher") as demo:
             bnb_config_kwargs = gr.TextArea(label="BnB Config KWArgs",
                                             placeholder="YAML or JSON format")
 
-        with gr.Accordion("Quantization & Precision"):
+        with gr.Accordion("Quantization & Precision", open=False):
             with gr.Column():
                 with gr.Row():
                     gptq = gr.Checkbox(label="GPTQ", value=False)
@@ -143,7 +143,7 @@ with gr.Blocks(title="Axolotl Launcher") as demo:
                     bfloat16 = gr.Checkbox(label="BFloat16", value=False)
                     float16 = gr.Checkbox(label="Float16", value=False)
 
-        with gr.Accordion("GPU & LoRA Settings"):
+        with gr.Accordion("GPU & LoRA Settings", open=False):
             gpu_memory_limit = gr.Textbox(label="GPU Memory Limit")
             lora_on_cpu = gr.Checkbox(label="LoRA on CPU", value=False)
             datasets = gr.TextArea(label="Datasets",
@@ -171,7 +171,7 @@ with gr.Blocks(title="Axolotl Launcher") as demo:
             dataset_shard_num = gr.Number(label="Dataset Shard Num")
             dataset_shard_idx = gr.Number(label="Dataset Shard Index")
 
-        with gr.Accordion("Training & Evaluation"):
+        with gr.Accordion("Training & Evaluation", open=False):
             with gr.Row():
                 sequence_len = gr.Number(label="Sequence Length", value=2048)
                 pad_to_sequence_len = gr.Checkbox(label="Pad to Sequence Length",
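For reference, a minimal, self-contained sketch of the pattern this commit applies: passing open=False to gr.Accordion so each advanced section starts collapsed until the user expands it. The layout and labels below are illustrative stand-ins, not the launcher's full UI; only the gr.Accordion open parameter is taken from the diff itself.

import gradio as gr

# Sketch only: nested accordions that render collapsed by default,
# mirroring the open=False change made in this commit.
with gr.Blocks(title="Accordion Demo") as demo:
    with gr.Accordion("Adv. Config", open=False):            # collapsed until clicked
        with gr.Accordion("Quantization & Precision", open=False):
            with gr.Row():
                gptq = gr.Checkbox(label="GPTQ", value=False)
                bfloat16 = gr.Checkbox(label="BFloat16", value=False)
        with gr.Accordion("Training & Evaluation", open=False):
            sequence_len = gr.Number(label="Sequence Length", value=2048)

if __name__ == "__main__":
    demo.launch()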