Thelocallab committed
Commit 4a79f76
1 parent: 2cb125f

Upload folder using huggingface_hub
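This commit message is the default produced by the huggingface_hub upload helpers. A minimal sketch of the kind of call that likely produced it (the repo_id below is a placeholder, not taken from this commit):

# Hypothetical reconstruction of the upload step; folder path taken from
# the train.sh output_dir in this commit, repo_id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/app/fluxgym/outputs/sarahrunpod2",  # assumed local output folder
    repo_id="your-username/sarahrunpod2",             # placeholder repo id
    repo_type="model",
)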
dataset.toml CHANGED
@@ -9,6 +9,6 @@ batch_size = 1
 keep_tokens = 1
 
 [[datasets.subsets]]
-image_dir = '/app/fluxgym/datasets/sarahrunpod'
-class_tokens = 'sarahrunpod'
+image_dir = '/app/fluxgym/datasets/sarahrunpod2'
+class_tokens = 'sarahrunpod2'
 num_repeats = 10
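A quick sanity check (not part of the commit): parse the updated dataset.toml and confirm the subset now points at the sarahrunpod2 dataset. This sketch assumes Python 3.11+ for the stdlib tomllib module and the kohya-style [[datasets]] / [[datasets.subsets]] layout visible in the hunk above.

import tomllib

# Path taken from --dataset_config in train.sh below.
with open("/app/fluxgym/outputs/sarahrunpod2/dataset.toml", "rb") as f:
    cfg = tomllib.load(f)

for subset in cfg["datasets"][0]["subsets"]:
    print(subset["image_dir"], subset["class_tokens"], subset["num_repeats"])
# expected: /app/fluxgym/datasets/sarahrunpod2 sarahrunpod2 10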
sample_prompts.txt CHANGED
@@ -1 +1 @@
1
- sarahrunpod
 
1
+ sarahrunpod2
sarahrunpod2-000005.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76a9cdaf5154f431cb98470b40a52072db2a5e8afe0055b66f0cce7de0d456c0
+size 317081760
sarahrunpod2-000010.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:824c1ad8fc601f2140e0612bbd1f566f7c472035a783a406e8f578f2800ef26e
+size 317081760
sarahrunpod2.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32fae89f9134b97448c4faa03e73f66148518d716c81d14fff3ce4d47a28cdfe
+size 317081760
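The three .safetensors entries above are Git LFS pointer files; the actual ~317 MB LoRA weights live in LFS storage. A minimal sketch of fetching one of them with huggingface_hub (repo_id is again a placeholder):

from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="your-username/sarahrunpod2",   # placeholder repo id
    filename="sarahrunpod2.safetensors",    # file added in this commit
)
print(local_path)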
train.sh CHANGED
@@ -15,18 +15,18 @@ accelerate launch \
   --mixed_precision bf16 \
   --save_precision bf16 \
   --network_module networks.lora_flux \
-  --network_dim 4 \
+  --network_dim 32 \
   --optimizer_type adamw8bit \
-  --learning_rate 8e-4 \
+  --learning_rate 4e-4 \
   --cache_text_encoder_outputs \
   --cache_text_encoder_outputs_to_disk \
   --fp8_base \
   --highvram \
-  --max_train_epochs 16 \
-  --save_every_n_epochs 4 \
-  --dataset_config "/app/fluxgym/outputs/sarahrunpod/dataset.toml" \
-  --output_dir "/app/fluxgym/outputs/sarahrunpod" \
-  --output_name sarahrunpod \
+  --max_train_epochs 12 \
+  --save_every_n_epochs 5 \
+  --dataset_config "/app/fluxgym/outputs/sarahrunpod2/dataset.toml" \
+  --output_dir "/app/fluxgym/outputs/sarahrunpod2" \
+  --output_name sarahrunpod2 \
   --timestep_sampling shift \
   --discrete_flow_shift 3.1582 \
   --model_prediction_type raw \
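An optional post-training check, sketched under the assumption that the kohya lora_flux network module stores its tensors with ".lora_down.weight" key suffixes whose first dimension is the LoRA rank: load the checkpoint produced with the new settings and confirm the --network_dim 32 change took effect.

from safetensors.torch import load_file

# Output path assembled from --output_dir and --output_name above.
state = load_file("/app/fluxgym/outputs/sarahrunpod2/sarahrunpod2.safetensors")
ranks = {t.shape[0] for k, t in state.items() if k.endswith("lora_down.weight")}
print(ranks)  # expected: {32} if the network_dim change was picked up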