# Fine-tune a LoRA for FLUX.1-dev with kohya sd-scripts via HF accelerate.
#
# Flags BEFORE the script path are parsed by `accelerate launch`; flags AFTER
# it are passed to flux_train_network.py. NOTE: --mixed_precision bf16 appears
# in BOTH sections on purpose — once for accelerate, once for the training
# script. Do not "deduplicate" it.
#
# Fix vs. original: removed the dangling trailing backslash after the last
# flag (it made the shell treat the following line — or EOF — as a
# continuation of this command).
accelerate launch \
  --mixed_precision bf16 \
  --num_cpu_threads_per_process 1 \
  sd-scripts/flux_train_network.py \
  --pretrained_model_name_or_path "/app/fluxgym/models/unet/flux1-dev.sft" \
  --clip_l "/app/fluxgym/models/clip/clip_l.safetensors" \
  --t5xxl "/app/fluxgym/models/clip/t5xxl_fp16.safetensors" \
  --ae "/app/fluxgym/models/vae/ae.sft" \
  --cache_latents_to_disk \
  --save_model_as safetensors \
  --sdpa \
  --persistent_data_loader_workers \
  --max_data_loader_n_workers 2 \
  --seed 42 \
  --gradient_checkpointing \
  --mixed_precision bf16 \
  --save_precision bf16 \
  --network_module networks.lora_flux \
  --network_dim 4 \
  --optimizer_type adamw8bit \
  --learning_rate 8e-4 \
  --cache_text_encoder_outputs \
  --cache_text_encoder_outputs_to_disk \
  --fp8_base \
  --highvram \
  --max_train_epochs 6 \
  --save_every_n_epochs 4 \
  --dataset_config "/app/fluxgym/outputs/flux-lora-finetuned/dataset.toml" \
  --output_dir "/app/fluxgym/outputs/flux-lora-finetuned" \
  --output_name flux-lora-finetuned \
  --timestep_sampling shift \
  --discrete_flow_shift 3.1582 \
  --model_prediction_type raw \
  --guidance_scale 1 \
  --loss_type l2