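# FLUX.1-dev LoRA training configuration in the ai-toolkit format.
# The flux_train_replicate name suggests Replicate's FLUX fine-tuner, which
# wraps ostris/ai-toolkit; that provenance is an assumption. Assuming the
# standard ai-toolkit entry point, it would be run as:
#   python run.py <path-to-this-file>.yaml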
job: custom_job
config:
  name: flux_train_replicate
  process:
    - type: custom_sd_trainer
      training_folder: output
      device: cuda:0
      trigger_word: HST
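      # Rank-32 LoRA (alpha 32). only_if_contains restricts the adapter to
      # double-stream blocks 7 and 13 and single-stream blocks 7 and 13;
      # every other module in the transformer stays frozen and unadapted.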
      network:
        type: lora
        linear: 32
        linear_alpha: 32
        network_kwargs:
          only_if_contains:
            - transformer.transformer_blocks.7.norm1.linear
            - transformer.transformer_blocks.7.norm1_context.linear
            - transformer.transformer_blocks.7.attn.to_q
            - transformer.transformer_blocks.7.attn.to_k
            - transformer.transformer_blocks.7.attn.to_v
            - transformer.transformer_blocks.7.attn.add_k_proj
            - transformer.transformer_blocks.7.attn.add_v_proj
            - transformer.transformer_blocks.7.attn.add_q_proj
            - transformer.transformer_blocks.7.attn.to_out.0
            - transformer.transformer_blocks.7.attn.to_add_out
            - transformer.transformer_blocks.7.ff.net.0.proj
            - transformer.transformer_blocks.7.ff.net.2
            - transformer.transformer_blocks.7.ff_context.net.0.proj
            - transformer.transformer_blocks.7.ff_context.net.2
            - transformer.transformer_blocks.13.norm1.linear
            - transformer.transformer_blocks.13.norm1_context.linear
            - transformer.transformer_blocks.13.attn.to_q
            - transformer.transformer_blocks.13.attn.to_k
            - transformer.transformer_blocks.13.attn.to_v
            - transformer.transformer_blocks.13.attn.add_k_proj
            - transformer.transformer_blocks.13.attn.add_v_proj
            - transformer.transformer_blocks.13.attn.add_q_proj
            - transformer.transformer_blocks.13.attn.to_out.0
            - transformer.transformer_blocks.13.attn.to_add_out
            - transformer.transformer_blocks.13.ff.net.0.proj
            - transformer.transformer_blocks.13.ff.net.2
            - transformer.transformer_blocks.13.ff_context.net.0.proj
            - transformer.transformer_blocks.13.ff_context.net.2
            - transformer.single_transformer_blocks.7.norm.linear
            - transformer.single_transformer_blocks.7.proj_mlp
            - transformer.single_transformer_blocks.7.proj_out
            - transformer.single_transformer_blocks.7.attn.to_q
            - transformer.single_transformer_blocks.7.attn.to_k
            - transformer.single_transformer_blocks.7.attn.to_v
            - transformer.single_transformer_blocks.13.norm.linear
            - transformer.single_transformer_blocks.13.proj_mlp
            - transformer.single_transformer_blocks.13.proj_out
            - transformer.single_transformer_blocks.13.attn.to_q
            - transformer.single_transformer_blocks.13.attn.to_k
            - transformer.single_transformer_blocks.13.attn.to_v
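      # Checkpointing: save_every (501) exceeds train.steps (500), so no
      # intermediate checkpoints are written — only the final float16 save.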
      save:
        dtype: float16
        save_every: 501
        max_step_saves_to_keep: 1
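      # Data: images in input_images/ with same-named .txt caption files.
      # Captions are dropped on 5% of steps; latents are cached in memory
      # only, and images are bucketed at 512, 768, and 1024 px.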
      datasets:
        - folder_path: input_images
          caption_ext: txt
          caption_dropout_rate: 0.05
          shuffle_tokens: false
          cache_latents_to_disk: false
          cache_latents: true
          resolution:
            - 512
            - 768
            - 1024
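      # 500 steps at batch size 4 with the flow-matching noise scheduler,
      # 8-bit AdamW at lr 1e-3, bf16 compute with gradient checkpointing,
      # and an EMA of the trained weights (decay 0.99). Only the diffusion
      # transformer is trained; the text encoders stay frozen.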
      train:
        batch_size: 4
        steps: 500
        gradient_accumulation_steps: 1
        train_unet: true
        train_text_encoder: false
        content_or_style: balanced
        gradient_checkpointing: true
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        lr: 0.001
        ema_config:
          use_ema: true
          ema_decay: 0.99
        dtype: bf16
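      # Base model. quantize: true runs the frozen base transformer in a
      # quantized (8-bit) form to reduce VRAM; the exact quantization scheme
      # is whatever ai-toolkit applies by default.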
      model:
        name_or_path: FLUX.1-dev
        is_flux: true
        quantize: true
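      # Preview sampling is effectively disabled: sample_every (501) exceeds
      # steps (500) and the prompts list is empty.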
      sample:
        sampler: flowmatch
        sample_every: 501
        width: 1024
        height: 1024
        prompts: []
        neg: ''
        seed: 42
        walk_seed: true
        guidance_scale: 3.5
        sample_steps: 28
meta:
  name: flux_train_replicate
  version: '1.0'