epoch 1/20
Traceback (most recent call last):
File "/workspace/kohya-trainer/sdxl_train.py", line 649, in <module>
train(args)
File "/workspace/kohya-trainer/sdxl_train.py", line 475, in train
accelerator.backward(loss)
File "/workspace/venv/lib/python3.10/site-packages/accelerate/accelerator.py", line 1743, in backward
self.scaler.scale(loss).backward(**kwargs)
File "/workspace/venv/lib/python3.10/site-packages/torch/_tensor.py", line 487, in backward
torch.autograd.backward(
File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/function.py", line 274, in apply
return user_fn(self, *args)
File "/workspace/venv/lib/python3.10/site-packages/torch/utils/checkpoint.py", line 157, in backward
torch.autograd.backward(outputs_with_grad, args_with_grad)
File "/workspace/venv/lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 26.00 MiB (GPU 0; 23.65 GiB total capacity; 18.73 GiB already allocated; 25.69 MiB free; 19.47 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
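
The allocator hint at the end of the traceback refers to PyTorch's PYTORCH_CUDA_ALLOC_CONF environment variable. A minimal sketch of acting on it, assuming the variable is set before the trainer initializes CUDA; the 128 MiB split size is an illustrative value, not one taken from this log:

import os

# Assumption: capping max_split_size_mb can reduce fragmentation, per the
# hint in the OutOfMemoryError above. 128 is an illustrative value; see
# the PyTorch Memory Management documentation for tuning guidance.
# The setting must be in place before torch allocates on the GPU, so set
# it before importing torch (or export it in the shell that launches
# sdxl_train.py).
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch  # imported only after the allocator config is in place

Equivalently, from the launching shell: export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128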