runtime error
Traceback (most recent call last):
  File "/home/user/app/app.py", line 185, in <module>
    learn = load_learner_custom('best_model.pt')
  File "/home/user/app/app.py", line 180, in load_learner_custom
    res = torch.load(fname)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1026, in load
    return _load(opened_zipfile,
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1438, in _load
    result = unpickler.load()
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1408, in persistent_load
    typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location))
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 1382, in load_tensor
    wrap_storage=restore_location(storage, location),
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 391, in default_restore_location
    result = fn(storage, location)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 266, in _cuda_deserialize
    device = validate_cuda_device(location)
  File "/usr/local/lib/python3.10/site-packages/torch/serialization.py", line 250, in validate_cuda_device
    raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
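A minimal sketch of the fix the error message itself suggests: pass map_location=torch.device('cpu') to torch.load so CUDA-saved tensors are remapped to CPU on a CPU-only machine. The names load_learner_custom and 'best_model.pt' are taken from the traceback; the surrounding code is an assumption about what app.py does around line 180.

```python
import torch

def load_learner_custom(fname):
    # Assumed wrapper around torch.load (per the traceback at app.py line 180).
    # map_location remaps storages saved on a CUDA device to the CPU,
    # avoiding the RuntimeError on machines where torch.cuda.is_available() is False.
    return torch.load(fname, map_location=torch.device('cpu'))

learn = load_learner_custom('best_model.pt')
```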