Spaces:
Runtime error
Runtime error
0%|          | 0/1000 [00:00<?, ?it/s]
0%|          | 0/1000 [00:00<?, ?it/s]Traceback (most recent call last):
  File "main.py", line 444, in <module>
    main()
  File "main.py", line 383, in main
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 1635, in train
    return inner_training_loop(
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 1904, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 2647, in training_step
    loss = self.compute_loss(model, inputs)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 2679, in compute_loss
    outputs = model(**inputs)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 1191, in forward
    transformer_outputs = self.transformer(
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 986, in forward
    layer_ret = torch.utils.checkpoint.checkpoint(
  File "D:\Program\Python38\lib\site-packages\torch\utils\checkpoint.py", line 249, in checkpoint
    return CheckpointFunction.apply(function, preserve, *args)
  File "D:\Program\Python38\lib\site-packages\torch\autograd\function.py", line 506, in apply
    return super().apply(*args, **kwargs)  # type: ignore[misc]
  File "D:\Program\Python38\lib\site-packages\torch\utils\checkpoint.py", line 107, in forward
    outputs = run_function(*args)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 627, in forward
    attention_outputs = self.attention(
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 460, in forward
    cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 201, in forward
    if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
KeyboardInterrupt
Error in sys.excepthook:
Traceback (most recent call last):
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1694, in print
    extend(render(renderable, render_options))
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1330, in render
    yield from self.render(render_output, _options)
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1326, in render
    for render_output in iter_render:
  File "D:\Program\Python38\lib\site-packages\rich\constrain.py", line 29, in __rich_console__
    yield from console.render(self.renderable, child_options)
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1326, in render
    for render_output in iter_render:
  File "D:\Program\Python38\lib\site-packages\rich\panel.py", line 220, in __rich_console__
    lines = console.render_lines(renderable, child_options, style=style)
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1366, in render_lines
    lines = list(
  File "D:\Program\Python38\lib\site-packages\rich\segment.py", line 292, in split_and_crop_lines
    for segment in segments:
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1326, in render
    for render_output in iter_render:
  File "D:\Program\Python38\lib\site-packages\rich\padding.py", line 97, in __rich_console__
    lines = console.render_lines(
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1366, in render_lines
    lines = list(
  File "D:\Program\Python38\lib\site-packages\rich\segment.py", line 292, in split_and_crop_lines
    for segment in segments:
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1330, in render
    yield from self.render(render_output, _options)
  File "D:\Program\Python38\lib\site-packages\rich\console.py", line 1326, in render
    for render_output in iter_render:
  File "D:\Program\Python38\lib\site-packages\rich\syntax.py", line 609, in __rich_console__
    segments = Segments(self._get_syntax(console, options))
  File "D:\Program\Python38\lib\site-packages\rich\segment.py", line 668, in __init__
    self.segments = list(segments)
  File "D:\Program\Python38\lib\site-packages\rich\syntax.py", line 637, in _get_syntax
    text = self.highlight(processed_code, self.line_range)
  File "D:\Program\Python38\lib\site-packages\rich\syntax.py", line 509, in highlight
    text.append_tokens(tokens_to_spans())
  File "D:\Program\Python38\lib\site-packages\rich\text.py", line 995, in append_tokens
    for content, style in tokens:
  File "D:\Program\Python38\lib\site-packages\rich\syntax.py", line 497, in tokens_to_spans
    _token_type, token = next(tokens)
  File "D:\Program\Python38\lib\site-packages\rich\syntax.py", line 484, in line_tokenize
    for token_type, token in lexer.get_tokens(code):
  File "D:\Program\Python38\lib\site-packages\pygments\lexer.py", line 190, in streamer
    for _, t, v in self.get_tokens_unprocessed(text):
  File "D:\Program\Python38\lib\site-packages\pygments\lexer.py", line 632, in get_tokens_unprocessed
    m = rexmatch(text, pos)
KeyboardInterrupt
Original exception was:
Traceback (most recent call last):
  File "main.py", line 444, in <module>
    main()
  File "main.py", line 383, in main
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 1635, in train
    return inner_training_loop(
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 1904, in _inner_training_loop
    tr_loss_step = self.training_step(model, inputs)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 2647, in training_step
    loss = self.compute_loss(model, inputs)
  File "E:\Documents\Desktop\ChatGLM-6B\ptuning\trainer.py", line 2679, in compute_loss
    outputs = model(**inputs)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 1191, in forward
    transformer_outputs = self.transformer(
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 986, in forward
    layer_ret = torch.utils.checkpoint.checkpoint(
  File "D:\Program\Python38\lib\site-packages\torch\utils\checkpoint.py", line 249, in checkpoint
    return CheckpointFunction.apply(function, preserve, *args)
  File "D:\Program\Python38\lib\site-packages\torch\autograd\function.py", line 506, in apply
    return super().apply(*args, **kwargs)  # type: ignore[misc]
  File "D:\Program\Python38\lib\site-packages\torch\utils\checkpoint.py", line 107, in forward
    outputs = run_function(*args)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 627, in forward
    attention_outputs = self.attention(
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 460, in forward
    cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
  File "D:\Program\Python38\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\Lenovo/.cache\huggingface\modules\transformers_modules\chatglm-6b-int4\modeling_chatglm.py", line 201, in forward
    if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
KeyboardInterrupt