from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "Acryl-Jonathan/coder-0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype=torch.bfloat16
).cuda()

# Alpaca-style prompt template: system message first, then the user's code.
prompt = """{}

### Instruction:
{}

### Response:"""

system_message = """You are an expert in C/C++ debugging. Detect the errors in the code and propose guidelines for fixing them.

#### IMPORTANT RULES
1. Only use Korean.
2. Organize the detected errors clearly, ordered by code line.
3. Describe how you detected each error and the appropriate correction.
4. Report the line number of each detected error.

#### Final answer
detected error code line: line number
corrected error
correcting guidelines
"""

# Example input: deliberately buggy C++ code for the model to analyze
# (undeclared variable `c`, uninitialized `b`, stray semicolon after the
# for-loop, and a string returned from an int function).
user_message = """```cpp
#include <iostream>
using namespace std;

int main() {
    int a, b;
    cout << "Enter two numbers: ";
    cin >> a >> c;

    if (a > 0 || b > 0) {
        cout << "Both numbers are positive." << endl;
    } else {
        cout << "At least one number is not positive." << endl;
    }

    for (int i = 0; i < 5; i++);
    {
        cout << "i: " << i << endl;
    }

    return "Done";
}
```
"""

input_prompt = prompt.format(system_message, user_message)
inputs = tokenizer(input_prompt, return_tensors="pt").to(model.device)
# Use max_new_tokens rather than max_length: max_length counts the prompt
# too, and this prompt alone is longer than 128 tokens.
outputs = model.generate(**inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
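
# --- Optional: streaming generation ---
# For interactive use you may prefer to stream tokens as they are
# generated instead of waiting for the full completion. This is a
# minimal sketch using transformers' TextStreamer, reusing the model,
# tokenizer, and inputs built above; the generation parameters are
# illustrative assumptions, not values recommended by the model authors.
from transformers import TextStreamer

# skip_prompt=True avoids re-printing the input prompt; extra keyword
# arguments such as skip_special_tokens are forwarded to tokenizer.decode.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# Decoded text is printed to stdout incrementally as tokens arrive.
model.generate(**inputs, streamer=streamer, max_new_tokens=512)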