import gradio as gr
import ctranslate2
from transformers import AutoTokenizer

# Model configuration. Only the tokenizer comes from the Hugging Face
# checkpoint; text generation runs through the CTranslate2 engine below,
# so loading the full Transformers model is unnecessary.
model_id = "Makima57/deepseek-math-Numina"
# Assumed local directory holding the CTranslate2-converted model
# (see the conversion note at the bottom of this file).
model_path = "deepseek-math-ct2"
# Assumed prompt prefix; adjust to whatever format the model expects.
model_prompt = "Solve the following math problem step by step: "

tokenizer = AutoTokenizer.from_pretrained(model_id)
generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")

# Function to generate predictions using the model
def get_prediction(question):
    input_text = model_prompt + question
    # CTranslate2 consumes token strings, not token IDs.
    input_tokens = tokenizer.tokenize(input_text)
    results = generator.generate_batch([input_tokens])
    output_tokens = results[0].sequences[0]
    predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
    return predicted_answer

# Gradio interface for user input and output
def gradio_interface(question, correct_answer):
    predicted_answer = get_prediction(question)
    return {
        "question": question,
        "predicted_answer": predicted_answer,
        "correct_answer": correct_answer,
    }

# Gradio app setup
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Math Question"),
        gr.Textbox(label="Correct Answer"),
    ],
    outputs=[
        gr.JSON(label="Results"),
    ],
    title="Math Question Solver",
    description="Enter a math question to get the model prediction.",
)

if __name__ == "__main__":
    interface.launch()
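
# Conversion note (assumed setup): ctranslate2.Generator cannot read a raw
# Hugging Face checkpoint; it needs a directory produced by the CTranslate2
# converter. A minimal sketch of the one-time conversion step, assuming the
# checkpoint is compatible with the Transformers converter and that the
# "deepseek-math-ct2" output directory matches model_path above:
#
#   pip install ctranslate2 transformers
#   ct2-transformers-converter --model Makima57/deepseek-math-Numina \
#       --output_dir deepseek-math-ct2 --quantization int8
#
# The int8 quantization here mirrors the compute_type="int8" passed to the
# Generator, keeping CPU memory use low at a small cost in precision.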