import gradio as gr
import ctranslate2
from transformers import AutoTokenizer
from huggingface_hub import snapshot_download
# Prompt prefix, tokenizer, and int8-quantized CTranslate2 model
model_prompt = "Solve the following mathematical problem: "
tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
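# compute_type="int8" quantizes the weights to roughly a quarter of their
# fp32 size, which is what makes CPU-only inference of a 7B model feasible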
# Function to generate a single candidate answer for a question
def get_prediction(question):
    input_text = model_prompt + question
    input_tokens = tokenizer.tokenize(input_text)
    # Sample rather than greedy-decode: CTranslate2 defaults to greedy
    # (sampling_topk=1), so repeated calls would return identical text
    results = generator.generate_batch(
        [input_tokens],
        max_length=512,
        sampling_topk=40,
        sampling_temperature=0.7,
    )
    output_tokens = results[0].sequences[0]
    predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
    return predicted_answer
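
# Candidate generations rarely agree character-for-character, so voting on
# the raw strings below can be brittle. A minimal sketch of a hypothetical
# helper (not part of the original app) that extracts just the final answer,
# assuming the model wraps it in \boxed{...} as NuminaMath-style prompts
# usually do:
import re

def extract_final_answer(generation):
    # Take the last \boxed{...} payload; fall back to the full text.
    # This simple pattern does not handle nested braces.
    matches = re.findall(r"\\boxed\{([^{}]*)\}", generation)
    return matches[-1] if matches else generation.strip()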
# Function to perform majority voting across multiple predictions
def majority_vote(question, num_iterations=10):
    all_predictions = []
    for _ in range(num_iterations):
        prediction = get_prediction(question)
        all_predictions.append(prediction)
    majority_voted_pred = max(set(all_predictions), key=all_predictions.count)
    return majority_voted_pred, all_predictions
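# If the extract_final_answer sketch above were used, the vote would be more
# robust taken over [extract_final_answer(p) for p in all_predictions]
# rather than over the raw generations.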
# Gradio interface for user input and output
def gradio_interface(question, correct_answer):
    final_prediction, all_predictions = majority_vote(question, num_iterations=10)
    return {
        "Question": question,
        "Generated Answers (10 iterations)": all_predictions,
        "Majority-Voted Prediction": final_prediction,
        "Correct Answer": correct_answer,
    }
# Gradio app setup
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Math Question"),
        gr.Textbox(label="Correct Answer"),
    ],
    outputs=[
        gr.JSON(label="Results"),  # Display the results in JSON format
    ],
    title="Math Question Solver",
    description="Enter a math question to get the model prediction and see all generated answers.",
)
if __name__ == "__main__":
    interface.launch()
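
# When running locally, Gradio can also expose a temporary public URL:
# interface.launch(share=True)  # not needed on Hugging Face Spaces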