import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the fine-tuned model and tokenizer
model_name = "EmTpro01/llama-3.2-Code-Generator" # Replace with your Hugging Face model name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
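# Optional: move the model to GPU when one is available. This is a sketch
# assuming the Space may have a GPU attached; on CPU-only hardware the
# device resolves to "cpu" and the move is a no-op.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)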
# Define the prediction function
def generate_code(prompt):
    # Tokenize the input and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate code without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=200,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,  # Llama tokenizers ship without a pad token
        )
    # Decode the output
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_code
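# A sketch of a sampling-based variant, in case greedy decoding proves too
# repetitive for code generation; the temperature and top_p values below are
# illustrative assumptions, not settings from the original app.
def generate_code_sampled(prompt, temperature=0.7, top_p=0.9):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)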
# Set up Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Code Generation with Fine-Tuned Llama Model")
    with gr.Row():
        prompt = gr.Textbox(label="Input Prompt", placeholder="Enter a prompt for code generation...")
        output = gr.Textbox(label="Generated Code")
    generate_button = gr.Button("Generate Code")
    generate_button.click(generate_code, inputs=prompt, outputs=output)
# Launch the interface
demo.launch()
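# If the Space sees concurrent traffic, calling demo.queue() before launch()
# queues generation requests instead of running them all at once; it is
# omitted above to keep the original single-request behavior.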