import re

import torch
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the model and tokenizer from the Hugging Face repository
model_repo_id = "Ajay12345678980/QA_bot"  # Your model repository

# Initialize the model and tokenizer
model = GPT2LMHeadModel.from_pretrained(model_repo_id)
tokenizer = GPT2Tokenizer.from_pretrained(model_repo_id)


# Define the prediction function
def predict(text):
    try:
        # Encode the input text
        inputs = tokenizer.encode(text, return_tensors="pt")

        # Generate output using the model
        with torch.no_grad():
            outputs = model.generate(inputs, max_length=50, do_sample=True)

        # Decode the generated output
        prediction = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract the text between the "Answer" and "ANS" markers using regex
        match = re.search(r'Answer\s*(.*?)\s*ANS', prediction)
        if match:
            return match.group(1)
        return "No match found between 'Answer' and 'ANS' markers."
    except Exception as e:
        # Handle and report any exceptions for debugging
        return f"An error occurred: {str(e)}"


# Gradio interface setup
interface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="GPT-2 Text Generation",
    description="Enter some text and see what the model generates!",
)

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()
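
# Local sanity check (a minimal sketch, not part of the deployed app).
# Because generate() runs with do_sample=True, outputs vary between runs;
# seeding PyTorch first makes a quick smoke test reproducible. The question
# string below is an arbitrary placeholder, and this snippet assumes it is
# pasted above interface.launch() so it runs before the UI starts:
#
#     torch.manual_seed(0)
#     print(predict("What is the capital of France?"))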