"""Gradio app: summarize English text with a T5 model, then translate the
summary to Spanish via the Hugging Face Inference API."""

import os

import requests
import gradio as gr
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    T5ForConditionalGeneration,
    T5Tokenizer,
    pipeline,
)

# Hugging Face API token — HF_TOKEN must be set in the environment, otherwise
# the Authorization header carries the literal string "Bearer None" and the
# Inference API will reject requests.
token = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {token}"}

# Summarization model and tokenizer (T5 fine-tuned on Amazon reviews).
tokenizer = T5Tokenizer.from_pretrained(
    "sumedh/t5-base-amazonreviews", clean_up_tokenization_spaces=True
)
model = T5ForConditionalGeneration.from_pretrained("sumedh/t5-base-amazonreviews")
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)

# Hosted English->Spanish translation model on the Inference API.
API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-es"


def texto_sum(text):
    """Summarize `text` locally, then translate the summary to Spanish.

    Parameters
    ----------
    text : str
        The English text to summarize.

    Returns
    -------
    str
        The Spanish translation of the summary, or an error-message string
        of the form "Error in translation: ..." if the API call fails.
    """
    # Summarize the input text (deterministic decoding).
    summary = summarizer(text, do_sample=False)[0]["summary_text"]

    # Translate the summary via the Hugging Face Inference API.
    try:
        # Timeout keeps a stuck request from hanging the Gradio handler forever.
        response = requests.post(
            API_URL, headers=headers, json={"inputs": summary}, timeout=60
        )
        translation = response.json()
    except (requests.RequestException, ValueError) as exc:
        # Network failure or non-JSON body — report instead of crashing the UI.
        return f"Error in translation: {exc}"

    # On failure the API returns a dict {"error": ...}; on success, a list of
    # results like [{"translation_text": ...}].
    if isinstance(translation, dict) and "error" in translation:
        return f"Error in translation: {translation['error']}"
    try:
        return translation[0]["translation_text"]
    except (KeyError, IndexError, TypeError):
        return f"Error in translation: unexpected response {translation!r}"


# Gradio interface: single textbox in, translated summary out.
demo = gr.Interface(
    fn=texto_sum,
    inputs=gr.Textbox(
        label="Texto a introducir:",
        placeholder="Introduce el texto a resumir aquĆ­...",
    ),
    outputs="text",
)

# Launch the interface
demo.launch()