from transformers import pipeline
from datasets import load_dataset
import gradio as gr

# --- Hugging Face Pipeline Setup ---
# Replace model_id with the identifier of the model you wish to use
model_id = "declare-lab/flan-alpaca-gpt4-xl"
text_generator = pipeline(model=model_id)

# --- Dataset Loading (Optional) ---
# Replace with the dataset of your choice
dataset_name = "luisotorres/wikipedia-crypto-articles"  # Example dataset
dataset_split = "train"  # Choose from 'train', 'test', 'validation', etc.
dataset = load_dataset(dataset_name, split=dataset_split)

# --- Text Generation Function ---
def generate_text(prompt, max_tokens=128, system_message=""):
    # Generate text using the pipeline (cast max_tokens since the slider may pass a float)
    generated_text = text_generator(
        prompt, max_length=int(max_tokens), do_sample=True
    )[0]["generated_text"]
    # Prepend the system message to the generated output
    full_message = system_message + "\n\nGenerated Text:\n" + generated_text
    return full_message

# --- Gradio Interface Setup ---
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter Prompt Here...", label="Prompt"),
        gr.Slider(minimum=1, maximum=256, value=128, step=1, label="Max Tokens"),
        gr.Textbox(label="System Message", placeholder="Enter a system message here..."),
    ],
    outputs="text",
)

# --- Launch the Interface ---
iface.launch()
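The dataset loaded in the optional step above is not used anywhere else in the script. A minimal sketch of one way to put it to work, run before iface.launch() and assuming the dataset exposes a "text" column (an assumption; adjust the key to match the actual schema of whatever dataset you load):

import random

def random_prompt(max_chars=500):
    # Pick a random record and truncate it to a prompt-sized excerpt
    record = dataset[random.randrange(len(dataset))]
    excerpt = record.get("text", "")[:max_chars]  # "text" column is an assumption
    return "Summarize the following article:\n\n" + excerpt

# Example: generate a completion seeded from a dataset excerpt
print(generate_text(random_prompt(), max_tokens=128, system_message="Article summary"))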