Commit
·
d6f1344
1
Parent(s):
987b605
Create HG_model+dataset+UI_pipeline_Template.txt
Browse files
HG_model+dataset+UI_pipeline_Template.txt
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import pipeline
|
2 |
+
from datasets import load_dataset
|
3 |
+
import gradio as gr
# --- Hugging Face Pipeline Setup ---
# Swap model_id for any model identifier from the Hub; the pipeline
# task is inferred from the model's config when none is given.
model_id = "declare-lab/flan-alpaca-gpt4-xl"
text_generator = pipeline(model=model_id)

# --- Dataset Loading (Optional) ---
# Point dataset_name at the dataset of your choice.
dataset_name = "luisotorres/wikipedia-crypto-articles"  # Example dataset
dataset_split = "train"  # Choose from 'train', 'test', 'validation', etc.
dataset = load_dataset(dataset_name, split=dataset_split)
# --- Text Generation Function ---
def generate_text(prompt, max_tokens=128, system_message=""):
    """Generate text for *prompt* and prefix it with a system message.

    Parameters
    ----------
    prompt : str
        Input text fed to the Hugging Face pipeline.
    max_tokens : int, optional
        Passed to the pipeline as ``max_length``. The Gradio slider can
        deliver a float, so the value is coerced to ``int`` before use.
    system_message : str, optional
        Free-form text placed above the generated output.

    Returns
    -------
    str
        ``system_message`` followed by the generated text.
    """
    # int() guards against float values coming from the UI slider;
    # do_sample=True makes the output non-deterministic by design.
    generated_text = text_generator(
        prompt, max_length=int(max_tokens), do_sample=True
    )[0]["generated_text"]

    # Combine the system message and the model output into one string
    # so the Gradio text output shows both.
    full_message = system_message + "\n\nGenerated Text:\n" + generated_text
    return full_message
# --- Gradio Interface Setup ---
# NOTE: gr.inputs.* and the `default=` keyword belong to the legacy
# Gradio 1.x/2.x API and were removed in Gradio 3.x; the top-level
# components with `value=` are the supported replacements.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter Prompt Here..."),
        # step=1 keeps the token budget integral.
        gr.Slider(minimum=1, maximum=256, value=128, step=1, label="Max Tokens"),
        gr.Textbox(label="System Message", placeholder="Enter a system message here..."),
    ],
    outputs="text",
)

# --- Launch the Interface ---
# Starts a local web server (blocking call).
iface.launch()