CryptoScoutv1 committed
Commit 987b605 · 1 Parent(s): de173bf

Create HG_model+Dataset_template.txt

Files changed (1):
  HG_model+Dataset_template.txt  +33 -0
HG_model+Dataset_template.txt ADDED
@@ -0,0 +1,33 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import gradio as gr
+
+ # --- Model and Tokenizer Setup ---
+ model_id = "gpt2"  # Replace with your chosen model
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id)
+
+ # --- Text Generation Function ---
+ def generate_text(prompt, max_tokens=50, system_message=""):
+     # Tokenize the prompt and generate a continuation of at most max_tokens new tokens
+     encoded_input = tokenizer(prompt, return_tensors="pt")
+     max_length = encoded_input["input_ids"].shape[1] + int(max_tokens)
+     output_sequences = model.generate(input_ids=encoded_input["input_ids"], max_length=max_length, pad_token_id=tokenizer.eos_token_id)
+     generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+
+     # Prepend the system message to the generated text
+     full_message = system_message + "\n\nGenerated Text:\n" + generated_text
+     return full_message
+
+ # --- Gradio Interface Setup ---
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.Textbox(lines=2, placeholder="Enter Text Here..."),
+         gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Max Tokens"),
+         gr.Textbox(label="System Message", placeholder="Enter a system message here...")
+     ],
+     outputs="text"
+ )
+
+ # --- Launch the Interface ---
+ iface.launch()
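
For a quick check outside the Gradio UI, the generation function defined in the template can also be called directly; the prompt and argument values below are illustrative only:

    print(generate_text("Once upon a time", max_tokens=30, system_message="Demo run"))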