infokarthikraja committed on
Commit
8a1e711
·
verified ·
1 Parent(s): 813e61d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
# Load the tokenizer and model weights once at module import time.
# Both are fetched from the Hugging Face hub on first run and cached locally.
model_name = "huihui-ai/Llama-3.2-3B-Instruct-abliterated"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# device_map="auto" lets accelerate place the weights on whatever hardware
# is available; low_cpu_mem_usage avoids a second full copy during loading.
load_kwargs = {"device_map": "auto", "low_cpu_mem_usage": True}
model = AutoModelForCausalLM.from_pretrained(model_name, **load_kwargs)
12
+
13
# Define the text generation function
def generate_text(prompt):
    """Generate a text completion for *prompt* with the loaded model.

    Args:
        prompt: Free-form user text used as the generation context.

    Returns:
        The decoded output string (the prompt is included, since the full
        generated sequence is decoded).
    """
    # Move inputs to wherever device_map="auto" actually placed the model —
    # hard-coding "cuda" crashes on CPU-only machines.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds only the continuation; max_length would count the
    # prompt tokens too, silently truncating output for long prompts. Passing
    # the attention mask keeps generate() from guessing it from pad tokens.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs.get("attention_mask"),
        max_new_tokens=100,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
18
+
19
# Build the web UI: one multi-line prompt box in, plain text out.
prompt_box = gr.Textbox(lines=5, placeholder="Enter your prompt here...")

iface = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs="text",
    title="Llama 3.2 3B Instruct Abliterated",
    description="An uncensored language model. Enter your prompt to receive a response.",
)
27
# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()