sachin committed on
Commit
9d470d9
1 Parent(s): f67cd2f
Files changed (1) hide show
  1. app.py +23 -4
app.py CHANGED
@@ -1,7 +1,26 @@
 
1
  import gradio as gr
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
5
 
6
# Wire the greeter into a minimal text-in/text-out UI and serve it.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
import gradio as gr
import torch

# Load the model and tokenizer from the Hub.
model = AutoModelForCausalLM.from_pretrained("sarvamai/sarvam-1")
tokenizer = AutoTokenizer.from_pretrained("sarvamai/sarvam-1")
# The tokenizer has no dedicated pad token; reuse EOS so padded generation works.
tokenizer.pad_token_id = tokenizer.eos_token_id

# Build the text-generation pipeline. Fall back to CPU when no GPU is present —
# the previous hard-coded device="cuda" crashed on CPU-only hosts.
_device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = TextGenerationPipeline(
    model=model,
    tokenizer=tokenizer,
    device=_device,
    torch_dtype="bfloat16",
    return_full_text=False,
)
# Prediction function used by the Gradio interface.
def generate_text(prompt):
    """Run the generation pipeline on *prompt* and return the generated string."""
    outputs = pipe(prompt)
    return outputs[0]["generated_text"]
# Gradio UI: a single prompt box in, the generated text out.
demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Textbox(label="Generated text"),
    title="Text Generation with Sarvam-1",
    description="Enter a prompt to generate text using the Sarvam-1 model.",
)

# Serve the app with a public share link.
demo.launch(share=True)