Lorenzo Brunori committed on
Commit
02f4bf6
·
1 Parent(s): 7e06aaa

velvet test

Browse files
Files changed (2) hide show
  1. app.py +43 -4
  2. requirements.txt +5 -0
app.py CHANGED
@@ -1,7 +1,46 @@
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
1
+ # import gradio as gr
2
+
3
+ # def greet(name):
4
+ # return "Hello " + name + "!!"
5
+
6
+ # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
+ # demo.launch()
8
+
9
+
10
+
11
  import gradio as gr
12
+ from transformers import AutoTokenizer, AutoModelForCausalLM
13
+
14
+ tokenizer = AutoTokenizer.from_pretrained("Almawave/Velvet-14B")
15
+ model = AutoModelForCausalLM.from_pretrained("Almawave/Velvet-14B")
16
+
17
+ def generate_text(input_text):
18
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
19
+ attention_mask = torch.ones(input_ids.shape)
20
+
21
+ output = model.generate(
22
+ input_ids,
23
+ attention_mask=attention_mask,
24
+ max_length=200,
25
+ do_sample=True,
26
+ top_k=10,
27
+ num_return_sequences=1,
28
+ eos_token_id=tokenizer.eos_token_id,
29
+ )
30
+
31
+ output_text = tokenizer.decode(output[0], skip_special_tokens=True)
32
+ print(output_text)
33
+
34
+ # Remove Prompt Echo from Generated Text
35
+ cleaned_output_text = output_text.replace(input_text, "")
36
+ return cleaned_output_text
37
 
 
 
38
 
39
+ text_generation_interface = gr.Interface(
40
+ fn=generate_text,
41
+ inputs=[
42
+ gr.inputs.Textbox(label="Input Text"),
43
+ ],
44
+ outputs=gr.inputs.Textbox(label="Generated Text"),
45
+ title="Falcon-7B Instruct",
46
+ ).launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ datasets
2
+ transformers
3
+ accelerate
4
+ einops
5
+ safetensors
6
+ torch