Commit 44d180e (parent: 0b41ab5), committed by kz209

    test model

Files changed:
- pages/summarization_example.py (+9, -4)
- utils/__init__.py (+1, -1)
- utils/model.py (+29, -0, new file)
pages/summarization_example.py CHANGED

@@ -2,6 +2,11 @@ from dotenv import load_dotenv
 from transformers import pipeline
 import gradio as gr
 
+from utils.model import Model
+
+
+model = Model()
+
 load_dotenv()
 
 examples = {
@@ -26,11 +31,11 @@ def generate_answer(sources, model_name):
     summarization: """
     content = meta_prompt.format(sources=sources)
 
-    pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True, max_length=500)
-    messages = [{"role": "user", "content": content}]
-    answer =
+    # pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True, max_length=500)
+    # messages = [{"role": "user", "content": content}]
+    answer = model.gen(content)
 
-    return answer
+    return answer
 
 def process_input(input_text, model_selection):
     if input_text:
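Net effect of this file's diff: the per-request Phi-3 pipeline is replaced by a single module-level Model instance, so weights load once at import time instead of on every generate_answer call. Below is a minimal sketch of the resulting call path; the meta_prompt body is a placeholder assumption, since the diff only shows the template's tail (summarization: """):

# Sketch of the call path after this commit; meta_prompt is a stand-in
# for the real template defined earlier in the file.
from utils.model import Model

model = Model()  # tokenizer + weights load once, at import time

meta_prompt = """Summarize the following sources.

sources: {sources}

summarization: """

def generate_answer(sources, model_name):
    content = meta_prompt.format(sources=sources)
    answer = model.gen(content)  # shared instance; no per-call pipeline build
    return answer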
utils/__init__.py CHANGED

@@ -1,4 +1,4 @@
 # This is the __init__.py file for the utils package
 # You can add any initialization code or import statements here
 
-__all__ = ['multiple_stream']
+__all__ = ['multiple_stream', 'model']
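In a package's __init__.py, __all__ controls what from utils import * exposes, and listed names that are submodules get imported by the star-import. A hypothetical check of the new entry (not code from the repo):

# Star-import now also binds the 'model' submodule.
from utils import *  # imports utils.multiple_stream and utils.model

summarizer = model.Model()  # 'model' here is the utils.model module object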
utils/model.py ADDED

@@ -0,0 +1,29 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import transformers
+import torch
+
+
+class Model():
+    def __init__(self, model="tiiuae/falcon-7b-instruct") -> None:
+        pass
+        self.tokenizer = AutoTokenizer.from_pretrained(model)
+        self.pipeline = transformers.pipeline(
+            "text-generation",
+            model=model,
+            tokenizer=self.tokenizer,
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True,
+            device_map="auto",
+        )
+
+    def gen(self, prompt, temp=0.0, max_length=200):
+        sequences = self.pipeline(
+            "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
+            max_length=max_length,
+            do_sample=True,
+            temperature=temp,
+            num_return_sequences=1,
+            eos_token_id=self.tokenizer.eos_token_id,
+        )
+
+        return '\n'.join([seq['generated_text'] for seq in sequences])
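Review notes on the new class: the stray pass at the top of __init__ is dead code; gen() never uses its prompt argument (the hard-coded string appears to be the Girafatron demo prompt from the tiiuae/falcon-7b-instruct model card); and transformers rejects temperature=0.0 when do_sample=True (temperature must be strictly positive), so the default temp=0.0 would raise at call time. A hedged sketch of a corrected gen(), assuming the class as committed above:

# Corrected gen(): pass the caller's prompt through and only sample
# when temperature is strictly positive (greedy decoding otherwise).
def gen(self, prompt, temp=0.0, max_length=200):
    gen_kwargs = dict(
        max_length=max_length,
        num_return_sequences=1,
        eos_token_id=self.tokenizer.eos_token_id,
    )
    if temp > 0.0:
        gen_kwargs.update(do_sample=True, temperature=temp)  # sampling path
    sequences = self.pipeline(prompt, **gen_kwargs)  # use the caller's prompt
    return '\n'.join(seq['generated_text'] for seq in sequences)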