Spaces:
freQuensy23 committed • Commit f0c7657 • 1 Parent(s): 3ca9013
Replace front

Files changed:
- app.py +6 -18
- generators.py +2 -2
app.py CHANGED

@@ -28,25 +28,13 @@ async def handle(system_input: str, user_input: str):
 with gr.Blocks() as demo:
     system_input = gr.Textbox(label='System Input', value='You are AI assistant', lines=2)
     with gr.Row():
-        with gr.Column():
-            gpt = gr.Textbox(label='gpt-2', lines=4, interactive=False)
-            gr.Text('OpenAI\n14 February 2019')
-        with gr.Column():
-            t5 = gr.Textbox(label='t5', lines=4, interactive=False)
-            gr.Text("Google\n12 Dec 2019")
-        with gr.Column():
-            bloom = gr.Textbox(label='bloom [GPU]', lines=4, interactive=False)
-            gr.Text('Big Science\n11 Jul 2022')
+        gpt = gr.Textbox(label='gpt-2', lines=4, interactive=False, info='OpenAI\n14 February 2019')
+        t5 = gr.Textbox(label='t5', lines=4, interactive=False, info='Google\n12 Dec 2019')
+        bloom = gr.Textbox(label='bloom [GPU]', lines=4, interactive=False, info='Big Science\n11 Jul 2022')
     with gr.Row():
-        with gr.Column():
-            llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False)
-            gr.Text('MetaAI\n18 Jul 2023')
-        with gr.Column():
-            mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False)
-            gr.Text("MistralAI\n20 Sep 2023")
-        with gr.Column():
-            llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False)
-            gr.Text('MetaAI\n18 Jul 2024')
+        llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False, info='MetaAI\n18 Jul 2023')
+        mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False, info='MistralAI\n20 Sep 2023')
+        llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False, info='MetaAI\n18 Jul 2024')
 
     user_input = gr.Textbox(label='User Input', lines=2, value='Calculate expression: 7-3=')
     gen_button = gr.Button('Generate')
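The change flattens the layout: instead of wrapping each model output in its own gr.Column() with a separate gr.Text() caption, each Textbox now carries its provenance in the info= parameter, so the two gr.Row() blocks hold the textboxes directly. A minimal standalone sketch of the resulting front, assuming a placeholder handler in place of the Space's real generators:

import gradio as gr

with gr.Blocks() as demo:
    system_input = gr.Textbox(label='System Input', value='You are AI assistant', lines=2)
    # One Row per group of models; info= replaces the old gr.Text() captions.
    with gr.Row():
        gpt = gr.Textbox(label='gpt-2', lines=4, interactive=False, info='OpenAI\n14 February 2019')
        t5 = gr.Textbox(label='t5', lines=4, interactive=False, info='Google\n12 Dec 2019')
        bloom = gr.Textbox(label='bloom [GPU]', lines=4, interactive=False, info='Big Science\n11 Jul 2022')
    with gr.Row():
        llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False, info='MetaAI\n18 Jul 2023')
        mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False, info='MistralAI\n20 Sep 2023')
        llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False, info='MetaAI\n18 Jul 2024')
    user_input = gr.Textbox(label='User Input', lines=2, value='Calculate expression: 7-3=')
    gen_button = gr.Button('Generate')

    # Hypothetical echo handler; the real app wires gen_button to its model generators.
    def echo(system, user):
        return [f"{system}\n{user}"] * 6

    gen_button.click(echo, [system_input, user_input],
                     [gpt, t5, bloom, llama2, mistral, llama3])

if __name__ == '__main__':
    demo.launch()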
generators.py CHANGED

@@ -11,7 +11,7 @@ from huggingface_hub import InferenceClient
 import random
 import torch
 from huggingface_hub import AsyncInferenceClient
-from transformers import LlamaTokenizer, LlamaForCausalLM, AutoTokenizer
+from transformers import LlamaTokenizer, LlamaForCausalLM, AutoTokenizer, AutoModelForCausalLM
 
 
 async def query_llm(payload, model_name):

@@ -85,7 +85,7 @@ def generate_openllama(system_input, user_input):
 def generate_bloom(system_input, user_input):
     model_path = 'bigscience/bloom-7b1'
     tokenizer = AutoTokenizer.from_pretrained(model_path)
-    model = 
+    model = AutoModelForCausalLM.from_pretrained(
         model_path, torch_dtype=torch.float16, device_map='cuda',
     )
     input_text = f"{system_input}\n{user_input}"
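With AutoModelForCausalLM, transformers infers the concrete architecture (BloomForCausalLM for this checkpoint) from the model's config, so generate_bloom no longer needs a hard-coded model class. A sketch of how the completed function might run end to end; the generate() call and its parameters are assumptions, since the diff only shows the loading lines:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

def generate_bloom(system_input, user_input):
    model_path = 'bigscience/bloom-7b1'
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    # AutoModelForCausalLM resolves to BloomForCausalLM from the checkpoint config.
    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16, device_map='cuda',
    )
    input_text = f"{system_input}\n{user_input}"
    inputs = tokenizer(input_text, return_tensors='pt').to(model.device)
    # max_new_tokens is an assumed value; the actual generation call is not in the diff.
    output_ids = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)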