Update app.py
app.py CHANGED
@@ -22,20 +22,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cpu" # for GPU usage or "cpu" for CPU usage
 
-
-
+tokenizer = AutoTokenizer.from_pretrained(MODEL_LIST[0])
+model = AutoModelForCausalLM.from_pretrained(MODEL_LIST[0]).to(device)
 
-tokenizer1 = AutoTokenizer.from_pretrained(MODEL_LIST[1])
-model1 = AutoModelForCausalLM.from_pretrained(MODEL_LIST[1]).to(device)
-
-tokenizer2 = AutoTokenizer.from_pretrained(MODEL_LIST[2])
-model2 = AutoModelForCausalLM.from_pretrained(MODEL_LIST[2]).to(device)
 
 #@spaces.GPU()
 def stream_chat(
     message: str,
     history: list,
-    temperature: float = 0.
+    temperature: float = 0.4,
     max_new_tokens: int = 1024,
     top_p: float = 1.0,
     top_k: int = 20,
@@ -55,8 +50,6 @@ def stream_chat(
     conversation.append({"role": "user", "content": message})
 
 
-    model = model2
-    tokenizer = tokenizer2
 
     input_text=tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
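For reference, a minimal, self-contained sketch of the flow this commit leaves in place: one tokenizer/model pair loaded once from MODEL_LIST[0] and reused for every chat turn, with the new temperature default of 0.4. The checkpoint name below is a placeholder (the actual MODEL_LIST entries are not shown in this diff), and do_sample=True is added so the sampling parameters take effect; treat it as an illustration under those assumptions, not the Space's exact code.

# Illustrative sketch only; MODEL_LIST[0] is a placeholder checkpoint,
# not taken from the diff above.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_LIST = ["HuggingFaceTB/SmolLM2-135M-Instruct"]  # assumed placeholder

device = "cpu"  # for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(MODEL_LIST[0])
model = AutoModelForCausalLM.from_pretrained(MODEL_LIST[0]).to(device)

# Mirror one stream_chat turn: build the conversation, render the chat
# template, tokenize, and generate with the diff's default parameters.
conversation = [{"role": "user", "content": "Hello! What can you do?"}]
input_text = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, tokenize=False
)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)

# do_sample=True is required for temperature/top_p/top_k to influence decoding.
output = model.generate(
    inputs,
    max_new_tokens=1024,
    temperature=0.4,
    top_p=1.0,
    top_k=20,
    do_sample=True,
)
# Strip the prompt tokens and print only the newly generated reply.
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))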