adding test models and comments
models.py CHANGED
@@ -1,16 +1,18 @@
 models=[
-
-
-
-    "meta-llama/Llama-2-13b-chat-hf",
-    "meta-llama/Llama-2-13b-hf",
-    "meta-llama/Llama-2-7b-hf",
-    "meta-llama/Llama-2-7b-chat-hf",
-    "meta-llama/Llama-2-70b-chat",
-    "meta-llama/Llama-2-70b-chat-hf",
-    "google/gemma-2b",
+
+    # MUST USE CHAT (RLHF) MODELS FROM HUB WITH INFERENCE AVAILABLE
+
     "google/gemma-2b-it",
-    "google/gemma-7b",
     "google/gemma-7b-it",
+    "HuggingFaceH4/zephyr-7b-beta",
+    "microsoft/phi-2",
+    "M4-ai/TinyMistral-6x248M",
+    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+    "openai-community/gpt2-xl",
+    "bigscience/bloom",
+    "stabilityai/stablelm-2-zephyr-1_6b",
+    "stabilityai/stablelm-zephyr-3b",
+    "huggingtweets/porns_xx",
+    # "meta-llama/Llama-2-70b-hf",
     "ura-hcmut/GemSUra-2B"
 ]
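The new comment insists on chat (RLHF) models that have hosted inference available. Below is a minimal sketch, not part of this commit, of how such a list could be smoke-tested against the Hugging Face Inference API; it assumes the entries are queried through huggingface_hub.InferenceClient and that an HF_TOKEN environment variable is set.

# Hypothetical check (not part of the commit): query each model in the list
# and report whether hosted inference responds.
import os
from huggingface_hub import InferenceClient

models = [
    "google/gemma-2b-it",
    "google/gemma-7b-it",
]

for model_id in models:
    # Entries without a live inference backend raise an error here, which is
    # why the list is restricted to hub models with inference available.
    client = InferenceClient(model=model_id, token=os.environ.get("HF_TOKEN"))
    try:
        reply = client.text_generation("Hello!", max_new_tokens=32)
        print(f"{model_id}: OK -> {reply!r}")
    except Exception as err:
        print(f"{model_id}: inference unavailable ({err})")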