Create config.json
config.json ADDED (+59 -0)
@@ -0,0 +1,59 @@
+{
+  "Model": "CogniFusion-XTTS-slerp",
+  "Description": "CogniFusion-XTTS-slerp is a merge of the following models using LazyMergekit: Or4cl3-1/cognitive-agent-xtts-optimized and Or4cl3-1/multimodal-fusion-optimized",
+  "Configuration": {
+    "slices": [
+      {
+        "sources": [
+          {
+            "model": "Or4cl3-1/cognitive-agent-xtts-optimized",
+            "layer_range": [0, 32]
+          },
+          {
+            "model": "Or4cl3-1/multimodal-fusion-optimized",
+            "layer_range": [0, 32]
+          }
+        ]
+      }
+    ],
+    "merge_method": "slerp",
+    "base_model": "Or4cl3-1/cognitive-agent-xtts-optimized",
+    "parameters": {
+      "t": [
+        {
+          "filter": "self_attn",
+          "value": [0, 0.5, 0.3, 0.7, 1]
+        },
+        {
+          "filter": "mlp",
+          "value": [1, 0.5, 0.7, 0.3, 0]
+        },
+        {
+          "value": 0.5
+        }
+      ]
+    },
+    "dtype": "bfloat16"
+  },
+  "Ethical Considerations": "Add ethical considerations and any additional optimization parameters here",
+  "Usage": {
+    "Installation": "!pip install -qU transformers accelerate",
+    "Code": [
+      "from transformers import AutoTokenizer",
+      "import transformers",
+      "import torch",
+      "model = \"Or4cl3-1/CogniFusion-XTTS-slerp\"",
+      "messages = [{\"role\": \"user\", \"content\": \"What is a large language model?\"}]",
+      "tokenizer = AutoTokenizer.from_pretrained(model)",
+      "prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)",
+      "pipeline = transformers.pipeline(",
+      "  \"text-generation\",",
+      "  model=model,",
+      "  torch_dtype=torch.float16,",
+      "  device_map=\"auto\",",
+      ")",
+      "outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)",
+      "print(outputs[0][\"generated_text\"])"
+    ]
+  }
+}
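
Note that the "Configuration" object above mirrors mergekit's YAML schema one-to-one (slices, merge_method, base_model, parameters, dtype), which is the format LazyMergekit ultimately feeds to the mergekit-yaml CLI. As a minimal sketch, assuming config.json sits in the working directory, PyYAML is installed, and the output paths are illustrative, the block could be extracted into a usable merge config like this:

# Sketch: pull the mergekit configuration embedded in this config.json
# and write it as the YAML file the mergekit-yaml CLI consumes.
# Assumptions: config.json is in the working directory; PyYAML is installed;
# file and output names below are illustrative, not part of the repo.
import json
import yaml

with open("config.json") as f:
    cfg = json.load(f)

# The "Configuration" value already matches mergekit's schema,
# so it can be dumped directly to YAML without restructuring.
with open("merge_config.yaml", "w") as f:
    yaml.safe_dump(cfg["Configuration"], f, sort_keys=False)

# The merge itself would then be run with mergekit, e.g.:
#   mergekit-yaml merge_config.yaml ./CogniFusion-XTTS-slerp --copy-tokenizer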
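
For reference, the strings in "Usage.Code" assemble into the following inference script. This is standard transformers pipeline usage as given in the file itself; it assumes the merged model has been pushed to Or4cl3-1/CogniFusion-XTTS-slerp and that transformers and accelerate are installed per the "Installation" line:

# Inference snippet assembled from the "Usage.Code" strings above.
from transformers import AutoTokenizer
import transformers
import torch

model = "Or4cl3-1/CogniFusion-XTTS-slerp"
messages = [{"role": "user", "content": "What is a large language model?"}]

# Build a chat-formatted prompt from the model's own chat template.
tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Load the merged model for text generation, sharding across available devices.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Sample a response and print the full generated text.
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])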