CogniFusion-XTTS-slerp / config.json
{
  "Model": "CogniFusion-XTTS-slerp",
  "Description": "CogniFusion-XTTS-slerp is a SLERP merge of Or4cl3-1/cognitive-agent-xtts-optimized and Or4cl3-1/multimodal-fusion-optimized, produced with LazyMergekit.",
  "Configuration": {
    "slices": [
      {
        "sources": [
          {
            "model": "Or4cl3-1/cognitive-agent-xtts-optimized",
            "layer_range": [0, 32]
          },
          {
            "model": "Or4cl3-1/multimodal-fusion-optimized",
            "layer_range": [0, 32]
          }
        ]
      }
    ],
    "merge_method": "slerp",
    "base_model": "Or4cl3-1/cognitive-agent-xtts-optimized",
    "parameters": {
      "t": [
        {
          "filter": "self_attn",
          "value": [0, 0.5, 0.3, 0.7, 1]
        },
        {
          "filter": "mlp",
          "value": [1, 0.5, 0.7, 0.3, 0]
        },
        {
          "value": 0.5
        }
      ]
    },
    "dtype": "bfloat16"
  },
  "Ethical Considerations": "Add ethical considerations and any additional optimization parameters here",
  "Usage": {
    "Installation": "!pip install -qU transformers accelerate",
    "Code": [
      "from transformers import AutoTokenizer",
      "import transformers",
      "import torch",
      "model = \"Or4cl3-1/CogniFusion-XTTS-slerp\"",
      "messages = [{\"role\": \"user\", \"content\": \"What is a large language model?\"}]",
      "tokenizer = AutoTokenizer.from_pretrained(model)",
      "prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)",
      "pipeline = transformers.pipeline(",
      "    \"text-generation\",",
      "    model=model,",
      "    torch_dtype=torch.float16,",
      "    device_map=\"auto\",",
      ")",
      "outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)",
      "print(outputs[0][\"generated_text\"])"
    ]
  }
}
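
Note on the merge settings: merge_method "slerp" blends each pair of matching tensors by spherical linear interpolation rather than a straight weighted average. In mergekit's convention, t = 0 keeps the base_model tensor and t = 1 takes the other model's tensor; a list such as [0, 0.5, 0.3, 0.7, 1] gives anchor values that are interpolated across the 32-layer range, here applied to self_attn tensors, while mlp tensors get the mirrored gradient and any tensor matching neither filter uses the flat t = 0.5. The following is a minimal NumPy sketch of the interpolation for intuition, not mergekit's actual implementation:

import numpy as np

def slerp(t, v0, v1, eps=1e-8):
    # Interpolate along the great-circle arc between two flattened
    # weight tensors: t = 0 returns v0, t = 1 returns v1.
    v0_unit = v0 / (np.linalg.norm(v0) + eps)
    v1_unit = v1 / (np.linalg.norm(v1) + eps)
    dot = np.clip(np.dot(v0_unit, v1_unit), -1.0, 1.0)
    theta = np.arccos(dot)  # angle between the two weight directions
    if theta < eps:
        # Nearly colinear tensors: plain linear interpolation is stable.
        return (1 - t) * v0 + t * v1
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)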
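
LazyMergekit, referenced in the Description, is a thin Colab wrapper around mergekit: it writes a YAML version of the Configuration block above and runs the mergekit CLI on it. The sketch below shows that reproduction step under the assumption that mergekit is installed (pip install mergekit) and that the mergekit-yaml entry point and --copy-tokenizer flag behave as in current releases; the output directory name is illustrative:

import subprocess
import yaml  # PyYAML

# Same settings as the "Configuration" block of this config.json.
merge_config = {
    "slices": [{
        "sources": [
            {"model": "Or4cl3-1/cognitive-agent-xtts-optimized", "layer_range": [0, 32]},
            {"model": "Or4cl3-1/multimodal-fusion-optimized", "layer_range": [0, 32]},
        ],
    }],
    "merge_method": "slerp",
    "base_model": "Or4cl3-1/cognitive-agent-xtts-optimized",
    "parameters": {"t": [
        {"filter": "self_attn", "value": [0, 0.5, 0.3, 0.7, 1]},
        {"filter": "mlp", "value": [1, 0.5, 0.7, 0.3, 0]},
        {"value": 0.5},
    ]},
    "dtype": "bfloat16",
}

with open("merge_config.yaml", "w") as f:
    yaml.dump(merge_config, f)

# mergekit-yaml <config> <output dir>; --copy-tokenizer copies the base
# model's tokenizer into the merged checkpoint.
subprocess.run(
    ["mergekit-yaml", "merge_config.yaml", "CogniFusion-XTTS-slerp", "--copy-tokenizer"],
    check=True,
)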