ChimeraLlama-3-8B / mergekit_config.yml
models:
  - model: NousResearch/Meta-Llama-3-8B
    # No parameters necessary for base model
  - model: NousResearch/Meta-Llama-3-8B-Instruct
    parameters:
      density: 0.58
      weight: 0.4
  - model: mlabonne/OrpoLlama-3-8B
    parameters:
      density: 0.52
      weight: 0.2
  - model: Locutusque/Llama-3-Orca-1.0-8B
    parameters:
      density: 0.52
      weight: 0.2
  - model: abacusai/Llama-3-Smaug-8B
    parameters:
      density: 0.52
      weight: 0.2
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  int8_mask: true
dtype: float16
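
To materialize this merge, mergekit reads the config and writes a merged checkpoint to an output directory. A minimal sketch using mergekit's Python entry points (MergeConfiguration and run_merge, as documented in the mergekit README); the output path "./ChimeraLlama-3-8B" and the option choices here are illustrative assumptions:

    import yaml
    import torch
    from mergekit.config import MergeConfiguration
    from mergekit.merge import MergeOptions, run_merge

    # Load the YAML above and validate it into a MergeConfiguration.
    with open("mergekit_config.yml", "r", encoding="utf-8") as fp:
        merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

    # Run the merge; the output path is a placeholder, not the official one.
    run_merge(
        merge_config,
        "./ChimeraLlama-3-8B",
        options=MergeOptions(
            cuda=torch.cuda.is_available(),  # use a GPU if one is present
            copy_tokenizer=True,             # copy the base model's tokenizer
            lazy_unpickle=True,              # lower peak memory while loading
        ),
    )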
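
For intuition about the per-model parameters: under dare_ties, each non-base model contributes a delta (its weights minus the base's), density is the fraction of delta entries randomly kept and rescaled by 1/density (the DARE step), and weight scales that model's contribution to the sum. The following single-tensor sketch illustrates only the DARE step; it omits TIES sign-consensus and is not mergekit's actual implementation:

    import torch

    def dare_merge(base: torch.Tensor, finetuned: list[torch.Tensor],
                   densities: list[float], weights: list[float]) -> torch.Tensor:
        # Merge one weight tensor: sparsify each model's delta from the base
        # (keep each entry with probability `density`, rescale by 1/density),
        # then add the weighted deltas onto the base.
        merged = base.clone()
        for ft, density, weight in zip(finetuned, densities, weights):
            delta = ft - base                           # task vector
            mask = torch.rand_like(delta) < density     # keep ~density fraction
            delta = delta * mask / density              # drop and rescale (DARE)
            merged += weight * delta                    # weighted accumulation
        return merged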