base_model: nitky/Llama-3.1-SuperSwallow-70B-Instruct-v0.1
dtype: bfloat16
merge_method: dare_ties
parameters:
  int8_mask: 1.0
slices:
- sources:
  - layer_range: [0, 80]
    model: nitky/Llama-3.1-SuperSwallow-70B-Instruct-v0.1
  - layer_range: [0, 80]
    model: NousResearch/Hermes-3-Llama-3.1-70B
    parameters:
      density: 0.53
      weight: 0.3
  - layer_range: [0, 80]
    model: Saxo/Linkbricks-Horizon-AI-Japanese-Advanced-V4-70B
    parameters:
      density: 0.53
      weight: 0.4
  - layer_range: [0, 80]
    model: MaziyarPanahi/calme-2.3-llama3.1-70b
    parameters:
      density: 0.53
      weight: 0.3
tokenizer_source: union
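A merge like this is typically executed with mergekit. The following is a minimal sketch using mergekit's Python API, assuming the configuration above is saved as `merge_config.yaml`; the output directory name and the `MergeOptions` values shown are placeholders, not part of the original recipe.

```python
# Sketch: run the DARE-TIES merge described above via mergekit's Python API.
# Assumes mergekit is installed and the YAML config is saved as merge_config.yaml;
# "./merged-model" and the option values below are illustrative placeholders.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse and validate the YAML recipe into a MergeConfiguration object.
with open("merge_config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Execute the merge; the merged weights and the union tokenizer
# (per tokenizer_source: union) are written to the output directory.
run_merge(
    merge_config,
    "./merged-model",
    options=MergeOptions(
        cuda=False,          # set True to run the merge arithmetic on a GPU
        copy_tokenizer=True, # emit tokenizer files alongside the merged weights
        lazy_unpickle=True,  # lower peak RAM while loading the 70B shards
    ),
)
```

The same merge can likely be launched from the command line with the `mergekit-yaml` entry point (e.g. `mergekit-yaml merge_config.yaml ./merged-model`).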