---
license: apache-2.0
library_name: transformers
tags:
- mergekit
- merge
base_model:
- bamec66557/VICIOUS_MESH-12B-BETA
- bamec66557/VICIOUS_MESH-12B-OMEGA
model-index:
- name: Mistral-Nemo-VICIOUS_MESH-12B-2407
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 67.21
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 31.36
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 12.08
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 8.84
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 14.34
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 29.76
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407
      name: Open LLM Leaderboard
---

# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details

### Merge Method

This model was merged using the SLERP merge method.
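For intuition, SLERP (spherical linear interpolation) blends two sets of weights along an arc between them rather than along a straight line, which preserves the magnitude of the interpolated weights better than plain averaging. The per-layer `t` schedules in the configuration below control how far each slice moves from the base model (`t = 0`) toward the other model (`t = 1`; values above 1 extrapolate past it). Below is a minimal sketch of the idea in Python with NumPy, for illustration only, not mergekit's actual implementation:

```python
import numpy as np

def slerp(t: float, w0: np.ndarray, w1: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Spherically interpolate between two weight tensors: t=0 -> w0, t=1 -> w1."""
    v0, v1 = w0.ravel(), w1.ravel()
    # Angle between the two weight vectors, from their normalized dot product.
    cos_theta = np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1) + eps)
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    if theta < eps:
        # Nearly parallel weights: fall back to ordinary linear interpolation.
        return (1.0 - t) * w0 + t * w1
    # Standard SLERP coefficients.
    s0 = np.sin((1.0 - t) * theta) / np.sin(theta)
    s1 = np.sin(t * theta) / np.sin(theta)
    return (s0 * v0 + s1 * v1).reshape(w0.shape)
```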
### Models Merged

The following models were included in the merge:
* [bamec66557/VICIOUS_MESH-12B-BETA](https://huggingface.co/bamec66557/VICIOUS_MESH-12B-BETA)
* [bamec66557/VICIOUS_MESH-12B-OMEGA](https://huggingface.co/bamec66557/VICIOUS_MESH-12B-OMEGA)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
base_model: bamec66557/VICIOUS_MESH-12B-OMEGA
dtype: bfloat16
merge_method: slerp
tokenizer_source: base

# Slices Configuration
slices:
  - sources:
      - model: bamec66557/VICIOUS_MESH-12B-OMEGA
        layer_range: [0, 10]
      - model: bamec66557/VICIOUS_MESH-12B-BETA
        layer_range: [0, 10]
    parameters:
      t:
        - name: self_attn
          value: [0.5, 0.55, 0.6, 0.65, 0.7]
        - name: mlp
          value: [1.0, 1.05, 1.1, 1.15, 1.2]
        - name: layer_norm
          value: [0.9, 0.95, 1.0, 1.05, 1.1]
  - sources:
      - model: bamec66557/VICIOUS_MESH-12B-OMEGA
        layer_range: [10, 20]
      - model: bamec66557/VICIOUS_MESH-12B-BETA
        layer_range: [10, 20]
    parameters:
      t:
        - name: self_attn
          value: [0.4, 0.45, 0.5, 0.55, 0.6]
        - name: mlp
          value: [1.1, 1.15, 1.2, 1.25, 1.3]
        - name: layer_norm
          value: [1.0, 1.05, 1.1, 1.15, 1.2]
  - sources:
      - model: bamec66557/VICIOUS_MESH-12B-OMEGA
        layer_range: [20, 30]
      - model: bamec66557/VICIOUS_MESH-12B-BETA
        layer_range: [20, 30]
    parameters:
      t:
        - name: self_attn
          value: [0.6, 0.65, 0.7, 0.75, 0.8]
        - name: mlp
          value: [0.9, 0.95, 1.0, 1.05, 1.1]
        - name: layer_norm
          value: [0.85, 0.9, 0.95, 1.0, 1.05]
  - sources:
      - model: bamec66557/VICIOUS_MESH-12B-OMEGA
        layer_range: [30, 40]
      - model: bamec66557/VICIOUS_MESH-12B-BETA
        layer_range: [30, 40]
    parameters:
      t:
        - name: self_attn
          value: [0.7, 0.75, 0.8, 0.85, 0.9]
        - name: mlp
          value: [0.8, 0.85, 0.9, 0.95, 1.0]
        - name: layer_norm
          value: [0.8, 0.85, 0.9, 0.95, 1.0]

# Regularization
regularization:
  - method: gradient_penalty
    scale: 0.05  # Increased influence for gradient control
  - method: weight_clipping
    clip_range: [-0.2, 0.2]  # Broader clipping range for flexibility
  - method: random_noise
    scale: 0.01  # Stronger noise injection
  - method: attention_dropout
    scale: 0.1  # Higher dropout to reduce attention fixation

# Postprocessing
postprocessing:
  - operation: entropy_regularization
    scale: 0.05  # Stronger encouragement for diverse outputs
  - operation: non_linear_scaling
    parameters:
      function: tanh
  - operation: sharpening
    intensity: 0.5  # Enhanced sharpening for precise outputs
  - operation: gaussian_smoothing
    sigma: 1.5  # Increased smoothing for stable outputs
  - operation: normalize
  - operation: dynamic_scaling
    scale_range: [0.8, 1.2]  # Expanded dynamic range for scaling
  - operation: smoothing
    parameters:
      adaptive: true
      range: [0.85, 1.15]  # Wider adaptive smoothing range
      kernel_size: 5
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/bamec66557__Mistral-Nemo-VICIOUS_MESH-12B-2407-details).

| Metric              | Value |
|---------------------|------:|
| Avg.                | 27.26 |
| IFEval (0-Shot)     | 67.21 |
| BBH (3-Shot)        | 31.36 |
| MATH Lvl 5 (4-Shot) | 12.08 |
| GPQA (0-shot)       |  8.84 |
| MuSR (0-shot)       | 14.34 |
| MMLU-PRO (5-shot)   | 29.76 |
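As a quick usage sketch (assuming the repository id `bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407` referenced in the leaderboard links above and a recent `transformers` release), the merged model loads like any other causal LM:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bamec66557/Mistral-Nemo-VICIOUS_MESH-12B-2407"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches the merge dtype in the config above
    device_map="auto",
)

prompt = "Briefly explain what a model merge is."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```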