diff --git a/model_hubs/Skywork-13B-Base-3T/config.json b/model_hubs/Skywork-13B-Base-3T/config.json
deleted file mode 100644
index 176a4ca6fc2d7e436819a6c762c7967edb3a7b3f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/config.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
- "architectures": [
- "SkyworkForCausalLM"
- ],
- "auto_map": {
- "AutoConfig": "configuration_skywork.SkyworkConfig",
- "AutoModelForCausalLM": "modeling_skywork.SkyworkForCausalLM"
- },
- "bos_token_id": 1,
- "eos_token_id": 2,
- "pad_token_id": 0,
- "hidden_act": "silu",
- "hidden_size": 4608,
- "initializer_range": 0.01,
- "intermediate_size": 12288,
- "max_position_embeddings": 131072,
- "model_type": "skywork",
- "num_attention_heads": 36,
- "num_hidden_layers": 52,
- "num_key_value_heads": 36,
- "rms_norm_eps": 1e-06,
- "tie_word_embeddings": false,
- "torch_dtype": "bfloat16",
- "transformers_version": "4.33.1",
- "use_cache": true,
- "vocab_size": 65519
- }
\ No newline at end of file
diff --git a/model_hubs/Skywork-13B-Base-3T/configuration_skywork.py b/model_hubs/Skywork-13B-Base-3T/configuration_skywork.py
deleted file mode 100644
index dbbad8ae1e08d431a14c5de719267629feb4cd5a..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/configuration_skywork.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
-# This code is built upon Huggingface's transformers repository.
-
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
-
-
-class SkyworkConfig(PretrainedConfig):
-
- model_type = "skywork"
- keys_to_ignore_at_inference = ["past_key_values"]
-
- def __init__(
- self,
- vocab_size=32000,
- hidden_size=4096,
- intermediate_size=11008,
- num_hidden_layers=32,
- num_attention_heads=32,
- num_key_value_heads=None,
- hidden_act="silu",
- max_position_embeddings=2048,
- initializer_range=0.02,
- rms_norm_eps=1e-6,
- use_cache=True,
- pad_token_id=None,
- bos_token_id=1,
- eos_token_id=2,
- pretraining_tp=1,
- tie_word_embeddings=False,
- rope_theta=10000.0,
- rope_scaling=None,
- **kwargs,
- ):
- self.vocab_size = vocab_size
- self.max_position_embeddings = max_position_embeddings
- self.hidden_size = hidden_size
- self.intermediate_size = intermediate_size
- self.num_hidden_layers = num_hidden_layers
- self.num_attention_heads = num_attention_heads
-
- # for backward compatibility
- if num_key_value_heads is None:
- num_key_value_heads = num_attention_heads
-
- self.num_key_value_heads = num_key_value_heads
- self.hidden_act = hidden_act
- self.initializer_range = initializer_range
- self.rms_norm_eps = rms_norm_eps
- self.pretraining_tp = pretraining_tp
- self.use_cache = use_cache
- self.rope_theta = rope_theta
- self.rope_scaling = rope_scaling
- self._rope_scaling_validation()
-
- super().__init__(
- pad_token_id=pad_token_id,
- bos_token_id=bos_token_id,
- eos_token_id=eos_token_id,
- tie_word_embeddings=tie_word_embeddings,
- **kwargs,
- )
-
- def _rope_scaling_validation(self):
- """
- Validate the `rope_scaling` configuration.
- """
- if self.rope_scaling is None:
- return
-
- if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
- raise ValueError(
-                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
- f"got {self.rope_scaling}"
- )
- rope_scaling_type = self.rope_scaling.get("type", None)
- rope_scaling_factor = self.rope_scaling.get("factor", None)
- if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "ntk"]:
- raise ValueError(
-                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic', 'ntk'], got {rope_scaling_type}"
- )
- if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
-            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
diff --git a/model_hubs/Skywork-13B-Base-3T/generation_config.json b/model_hubs/Skywork-13B-Base-3T/generation_config.json
deleted file mode 100644
index aece903f676603332b5bc1b1a29d6e44a8c02464..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/generation_config.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "bos_token_id": 1,
- "do_sample": true,
- "eos_token_id": 2,
- "max_length": 4096,
- "pad_token_id": 0,
- "temperature": 0.6,
- "top_p": 0.9,
- "transformers_version": "4.33.1"
-}
\ No newline at end of file
diff --git a/model_hubs/Skywork-13B-Base-3T/modeling_skywork.py b/model_hubs/Skywork-13B-Base-3T/modeling_skywork.py
deleted file mode 100644
index 93d2898e0e7d379dc6883c4e34043e537689b8bb..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/modeling_skywork.py
+++ /dev/null
@@ -1,911 +0,0 @@
-# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
-# This code is built upon Huggingface's transformers repository.
-
-import math
-from typing import List, Optional, Tuple, Union
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch import nn
-from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
-from transformers.activations import ACT2FN
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
-from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import logging
-from .configuration_skywork import SkyworkConfig
-
-
-logger = logging.get_logger(__name__)
-
-_CONFIG_FOR_DOC = "SkyworkConfig"
-
-
-# Copied from transformers.models.bart.modeling_bart._make_causal_mask
-def _make_causal_mask(
- input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
-):
- """
- Make causal mask used for bi-directional self-attention.
- """
- bsz, tgt_len = input_ids_shape
- mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
- mask_cond = torch.arange(mask.size(-1), device=device)
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
- mask = mask.to(dtype)
-
- if past_key_values_length > 0:
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
- return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
-
-# Copied from transformers.models.bart.modeling_bart._expand_mask
-def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- bsz, src_len = mask.size()
- tgt_len = tgt_len if tgt_len is not None else src_len
-
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
- inverted_mask = 1.0 - expanded_mask
-
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-
-class SkyworkRMSNorm(nn.Module):
- def __init__(self, hidden_size, eps=1e-6):
- """
- SkyworkRMSNorm is equivalent to T5LayerNorm
- """
- super().__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.variance_epsilon = eps
-
- def forward(self, hidden_states):
- input_dtype = hidden_states.dtype
- hidden_states = hidden_states.to(torch.float32)
- variance = hidden_states.pow(2).mean(-1, keepdim=True)
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
- return self.weight * hidden_states.to(input_dtype)
-
-
-class SkyworkRotaryEmbedding(torch.nn.Module):
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
- super().__init__()
-
- self.dim = dim
- self.max_position_embeddings = max_position_embeddings
- self.base = base
- inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
- self.register_buffer("inv_freq", inv_freq, persistent=False)
-
- # Build here to make `torch.jit.trace` work.
- self._set_cos_sin_cache(
- seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
- )
-
- def _set_cos_sin_cache(self, seq_len, device, dtype):
- self.max_seq_len_cached = seq_len
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
-
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
- emb = torch.cat((freqs, freqs), dim=-1)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
-
- def forward(self, x, seq_len=None):
- # x: [bs, num_attention_heads, seq_len, head_size]
- if seq_len > self.max_seq_len_cached:
- self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
-
- return (
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- )
-
-
-class SkyworkLinearScalingRotaryEmbedding(SkyworkRotaryEmbedding):
- """SkyworkRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
-
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
- self.scaling_factor = scaling_factor
- super().__init__(dim, max_position_embeddings, base, device)
-
- def _set_cos_sin_cache(self, seq_len, device, dtype):
- self.max_seq_len_cached = seq_len
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
- t = t / self.scaling_factor
-
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
- emb = torch.cat((freqs, freqs), dim=-1)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
-
-
-class SkyworkDynamicNTKScalingRotaryEmbedding(SkyworkRotaryEmbedding):
- """SkyworkRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
-
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
- self.scaling_factor = scaling_factor
- super().__init__(dim, max_position_embeddings, base, device)
-
- def _set_cos_sin_cache(self, seq_len, device, dtype):
- self.max_seq_len_cached = seq_len
-
- if seq_len > self.max_position_embeddings:
- base = self.base * (
- (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
- ) ** (self.dim / (self.dim - 2))
- inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
- self.register_buffer("inv_freq", inv_freq, persistent=False)
-
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
-
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
- emb = torch.cat((freqs, freqs), dim=-1)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
-
-
-
-class SkyworkNTKScalingRotaryEmbedding(torch.nn.Module):
- def __init__(self, dim, max_position_embeddings=2048, base=10000, scaling_factor=100, device=None):
- super().__init__()
-
- self.dim = dim
- self.max_position_embeddings = max_position_embeddings
- self.base = base * scaling_factor
- inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
- self.register_buffer("inv_freq", inv_freq, persistent=False)
-
- # Build here to make `torch.jit.trace` work.
- self._set_cos_sin_cache(
- seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
- )
-
- def _set_cos_sin_cache(self, seq_len, device, dtype):
- self.max_seq_len_cached = seq_len
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
- emb = torch.cat((freqs, freqs), dim=-1)
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
-
- def forward(self, x, seq_len=None):
- if seq_len > self.max_seq_len_cached:
- self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
-
- return (
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
- )
-
-def rotate_half(x):
- """Rotates half the hidden dims of the input."""
- x1 = x[..., : x.shape[-1] // 2]
- x2 = x[..., x.shape[-1] // 2 :]
- return torch.cat((-x2, x1), dim=-1)
-
-
-def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
- # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
- cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
- sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
- cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
- sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
- q_embed = (q * cos) + (rotate_half(q) * sin)
- k_embed = (k * cos) + (rotate_half(k) * sin)
- return q_embed, k_embed
-
-
-class SkyworkMLP(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.config = config
- self.hidden_size = config.hidden_size
- self.intermediate_size = config.intermediate_size
- self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
- self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
- self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
- self.act_fn = ACT2FN[config.hidden_act]
-
- def forward(self, x):
- if self.config.pretraining_tp > 1:
- slice = self.intermediate_size // self.config.pretraining_tp
- gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
- up_proj_slices = self.up_proj.weight.split(slice, dim=0)
- down_proj_slices = self.down_proj.weight.split(slice, dim=1)
-
- gate_proj = torch.cat(
- [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
- )
- up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
-
- intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
- down_proj = [
- F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
- ]
- down_proj = sum(down_proj)
- else:
- down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-
- return down_proj
-
-
-def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
- """
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
- num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
- """
- batch, num_key_value_heads, slen, head_dim = hidden_states.shape
- if n_rep == 1:
- return hidden_states
- hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
- return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
-
-
-class SkyworkAttention(nn.Module):
- """Multi-headed attention from 'Attention Is All You Need' paper"""
-
- def __init__(self, config: SkyworkConfig):
- super().__init__()
- self.config = config
- self.hidden_size = config.hidden_size
- self.num_heads = config.num_attention_heads
- self.head_dim = self.hidden_size // self.num_heads
- self.num_key_value_heads = config.num_key_value_heads
- self.num_key_value_groups = self.num_heads // self.num_key_value_heads
- self.max_position_embeddings = config.max_position_embeddings
- self.rope_theta = config.rope_theta
-
- if (self.head_dim * self.num_heads) != self.hidden_size:
- raise ValueError(
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
- f" and `num_heads`: {self.num_heads})."
- )
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
- self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
- self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
- self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
- self._init_rope()
-
- def _init_rope(self):
- if self.config.rope_scaling is None:
- self.rotary_emb = SkyworkRotaryEmbedding(
- self.head_dim,
- max_position_embeddings=self.max_position_embeddings,
- base=self.rope_theta,
- )
- else:
- scaling_type = self.config.rope_scaling["type"]
- scaling_factor = self.config.rope_scaling["factor"]
- if scaling_type == "linear":
- self.rotary_emb = SkyworkLinearScalingRotaryEmbedding(
- self.head_dim,
- max_position_embeddings=self.max_position_embeddings,
- scaling_factor=scaling_factor,
- base=self.rope_theta,
- )
- elif scaling_type == "dynamic":
- self.rotary_emb = SkyworkDynamicNTKScalingRotaryEmbedding(
- self.head_dim,
- max_position_embeddings=self.max_position_embeddings,
- scaling_factor=scaling_factor,
- base=self.rope_theta,
- )
- elif scaling_type == "ntk":
- self.rotary_emb = SkyworkNTKScalingRotaryEmbedding(
- self.head_dim,
- max_position_embeddings=self.max_position_embeddings,
- scaling_factor=scaling_factor,
- base=self.rope_theta,
- )
- else:
- raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
- print('-'*80)
-            print(f"USING CUSTOM MODELING, scaling_type is {scaling_type}, scaling_factor is {scaling_factor}")
-
- def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
- return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: bool = False,
- use_cache: bool = False,
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
- bsz, q_len, _ = hidden_states.size()
-
- if self.config.pretraining_tp > 1:
- key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
- query_slices = self.q_proj.weight.split(
- (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
- )
- key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
- value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
-
- query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
- query_states = torch.cat(query_states, dim=-1)
-
- key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
- key_states = torch.cat(key_states, dim=-1)
-
- value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
- value_states = torch.cat(value_states, dim=-1)
-
- else:
- query_states = self.q_proj(hidden_states)
- key_states = self.k_proj(hidden_states)
- value_states = self.v_proj(hidden_states)
-
- query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
- key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
- value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-
- kv_seq_len = key_states.shape[-2]
- if past_key_value is not None:
- kv_seq_len += past_key_value[0].shape[-2]
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-
- if past_key_value is not None:
- # reuse k, v, self_attention
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
- past_key_value = (key_states, value_states) if use_cache else None
-
- # repeat k/v heads if n_kv_heads < n_heads
- key_states = repeat_kv(key_states, self.num_key_value_groups)
- value_states = repeat_kv(value_states, self.num_key_value_groups)
-
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
- raise ValueError(
- f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
- f" {attn_weights.size()}"
- )
-
- if attention_mask is not None:
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
- raise ValueError(
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
- )
- attn_weights = attn_weights + attention_mask
-
- # upcast attention to fp32
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
- attn_output = torch.matmul(attn_weights, value_states)
-
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
- raise ValueError(
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
- f" {attn_output.size()}"
- )
-
- attn_output = attn_output.transpose(1, 2).contiguous()
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
- if self.config.pretraining_tp > 1:
- attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
- o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
- attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
- else:
- attn_output = self.o_proj(attn_output)
-
- if not output_attentions:
- attn_weights = None
-
- return attn_output, attn_weights, past_key_value
-
-
-class SkyworkDecoderLayer(nn.Module):
- def __init__(self, config: SkyworkConfig):
- super().__init__()
- self.hidden_size = config.hidden_size
- self.self_attn = SkyworkAttention(config=config)
- self.mlp = SkyworkMLP(config)
- self.input_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
- self.post_attention_layernorm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
- def forward(
- self,
- hidden_states: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
- output_attentions: Optional[bool] = False,
- use_cache: Optional[bool] = False,
- ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
- """
- Args:
- hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
- attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
- `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
- returned tensors for more detail.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
- (see `past_key_values`).
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
- """
-
- residual = hidden_states
-
- hidden_states = self.input_layernorm(hidden_states)
-
- # Self Attention
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
- hidden_states=hidden_states,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_value=past_key_value,
- output_attentions=output_attentions,
- use_cache=use_cache,
- )
- hidden_states = residual + hidden_states
-
- # Fully Connected
- residual = hidden_states
- hidden_states = self.post_attention_layernorm(hidden_states)
- hidden_states = self.mlp(hidden_states)
- hidden_states = residual + hidden_states
-
- outputs = (hidden_states,)
-
- if output_attentions:
- outputs += (self_attn_weights,)
-
- if use_cache:
- outputs += (present_key_value,)
-
- return outputs
-
-class SkyworkPreTrainedModel(PreTrainedModel):
- config_class = SkyworkConfig
- base_model_prefix = "model"
- supports_gradient_checkpointing = True
- _no_split_modules = ["SkyworkDecoderLayer"]
- _skip_keys_device_placement = "past_key_values"
-
- def _init_weights(self, module):
- std = self.config.initializer_range
- if isinstance(module, nn.Linear):
- module.weight.data.normal_(mean=0.0, std=std)
- if module.bias is not None:
- module.bias.data.zero_()
- elif isinstance(module, nn.Embedding):
- module.weight.data.normal_(mean=0.0, std=std)
- if module.padding_idx is not None:
- module.weight.data[module.padding_idx].zero_()
-
- def _set_gradient_checkpointing(self, module, value=False):
- if isinstance(module, SkyworkModel):
- module.gradient_checkpointing = value
-
-class SkyworkModel(SkyworkPreTrainedModel):
- """
- Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`SkyworkDecoderLayer`]
-
- Args:
- config: SkyworkConfig
- """
-
- def __init__(self, config: SkyworkConfig):
- super().__init__(config)
- self.padding_idx = config.pad_token_id
- self.vocab_size = config.vocab_size
-
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
- self.layers = nn.ModuleList([SkyworkDecoderLayer(config) for _ in range(config.num_hidden_layers)])
- self.norm = SkyworkRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
- self.gradient_checkpointing = False
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.embed_tokens
-
- def set_input_embeddings(self, value):
- self.embed_tokens = value
-
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
- # create causal mask
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- combined_attention_mask = None
- if input_shape[-1] > 1:
- combined_attention_mask = _make_causal_mask(
- input_shape,
- inputs_embeds.dtype,
- device=inputs_embeds.device,
- past_key_values_length=past_key_values_length,
- )
-
- if attention_mask is not None:
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
- inputs_embeds.device
- )
- combined_attention_mask = (
- expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
- )
-
- return combined_attention_mask
-
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPast]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
-
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- # retrieve input_ids and inputs_embeds
- if input_ids is not None and inputs_embeds is not None:
-            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- batch_size, seq_length = input_ids.shape
- elif inputs_embeds is not None:
- batch_size, seq_length, _ = inputs_embeds.shape
- else:
-            raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- seq_length_with_past = seq_length
- past_key_values_length = 0
-
- if past_key_values is not None:
- past_key_values_length = past_key_values[0][0].shape[2]
- seq_length_with_past = seq_length_with_past + past_key_values_length
-
- if position_ids is None:
- device = input_ids.device if input_ids is not None else inputs_embeds.device
- position_ids = torch.arange(
- past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
- )
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
- else:
- position_ids = position_ids.view(-1, seq_length).long()
-
- if inputs_embeds is None:
- inputs_embeds = self.embed_tokens(input_ids)
- # embed positions
- if attention_mask is None:
- attention_mask = torch.ones(
- (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
- )
- attention_mask = self._prepare_decoder_attention_mask(
- attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
- )
-
- hidden_states = inputs_embeds
-
- if self.gradient_checkpointing and self.training:
- if use_cache:
- logger.warning_once(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- # decoder layers
- all_hidden_states = () if output_hidden_states else None
- all_self_attns = () if output_attentions else None
- next_decoder_cache = () if use_cache else None
-
- for idx, decoder_layer in enumerate(self.layers):
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
-
- past_key_value = past_key_values[idx] if past_key_values is not None else None
-
- if self.gradient_checkpointing and self.training:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- # None for past_key_value
- return module(*inputs, past_key_value, output_attentions)
-
- return custom_forward
-
- layer_outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(decoder_layer),
- hidden_states,
- attention_mask,
- position_ids,
- )
- else:
- layer_outputs = decoder_layer(
- hidden_states,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_value=past_key_value,
- output_attentions=output_attentions,
- use_cache=use_cache,
- )
-
- hidden_states = layer_outputs[0]
-
- if use_cache:
- next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
- if output_attentions:
- all_self_attns += (layer_outputs[1],)
-
- hidden_states = self.norm(hidden_states)
-
- # add hidden states from the last decoder layer
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
-
- next_cache = next_decoder_cache if use_cache else None
- if not return_dict:
- return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
- return BaseModelOutputWithPast(
- last_hidden_state=hidden_states,
- past_key_values=next_cache,
- hidden_states=all_hidden_states,
- attentions=all_self_attns,
- )
-
-
-class SkyworkForCausalLM(SkyworkPreTrainedModel):
- _tied_weights_keys = ["lm_head.weight"]
-
- def __init__(self, config):
- super().__init__(config)
- self.model = SkyworkModel(config)
- self.vocab_size = config.vocab_size
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.model.embed_tokens
-
- def set_input_embeddings(self, value):
- self.model.embed_tokens = value
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def set_decoder(self, decoder):
- self.model = decoder
-
- def get_decoder(self):
- return self.model
-
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, CausalLMOutputWithPast]:
-
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
- outputs = self.model(
- input_ids=input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- hidden_states = outputs[0]
- if self.config.pretraining_tp > 1:
- lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
- logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
- logits = torch.cat(logits, dim=-1)
- else:
- logits = self.lm_head(hidden_states)
- logits = logits.float()
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- # Flatten the tokens
- loss_fct = CrossEntropyLoss()
- shift_logits = shift_logits.view(-1, self.config.vocab_size)
- shift_labels = shift_labels.view(-1)
- # Enable model parallelism
- shift_labels = shift_labels.to(shift_logits.device)
- loss = loss_fct(shift_logits, shift_labels)
-
- if not return_dict:
- output = (logits,) + outputs[1:]
- return (loss,) + output if loss is not None else output
-
- return CausalLMOutputWithPast(
- loss=loss,
- logits=logits,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
- def prepare_inputs_for_generation(
- self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
- ):
- if past_key_values:
- input_ids = input_ids[:, -1:]
-
- position_ids = kwargs.get("position_ids", None)
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids.masked_fill_(attention_mask == 0, 1)
- if past_key_values:
- position_ids = position_ids[:, -1].unsqueeze(-1)
-
- # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
- if inputs_embeds is not None and past_key_values is None:
- model_inputs = {"inputs_embeds": inputs_embeds}
- else:
- model_inputs = {"input_ids": input_ids}
-
- model_inputs.update(
- {
- "position_ids": position_ids,
- "past_key_values": past_key_values,
- "use_cache": kwargs.get("use_cache"),
- "attention_mask": attention_mask,
- }
- )
- return model_inputs
-
- @staticmethod
- def _reorder_cache(past_key_values, beam_idx):
- reordered_past = ()
- for layer_past in past_key_values:
- reordered_past += (
- tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
- )
- return reordered_past
-
-
-class SkyworkForSequenceClassification(SkyworkPreTrainedModel):
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
- self.model = SkyworkModel(config)
- self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.model.embed_tokens
-
- def set_input_embeddings(self, value):
- self.model.embed_tokens = value
-
- def forward(
- self,
- input_ids: torch.LongTensor = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
-
-
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.model(
- input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = transformer_outputs[0]
- logits = self.score(hidden_states)
-
- if input_ids is not None:
- batch_size = input_ids.shape[0]
- else:
- batch_size = inputs_embeds.shape[0]
-
- if self.config.pad_token_id is None and batch_size != 1:
- raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
- if self.config.pad_token_id is None:
- sequence_lengths = -1
- else:
- if input_ids is not None:
- sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
- logits.device
- )
- else:
- sequence_lengths = -1
-
- pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
-
- loss = None
- if labels is not None:
- labels = labels.to(logits.device)
- if self.config.problem_type is None:
- if self.num_labels == 1:
- self.config.problem_type = "regression"
- elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
- self.config.problem_type = "single_label_classification"
- else:
- self.config.problem_type = "multi_label_classification"
-
- if self.config.problem_type == "regression":
- loss_fct = MSELoss()
- if self.num_labels == 1:
- loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
- else:
- loss = loss_fct(pooled_logits, labels)
- elif self.config.problem_type == "single_label_classification":
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
- elif self.config.problem_type == "multi_label_classification":
- loss_fct = BCEWithLogitsLoss()
- loss = loss_fct(pooled_logits, labels)
- if not return_dict:
- output = (pooled_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return SequenceClassifierOutputWithPast(
- loss=loss,
- logits=pooled_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00001-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00001-of-00053.bin
deleted file mode 100644
index 9303b7bd1e0c4c1eed4bba66ac2ed298e222e707..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00001-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:69626f67345dd2378ea1155f152804fb4886b151f2e43ebe3b2d6f33c80e606e
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00002-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00002-of-00053.bin
deleted file mode 100644
index dcdb1009bafbfb50ce114291f5f07f33ea5be3b2..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00002-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a7b651c6dde0c0a430a94dce24d3560bd07db9ed35f1f1cac9edd530e441b5f0
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00003-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00003-of-00053.bin
deleted file mode 100644
index 5d5fdd9878d5120cef78cf977aff6e879e53ba8f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00003-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8359b7ecc78b02a619751c96f60aec6fee4a2595db3f36cd61a5391838fc7ce1
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00004-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00004-of-00053.bin
deleted file mode 100644
index 4024f0cf462272635fbf74ec65a80485704ef0e3..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00004-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:69c333cb8fcfbe365e3bdcd260e5ff91601da65662a5718d002639937ef3cefb
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00005-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00005-of-00053.bin
deleted file mode 100644
index 63fb4ca1846d2cf5643a9e0e0f50e78e1335a607..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00005-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4277ccd1d2175e039075cc6fe2b95e213a590e9eabd35ef26785b998b4f2ad84
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00006-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00006-of-00053.bin
deleted file mode 100644
index 574726695542cd009aa60e9a5ed445a95b34aeef..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00006-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:596649ca586587c17956487cda102ce7ce3c5c950ad89ba1bb8c9ef9224b5a01
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00007-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00007-of-00053.bin
deleted file mode 100644
index 6012fb8d748d3dc45c04f3e5d2fe614eb2c67de3..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00007-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:953fdae0c98a276f579647ab7595cf9548ec3e46cc433364ba23cfa9b2a77e0a
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00008-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00008-of-00053.bin
deleted file mode 100644
index 81e5ee182890730806fc79a669c386d93a76fa4f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00008-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9648394bc03bc87c8913c31893fbdc55dbbbfeaf7041fc4e5b4946469261f026
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00009-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00009-of-00053.bin
deleted file mode 100644
index 481f3aeda71f79884e3f495815184f6bd3c6808f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00009-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:72c026f7373e85d29e17dd6501c0622da44211273993cc641da8551468a8063d
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00010-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00010-of-00053.bin
deleted file mode 100644
index 6cb445096e2cfc9a7e582b4dce625fabcd1814d8..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00010-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9b1f53581597d14404ffd763f646f2a8346f89d3e84fd17f88ea0bc779bcd8cf
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00011-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00011-of-00053.bin
deleted file mode 100644
index 60b7d4629b2acfb518c405970cf59848a87dc9dd..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00011-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4be0dc98193a0e3f421c7d2ebe1fa910ea62d70e9bb32b0f4cac7b69326c550c
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00012-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00012-of-00053.bin
deleted file mode 100644
index 41468e1d8288ca66cc1448da451c226a040a69a6..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00012-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94f369d5ba5c26dbf9b2b4ab27803068b220db8a38db5ac644eb24fa79a7b963
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00013-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00013-of-00053.bin
deleted file mode 100644
index 4ae8dfaf384070f2ef564c0a71538a15d17eadc9..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00013-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b766e0e9ceb543f6ca5ebd5e4dac937bf58a7e14a3d13e1ae6d50fec820bfb8b
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00014-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00014-of-00053.bin
deleted file mode 100644
index 92f6d874ba17d482860ab107ef6f1deda2e8e9fa..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00014-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a5806356cccd558f1d05643ba00e23db77a8c06a174bfa5831efcd8283582776
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00015-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00015-of-00053.bin
deleted file mode 100644
index d0c0586f395d56fea43df48e08037f3b8ee208a0..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00015-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d198045292b6d3d0a78f907d67491541e4e5768feb9dfcde5dda18a7f8d8cdfa
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00016-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00016-of-00053.bin
deleted file mode 100644
index 78bc25d0818412b843a18d3bd5c3d62b17e1e706..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00016-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:db78b41b807f6c7b1bb6aa7372dbc14c9da9ce0cd4d7c4e89955cfa6e4400f0b
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00017-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00017-of-00053.bin
deleted file mode 100644
index 56d1a731b4c11f3dbe1d719f64adc5a2f58711b1..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00017-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:87ef520c42607a36a63f8c7e5e6513e46ccdcbaa077f6a7fa0a17f5663a19cae
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00018-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00018-of-00053.bin
deleted file mode 100644
index 1ca52ae7937f95f69459ae1a2f6bdae86feb15f2..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00018-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a43230a0381dc5d4a45f874376d6d1b7c24fb02dd6d9ad62dc934fa4bdbf22c9
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00019-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00019-of-00053.bin
deleted file mode 100644
index d80b64ef4bc1cd6eabfbe096b357bb653dc0ed97..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00019-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4ea20023ae3238034245a47e2d782fa43798074640af65d355d08b5e2c7a3968
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00020-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00020-of-00053.bin
deleted file mode 100644
index d0fe303c992941ba78a23a881c45da8bc4b45111..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00020-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:18465d3d867b0c517d4dcdf61c8088e26a7a16c42d5d17e6efa1d685725d5028
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00021-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00021-of-00053.bin
deleted file mode 100644
index 922d42ae5fb9fbbf9de285483b646e378260b49f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00021-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:977e108f2584ff9a4f846f3ec4b515c4e28216b1439df82adb7e611a452929a7
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00022-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00022-of-00053.bin
deleted file mode 100644
index dce9fe1c16c68f81ff9dd601f7582ec91925ad30..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00022-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7236496347fda53cf7d1c4daef8c380b6e42d1ec0a6edb44fd9d0e38b6db6419
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00023-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00023-of-00053.bin
deleted file mode 100644
index 9d428b9c6d69735100d76b81c47b0ce76807de23..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00023-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b17bedf66127af7638e660befbc5defd047024d8b63fcd590626af272e3c5cc1
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00024-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00024-of-00053.bin
deleted file mode 100644
index efd20cdb648b1be9123028d24e07a1476115b4f3..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00024-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e76c6c43337c94f45fd1b3f3365d14dfabbfd38cfc9074db75b18d9504052d67
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00025-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00025-of-00053.bin
deleted file mode 100644
index 672fc5bddd84cd5fbfa955e0b553a00c6f840f57..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00025-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:250d260874617a3944545996ba6bd0291b5c748a6e8c603a235fb1f882a93b47
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00026-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00026-of-00053.bin
deleted file mode 100644
index 7cf3dcf75f8dd2e212cf73cc561e3cb36efb544e..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00026-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:52023afeed136ba561793db69a90c23655f637175890e19080a01f62f87cdac9
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00027-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00027-of-00053.bin
deleted file mode 100644
index 6c2a64b4b153e2785898051928094355946da704..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00027-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:54d3e82f5176315c470e4615fd5e1be85383351077fcceb684ab436f3e6796a1
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00028-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00028-of-00053.bin
deleted file mode 100644
index 3d8ac9971e13ccde8cfa6e35889c7e1cd49eebc6..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00028-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7aff27ef911125a991c6b58d8e809b319e847ff558cd12d49b025e06d3c5728e
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00029-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00029-of-00053.bin
deleted file mode 100644
index 021f0c6884915f5bbad2ce59add724002d7c795f..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00029-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c1832c51bef229f9014fa518c5cff4dcc39723ab853c880335796f5740914c80
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00030-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00030-of-00053.bin
deleted file mode 100644
index ef1154485bf75cac5ec4aba6f77743698a0b725d..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00030-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4a72d1a732e9a99abf661f5e109c589635d1a86886bac7f2aa80a7b6c409f8be
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00031-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00031-of-00053.bin
deleted file mode 100644
index 05ea64b551989a86be125b2c6517d56b364361c4..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00031-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4114c138eb5a1bf68bd29a125c31e24201c3a5cf95ec3757c2b2f366b45befe6
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00032-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00032-of-00053.bin
deleted file mode 100644
index 4d8187ebb92ce48307016ab9bc49b4d5b8fa3a8d..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00032-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:31aaa207f4a4c1042868c0320cec86c8f7aed5ba0940c2552f9c6be88526398a
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00033-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00033-of-00053.bin
deleted file mode 100644
index 14b2e3af217d371f819d0edaa3e3a55e26609dd5..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00033-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:abf15bdd7289fabf72251cd45026e78b6bdd5c8a8c849d3ab4658521a9b5383e
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00034-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00034-of-00053.bin
deleted file mode 100644
index 1f2f4b8ae797e2d88866545c33b82c3b1e198cc6..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00034-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a7ce1cb13ef764d3da8c01e65a7cb12e9b78fc7410555441851ad05138cbfdbc
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00035-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00035-of-00053.bin
deleted file mode 100644
index 0375cdd22a38fe10a37583b404a5e62c98fd47e5..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00035-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b0c95670bea6f294572d6e0c7d6f410a378ed546c8beb239ee86d4b858574096
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00036-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00036-of-00053.bin
deleted file mode 100644
index 37b06dd03e963ce25d9905478a30e9e40cdb9ee2..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00036-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:de7e7cc3ece84abbc58aa053644369e5efce366da96dfd94a70477eaacc4edcc
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00037-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00037-of-00053.bin
deleted file mode 100644
index 9ea37af207c1e8b71536cae9251fc36fbff4bcfe..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00037-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:46c8254b6f12d8bb032f4cfa9c4596f06692eb307cfd2a617d7c4639282fdcf4
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00038-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00038-of-00053.bin
deleted file mode 100644
index 6e786f61dc0a4454552c2130124ca3cd2d48b8d8..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00038-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6784ebd555e163c58262116bd192a2bc2679d30311c8387fb8a652f9ae3d082b
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00039-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00039-of-00053.bin
deleted file mode 100644
index 8abae969626746f1d9648ed3a78bda0993616fe6..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00039-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:30db78de4bdbedd9066f08a8f0fc4fa6558ec22503df34d289bcad5dee26dbb8
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00040-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00040-of-00053.bin
deleted file mode 100644
index c94c8fe92d17c6dc5c5d241f95c554e768bed582..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00040-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:571fe808760c495bf1b97396ec9ef4ce3bd5c1003d692dfcb7d3ad7197023a67
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00041-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00041-of-00053.bin
deleted file mode 100644
index a62db24345c4816505c53beb85ae4017f3ff6f56..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00041-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9ca91a4280c92688d97088bf2d3f1a0da6dcbed864f91f73ae7cec4bfc8496a0
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00042-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00042-of-00053.bin
deleted file mode 100644
index 284845b41de77d53b1263c9eea855e595190cfa0..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00042-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:572a6a020f42889d4e41596017022e880a4565cac9fa7b6072e6db21955dee78
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00043-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00043-of-00053.bin
deleted file mode 100644
index f673217e823bb804fe724f26ec97411b100ecac6..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00043-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:59ebb7958ba6de1cd89e31ef83e6786eacc07d8de467234543a6356466e820e0
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00044-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00044-of-00053.bin
deleted file mode 100644
index be8fcca5a22cc82a0dbc2bfd4ec35c6faf4942a1..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00044-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e3b364a668f019e0896567dfcde7ed9d131bef3b602b4bb9e932430820ca4101
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00045-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00045-of-00053.bin
deleted file mode 100644
index 237fc1d08a95e3e454c33aa901cb79f97f45649d..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00045-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c1c4819c9218302cfe4ad737bc079eb75aff0c58f854e82d391de66b8ae8724c
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00046-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00046-of-00053.bin
deleted file mode 100644
index 015a269508b74e3615d58284fee375abf18abfff..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00046-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:80692b714d33ec725a47009bec70a296365667698ea8d8b49c48d49cf753f969
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00047-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00047-of-00053.bin
deleted file mode 100644
index 1f9c9d4d9f5aa5316ff01205bf479642bf8b8455..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00047-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cf8cf7e93f03160ef12731981c49e2295a85df6daa06aeb5f9ba02c486dabb6a
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00048-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00048-of-00053.bin
deleted file mode 100644
index e2c01149adf9d712ad6ed00049f00260aa32e0f1..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00048-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08ea3cc15bd70ec948e83cdce1f857dd7580c2924acddec3215469c59b9c256b
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00049-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00049-of-00053.bin
deleted file mode 100644
index d6fdd0c26be778a728c2fba5e54e31198c73c684..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00049-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:25dcf34c85d0475596421183723a163a1a09b9964f9375ed671935d0060994dd
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00050-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00050-of-00053.bin
deleted file mode 100644
index e9681156ba7959f9f007aa6d9a60c5076fa2af08..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00050-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bfd81aa8a0213eb25a9272944191d3c504de4eb60cba099b8c9f29093b55034e
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00051-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00051-of-00053.bin
deleted file mode 100644
index a8bd944ef9e20bc610746133db68a98e1673c09a..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00051-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8e815bbf127cb17a72b0be5bf29d03fcd42b9d2e7824f96ff0c897433fe22f31
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00052-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00052-of-00053.bin
deleted file mode 100644
index b4b3a925263fbdd2695e5058d70c059ec2f6c68d..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00052-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a52f7e43ffa1d8003d4671d77c2ad6e9f7495889b2af607b42c0055a0aeef8f5
-size 509630194
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00053-of-00053.bin b/model_hubs/Skywork-13B-Base-3T/pytorch_model-00053-of-00053.bin
deleted file mode 100644
index ff029f005d1bad7adeef623f62b3d864a8fa4acf..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model-00053-of-00053.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3fad3641e4921e518a700f4d6c0b86299ee2343b263190e7bce5968e98afde8d
-size 1207656908
diff --git a/model_hubs/Skywork-13B-Base-3T/pytorch_model.bin.index.json b/model_hubs/Skywork-13B-Base-3T/pytorch_model.bin.index.json
deleted file mode 100644
index 163c37a78b34efe7cc858ea3fdca93e4c7c25699..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/pytorch_model.bin.index.json
+++ /dev/null
@@ -1 +0,0 @@
-{"metadata": {"total_size": 27708239872}, "weight_map": {"model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00053.bin", "model.layers.1.input_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.up_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.1.mlp.down_proj.weight": "pytorch_model-00002-of-00053.bin", "model.layers.2.input_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.up_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.2.mlp.down_proj.weight": "pytorch_model-00003-of-00053.bin", "model.layers.3.input_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.up_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.3.mlp.down_proj.weight": "pytorch_model-00004-of-00053.bin", "model.layers.4.input_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.v_proj.weight": 
"pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.up_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.4.mlp.down_proj.weight": "pytorch_model-00005-of-00053.bin", "model.layers.5.input_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.up_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.5.mlp.down_proj.weight": "pytorch_model-00006-of-00053.bin", "model.layers.6.input_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.up_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.6.mlp.down_proj.weight": "pytorch_model-00007-of-00053.bin", "model.layers.7.input_layernorm.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.up_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.7.mlp.down_proj.weight": "pytorch_model-00008-of-00053.bin", "model.layers.8.input_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.up_proj.weight": "pytorch_model-00009-of-00053.bin", "model.layers.8.mlp.down_proj.weight": "pytorch_model-00009-of-00053.bin", 
"model.layers.9.input_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.up_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.9.mlp.down_proj.weight": "pytorch_model-00010-of-00053.bin", "model.layers.10.input_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.up_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.10.mlp.down_proj.weight": "pytorch_model-00011-of-00053.bin", "model.layers.11.input_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.up_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.11.mlp.down_proj.weight": "pytorch_model-00012-of-00053.bin", "model.layers.12.input_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.up_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.12.mlp.down_proj.weight": "pytorch_model-00013-of-00053.bin", "model.layers.13.input_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.v_proj.weight": 
"pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.up_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.13.mlp.down_proj.weight": "pytorch_model-00014-of-00053.bin", "model.layers.14.input_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.up_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.14.mlp.down_proj.weight": "pytorch_model-00015-of-00053.bin", "model.layers.15.input_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.up_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.15.mlp.down_proj.weight": "pytorch_model-00016-of-00053.bin", "model.layers.16.input_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.up_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.16.mlp.down_proj.weight": "pytorch_model-00017-of-00053.bin", "model.layers.17.input_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00018-of-00053.bin", "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.17.mlp.up_proj.weight": "pytorch_model-00018-of-00053.bin", 
"model.layers.17.mlp.down_proj.weight": "pytorch_model-00018-of-00053.bin", "model.layers.18.input_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.up_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.18.mlp.down_proj.weight": "pytorch_model-00019-of-00053.bin", "model.layers.19.input_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.up_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.19.mlp.down_proj.weight": "pytorch_model-00020-of-00053.bin", "model.layers.20.input_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.up_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.20.mlp.down_proj.weight": "pytorch_model-00021-of-00053.bin", "model.layers.21.input_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.up_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.21.mlp.down_proj.weight": "pytorch_model-00022-of-00053.bin", "model.layers.22.input_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.k_proj.weight": 
"pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.up_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.22.mlp.down_proj.weight": "pytorch_model-00023-of-00053.bin", "model.layers.23.input_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.up_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.23.mlp.down_proj.weight": "pytorch_model-00024-of-00053.bin", "model.layers.24.input_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.up_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.24.mlp.down_proj.weight": "pytorch_model-00025-of-00053.bin", "model.layers.25.input_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.up_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.25.mlp.down_proj.weight": "pytorch_model-00026-of-00053.bin", "model.layers.26.input_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00027-of-00053.bin", "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00027-of-00053.bin", 
"model.layers.26.mlp.up_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.26.mlp.down_proj.weight": "pytorch_model-00027-of-00053.bin", "model.layers.27.input_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.up_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.27.mlp.down_proj.weight": "pytorch_model-00028-of-00053.bin", "model.layers.28.input_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.up_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.28.mlp.down_proj.weight": "pytorch_model-00029-of-00053.bin", "model.layers.29.input_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.up_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.29.mlp.down_proj.weight": "pytorch_model-00030-of-00053.bin", "model.layers.30.input_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.up_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.30.mlp.down_proj.weight": "pytorch_model-00031-of-00053.bin", "model.layers.31.input_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.q_proj.weight": 
"pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.up_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.31.mlp.down_proj.weight": "pytorch_model-00032-of-00053.bin", "model.layers.32.input_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.up_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.32.mlp.down_proj.weight": "pytorch_model-00033-of-00053.bin", "model.layers.33.input_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.up_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.33.mlp.down_proj.weight": "pytorch_model-00034-of-00053.bin", "model.layers.34.input_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.up_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.34.mlp.down_proj.weight": "pytorch_model-00035-of-00053.bin", "model.layers.35.input_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00036-of-00053.bin", 
"model.layers.35.mlp.gate_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.mlp.up_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.35.mlp.down_proj.weight": "pytorch_model-00036-of-00053.bin", "model.layers.36.input_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.up_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.36.mlp.down_proj.weight": "pytorch_model-00037-of-00053.bin", "model.layers.37.input_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.up_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.37.mlp.down_proj.weight": "pytorch_model-00038-of-00053.bin", "model.layers.38.input_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.up_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.38.mlp.down_proj.weight": "pytorch_model-00039-of-00053.bin", "model.layers.39.input_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.up_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.39.mlp.down_proj.weight": "pytorch_model-00040-of-00053.bin", "model.layers.40.input_layernorm.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.post_attention_layernorm.weight": 
"pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.q_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.k_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.v_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.o_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.self_attn.rotary_emb.inv_freq": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.gate_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.up_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.40.mlp.down_proj.weight": "pytorch_model-00041-of-00053.bin", "model.layers.41.input_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.post_attention_layernorm.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.q_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.k_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.v_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.o_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.self_attn.rotary_emb.inv_freq": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.gate_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.up_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.41.mlp.down_proj.weight": "pytorch_model-00042-of-00053.bin", "model.layers.42.input_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.post_attention_layernorm.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.q_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.k_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.v_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.o_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.self_attn.rotary_emb.inv_freq": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.gate_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.up_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.42.mlp.down_proj.weight": "pytorch_model-00043-of-00053.bin", "model.layers.43.input_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.post_attention_layernorm.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.q_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.k_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.v_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.o_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.self_attn.rotary_emb.inv_freq": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.gate_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.up_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.43.mlp.down_proj.weight": "pytorch_model-00044-of-00053.bin", "model.layers.44.input_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.post_attention_layernorm.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.q_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.k_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.v_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.self_attn.o_proj.weight": "pytorch_model-00045-of-00053.bin", 
"model.layers.44.self_attn.rotary_emb.inv_freq": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.gate_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.up_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.44.mlp.down_proj.weight": "pytorch_model-00045-of-00053.bin", "model.layers.45.input_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.post_attention_layernorm.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.q_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.k_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.v_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.o_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.self_attn.rotary_emb.inv_freq": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.gate_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.up_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.45.mlp.down_proj.weight": "pytorch_model-00046-of-00053.bin", "model.layers.46.input_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.post_attention_layernorm.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.q_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.k_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.v_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.o_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.self_attn.rotary_emb.inv_freq": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.gate_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.up_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.46.mlp.down_proj.weight": "pytorch_model-00047-of-00053.bin", "model.layers.47.input_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.post_attention_layernorm.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.q_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.k_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.v_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.o_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.self_attn.rotary_emb.inv_freq": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.gate_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.up_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.47.mlp.down_proj.weight": "pytorch_model-00048-of-00053.bin", "model.layers.48.input_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.post_attention_layernorm.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.q_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.k_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.v_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.o_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.self_attn.rotary_emb.inv_freq": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.gate_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.up_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.48.mlp.down_proj.weight": "pytorch_model-00049-of-00053.bin", "model.layers.49.input_layernorm.weight": 
"pytorch_model-00050-of-00053.bin", "model.layers.49.post_attention_layernorm.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.q_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.k_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.v_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.o_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.self_attn.rotary_emb.inv_freq": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.gate_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.up_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.49.mlp.down_proj.weight": "pytorch_model-00050-of-00053.bin", "model.layers.50.input_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.post_attention_layernorm.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.q_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.k_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.v_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.o_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.self_attn.rotary_emb.inv_freq": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.gate_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.up_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.50.mlp.down_proj.weight": "pytorch_model-00051-of-00053.bin", "model.layers.51.input_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.post_attention_layernorm.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.q_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.k_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.v_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.o_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.self_attn.rotary_emb.inv_freq": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.gate_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.up_proj.weight": "pytorch_model-00052-of-00053.bin", "model.layers.51.mlp.down_proj.weight": "pytorch_model-00052-of-00053.bin", "model.norm.weight": "pytorch_model-00053-of-00053.bin", "model.embed_tokens.weight": "pytorch_model-00053-of-00053.bin", "lm_head.weight": "pytorch_model-00053-of-00053.bin"}}
\ No newline at end of file
diff --git a/model_hubs/Skywork-13B-Base-3T/special_tokens_map.json b/model_hubs/Skywork-13B-Base-3T/special_tokens_map.json
deleted file mode 100644
index d85ba6cb6820b01226ef8bd40b46bb489041c6a8..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/special_tokens_map.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "bos_token": {
-    "content": "<s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
-    "content": "</s>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "unk_token": {
-    "content": "<unk>",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- }
-}
diff --git a/model_hubs/Skywork-13B-Base-3T/tokenization_skywork.py b/model_hubs/Skywork-13B-Base-3T/tokenization_skywork.py
deleted file mode 100644
index ac378d77d2d90d17340b3cb8eaf91bdb1656b71d..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/tokenization_skywork.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright (c) SkyworkAI and the HuggingFace Inc. team. All rights reserved.
-# This code is built upon Huggingface's transformers repository.
-
-"""Tokenization classes for Skywork."""
-import os
-from shutil import copyfile
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
-
-import sentencepiece as spm
-
-from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
-from transformers.utils import logging
-
-if TYPE_CHECKING:
- from transformers.pipelines.conversational import Conversation
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
-
-
-SPIECE_UNDERLINE = "▁"
-
-B_INST, E_INST = "[INST]", "[/INST]"
-B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
-
-DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
-that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
-
-class SkyworkTokenizer(PreTrainedTokenizer):
-
- vocab_files_names = VOCAB_FILES_NAMES
- # pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
- # max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
- model_input_names = ["input_ids", "attention_mask"]
-
- def __init__(
- self,
- vocab_file,
-        unk_token="<unk>",
-        bos_token="<s>",
-        eos_token="</s>",
- pad_token=None,
- sp_model_kwargs: Optional[Dict[str, Any]] = None,
- add_bos_token=True,
- add_eos_token=False,
- clean_up_tokenization_spaces=False,
- legacy=True,
- **kwargs,
- ):
- self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
- bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
- eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
- unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
- pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
- self.legacy = legacy
- self.vocab_file = vocab_file
- self.add_bos_token = add_bos_token
- self.add_eos_token = add_eos_token
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.Load(vocab_file)
- super().__init__(
- bos_token=bos_token,
- eos_token=eos_token,
- unk_token=unk_token,
- pad_token=pad_token,
- add_bos_token=add_bos_token,
- add_eos_token=add_eos_token,
- sp_model_kwargs=self.sp_model_kwargs,
- clean_up_tokenization_spaces=clean_up_tokenization_spaces,
- legacy=legacy,
- **kwargs,
- )
- if legacy:
- logger.warning_once(
- f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. "
- )
-
-
- def __getstate__(self):
- state = self.__dict__.copy()
- state["sp_model"] = None
- state["sp_model_proto"] = self.sp_model.serialized_model_proto()
- return state
-
- def __setstate__(self, d):
- self.__dict__ = d
- self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
- self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
-
- @property
- def vocab_size(self):
- """Returns vocab size"""
- return self.sp_model.get_piece_size()
-
- def get_vocab(self):
- """Returns vocab as a dict"""
- vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
- vocab.update(self.added_tokens_encoder)
- return vocab
-
- # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
- def tokenize(self, text, **kwargs) -> List[str]:
- # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
- # the beginning of the text
- if not self.legacy:
- text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
- return super().tokenize(text, **kwargs)
-
- # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
- def _tokenize(self, text):
- if not self.legacy:
- is_first = text.startswith(SPIECE_UNDERLINE)
- if is_first:
- text = text[1:]
-
- tokens = self.sp_model.encode(text, out_type=str)
-
- if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
- tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
- return tokens
-
- def _convert_token_to_id(self, token):
- """Converts a token (str) in an id using the vocab."""
- return self.sp_model.piece_to_id(token)
-
- def _convert_id_to_token(self, index):
- """Converts an index (integer) in a token (str) using the vocab."""
- token = self.sp_model.IdToPiece(index)
- return token
-
- def convert_tokens_to_string(self, tokens):
- """Converts a sequence of tokens (string) in a single string."""
- current_sub_tokens = []
- out_string = ""
- prev_is_special = False
- for i, token in enumerate(tokens):
- # make sure that special tokens are not decoded using sentencepiece model
- if token in self.all_special_tokens:
- if not prev_is_special and i != 0:
- out_string += " "
- out_string += self.sp_model.decode(current_sub_tokens) + token
- prev_is_special = True
- current_sub_tokens = []
- else:
- current_sub_tokens.append(token)
- prev_is_special = False
- out_string += self.sp_model.decode(current_sub_tokens)
- return out_string
-
- def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
- if not os.path.isdir(save_directory):
- logger.error(f"Vocabulary path ({save_directory}) should be a directory")
- return
- out_vocab_file = os.path.join(
- save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
- )
-
- if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
- copyfile(self.vocab_file, out_vocab_file)
- elif not os.path.isfile(self.vocab_file):
- with open(out_vocab_file, "wb") as fi:
- content_spiece_model = self.sp_model.serialized_model_proto()
- fi.write(content_spiece_model)
-
- return (out_vocab_file,)
-
- def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
- bos_token_id = [self.bos_token_id] if self.add_bos_token else []
- eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
- output = bos_token_id + token_ids_0 + eos_token_id
-
- if token_ids_1 is not None:
- output = output + bos_token_id + token_ids_1 + eos_token_id
-
- return output
-
- def get_special_tokens_mask(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
- ) -> List[int]:
- if already_has_special_tokens:
- return super().get_special_tokens_mask(
- token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
- )
-
- bos_token_id = [1] if self.add_bos_token else []
- eos_token_id = [1] if self.add_eos_token else []
-
- if token_ids_1 is None:
- return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
- return (
- bos_token_id
- + ([0] * len(token_ids_0))
- + eos_token_id
- + bos_token_id
- + ([0] * len(token_ids_1))
- + eos_token_id
- )
-
- def create_token_type_ids_from_sequences(
- self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
- ) -> List[int]:
- bos_token_id = [self.bos_token_id] if self.add_bos_token else []
- eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
- output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
-
- if token_ids_1 is not None:
- output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
-
- return output
-
- def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
- dialogue = list(conversation.iter_texts())
- if not all([is_user for is_user, msg in dialogue[::2]]) or not all(
- [not is_user for is_user, msg in dialogue[1::2]]
- ):
- raise ValueError(
- "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)"
- )
-
- dialog_tokens: List[int] = []
- if len(conversation.past_user_inputs) > 0:
- if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:
- conversation.past_user_inputs[0] = (
- B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]
- )
- elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]:
- dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1])
-
- dialog_tokens += sum(
- [
- [self.bos_token_id]
- + self.encode(
- f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False
- )
- + [self.eos_token_id]
- for prompt, answer in zip(dialogue[::2], dialogue[1::2])
- ],
- [],
- )
- if not dialogue[-1][0]:
- raise ValueError("Last message must be from the user, but the final message in the conversation came from the assistant")
- dialog_tokens += [self.bos_token_id] + self.encode(
- f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False
- )
- return dialog_tokens
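
For orientation, a minimal standalone sketch of the Llama-2-style chat layout that the deleted _build_conversation_input_ids method assembles. The chat-marker constants (B_INST, E_INST, B_SYS, E_SYS, DEFAULT_SYSTEM_PROMPT) are defined earlier in the deleted file and are not visible in this hunk; the values below are assumptions, not taken from this diff.

from typing import List, Tuple

B_INST, E_INST = "[INST]", "[/INST]"            # assumed Llama-2 chat markers
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"    # assumed system-prompt markers
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."  # placeholder prompt

def render_dialogue(dialogue: List[Tuple[bool, str]]) -> List[str]:
    """Return one prompt chunk per (user, assistant) round, plus the trailing user turn.

    In the deleted method, each chunk is additionally wrapped in bos/eos token ids
    before the pieces are concatenated into a single id sequence.
    """
    if not dialogue[0][1].startswith(B_SYS):
        # Inject the default system prompt into the first user message, as the method above does.
        dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1])
    chunks = [
        f"{B_INST} {prompt.strip()} {E_INST} {answer.strip()} "
        for (_, prompt), (_, answer) in zip(dialogue[::2], dialogue[1::2])
    ]
    if dialogue[-1][0]:  # conversation ends on a user turn that still awaits an answer
        chunks.append(f"{B_INST} {dialogue[-1][1].strip()} {E_INST}")
    return chunks
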
diff --git a/model_hubs/Skywork-13B-Base-3T/tokenizer.model b/model_hubs/Skywork-13B-Base-3T/tokenizer.model
deleted file mode 100644
index decbfe220922d6a38ff52541ef3927b97fb7893e..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/tokenizer.model
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:36ec9a4d6fd7cc78fbb9e4afd89fb04cba0381b08a842ca0b60826073821f594
-size 994250
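
The deleted tokenizer.model entry above is a Git LFS pointer, not the SentencePiece model itself. A minimal sketch, assuming the real blob has been fetched separately, of how such a pointer can be parsed and checked against the recorded sha256 and byte size:

import hashlib

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Read the version/oid/size fields of a Git LFS pointer file."""
    with open(pointer_path, encoding="utf-8") as fh:
        fields = dict(line.split(" ", 1) for line in fh.read().splitlines() if line)
    return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

def blob_matches_pointer(blob_path: str, pointer_path: str) -> bool:
    """Check a locally fetched blob against the sha256 and byte size recorded in the pointer."""
    meta = parse_lfs_pointer(pointer_path)
    with open(blob_path, "rb") as fh:
        data = fh.read()
    return hashlib.sha256(data).hexdigest() == meta["oid"] and len(data) == meta["size"]

Fetching the blob itself is normally done with git lfs pull (or git lfs fetch followed by git lfs checkout); the helper above only verifies the result.
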
diff --git a/model_hubs/Skywork-13B-Base-3T/tokenizer_config.json b/model_hubs/Skywork-13B-Base-3T/tokenizer_config.json
deleted file mode 100644
index 9c232b8b78a3ad2ce894b9a17628f3821627ccd7..0000000000000000000000000000000000000000
--- a/model_hubs/Skywork-13B-Base-3T/tokenizer_config.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "add_bos_token": true,
- "add_eos_token": false,
- "bos_token": {
- "__type": "AddedToken",
- "content": "",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "clean_up_tokenization_spaces": false,
- "eos_token": {
- "__type": "AddedToken",
- "content": "",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "legacy": true,
- "model_max_length": 1000000000000000019884624838656,
- "pad_token": null,
- "sp_model_kwargs": {},
- "tokenizer_class": "SkyworkTokenizer",
- "unk_token": {
- "__type": "AddedToken",
- "content": "",
- "lstrip": false,
- "normalized": true,
- "rstrip": false,
- "single_word": false
- },
- "auto_map": {
- "AutoTokenizer": [
- "tokenization_skywork.SkyworkTokenizer",
- null
- ]
- }
-}
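
Given the auto_map and token settings in the deleted tokenizer_config.json, a checkpoint restored from these files would typically be loaded as sketched below. The path is the directory this diff removes, assumed to be restored locally; this is an illustrative sketch, not an instruction from the repository.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "model_hubs/Skywork-13B-Base-3T",
    trust_remote_code=True,  # required so auto_map can import tokenization_skywork.SkyworkTokenizer
)
ids = tok("Hello, Skywork!").input_ids
# With add_bos_token=true and add_eos_token=false, a single bos id is prepended and no eos id is appended.
assert ids[0] == tok.bos_token_id
print(tok.convert_ids_to_tokens(ids))
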