phoebeklett committed
Commit 8cc3046 · 1 parent: c661a4d
Update modeling_mpt.py
Files changed: modeling_mpt.py (+4, -4)
modeling_mpt.py CHANGED
@@ -27,10 +27,10 @@ from llmfoundry.models.layers.custom_embedding import SharedEmbedding
 from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
 from llmfoundry.models.utils.param_init_fns import MODEL_INIT_REGISTRY

-from
-from
-from
-from
+from .mpt.configuration import ExtendedMPTConfig
+from .mpt.attention import attn_bias_shape, build_attn_bias
+from .mpt.blocks import MPTBlock
+from .utils import instantiate_from_config

 Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
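The four rewritten imports are package-relative (.mpt.configuration, .mpt.attention, .mpt.blocks, .utils), the pattern Hugging Face custom model code uses so that modeling_mpt.py can resolve the sibling modules shipped in the same repo when it is loaded through transformers' remote-code path. A minimal loading sketch under that assumption; the repo id below is a placeholder, not taken from this commit, and it presumes the repo's config.json maps AutoConfig/AutoModelForCausalLM to these custom classes:

# Minimal sketch (assumptions: the repo is consumed via trust_remote_code,
# and its auto_map points at ExtendedMPTConfig and the custom model class;
# the repo id is a hypothetical placeholder).
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "your-namespace/your-extended-mpt-repo"  # hypothetical repo id

# With the imports above made package-relative, modeling_mpt.py can find
# the .mpt and .utils modules that ship alongside it in the Hub repo.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    config=config,
    trust_remote_code=True,
)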