change
- modeling_monkey.py +1 -1
- modeling_qwen.py +3 -3
modeling_monkey.py
@@ -26,7 +26,7 @@ try:
 except ImportError:
     rearrange = None
 from torch import nn
-from modeling_qwen import QWenModel,QWenPreTrainedModel,QWenLMHeadModel
+from .modeling_qwen import QWenModel,QWenPreTrainedModel,QWenLMHeadModel
 SUPPORT_CUDA = torch.cuda.is_available()
 SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
 SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
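Context for the one-character fix: when a checkpoint ships its own modeling code, transformers' dynamic module loader copies these files into a local package and imports them as submodules of it, so a bare `from modeling_qwen import ...` raises `ModuleNotFoundError` unless the repo directory itself happens to be on `sys.path`. The package-relative form resolves against the sibling file instead. A minimal runnable sketch of the difference; the package layout and names here are illustrative, not the loader's real directory structure:

import importlib
import pathlib
import sys
import tempfile

# Build a throwaway package that mimics a model repo with sibling files.
root = pathlib.Path(tempfile.mkdtemp())
pkg = root / "remote_repo"
pkg.mkdir()
(pkg / "__init__.py").write_text("")
(pkg / "modeling_qwen.py").write_text("class QWenModel: ...\n")

# Absolute import: Python searches sys.path for a top-level `modeling_qwen`.
(pkg / "abs_import.py").write_text("from modeling_qwen import QWenModel\n")
# Relative import: resolved against the containing package.
(pkg / "rel_import.py").write_text("from .modeling_qwen import QWenModel\n")

sys.path.insert(0, str(root))  # the parent is importable; the repo dir is not

try:
    importlib.import_module("remote_repo.abs_import")
except ModuleNotFoundError as e:
    print("absolute import failed:", e)  # modeling_qwen is not on sys.path

mod = importlib.import_module("remote_repo.rel_import")
print("relative import ok:", mod.QWenModel)  # sibling found via the package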
modeling_qwen.py
@@ -36,15 +36,15 @@ SUPPORT_CUDA = torch.cuda.is_available()
 SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
 SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
 
-from configuration_qwen import QWenConfig
-from qwen_generation_utils import (
+from .configuration_qwen import QWenConfig
+from .qwen_generation_utils import (
     HistoryType,
     make_context,
     decode_tokens,
     get_stop_words_ids,
     StopWordsLogitsProcessor,
 )
-from visual import VisionTransformer
+from .visual import VisionTransformer
 
 
 logger = logging.get_logger(__name__)
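With all three imports made package-relative, the repo's custom code can be loaded directly through the auto classes. A hedged usage sketch; the repo id `echo840/Monkey` is an assumption, substitute the actual checkpoint:

from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "echo840/Monkey"  # assumed repo id for the Monkey checkpoint

tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    trust_remote_code=True,  # executes modeling_monkey.py shipped with the repo
    device_map="cuda",       # BF16/FP16 support is probed at import time above
).eval()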