from transformers import BertTokenizer, BertModel, BertConfig, BertGenerationDecoder
from torch import nn
import torch
from utils.common.log import logger


# pretrained checkpoint shared by all models in this file
bert_model_tag = 'bert-base-multilingual-cased'


class BertForSenCls(nn.Module):
    """BERT with a linear head for sentence-level (sequence) classification."""
    def __init__(self, num_classes):
        super(BertForSenCls, self).__init__()
        
        logger.info(f'init bert for sen cls (using {bert_model_tag})')
        self.bert = BertModel.from_pretrained(bert_model_tag)
        self.classifier = nn.Linear(768, num_classes)  # 768 = bert-base hidden size
        
    def forward(self, **x):
        # force tuple outputs so the pooled output can be taken by index
        x['return_dict'] = False
        
        # with return_dict=False, the last element of BertModel's output tuple
        # is the pooled [CLS] representation
        pool_output = self.bert(**x)[-1]
        
        return self.classifier(pool_output)
    
    
class BertForTokenCls(nn.Module):
    """BERT with a linear head for token-level classification (e.g. NER)."""
    def __init__(self, num_classes):
        super(BertForTokenCls, self).__init__()
        
        logger.info(f'init bert for token cls (using {bert_model_tag})')
        self.bert = BertModel.from_pretrained(bert_model_tag)
        self.classifier = nn.Linear(768, num_classes)
        
    def forward(self, **x):
        x['return_dict'] = False
        
        # the first element of the output tuple is the per-token sequence
        # output (not the pooled output), so each token gets its own logits
        seq_output = self.bert(**x)[0]
        
        return self.classifier(seq_output)


class BertForTranslation(nn.Module):
    """BERT with a linear decoder head mapping each token to vocabulary logits."""
    def __init__(self):
        super(BertForTranslation, self).__init__()
        
        self.bert = BertModel.from_pretrained(bert_model_tag)
        
        vocab_size = BertConfig.from_pretrained(bert_model_tag).vocab_size
        self.decoder = nn.Linear(768, vocab_size)
        
        logger.info(f'init bert for translation (using {bert_model_tag}), vocab size {vocab_size}')
        
        # same initialization as the lm_head of BertGenerationDecoder:
        # https://github.com/huggingface/transformers/blob/66954ea25e342fd451c26ec1c295da0b8692086b/src/transformers/models/bert_generation/modeling_bert_generation.py#L594
        self.decoder.weight.data.normal_(mean=0.0, std=0.02)
        
    def forward(self, **x):
        x['return_dict'] = False
        
        seq_output = self.bert(**x)[0]
        
        return self.decoder(seq_output)


def bert_base_sen_cls(num_classes):
    return BertForSenCls(num_classes)


def bert_base_token_cls(num_classes):
    return BertForTokenCls(num_classes)


def bert_base_translation(no_bert_pooler=False):
    # alternative implementation: return BertForTranslation()
    
    # attach a BertGenerationDecoder head to a standard BertModel encoder
    encoder = BertModel.from_pretrained(bert_model_tag)
    model = BertGenerationDecoder.from_pretrained(bert_model_tag)
    model.bert = encoder
    
    if no_bert_pooler:
        # the generation head only uses the per-token sequence output, so the
        # pooler (and its parameters) can be dropped
        logger.info('replace pooler with nn.Identity()')
        encoder.pooler = nn.Identity()
    
    return model
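

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the model
# definitions above; the dummy sentence and num_classes values are arbitrary
# assumptions). It shows the calling convention: forward() takes tokenizer
# outputs as keyword arguments.
if __name__ == '__main__':
    tokenizer = BertTokenizer.from_pretrained(bert_model_tag)
    inputs = tokenizer(['an example sentence'], return_tensors='pt')
    
    # sentence-level classifier: one logit vector per sentence
    sen_cls = bert_base_sen_cls(num_classes=2)
    with torch.no_grad():
        print(sen_cls(**inputs).shape)  # torch.Size([1, 2])
    
    # token-level classifier: one logit vector per token
    token_cls = bert_base_token_cls(num_classes=9)
    with torch.no_grad():
        print(token_cls(**inputs).shape)  # torch.Size([1, seq_len, 9])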