PeteBleackley committed
Commit a5b7b8e · 1 Parent(s): 8172944

input_embeddings not needed

Files changed (1):
  1. qarac/models/QaracDecoderModel.py +3 -5
qarac/models/QaracDecoderModel.py CHANGED

@@ -11,7 +11,7 @@ import torch
 
 class QaracDecoderHead(torch.nn.Module):
 
-    def __init__(self,config,input_embeddings):
+    def __init__(self,config):
         """
         Creates the Decoder head
 
@@ -28,8 +28,7 @@ class QaracDecoderHead(torch.nn.Module):
         super(QaracDecoderHead,self).__init__()
         self.layer_0 = transformers.models.roberta.modeling_roberta.RobertaLayer(config)
         self.layer_1 = transformers.models.roberta.modeling_roberta.RobertaLayer(config)
-        self.head = transformers.models.roberta.modeling_roberta.RobertaLMHead(config,
-                                                                               input_embeddings)
+        self.head = transformers.models.roberta.modeling_roberta.RobertaLMHead(config)
 
 
 
@@ -95,8 +94,7 @@ class QaracDecoderModel(transformers.RobertaModel,
         super(QaracDecoderModel,self).__init__(config)
         self.decoder_base = transformers.RobertaModel.from_pretrained(model_path,
                                                                       config=config)
-        self.decoder_head = QaracDecoderHead(self.config,
-                                             self.decoder_base.get_input_embeddings())
+        self.decoder_head = QaracDecoderHead(self.config)
         self.tokenizer = tokenizer
 
 
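Why the argument can be dropped: in the versions of transformers I'm aware of, RobertaLMHead takes only a config and builds its own dense, layer-norm and vocabulary-projection layers, so there is nothing for the base model's input embeddings to be passed into. Below is a minimal sketch, not part of the commit, that exercises the head exactly as the post-commit code constructs it; the default RobertaConfig stands in for the real QARAC config.

import torch
import transformers

# Sketch only: a default RobertaConfig is used as a stand-in for the model's
# actual config. RobertaLMHead needs nothing but the config to build its
# projection onto the vocabulary.
config = transformers.RobertaConfig()
head = transformers.models.roberta.modeling_roberta.RobertaLMHead(config)

# Dummy decoder hidden states of shape (batch, seq_len, hidden_size).
hidden_states = torch.zeros(2, 5, config.hidden_size)
logits = head(hidden_states)
print(logits.shape)  # torch.Size([2, 5, 50265]) == (batch, seq_len, vocab_size)

If weight tying between the base model's embedding matrix and the LM head's decoder is still wanted, it would have to be wired up separately (for example via PreTrainedModel.tie_weights); this commit does not do that.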