PeteBleackley committed
Commit 684c1d8 · 1 Parent(s): 69cf4c5

Removed unnecessary parameters

Files changed (1):
  1. qarac/models/QaracDecoderModel.py +4 -16
qarac/models/QaracDecoderModel.py CHANGED

@@ -37,7 +37,7 @@ class QaracDecoderHead(torch.nn.Module):
     def forward(self,
                 vector,
                 hidden_states,
-                attention_mask=None,training=False):
+                attention_mask=None):
         """
         Predicts text from vector and hidden states of base model
 
@@ -58,21 +58,9 @@ class QaracDecoderHead(torch.nn.Module):
                                              1)),
                                  attention_mask])
         l0 = self.layer_0(vectors,
-                          attentions,
-                          None,
-                          None,
-                          None,
-                          None,
-                          False,
-                          training)
+                          attentions)
         return self.head(self.layer_1(l0[0][:,1:],
-                                      attention_mask,
-                                      None,
-                                      None,
-                                      None,
-                                      None,
-                                      False,
-                                      training)[0])
+                                      attention_mask)[0])
 
 class QaracDecoderModel(transformers.RobertaModel,
                         transformers.generation_utils.GenerationMixin):
@@ -100,7 +88,7 @@ class QaracDecoderModel(transformers.RobertaModel,
 
     def forward(self,inputs,**kwargs):
         """
-        Predicts text from inputs
+        Predicts text from inputs
 
         Parameters
         ----------
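
Why those arguments could be dropped: a minimal sketch, under the assumption that layer_0 and layer_1 are stock RobertaLayer modules from transformers (the diff does not show how they are constructed). RobertaLayer.forward already defaults head_mask, encoder_hidden_states, encoder_attention_mask, and past_key_value to None and output_attentions to False, so spelling those out positionally changes nothing; the trailing training flag is a Keras-style argument that a plain torch module would not accept at all, which is likely why it was removed (torch modules switch behaviour via train()/eval() instead).

import torch
from transformers import RobertaConfig
from transformers.models.roberta.modeling_roberta import RobertaLayer

# Hypothetical stand-in for self.layer_0 / self.layer_1 (an assumption:
# the layers' construction is outside this diff).
config = RobertaConfig(hidden_size=64, num_attention_heads=4,
                       intermediate_size=128)
layer = RobertaLayer(config)
layer.eval()  # torch sets train/eval mode on the module, not per call

hidden = torch.randn(2, 10, 64)  # (batch, sequence, hidden_size)

with torch.no_grad():
    # Old style: every optional argument passed explicitly at its default
    # (attention_mask, head_mask, encoder_hidden_states,
    # encoder_attention_mask, past_key_value = None; output_attentions = False).
    old = layer(hidden, None, None, None, None, None, False)
    # New style, as in this commit: rely on the defaults.
    new = layer(hidden)

assert torch.equal(old[0], new[0])  # same output, so the arguments were redundant

The assert holds because both calls run the identical computation; only the call site gets shorter, which is all this commit changes.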