import torch
from transformers import PreTrainedModel

from .configuration_mlp import MLPConfig


class MLP(PreTrainedModel):
    # Link the model to its custom configuration class so that
    # from_pretrained() can load the config alongside the weights.
    config_class = MLPConfig

    def __init__(self, config):
        super().__init__(config)
        self.input_layer = torch.nn.Linear(config.input_size, config.hidden_size)
        self.mid_layer = torch.nn.Linear(config.hidden_size, config.hidden_size)
        self.output_layer = torch.nn.Linear(config.hidden_size, config.output_size)

    def forward(self, inputs):
        # Two ReLU-activated hidden layers, then a softmax over the output classes.
        x = torch.nn.functional.relu(self.input_layer(inputs))
        x = torch.nn.functional.relu(self.mid_layer(x))
        return torch.nn.functional.softmax(self.output_layer(x), dim=-1)


# Register the class with the AutoModel API so it can be loaded via
# AutoModel.from_pretrained() (with trust_remote_code=True when shared on the Hub).
MLP.register_for_auto_class("AutoModel")
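

# A minimal smoke-test sketch (assumptions: MLPConfig accepts input_size,
# hidden_size and output_size keyword arguments, matching the attributes read in
# __init__ above; because of the relative import, run this as a module, e.g.
# `python -m your_package.modeling_mlp`, where `your_package` is hypothetical).
if __name__ == "__main__":
    config = MLPConfig(input_size=784, hidden_size=256, output_size=10)
    model = MLP(config)

    batch = torch.randn(4, config.input_size)  # dummy batch of 4 flat inputs
    probs = model(batch)                        # shape: (4, config.output_size)
    print(probs.shape)

    # save_pretrained() writes the config and weights so the model can later be
    # reloaded with MLP.from_pretrained("my-mlp").
    model.save_pretrained("my-mlp")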