Spaces:
Running
Running
File size: 5,180 Bytes
85ec4af |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Rhizome
# Version beta 0.0, August 2023
# Property of IBM Research, Accelerated Discovery
#
"""
PLEASE NOTE THIS IMPLEMENTATION INCLUDES THE ORIGINAL SOURCE CODE (AND SOME ADAPTATIONS)
OF THE MHG IMPLEMENTATION OF HIROSHI KAJINO AT IBM TRL ALREADY PUBLICLY AVAILABLE.
THIS MIGHT INFLUENCE THE DECISION OF THE FINAL LICENSE SO CAREFUL CHECK NEEDS BE DONE.
"""
""" Title """
__author__ = "Hiroshi Kajino <[email protected]>"
__copyright__ = "(c) Copyright IBM Corp. 2018"
__version__ = "0.1"
__date__ = "Aug 9 2018"
import abc
import numpy as np
import torch
from torch import nn
class DecoderBase(nn.Module):
    """Common interface for one-step sequence decoders.

    Subclasses maintain ``self.hidden_dict``, a mapping from hidden-state
    names (e.g. 'h', and 'c' for LSTMs) to recurrent-state tensors of shape
    (num_layers, batch_size, hidden_dim).
    """

    def __init__(self):
        super().__init__()
        # name -> hidden-state tensor; populated by the subclass's init_hidden()
        self.hidden_dict = {}

    @abc.abstractmethod
    def forward_one_step(self, tgt_emb_in):
        ''' one-step forward model

        Parameters
        ----------
        tgt_emb_in : Tensor, shape (batch_size, input_dim)

        Returns
        -------
        Tensor, shape (batch_size, hidden_dim)
        '''
        tgt_emb_out = None
        return tgt_emb_out

    @abc.abstractmethod
    def init_hidden(self):
        ''' initialize the hidden states
        '''
        pass

    def feed_hidden(self, hidden_dict_0):
        ''' seed layer 0 of every registered hidden state.

        This is a concrete implementation shared by all subclasses, so it is
        NOT abstract. (The original @abc.abstractmethod decorator was inert
        anyway — nn.Module's metaclass is not ABCMeta — but it misstated the
        contract, since no subclass overrides this method.)

        Parameters
        ----------
        hidden_dict_0 : dict
            maps each name in self.hidden_dict to a tensor that is assigned
            to the first layer (index 0) of that hidden state.
        '''
        for each_hidden in self.hidden_dict.keys():
            self.hidden_dict[each_hidden][0] = hidden_dict_0[each_hidden]
class GRUDecoder(DecoderBase):
    """Unidirectional GRU decoder that advances one embedding per call."""

    def __init__(self, input_dim: int, hidden_dim: int, num_layers: int,
                 dropout: float, batch_size: int, use_gpu: bool,
                 no_dropout=False):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.batch_size = batch_size
        self.use_gpu = use_gpu
        # dropout is suppressed entirely when no_dropout is requested
        effective_dropout = 0 if no_dropout else self.dropout
        self.model = nn.GRU(input_size=self.input_dim,
                            hidden_size=self.hidden_dim,
                            num_layers=self.num_layers,
                            batch_first=True,
                            bidirectional=False,
                            dropout=effective_dropout)
        if self.use_gpu:
            self.model.cuda()
        self.init_hidden()

    def init_hidden(self):
        """Reset the recurrent state 'h' to zeros (on GPU when use_gpu)."""
        state = torch.zeros((self.num_layers,
                             self.batch_size,
                             self.hidden_dim),
                            requires_grad=False)
        self.hidden_dict['h'] = state.cuda() if self.use_gpu else state

    def forward_one_step(self, tgt_emb_in):
        ''' one-step forward model

        Parameters
        ----------
        tgt_emb_in : Tensor, shape (batch_size, input_dim)

        Returns
        -------
        Tensor, shape (batch_size, 1, hidden_dim)
            GRU output for the single time step (batch_first layout).
        '''
        step_input = tgt_emb_in.view(self.batch_size, 1, -1)
        step_output, next_state = self.model(step_input,
                                             self.hidden_dict['h'])
        self.hidden_dict['h'] = next_state
        return step_output
class LSTMDecoder(DecoderBase):
    """Unidirectional LSTM decoder that advances one embedding per call."""

    def __init__(self, input_dim: int, hidden_dim: int, num_layers: int,
                 dropout: float, batch_size: int, use_gpu: bool,
                 no_dropout=False):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.batch_size = batch_size
        self.use_gpu = use_gpu
        self.model = nn.LSTM(input_size=self.input_dim,
                             hidden_size=self.hidden_dim,
                             num_layers=self.num_layers,
                             batch_first=True,
                             bidirectional=False,
                             dropout=self.dropout if not no_dropout else 0)
        if self.use_gpu:
            self.model.cuda()
        self.init_hidden()

    def init_hidden(self):
        """Reset both recurrent states 'h' and 'c' to zeros (on GPU when use_gpu)."""
        self.hidden_dict['h'] = torch.zeros((self.num_layers,
                                             self.batch_size,
                                             self.hidden_dim),
                                            requires_grad=False)
        self.hidden_dict['c'] = torch.zeros((self.num_layers,
                                             self.batch_size,
                                             self.hidden_dim),
                                            requires_grad=False)
        if self.use_gpu:
            for each_hidden in self.hidden_dict.keys():
                self.hidden_dict[each_hidden] = self.hidden_dict[each_hidden].cuda()

    def forward_one_step(self, tgt_emb_in):
        ''' one-step forward model

        Parameters
        ----------
        tgt_emb_in : Tensor, shape (batch_size, input_dim)

        Returns
        -------
        Tensor, shape (batch_size, 1, hidden_dim)
            LSTM output for the single time step (batch_first layout).
        '''
        # BUG FIX: nn.LSTM takes the recurrent state as one tuple (h_0, c_0)
        # and returns (output, (h_n, c_n)). The original code passed h and c
        # as separate positional arguments and unpacked three return values,
        # both of which raise at runtime.
        tgt_hidden_out, (self.hidden_dict['h'], self.hidden_dict['c']) \
            = self.model(tgt_emb_in.view(self.batch_size, 1, -1),
                         (self.hidden_dict['h'], self.hidden_dict['c']))
        return tgt_hidden_out
|